diff --git a/Jenkinsfile b/Jenkinsfile
index 74eee5cbfff..8dce5e1b270 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -38,6 +38,21 @@ pipeline {
MVN_SHOW_TIMESTAMPS="-Dorg.slf4j.simpleLogger.showDateTime=true -Dorg.slf4j.simpleLogger.dateTimeFormat=HH:mm:ss,SSS"
CI = true
LC_CTYPE = 'en_US.UTF-8'
+
+ POSTGRES_MODULES = 'backends-common/postgres,' +
+ 'mailbox/postgres,' +
+ 'server/blob/blob-postgres,' +
+ 'server/data/data-postgres,' +
+ 'server/data/data-jmap-postgres,' +
+ 'server/container/guice/postgres-common,' +
+ 'server/container/guice/mailbox-postgres,' +
+ 'server/apps/postgres-app,' +
+ 'server/protocols/jmap-rfc-8621-integration-tests/postgres-jmap-rfc-8621-integration-tests,' +
+ 'server/protocols/webadmin-integration-test/postgres-webadmin-integration-test,' +
+ 'server/task/task-postgres,' +
+ 'mpt/impl/imap-mailbox/postgres,' +
+ 'event-bus/postgres,' +
+ 'mailbox/plugin/deleted-messages-vault-postgres'
}
tools {
@@ -94,7 +109,7 @@ pipeline {
stage('Stable Tests') {
steps {
echo 'Running tests'
- sh 'mvn -B -e -fae test ${MVN_SHOW_TIMESTAMPS} -P ci-test ${MVN_LOCAL_REPO_OPT} -Dassembly.skipAssembly=true jacoco:report-aggregate@jacoco-report'
+ sh 'mvn -B -e -fae test ${MVN_SHOW_TIMESTAMPS} -P ci-test ${MVN_LOCAL_REPO_OPT} -pl ${POSTGRES_MODULES} -Dassembly.skipAssembly=true jacoco:report-aggregate@jacoco-report'
}
post {
always {
@@ -115,7 +130,7 @@ pipeline {
steps {
echo 'Running unstable tests'
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
- sh 'mvn -B -e -fae test -Punstable-tests ${MVN_SHOW_TIMESTAMPS} -P ci-test ${MVN_LOCAL_REPO_OPT} -Dassembly.skipAssembly=true'
+ sh 'mvn -B -e -fae test -Punstable-tests ${MVN_SHOW_TIMESTAMPS} -P ci-test ${MVN_LOCAL_REPO_OPT} -pl ${POSTGRES_MODULES} -Dassembly.skipAssembly=true'
}
}
post {
diff --git a/backends-common/cassandra/src/main/java/org/apache/james/backends/cassandra/components/CassandraQuotaCurrentValueDao.java b/backends-common/cassandra/src/main/java/org/apache/james/backends/cassandra/components/CassandraQuotaCurrentValueDao.java
index 95618be4f25..aec997b27b4 100644
--- a/backends-common/cassandra/src/main/java/org/apache/james/backends/cassandra/components/CassandraQuotaCurrentValueDao.java
+++ b/backends-common/cassandra/src/main/java/org/apache/james/backends/cassandra/components/CassandraQuotaCurrentValueDao.java
@@ -30,8 +30,6 @@
import static org.apache.james.backends.cassandra.components.CassandraQuotaCurrentValueTable.QUOTA_TYPE;
import static org.apache.james.backends.cassandra.components.CassandraQuotaCurrentValueTable.TABLE_NAME;
-import java.util.Objects;
-
import jakarta.inject.Inject;
import org.apache.james.backends.cassandra.utils.CassandraAsyncExecutor;
@@ -47,66 +45,12 @@
import com.datastax.oss.driver.api.querybuilder.delete.Delete;
import com.datastax.oss.driver.api.querybuilder.select.Select;
import com.datastax.oss.driver.api.querybuilder.update.Update;
-import com.google.common.base.MoreObjects;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
public class CassandraQuotaCurrentValueDao {
- public static class QuotaKey {
-
- public static QuotaKey of(QuotaComponent component, String identifier, QuotaType quotaType) {
- return new QuotaKey(component, identifier, quotaType);
- }
-
- private final QuotaComponent quotaComponent;
- private final String identifier;
- private final QuotaType quotaType;
-
- public QuotaComponent getQuotaComponent() {
- return quotaComponent;
- }
-
- public String getIdentifier() {
- return identifier;
- }
-
- public QuotaType getQuotaType() {
- return quotaType;
- }
-
- private QuotaKey(QuotaComponent quotaComponent, String identifier, QuotaType quotaType) {
- this.quotaComponent = quotaComponent;
- this.identifier = identifier;
- this.quotaType = quotaType;
- }
-
- @Override
- public final int hashCode() {
- return Objects.hash(quotaComponent, identifier, quotaType);
- }
-
- @Override
- public final boolean equals(Object o) {
- if (o instanceof QuotaKey) {
- QuotaKey other = (QuotaKey) o;
- return Objects.equals(quotaComponent, other.quotaComponent)
- && Objects.equals(identifier, other.identifier)
- && Objects.equals(quotaType, other.quotaType);
- }
- return false;
- }
-
- public String toString() {
- return MoreObjects.toStringHelper(this)
- .add("quotaComponent", quotaComponent)
- .add("identifier", identifier)
- .add("quotaType", quotaType)
- .toString();
- }
- }
-
private static final Logger LOGGER = LoggerFactory.getLogger(CassandraQuotaCurrentValueDao.class);
private final CassandraAsyncExecutor queryExecutor;
@@ -126,7 +70,7 @@ public CassandraQuotaCurrentValueDao(CqlSession session) {
this.deleteQuotaCurrentValueStatement = session.prepare(deleteQuotaCurrentValueStatement().build());
}
- public Mono increase(QuotaKey quotaKey, long amount) {
+ public Mono increase(QuotaCurrentValue.Key quotaKey, long amount) {
return queryExecutor.executeVoid(increaseStatement.bind()
.setString(QUOTA_COMPONENT, quotaKey.getQuotaComponent().getValue())
.setString(IDENTIFIER, quotaKey.getIdentifier())
@@ -139,7 +83,7 @@ public Mono increase(QuotaKey quotaKey, long amount) {
});
}
- public Mono decrease(QuotaKey quotaKey, long amount) {
+ public Mono decrease(QuotaCurrentValue.Key quotaKey, long amount) {
return queryExecutor.executeVoid(decreaseStatement.bind()
.setString(QUOTA_COMPONENT, quotaKey.getQuotaComponent().getValue())
.setString(IDENTIFIER, quotaKey.getIdentifier())
@@ -152,7 +96,7 @@ public Mono decrease(QuotaKey quotaKey, long amount) {
});
}
- public Mono getQuotaCurrentValue(QuotaKey quotaKey) {
+ public Mono getQuotaCurrentValue(QuotaCurrentValue.Key quotaKey) {
return queryExecutor.executeSingleRow(getQuotaCurrentValueStatement.bind()
.setString(QUOTA_COMPONENT, quotaKey.getQuotaComponent().getValue())
.setString(IDENTIFIER, quotaKey.getIdentifier())
@@ -160,7 +104,7 @@ public Mono getQuotaCurrentValue(QuotaKey quotaKey) {
.map(row -> convertRowToModel(row));
}
- public Mono deleteQuotaCurrentValue(QuotaKey quotaKey) {
+ public Mono deleteQuotaCurrentValue(QuotaCurrentValue.Key quotaKey) {
return queryExecutor.executeVoid(deleteQuotaCurrentValueStatement.bind()
.setString(QUOTA_COMPONENT, quotaKey.getQuotaComponent().getValue())
.setString(IDENTIFIER, quotaKey.getIdentifier())
diff --git a/backends-common/cassandra/src/main/java/org/apache/james/backends/cassandra/components/CassandraQuotaLimitDao.java b/backends-common/cassandra/src/main/java/org/apache/james/backends/cassandra/components/CassandraQuotaLimitDao.java
index c43442ac5ff..2b3090a6403 100644
--- a/backends-common/cassandra/src/main/java/org/apache/james/backends/cassandra/components/CassandraQuotaLimitDao.java
+++ b/backends-common/cassandra/src/main/java/org/apache/james/backends/cassandra/components/CassandraQuotaLimitDao.java
@@ -31,8 +31,6 @@
import static org.apache.james.backends.cassandra.components.CassandraQuotaLimitTable.QUOTA_TYPE;
import static org.apache.james.backends.cassandra.components.CassandraQuotaLimitTable.TABLE_NAME;
-import java.util.Objects;
-
import jakarta.inject.Inject;
import org.apache.james.backends.cassandra.utils.CassandraAsyncExecutor;
@@ -47,74 +45,11 @@
import com.datastax.oss.driver.api.querybuilder.delete.Delete;
import com.datastax.oss.driver.api.querybuilder.insert.Insert;
import com.datastax.oss.driver.api.querybuilder.select.Select;
-import com.google.common.base.MoreObjects;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
public class CassandraQuotaLimitDao {
-
- public static class QuotaLimitKey {
-
- public static QuotaLimitKey of(QuotaComponent component, QuotaScope scope, String identifier, QuotaType quotaType) {
- return new QuotaLimitKey(component, scope, identifier, quotaType);
- }
-
- private final QuotaComponent quotaComponent;
- private final QuotaScope quotaScope;
- private final String identifier;
- private final QuotaType quotaType;
-
- public QuotaComponent getQuotaComponent() {
- return quotaComponent;
- }
-
- public QuotaScope getQuotaScope() {
- return quotaScope;
- }
-
- public String getIdentifier() {
- return identifier;
- }
-
- public QuotaType getQuotaType() {
- return quotaType;
- }
-
- private QuotaLimitKey(QuotaComponent quotaComponent, QuotaScope quotaScope, String identifier, QuotaType quotaType) {
- this.quotaComponent = quotaComponent;
- this.quotaScope = quotaScope;
- this.identifier = identifier;
- this.quotaType = quotaType;
- }
-
- @Override
- public final int hashCode() {
- return Objects.hash(quotaComponent, quotaScope, identifier, quotaType);
- }
-
- @Override
- public final boolean equals(Object o) {
- if (o instanceof QuotaLimitKey) {
- QuotaLimitKey other = (QuotaLimitKey) o;
- return Objects.equals(quotaComponent, other.quotaComponent)
- && Objects.equals(quotaScope, other.quotaScope)
- && Objects.equals(identifier, other.identifier)
- && Objects.equals(quotaType, other.quotaType);
- }
- return false;
- }
-
- public String toString() {
- return MoreObjects.toStringHelper(this)
- .add("quotaComponent", quotaComponent)
- .add("quotaScope", quotaScope)
- .add("identifier", identifier)
- .add("quotaType", quotaType)
- .toString();
- }
- }
-
private final CassandraAsyncExecutor queryExecutor;
private final PreparedStatement getQuotaLimitStatement;
private final PreparedStatement getQuotaLimitsStatement;
@@ -130,7 +65,7 @@ public CassandraQuotaLimitDao(CqlSession session) {
this.deleteQuotaLimitStatement = session.prepare((deleteQuotaLimitStatement().build()));
}
- public Mono getQuotaLimit(QuotaLimitKey quotaKey) {
+ public Mono getQuotaLimit(QuotaLimit.QuotaLimitKey quotaKey) {
return queryExecutor.executeSingleRow(getQuotaLimitStatement.bind()
.setString(QUOTA_COMPONENT, quotaKey.getQuotaComponent().getValue())
.setString(QUOTA_SCOPE, quotaKey.getQuotaScope().getValue())
@@ -156,7 +91,7 @@ public Mono setQuotaLimit(QuotaLimit quotaLimit) {
.set(QUOTA_LIMIT, quotaLimit.getQuotaLimit().orElse(null), Long.class));
}
- public Mono deleteQuotaLimit(QuotaLimitKey quotaKey) {
+ public Mono deleteQuotaLimit(QuotaLimit.QuotaLimitKey quotaKey) {
return queryExecutor.executeVoid(deleteQuotaLimitStatement.bind()
.setString(QUOTA_COMPONENT, quotaKey.getQuotaComponent().getValue())
.setString(QUOTA_SCOPE, quotaKey.getQuotaScope().getValue())
@@ -203,7 +138,8 @@ private QuotaLimit convertRowToModel(Row row) {
.quotaScope(QuotaScope.of(row.get(QUOTA_SCOPE, String.class)))
.identifier(row.get(IDENTIFIER, String.class))
.quotaType(QuotaType.of(row.get(QUOTA_TYPE, String.class)))
- .quotaLimit(row.get(QUOTA_LIMIT, Long.class)).build();
+ .quotaLimit(row.get(QUOTA_LIMIT, Long.class))
+ .build();
}
}
\ No newline at end of file
diff --git a/backends-common/cassandra/src/test/java/org/apache/james/backends/cassandra/quota/CassandraQuotaCurrentValueDaoTest.java b/backends-common/cassandra/src/test/java/org/apache/james/backends/cassandra/quota/CassandraQuotaCurrentValueDaoTest.java
index e22b6d4c923..ae7817e42b0 100644
--- a/backends-common/cassandra/src/test/java/org/apache/james/backends/cassandra/quota/CassandraQuotaCurrentValueDaoTest.java
+++ b/backends-common/cassandra/src/test/java/org/apache/james/backends/cassandra/quota/CassandraQuotaCurrentValueDaoTest.java
@@ -26,7 +26,6 @@
import org.apache.james.backends.cassandra.CassandraClusterExtension;
import org.apache.james.backends.cassandra.components.CassandraMutualizedQuotaModule;
import org.apache.james.backends.cassandra.components.CassandraQuotaCurrentValueDao;
-import org.apache.james.backends.cassandra.components.CassandraQuotaCurrentValueDao.QuotaKey;
import org.apache.james.core.quota.QuotaComponent;
import org.apache.james.core.quota.QuotaCurrentValue;
import org.apache.james.core.quota.QuotaType;
@@ -36,7 +35,7 @@
import org.junit.jupiter.api.extension.RegisterExtension;
public class CassandraQuotaCurrentValueDaoTest {
- private static final QuotaKey QUOTA_KEY = QuotaKey.of(QuotaComponent.MAILBOX, "james@abc.com", QuotaType.SIZE);
+ private static final QuotaCurrentValue.Key QUOTA_KEY = QuotaCurrentValue.Key.of(QuotaComponent.MAILBOX, "james@abc.com", QuotaType.SIZE);
private CassandraQuotaCurrentValueDao cassandraQuotaCurrentValueDao;
@@ -92,7 +91,7 @@ void decreaseQuotaCurrentValueShouldDecreaseValueSuccessfully() {
@Test
void deleteQuotaCurrentValueShouldDeleteSuccessfully() {
- QuotaKey quotaKey = QuotaKey.of(QuotaComponent.MAILBOX, "andre@abc.com", QuotaType.SIZE);
+ QuotaCurrentValue.Key quotaKey = QuotaCurrentValue.Key.of(QuotaComponent.MAILBOX, "andre@abc.com", QuotaType.SIZE);
cassandraQuotaCurrentValueDao.increase(quotaKey, 100L).block();
cassandraQuotaCurrentValueDao.deleteQuotaCurrentValue(quotaKey).block();
@@ -125,7 +124,7 @@ void decreaseQuotaCurrentValueShouldNotThrowExceptionWhenQueryExecutorThrowExcep
@Test
void getQuotasByComponentShouldGetAllQuotaTypesSuccessfully() {
- QuotaKey countQuotaKey = QuotaKey.of(QuotaComponent.MAILBOX, "james@abc.com", QuotaType.COUNT);
+ QuotaCurrentValue.Key countQuotaKey = QuotaCurrentValue.Key.of(QuotaComponent.MAILBOX, "james@abc.com", QuotaType.COUNT);
QuotaCurrentValue expectedQuotaSize = QuotaCurrentValue.builder().quotaComponent(QUOTA_KEY.getQuotaComponent())
.identifier(QUOTA_KEY.getIdentifier()).quotaType(QUOTA_KEY.getQuotaType()).currentValue(100L).build();
diff --git a/backends-common/cassandra/src/test/java/org/apache/james/backends/cassandra/quota/CassandraQuotaLimitDaoTest.java b/backends-common/cassandra/src/test/java/org/apache/james/backends/cassandra/quota/CassandraQuotaLimitDaoTest.java
index 2c421471756..7fa6f47a462 100644
--- a/backends-common/cassandra/src/test/java/org/apache/james/backends/cassandra/quota/CassandraQuotaLimitDaoTest.java
+++ b/backends-common/cassandra/src/test/java/org/apache/james/backends/cassandra/quota/CassandraQuotaLimitDaoTest.java
@@ -61,7 +61,7 @@ void setQuotaLimitShouldSaveObjectSuccessfully() {
QuotaLimit expected = QuotaLimit.builder().quotaComponent(QuotaComponent.MAILBOX).quotaScope(QuotaScope.DOMAIN).identifier("A").quotaType(QuotaType.COUNT).quotaLimit(100L).build();
cassandraQuotaLimitDao.setQuotaLimit(expected).block();
- assertThat(cassandraQuotaLimitDao.getQuotaLimit(CassandraQuotaLimitDao.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, "A", QuotaType.COUNT)).block())
+ assertThat(cassandraQuotaLimitDao.getQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, "A", QuotaType.COUNT)).block())
.isEqualTo(expected);
}
@@ -70,7 +70,7 @@ void setQuotaLimitWithEmptyQuotaLimitValueShouldNotThrowNullPointerException() {
QuotaLimit emptyQuotaLimitValue = QuotaLimit.builder().quotaComponent(QuotaComponent.MAILBOX).quotaScope(QuotaScope.DOMAIN).identifier("A").quotaType(QuotaType.COUNT).build();
cassandraQuotaLimitDao.setQuotaLimit(emptyQuotaLimitValue).block();
- assertThat(cassandraQuotaLimitDao.getQuotaLimit(CassandraQuotaLimitDao.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, "A", QuotaType.COUNT)).block())
+ assertThat(cassandraQuotaLimitDao.getQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, "A", QuotaType.COUNT)).block())
.isEqualTo(emptyQuotaLimitValue);
}
@@ -79,7 +79,7 @@ void setQuotaLimitShouldSaveObjectSuccessfullyWhenLimitIsMinusOne() {
QuotaLimit expected = QuotaLimit.builder().quotaComponent(QuotaComponent.MAILBOX).quotaScope(QuotaScope.DOMAIN).identifier("A").quotaType(QuotaType.COUNT).quotaLimit(-1L).build();
cassandraQuotaLimitDao.setQuotaLimit(expected).block();
- assertThat(cassandraQuotaLimitDao.getQuotaLimit(CassandraQuotaLimitDao.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, "A", QuotaType.COUNT)).block())
+ assertThat(cassandraQuotaLimitDao.getQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, "A", QuotaType.COUNT)).block())
.isEqualTo(expected);
}
@@ -87,9 +87,9 @@ void setQuotaLimitShouldSaveObjectSuccessfullyWhenLimitIsMinusOne() {
void deleteQuotaLimitShouldDeleteObjectSuccessfully() {
QuotaLimit quotaLimit = QuotaLimit.builder().quotaComponent(QuotaComponent.MAILBOX).quotaScope(QuotaScope.DOMAIN).identifier("A").quotaType(QuotaType.COUNT).quotaLimit(100L).build();
cassandraQuotaLimitDao.setQuotaLimit(quotaLimit).block();
- cassandraQuotaLimitDao.deleteQuotaLimit(CassandraQuotaLimitDao.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, "A", QuotaType.COUNT)).block();
+ cassandraQuotaLimitDao.deleteQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, "A", QuotaType.COUNT)).block();
- assertThat(cassandraQuotaLimitDao.getQuotaLimit(CassandraQuotaLimitDao.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, "A", QuotaType.COUNT)).block())
+ assertThat(cassandraQuotaLimitDao.getQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, "A", QuotaType.COUNT)).block())
.isNull();
}
diff --git a/backends-common/pom.xml b/backends-common/pom.xml
index a0ae2dc5827..0f46ba17d7a 100644
--- a/backends-common/pom.xml
+++ b/backends-common/pom.xml
@@ -37,6 +37,7 @@
cassandrajpaopensearch
+ postgrespulsarrabbitmqredis
diff --git a/backends-common/postgres/pom.xml b/backends-common/postgres/pom.xml
new file mode 100644
index 00000000000..3687a454ee5
--- /dev/null
+++ b/backends-common/postgres/pom.xml
@@ -0,0 +1,112 @@
+
+
+
+ 4.0.0
+
+ org.apache.james
+ james-backends-common
+ 3.9.0-SNAPSHOT
+
+
+ apache-james-backends-postgres
+ Apache James :: Backends Common :: Postgres
+
+
+ 3.19.9
+ 1.0.5.RELEASE
+
+
+
+
+ ${james.groupId}
+ james-core
+
+
+ ${james.groupId}
+ james-server-guice-common
+ test-jar
+ test
+
+
+ ${james.groupId}
+ james-server-lifecycle-api
+
+
+ ${james.groupId}
+ james-server-util
+
+
+ ${james.groupId}
+ metrics-api
+
+
+ ${james.groupId}
+ testing-base
+ test
+
+
+ io.r2dbc
+ r2dbc-pool
+ 1.0.1.RELEASE
+
+
+ jakarta.annotation
+ jakarta.annotation-api
+
+
+ jakarta.inject
+ jakarta.inject-api
+
+
+ org.apache.commons
+ commons-configuration2
+
+
+ org.jooq
+ jooq
+ ${jooq.version}
+
+
+ org.jooq
+ jooq-postgres-extensions
+ ${jooq.version}
+
+
+ org.postgresql
+ r2dbc-postgresql
+ ${r2dbc.postgresql.version}
+
+
+ org.testcontainers
+ junit-jupiter
+ test
+
+
+ org.testcontainers
+ postgresql
+ test
+
+
+ org.testcontainers
+ testcontainers
+ test
+
+
+
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresCommons.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresCommons.java
new file mode 100644
index 00000000000..88201ac066c
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresCommons.java
@@ -0,0 +1,96 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+import java.time.Instant;
+import java.time.LocalDateTime;
+import java.time.OffsetDateTime;
+import java.time.ZoneId;
+import java.time.ZoneOffset;
+import java.time.ZonedDateTime;
+import java.util.Date;
+import java.util.Optional;
+import java.util.function.Function;
+
+import org.jooq.DataType;
+import org.jooq.Field;
+import org.jooq.Record;
+import org.jooq.Table;
+import org.jooq.impl.DSL;
+import org.jooq.impl.DefaultDataType;
+import org.jooq.impl.SQLDataType;
+import org.jooq.postgres.extensions.bindings.HstoreBinding;
+import org.jooq.postgres.extensions.types.Hstore;
+
+public class PostgresCommons {
+
+ public interface DataTypes {
+
+ // hstore
+ DataType HSTORE = DefaultDataType.getDefaultDataType("hstore").asConvertedDataType(new HstoreBinding());
+
+ // timestamp(6)
+ DataType TIMESTAMP = SQLDataType.LOCALDATETIME(6);
+
+ DataType TIMESTAMP_WITH_TIMEZONE = SQLDataType.TIMESTAMPWITHTIMEZONE(6);
+
+ // text[]
+ DataType STRING_ARRAY = SQLDataType.VARCHAR.getArrayDataType();
+ }
+
+
+ public static Field tableField(Table table, Field field) {
+ return DSL.field(table.getName() + "." + field.getName(), field.getDataType());
+ }
+
+ public static final Function DATE_TO_LOCAL_DATE_TIME = date -> Optional.ofNullable(date)
+ .map(value -> LocalDateTime.ofInstant(value.toInstant(), ZoneOffset.UTC))
+ .orElse(null);
+
+ public static final Function ZONED_DATE_TIME_TO_LOCAL_DATE_TIME = date -> Optional.ofNullable(date)
+ .map(value -> value.withZoneSameInstant(ZoneOffset.UTC).toLocalDateTime())
+ .orElse(null);
+
+ public static final Function INSTANT_TO_LOCAL_DATE_TIME = instant -> Optional.ofNullable(instant)
+ .map(value -> LocalDateTime.ofInstant(instant, ZoneOffset.UTC))
+ .orElse(null);
+
+ public static final Function LOCAL_DATE_TIME_DATE_FUNCTION = localDateTime -> Optional.ofNullable(localDateTime)
+ .map(value -> value.toInstant(ZoneOffset.UTC))
+ .map(Date::from)
+ .orElse(null);
+
+ public static final Function LOCAL_DATE_TIME_ZONED_DATE_TIME_FUNCTION = localDateTime -> Optional.ofNullable(localDateTime)
+ .map(value -> value.atZone(ZoneId.of("UTC")))
+ .orElse(null);
+
+ public static final Function OFFSET_DATE_TIME_ZONED_DATE_TIME_FUNCTION = offsetDateTime -> Optional.ofNullable(offsetDateTime)
+ .map(value -> value.atZoneSameInstant(ZoneId.of("UTC")))
+ .orElse(null);
+
+ public static final Function LOCAL_DATE_TIME_INSTANT_FUNCTION = localDateTime -> Optional.ofNullable(localDateTime)
+ .map(value -> value.toInstant(ZoneOffset.UTC))
+ .orElse(null);
+
+ public static final Function, Field>> UNNEST_FIELD = field -> DSL.function("unnest", field.getType().getComponentType(), field);
+
+ public static final int IN_CLAUSE_MAX_SIZE = 32;
+
+}
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresConfiguration.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresConfiguration.java
new file mode 100644
index 00000000000..29e5d904762
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresConfiguration.java
@@ -0,0 +1,404 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+import java.time.Duration;
+import java.time.temporal.ChronoUnit;
+import java.util.Objects;
+import java.util.Optional;
+
+import org.apache.commons.configuration2.Configuration;
+import org.apache.james.util.DurationParser;
+
+import com.google.common.base.Preconditions;
+
+import io.r2dbc.postgresql.client.SSLMode;
+
+public class PostgresConfiguration {
+ public static final String POSTGRES_CONFIGURATION_NAME = "postgres";
+
+ public static final String DATABASE_NAME = "database.name";
+ public static final String DATABASE_NAME_DEFAULT_VALUE = "postgres";
+ public static final String DATABASE_SCHEMA = "database.schema";
+ public static final String DATABASE_SCHEMA_DEFAULT_VALUE = "public";
+ public static final String HOST = "database.host";
+ public static final String HOST_DEFAULT_VALUE = "localhost";
+ public static final String PORT = "database.port";
+ public static final int PORT_DEFAULT_VALUE = 5432;
+ public static final String USERNAME = "database.username";
+ public static final String PASSWORD = "database.password";
+ public static final String BY_PASS_RLS_USERNAME = "database.by-pass-rls.username";
+ public static final String BY_PASS_RLS_PASSWORD = "database.by-pass-rls.password";
+ public static final String RLS_ENABLED = "row.level.security.enabled";
+ public static final String POOL_INITIAL_SIZE = "pool.initial.size";
+ public static final int POOL_INITIAL_SIZE_DEFAULT_VALUE = 10;
+ public static final String POOL_MAX_SIZE = "pool.max.size";
+ public static final int POOL_MAX_SIZE_DEFAULT_VALUE = 15;
+ public static final String BY_PASS_RLS_POOL_INITIAL_SIZE = "by-pass-rls.pool.initial.size";
+ public static final int BY_PASS_RLS_POOL_INITIAL_SIZE_DEFAULT_VALUE = 5;
+ public static final String BY_PASS_RLS_POOL_MAX_SIZE = "by-pass-rls.pool.max.size";
+ public static final int BY_PASS_RLS_POOL_MAX_SIZE_DEFAULT_VALUE = 10;
+ public static final String SSL_MODE = "ssl.mode";
+ public static final String SSL_MODE_DEFAULT_VALUE = "allow";
+ public static final String JOOQ_REACTIVE_TIMEOUT = "jooq.reactive.timeout";
+ public static final Duration JOOQ_REACTIVE_TIMEOUT_DEFAULT_VALUE = Duration.ofSeconds(10);
+
+ public static class Credential {
+ private final String username;
+ private final String password;
+
+
+ public Credential(String username, String password) {
+ this.username = username;
+ this.password = password;
+ }
+
+ public String getUsername() {
+ return username;
+ }
+
+ public String getPassword() {
+ return password;
+ }
+ }
+
+ public static class Builder {
+ private Optional databaseName = Optional.empty();
+ private Optional databaseSchema = Optional.empty();
+ private Optional host = Optional.empty();
+ private Optional port = Optional.empty();
+ private Optional username = Optional.empty();
+ private Optional password = Optional.empty();
+ private Optional byPassRLSUser = Optional.empty();
+ private Optional byPassRLSPassword = Optional.empty();
+ private Optional rowLevelSecurityEnabled = Optional.empty();
+ private Optional poolInitialSize = Optional.empty();
+ private Optional poolMaxSize = Optional.empty();
+ private Optional byPassRLSPoolInitialSize = Optional.empty();
+ private Optional byPassRLSPoolMaxSize = Optional.empty();
+ private Optional sslMode = Optional.empty();
+ private Optional jooqReactiveTimeout = Optional.empty();
+
+ public Builder databaseName(String databaseName) {
+ this.databaseName = Optional.of(databaseName);
+ return this;
+ }
+
+ public Builder databaseName(Optional databaseName) {
+ this.databaseName = databaseName;
+ return this;
+ }
+
+ public Builder databaseSchema(String databaseSchema) {
+ this.databaseSchema = Optional.of(databaseSchema);
+ return this;
+ }
+
+ public Builder databaseSchema(Optional databaseSchema) {
+ this.databaseSchema = databaseSchema;
+ return this;
+ }
+
+ public Builder host(String host) {
+ this.host = Optional.of(host);
+ return this;
+ }
+
+ public Builder host(Optional host) {
+ this.host = host;
+ return this;
+ }
+
+ public Builder port(Integer port) {
+ this.port = Optional.of(port);
+ return this;
+ }
+
+ public Builder port(Optional port) {
+ this.port = port;
+ return this;
+ }
+
+ public Builder username(String username) {
+ this.username = Optional.of(username);
+ return this;
+ }
+
+ public Builder username(Optional username) {
+ this.username = username;
+ return this;
+ }
+
+ public Builder password(String password) {
+ this.password = Optional.of(password);
+ return this;
+ }
+
+ public Builder password(Optional password) {
+ this.password = password;
+ return this;
+ }
+
+ public Builder byPassRLSUser(String byPassRLSUser) {
+ this.byPassRLSUser = Optional.of(byPassRLSUser);
+ return this;
+ }
+
+ public Builder byPassRLSUser(Optional byPassRLSUser) {
+ this.byPassRLSUser = byPassRLSUser;
+ return this;
+ }
+
+ public Builder byPassRLSPassword(String byPassRLSPassword) {
+ this.byPassRLSPassword = Optional.of(byPassRLSPassword);
+ return this;
+ }
+
+ public Builder byPassRLSPassword(Optional byPassRLSPassword) {
+ this.byPassRLSPassword = byPassRLSPassword;
+ return this;
+ }
+
+ public Builder rowLevelSecurityEnabled(boolean rlsEnabled) {
+ this.rowLevelSecurityEnabled = Optional.of(rlsEnabled);
+ return this;
+ }
+
+ public Builder rowLevelSecurityEnabled() {
+ this.rowLevelSecurityEnabled = Optional.of(true);
+ return this;
+ }
+
+ public Builder poolInitialSize(Optional poolInitialSize) {
+ this.poolInitialSize = poolInitialSize;
+ return this;
+ }
+
+ public Builder poolInitialSize(Integer poolInitialSize) {
+ this.poolInitialSize = Optional.of(poolInitialSize);
+ return this;
+ }
+
+ public Builder poolMaxSize(Optional poolMaxSize) {
+ this.poolMaxSize = poolMaxSize;
+ return this;
+ }
+
+ public Builder poolMaxSize(Integer poolMaxSize) {
+ this.poolMaxSize = Optional.of(poolMaxSize);
+ return this;
+ }
+
+ public Builder byPassRLSPoolInitialSize(Optional byPassRLSPoolInitialSize) {
+ this.byPassRLSPoolInitialSize = byPassRLSPoolInitialSize;
+ return this;
+ }
+
+ public Builder byPassRLSPoolInitialSize(Integer byPassRLSPoolInitialSize) {
+ this.byPassRLSPoolInitialSize = Optional.of(byPassRLSPoolInitialSize);
+ return this;
+ }
+
+ public Builder byPassRLSPoolMaxSize(Optional byPassRLSPoolMaxSize) {
+ this.byPassRLSPoolMaxSize = byPassRLSPoolMaxSize;
+ return this;
+ }
+
+ public Builder byPassRLSPoolMaxSize(Integer byPassRLSPoolMaxSize) {
+ this.byPassRLSPoolMaxSize = Optional.of(byPassRLSPoolMaxSize);
+ return this;
+ }
+
+ public Builder sslMode(Optional sslMode) {
+ this.sslMode = sslMode;
+ return this;
+ }
+
+ public Builder sslMode(String sslMode) {
+ this.sslMode = Optional.of(sslMode);
+ return this;
+ }
+
+ public Builder jooqReactiveTimeout(Optional jooqReactiveTimeout) {
+ this.jooqReactiveTimeout = jooqReactiveTimeout;
+ return this;
+ }
+
+ // Builds the immutable configuration, applying defaults for every unset optional value.
+ // Fails fast (IllegalArgumentException) when username/password are missing or blank,
+ // and, if row level security is requested, when the bypass-RLS credentials are missing.
+ public PostgresConfiguration build() {
+ Preconditions.checkArgument(username.isPresent() && !username.get().isBlank(), "You need to specify username");
+ Preconditions.checkArgument(password.isPresent() && !password.get().isBlank(), "You need to specify password");
+
+ // RLS needs a dedicated user able to bypass the row-level policies.
+ if (rowLevelSecurityEnabled.isPresent() && rowLevelSecurityEnabled.get()) {
+ Preconditions.checkArgument(byPassRLSUser.isPresent() && !byPassRLSUser.get().isBlank(), "You need to specify byPassRLSUser");
+ Preconditions.checkArgument(byPassRLSPassword.isPresent() && !byPassRLSPassword.get().isBlank(), "You need to specify byPassRLSPassword");
+ }
+
+ return new PostgresConfiguration(host.orElse(HOST_DEFAULT_VALUE),
+ port.orElse(PORT_DEFAULT_VALUE),
+ databaseName.orElse(DATABASE_NAME_DEFAULT_VALUE),
+ databaseSchema.orElse(DATABASE_SCHEMA_DEFAULT_VALUE),
+ new Credential(username.get(), password.get()),
+ // When RLS is disabled the bypass credential falls back to the default credential.
+ new Credential(byPassRLSUser.orElse(username.get()), byPassRLSPassword.orElse(password.get())),
+ rowLevelSecurityEnabled.filter(rlsEnabled -> rlsEnabled).map(rlsEnabled -> RowLevelSecurity.ENABLED).orElse(RowLevelSecurity.DISABLED),
+ poolInitialSize.orElse(POOL_INITIAL_SIZE_DEFAULT_VALUE),
+ poolMaxSize.orElse(POOL_MAX_SIZE_DEFAULT_VALUE),
+ byPassRLSPoolInitialSize.orElse(BY_PASS_RLS_POOL_INITIAL_SIZE_DEFAULT_VALUE),
+ byPassRLSPoolMaxSize.orElse(BY_PASS_RLS_POOL_MAX_SIZE_DEFAULT_VALUE),
+ SSLMode.fromValue(sslMode.orElse(SSL_MODE_DEFAULT_VALUE)),
+ jooqReactiveTimeout.orElse(JOOQ_REACTIVE_TIMEOUT_DEFAULT_VALUE));
+ }
+ }
+
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ // Factory reading every setting from a Commons Configuration instance (postgres.properties).
+ // Absent keys fall back to the builder defaults; RLS defaults to disabled.
+ public static PostgresConfiguration from(Configuration propertiesConfiguration) {
+ return builder()
+ .databaseName(Optional.ofNullable(propertiesConfiguration.getString(DATABASE_NAME)))
+ .databaseSchema(Optional.ofNullable(propertiesConfiguration.getString(DATABASE_SCHEMA)))
+ .host(Optional.ofNullable(propertiesConfiguration.getString(HOST)))
+ .port(propertiesConfiguration.getInt(PORT, PORT_DEFAULT_VALUE))
+ .username(Optional.ofNullable(propertiesConfiguration.getString(USERNAME)))
+ .password(Optional.ofNullable(propertiesConfiguration.getString(PASSWORD)))
+ .byPassRLSUser(Optional.ofNullable(propertiesConfiguration.getString(BY_PASS_RLS_USERNAME)))
+ .byPassRLSPassword(Optional.ofNullable(propertiesConfiguration.getString(BY_PASS_RLS_PASSWORD)))
+ .rowLevelSecurityEnabled(propertiesConfiguration.getBoolean(RLS_ENABLED, false))
+ .poolInitialSize(Optional.ofNullable(propertiesConfiguration.getInteger(POOL_INITIAL_SIZE, null)))
+ .poolMaxSize(Optional.ofNullable(propertiesConfiguration.getInteger(POOL_MAX_SIZE, null)))
+ .byPassRLSPoolInitialSize(Optional.ofNullable(propertiesConfiguration.getInteger(BY_PASS_RLS_POOL_INITIAL_SIZE, null)))
+ .byPassRLSPoolMaxSize(Optional.ofNullable(propertiesConfiguration.getInteger(BY_PASS_RLS_POOL_MAX_SIZE, null)))
+ .sslMode(Optional.ofNullable(propertiesConfiguration.getString(SSL_MODE)))
+ // Duration strings without an explicit unit are interpreted as seconds.
+ .jooqReactiveTimeout(Optional.ofNullable(propertiesConfiguration.getString(JOOQ_REACTIVE_TIMEOUT))
+ .map(value -> DurationParser.parse(value, ChronoUnit.SECONDS)))
+ .build();
+ }
+
+ // Connection coordinates.
+ private final String host;
+ private final int port;
+ private final String databaseName;
+ private final String databaseSchema;
+ // Credential used by regular (RLS-constrained) connections.
+ private final Credential defaultCredential;
+ // Credential used by the pool that bypasses row-level-security policies.
+ private final Credential byPassRLSCredential;
+ private final RowLevelSecurity rowLevelSecurity;
+ // Sizes for the default and bypass-RLS connection pools.
+ private final Integer poolInitialSize;
+ private final Integer poolMaxSize;
+ private final Integer byPassRLSPoolInitialSize;
+ private final Integer byPassRLSPoolMaxSize;
+ private final SSLMode sslMode;
+ // Timeout applied to reactive jOOQ queries.
+ private final Duration jooqReactiveTimeout;
+
+ // Private: all values are already defaulted and validated by Builder.build().
+ private PostgresConfiguration(String host, int port, String databaseName, String databaseSchema,
+ Credential defaultCredential, Credential byPassRLSCredential, RowLevelSecurity rowLevelSecurity,
+ Integer poolInitialSize, Integer poolMaxSize,
+ Integer byPassRLSPoolInitialSize, Integer byPassRLSPoolMaxSize,
+ SSLMode sslMode, Duration jooqReactiveTimeout) {
+ this.host = host;
+ this.port = port;
+ this.databaseName = databaseName;
+ this.databaseSchema = databaseSchema;
+ this.defaultCredential = defaultCredential;
+ this.byPassRLSCredential = byPassRLSCredential;
+ this.rowLevelSecurity = rowLevelSecurity;
+ this.poolInitialSize = poolInitialSize;
+ this.poolMaxSize = poolMaxSize;
+ this.byPassRLSPoolInitialSize = byPassRLSPoolInitialSize;
+ this.byPassRLSPoolMaxSize = byPassRLSPoolMaxSize;
+ this.sslMode = sslMode;
+ this.jooqReactiveTimeout = jooqReactiveTimeout;
+ }
+
+ // Plain accessors. NOTE(review): pool-size accessors drop the `get` prefix while the
+ // others keep it — consider unifying the naming (breaking for callers, so left as is).
+ public String getHost() {
+ return host;
+ }
+
+ public int getPort() {
+ return port;
+ }
+
+ public String getDatabaseName() {
+ return databaseName;
+ }
+
+ public String getDatabaseSchema() {
+ return databaseSchema;
+ }
+
+ public Credential getDefaultCredential() {
+ return defaultCredential;
+ }
+
+ public Credential getByPassRLSCredential() {
+ return byPassRLSCredential;
+ }
+
+ public RowLevelSecurity getRowLevelSecurity() {
+ return rowLevelSecurity;
+ }
+
+ public Integer poolInitialSize() {
+ return poolInitialSize;
+ }
+
+ public Integer poolMaxSize() {
+ return poolMaxSize;
+ }
+
+ public Integer byPassRLSPoolInitialSize() {
+ return byPassRLSPoolInitialSize;
+ }
+
+ public Integer byPassRLSPoolMaxSize() {
+ return byPassRLSPoolMaxSize;
+ }
+
+ public SSLMode getSslMode() {
+ return sslMode;
+ }
+
+ public Duration getJooqReactiveTimeout() {
+ return jooqReactiveTimeout;
+ }
+
+ // equals/hashCode cover ALL configuration state. The byPassRLS pool sizes are part of
+ // that state (set via the builder, read by byPassRLSPoolInitialSize()/byPassRLSPoolMaxSize())
+ // and were previously omitted from both methods, so two configurations differing only in
+ // bypass pool sizing compared equal.
+ @Override
+ public final int hashCode() {
+ return Objects.hash(host, port, databaseName, databaseSchema, defaultCredential, byPassRLSCredential, rowLevelSecurity, poolInitialSize, poolMaxSize, byPassRLSPoolInitialSize, byPassRLSPoolMaxSize, sslMode, jooqReactiveTimeout);
+ }
+
+ @Override
+ public final boolean equals(Object o) {
+ if (o instanceof PostgresConfiguration) {
+ PostgresConfiguration that = (PostgresConfiguration) o;
+
+ return Objects.equals(this.rowLevelSecurity, that.rowLevelSecurity)
+ && Objects.equals(this.host, that.host)
+ && Objects.equals(this.port, that.port)
+ && Objects.equals(this.defaultCredential, that.defaultCredential)
+ && Objects.equals(this.byPassRLSCredential, that.byPassRLSCredential)
+ && Objects.equals(this.databaseName, that.databaseName)
+ && Objects.equals(this.databaseSchema, that.databaseSchema)
+ && Objects.equals(this.poolInitialSize, that.poolInitialSize)
+ && Objects.equals(this.poolMaxSize, that.poolMaxSize)
+ && Objects.equals(this.byPassRLSPoolInitialSize, that.byPassRLSPoolInitialSize)
+ && Objects.equals(this.byPassRLSPoolMaxSize, that.byPassRLSPoolMaxSize)
+ && Objects.equals(this.sslMode, that.sslMode)
+ && Objects.equals(this.jooqReactiveTimeout, that.jooqReactiveTimeout);
+ }
+ return false;
+ }
+}
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresIndex.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresIndex.java
new file mode 100644
index 00000000000..c1a41f2947e
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresIndex.java
@@ -0,0 +1,64 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+import java.util.function.Function;
+
+import org.jooq.DDLQuery;
+import org.jooq.DSLContext;
+
+import com.google.common.base.Preconditions;
+
+// Declarative description of a Postgres index: a name plus a function producing the
+// jOOQ DDL query that creates it. Consumed by PostgresTableManager at startup.
+public class PostgresIndex {
+
+ @FunctionalInterface
+ public interface RequireCreateIndexStep {
+ PostgresIndex createIndexStep(CreateIndexFunction createIndexFunction);
+ }
+
+ @FunctionalInterface
+ public interface CreateIndexFunction {
+ DDLQuery createIndex(DSLContext dsl, String indexName);
+ }
+
+ // Staged-builder entry point. The name is lower-cased because Postgres folds unquoted
+ // identifiers to lower case, keeping comparisons against pg_indexes consistent.
+ // NOTE(review): locale-sensitive toLowerCase() — consider toLowerCase(Locale.US) to
+ // avoid surprises under e.g. the Turkish locale; confirm against project conventions.
+ public static RequireCreateIndexStep name(String indexName) {
+ Preconditions.checkNotNull(indexName);
+ String strategyIndexName = indexName.toLowerCase();
+
+ return createIndexFunction -> new PostgresIndex(strategyIndexName, dsl -> createIndexFunction.createIndex(dsl, strategyIndexName));
+ }
+
+ private final String name;
+ // Maps a DSLContext to the DDL query creating this index.
+ private final Function createIndexStepFunction;
+
+ private PostgresIndex(String name, Function createIndexStepFunction) {
+ this.name = name;
+ this.createIndexStepFunction = createIndexStepFunction;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public Function getCreateIndexStepFunction() {
+ return createIndexStepFunction;
+ }
+
+}
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresModule.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresModule.java
new file mode 100644
index 00000000000..8f1725fe4b3
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresModule.java
@@ -0,0 +1,130 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+
+import java.util.Collection;
+import java.util.List;
+
+import com.google.common.collect.ImmutableList;
+
+// Aggregates the Postgres tables and indexes a James component needs. Modules are
+// composed (aggregateModules) and handed to PostgresTableManager, which creates any
+// missing tables/indexes at startup.
+public interface PostgresModule {
+
+ // Flattens several modules into one by concatenating their tables and indexes.
+ static PostgresModule aggregateModules(PostgresModule... modules) {
+ return builder()
+ .modules(modules)
+ .build();
+ }
+
+ static PostgresModule aggregateModules(Collection modules) {
+ return builder()
+ .modules(modules)
+ .build();
+ }
+
+ PostgresModule EMPTY_MODULE = builder().build();
+
+ // Tables this module requires.
+ List tables();
+
+ // Indexes this module requires.
+ List tableIndexes();
+
+ // Straightforward immutable implementation backing the builder.
+ class Impl implements PostgresModule {
+ private final List tables;
+ private final List tableIndexes;
+
+ private Impl(List tables, List tableIndexes) {
+ this.tables = tables;
+ this.tableIndexes = tableIndexes;
+ }
+
+ @Override
+ public List tables() {
+ return tables;
+ }
+
+ @Override
+ public List tableIndexes() {
+ return tableIndexes;
+ }
+ }
+
+ class Builder {
+ private final ImmutableList.Builder tables;
+ private final ImmutableList.Builder tableIndexes;
+
+ public Builder() {
+ tables = ImmutableList.builder();
+ tableIndexes = ImmutableList.builder();
+ }
+
+ public Builder addTable(PostgresTable... table) {
+ tables.add(table);
+ return this;
+ }
+
+ public Builder addIndex(PostgresIndex... index) {
+ tableIndexes.add(index);
+ return this;
+ }
+
+ public Builder addTable(List tables) {
+ this.tables.addAll(tables);
+ return this;
+ }
+
+ public Builder addIndex(List indexes) {
+ this.tableIndexes.addAll(indexes);
+ return this;
+ }
+
+ // Merges the tables and indexes of every given module into this builder.
+ public Builder modules(Collection modules) {
+ modules.forEach(module -> {
+ addTable(module.tables());
+ addIndex(module.tableIndexes());
+ });
+ return this;
+ }
+
+ public Builder modules(PostgresModule... modules) {
+ return modules(ImmutableList.copyOf(modules));
+ }
+
+ public PostgresModule build() {
+ return new Impl(tables.build(), tableIndexes.build());
+ }
+ }
+
+ static Builder builder() {
+ return new Builder();
+ }
+
+ // Shorthand for a module containing only tables.
+ static PostgresModule table(PostgresTable... tables) {
+ return builder()
+ .addTable(ImmutableList.copyOf(tables))
+ .build();
+ }
+
+ // Shorthand for a module containing only indexes.
+ static PostgresModule tableIndex(PostgresIndex... tableIndexes) {
+ return builder()
+ .addIndex(ImmutableList.copyOf(tableIndexes))
+ .build();
+ }
+
+}
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresTable.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresTable.java
new file mode 100644
index 00000000000..f9bd1308c90
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresTable.java
@@ -0,0 +1,172 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.Function;
+
+import org.jooq.DDLQuery;
+import org.jooq.DSLContext;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+
+// Declarative description of a Postgres table: name, jOOQ creation function, whether the
+// table participates in row level security, and optional raw ALTER statements to run
+// after creation. Consumed by PostgresTableManager at startup.
+public class PostgresTable {
+ @FunctionalInterface
+ public interface RequireCreateTableStep {
+ RequireRowLevelSecurity createTableStep(CreateTableFunction createTableFunction);
+ }
+
+ @FunctionalInterface
+ public interface CreateTableFunction {
+ DDLQuery createTable(DSLContext dsl, String tableName);
+ }
+
+ @FunctionalInterface
+ public interface RequireRowLevelSecurity {
+ FinalStage supportsRowLevelSecurity(boolean rowLevelSecurityEnabled);
+
+ default FinalStage disableRowLevelSecurity() {
+ return supportsRowLevelSecurity(false);
+ }
+
+ default FinalStage supportsRowLevelSecurity() {
+ return supportsRowLevelSecurity(true);
+ }
+ }
+
+ // A raw ALTER statement plus a predicate deciding whether it applies for the current
+ // RLS setting. NOTE(review): `query` could be final — it is only set in the constructor.
+ public abstract static class AdditionalAlterQuery {
+ private String query;
+
+ public AdditionalAlterQuery(String query) {
+ this.query = query;
+ }
+
+ abstract boolean shouldBeApplied(RowLevelSecurity rowLevelSecurity);
+
+ public String getQuery() {
+ return query;
+ }
+ }
+
+ // Applied only when row level security is enabled.
+ public static class RLSOnlyAdditionalAlterQuery extends AdditionalAlterQuery {
+ public RLSOnlyAdditionalAlterQuery(String query) {
+ super(query);
+ }
+
+ @Override
+ boolean shouldBeApplied(RowLevelSecurity rowLevelSecurity) {
+ return rowLevelSecurity.isRowLevelSecurityEnabled();
+ }
+ }
+
+ // Applied only when row level security is disabled.
+ public static class NonRLSOnlyAdditionalAlterQuery extends AdditionalAlterQuery {
+ public NonRLSOnlyAdditionalAlterQuery(String query) {
+ super(query);
+ }
+
+ @Override
+ boolean shouldBeApplied(RowLevelSecurity rowLevelSecurity) {
+ return !rowLevelSecurity.isRowLevelSecurityEnabled();
+ }
+ }
+
+ // Applied regardless of the RLS setting.
+ public static class AllCasesAdditionalAlterQuery extends AdditionalAlterQuery {
+ public AllCasesAdditionalAlterQuery(String query) {
+ super(query);
+ }
+
+ @Override
+ boolean shouldBeApplied(RowLevelSecurity rowLevelSecurity) {
+ return true;
+ }
+ }
+
+ public static class FinalStage {
+ private final String tableName;
+ private final boolean supportsRowLevelSecurity;
+ private final Function createTableStepFunction;
+ private final ImmutableList.Builder additionalAlterQueries;
+
+ public FinalStage(String tableName, boolean supportsRowLevelSecurity, Function createTableStepFunction) {
+ this.tableName = tableName;
+ this.supportsRowLevelSecurity = supportsRowLevelSecurity;
+ this.createTableStepFunction = createTableStepFunction;
+ this.additionalAlterQueries = ImmutableList.builder();
+ }
+
+ /**
+ * Raw SQL ALTER queries in case not supported by jOOQ DSL.
+ */
+ public FinalStage addAdditionalAlterQueries(String... additionalAlterQueries) {
+ this.additionalAlterQueries.addAll(Arrays.stream(additionalAlterQueries).map(AllCasesAdditionalAlterQuery::new).toList());
+ return this;
+ }
+
+ /**
+ * Raw SQL ALTER queries in case not supported by jOOQ DSL.
+ */
+ public FinalStage addAdditionalAlterQueries(AdditionalAlterQuery... additionalAlterQueries) {
+ this.additionalAlterQueries.add(additionalAlterQueries);
+ return this;
+ }
+
+ public PostgresTable build() {
+ return new PostgresTable(tableName, supportsRowLevelSecurity, createTableStepFunction, additionalAlterQueries.build());
+ }
+ }
+
+ // Staged-builder entry point. The name is lower-cased because Postgres folds unquoted
+ // identifiers to lower case, keeping comparisons against pg_tables consistent.
+ public static RequireCreateTableStep name(String tableName) {
+ Preconditions.checkNotNull(tableName);
+ String strategyName = tableName.toLowerCase();
+
+ return createTableFunction -> supportsRowLevelSecurity -> new FinalStage(strategyName, supportsRowLevelSecurity, dsl -> createTableFunction.createTable(dsl, strategyName));
+ }
+
+ private final String name;
+ private final boolean supportsRowLevelSecurity;
+ // Maps a DSLContext to the DDL query creating this table.
+ private final Function createTableStepFunction;
+ private final List additionalAlterQueries;
+
+ private PostgresTable(String name, boolean supportsRowLevelSecurity, Function createTableStepFunction, List additionalAlterQueries) {
+ this.name = name;
+ this.supportsRowLevelSecurity = supportsRowLevelSecurity;
+ this.createTableStepFunction = createTableStepFunction;
+ this.additionalAlterQueries = additionalAlterQueries;
+ }
+
+
+ public String getName() {
+ return name;
+ }
+
+ public Function getCreateTableStepFunction() {
+ return createTableStepFunction;
+ }
+
+ public boolean supportsRowLevelSecurity() {
+ return supportsRowLevelSecurity;
+ }
+
+ public List getAdditionalAlterQueries() {
+ return additionalAlterQueries;
+ }
+}
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresTableManager.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresTableManager.java
new file mode 100644
index 00000000000..ffb88497682
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/PostgresTableManager.java
@@ -0,0 +1,216 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+import java.util.List;
+
+import jakarta.inject.Inject;
+
+import org.apache.james.backends.postgres.utils.PostgresExecutor;
+import org.apache.james.lifecycle.api.Startable;
+import org.jooq.DSLContext;
+import org.jooq.exception.DataAccessException;
+import org.jooq.impl.DSL;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import io.r2dbc.spi.Connection;
+import io.r2dbc.spi.Result;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+// Creates at startup the tables and indexes declared by the aggregated PostgresModule,
+// enables the hstore extension, and (when configured) installs row-level-security
+// policies. All operations are idempotent: existing objects are skipped or tolerated.
+public class PostgresTableManager implements Startable {
+ // Guice initialization ordering hint: the schema must exist before DAOs run.
+ public static final int INITIALIZATION_PRIORITY = 1;
+ private static final Logger LOGGER = LoggerFactory.getLogger(PostgresTableManager.class);
+ private final PostgresExecutor postgresExecutor;
+ private final PostgresModule module;
+ private final RowLevelSecurity rowLevelSecurity;
+
+ @Inject
+ public PostgresTableManager(PostgresExecutor postgresExecutor,
+ PostgresModule module,
+ PostgresConfiguration postgresConfiguration) {
+ this.postgresExecutor = postgresExecutor;
+ this.module = module;
+ this.rowLevelSecurity = postgresConfiguration.getRowLevelSecurity();
+ }
+
+ @VisibleForTesting
+ public PostgresTableManager(PostgresExecutor postgresExecutor, PostgresModule module, RowLevelSecurity rowLevelSecurity) {
+ this.postgresExecutor = postgresExecutor;
+ this.module = module;
+ this.rowLevelSecurity = rowLevelSecurity;
+ }
+
+ // Blocking entry point: extension, then tables, then indexes.
+ public void initPostgres() {
+ initializePostgresExtension()
+ .then(initializeTables())
+ .then(initializeTableIndexes())
+ .block();
+ }
+
+ // hstore is required by James' Postgres schema (key/value columns).
+ public Mono initializePostgresExtension() {
+ return Mono.usingWhen(postgresExecutor.connectionFactory().getConnection(),
+ connection -> Mono.just(connection)
+ .flatMapMany(pgConnection -> pgConnection.createStatement("CREATE EXTENSION IF NOT EXISTS hstore")
+ .execute())
+ .flatMap(Result::getRowsUpdated)
+ .then(),
+ connection -> postgresExecutor.connectionFactory().closeConnection(connection));
+ }
+
+ // Creates every declared table that is not already listed in pg_tables for the current schema.
+ public Mono initializeTables() {
+ return Mono.usingWhen(postgresExecutor.connectionFactory().getConnection(),
+ connection -> postgresExecutor.dslContext(connection)
+ .flatMapMany(dsl -> listExistTables()
+ .flatMapMany(existTables -> Flux.fromIterable(module.tables())
+ .filter(table -> !existTables.contains(table.getName()))
+ .flatMap(table -> createAndAlterTable(table, dsl, connection))))
+ .then(),
+ connection -> postgresExecutor.connectionFactory().closeConnection(connection));
+ }
+
+ // Creation followed by the table's ALTER statements and, if applicable, RLS setup.
+ private Mono createAndAlterTable(PostgresTable table, DSLContext dsl, Connection connection) {
+ return Mono.from(table.getCreateTableStepFunction().apply(dsl))
+ .then(alterTableIfNeeded(table, connection))
+ .doOnSuccess(any -> LOGGER.info("Table {} created", table.getName()))
+ .onErrorResume(exception -> handleTableCreationException(table, exception));
+ }
+
+ // Table names present in pg_tables for the current schema.
+ public Mono> listExistTables() {
+ return Mono.usingWhen(postgresExecutor.connectionFactory().getConnection(),
+ connection -> postgresExecutor.dslContext(connection)
+ .flatMapMany(d -> Flux.from(d.select(DSL.field("tablename"))
+ .from("pg_tables")
+ .where(DSL.field("schemaname")
+ .eq(DSL.currentSchema()))))
+ .map(r -> r.get(0, String.class))
+ .collectList(),
+ connection -> postgresExecutor.connectionFactory().closeConnection(connection));
+ }
+
+ // Tolerates races where another node created the table between listing and creation.
+ private Mono handleTableCreationException(PostgresTable table, Throwable e) {
+ if (e instanceof DataAccessException && e.getMessage().contains(String.format("\"%s\" already exists", table.getName()))) {
+ return Mono.empty();
+ }
+ LOGGER.error("Error while creating table: {}", table.getName(), e);
+ return Mono.error(e);
+ }
+
+ private Mono alterTableIfNeeded(PostgresTable table, Connection connection) {
+ return executeAdditionalAlterQueries(table, connection)
+ .then(enableRLSIfNeeded(table, connection));
+ }
+
+ // Runs the raw ALTER statements applicable under the configured RLS setting, in order.
+ // NOTE(review): the "already exists" match is a broad substring check on any error
+ // message — confirm it cannot mask unrelated failures.
+ private Mono executeAdditionalAlterQueries(PostgresTable table, Connection connection) {
+ return Flux.fromIterable(table.getAdditionalAlterQueries())
+ .filter(additionalAlterQuery -> additionalAlterQuery.shouldBeApplied(rowLevelSecurity))
+ .map(PostgresTable.AdditionalAlterQuery::getQuery)
+ .concatMap(alterSQLQuery -> Mono.just(connection)
+ .flatMapMany(pgConnection -> pgConnection.createStatement(alterSQLQuery)
+ .execute())
+ .flatMap(Result::getRowsUpdated)
+ .then()
+ .onErrorResume(e -> {
+ if (e.getMessage().contains("already exists")) {
+ return Mono.empty();
+ }
+ LOGGER.error("Error while executing ALTER query for table {}", table.getName(), e);
+ return Mono.error(e);
+ }))
+ .then();
+ }
+
+ private Mono enableRLSIfNeeded(PostgresTable table, Connection connection) {
+ if (rowLevelSecurity.isRowLevelSecurityEnabled() && table.supportsRowLevelSecurity()) {
+ return alterTableEnableRLS(table, connection);
+ }
+ return Mono.empty();
+ }
+
+ private Mono alterTableEnableRLS(PostgresTable table, Connection connection) {
+ return Mono.just(connection)
+ .flatMapMany(pgConnection -> pgConnection.createStatement(rowLevelSecurityAlterStatement(table.getName()))
+ .execute())
+ .flatMap(Result::getRowsUpdated)
+ .then();
+ }
+
+ // Adds a `domain` column defaulted from the app.current_domain session setting, then —
+ // only if the policy does not already exist in pg_policies — enables and forces RLS and
+ // creates a policy restricting rows to the current domain. Idempotent via the DO block.
+ private String rowLevelSecurityAlterStatement(String tableName) {
+ String policyName = "domain_" + tableName + "_policy";
+ return "set app.current_domain = ''; alter table " + tableName + " add column if not exists domain varchar(255) not null default current_setting('app.current_domain')::text ;" +
+ "do $$ \n" +
+ "begin \n" +
+ " if not exists( select policyname from pg_policies where policyname = '" + policyName + "') then \n" +
+ " execute 'alter table " + tableName + " enable row level security; alter table " + tableName + " force row level security; create policy " + policyName + " on " + tableName + " using (domain = current_setting(''app.current_domain'')::text)';\n" +
+ " end if;\n" +
+ "end $$;";
+ }
+
+ // Truncates every declared table; errors are logged but still propagated by the Flux.
+ public Mono truncate() {
+ return Mono.usingWhen(postgresExecutor.connectionFactory().getConnection(),
+ connection -> postgresExecutor.dslContext(connection)
+ .flatMap(dsl -> Flux.fromIterable(module.tables())
+ .flatMap(table -> Mono.from(dsl.truncateTable(table.getName()))
+ .doOnSuccess(any -> LOGGER.info("Table {} truncated", table.getName()))
+ .doOnError(e -> LOGGER.error("Error while truncating table {}", table.getName(), e)))
+ .then()),
+ connection -> postgresExecutor.connectionFactory().closeConnection(connection));
+ }
+
+ // Creates every declared index not already listed in pg_indexes for the current schema.
+ public Mono initializeTableIndexes() {
+ return Mono.usingWhen(postgresExecutor.connectionFactory().getConnection(),
+ connection -> postgresExecutor.dslContext(connection)
+ .flatMapMany(dsl -> listExistIndexes(dsl)
+ .flatMapMany(existIndexes -> Flux.fromIterable(module.tableIndexes())
+ .filter(index -> !existIndexes.contains(index.getName()))
+ .flatMap(index -> createTableIndex(index, dsl))))
+ .then(),
+ connection -> postgresExecutor.connectionFactory().closeConnection(connection));
+ }
+
+ private Mono> listExistIndexes(DSLContext dslContext) {
+ return Mono.just(dslContext)
+ .flatMapMany(dsl -> Flux.from(dsl.select(DSL.field("indexname"))
+ .from("pg_indexes")
+ .where(DSL.field("schemaname")
+ .eq(DSL.currentSchema()))))
+ .map(r -> r.get(0, String.class))
+ .collectList();
+ }
+
+ private Mono createTableIndex(PostgresIndex index, DSLContext dsl) {
+ return Mono.from(index.getCreateIndexStepFunction().apply(dsl))
+ .doOnSuccess(any -> LOGGER.info("Index {} created", index.getName()))
+ .onErrorResume(e -> handleIndexCreationException(index, e))
+ .then();
+ }
+
+ // Tolerates races where another node created the index concurrently.
+ private Mono extends Integer> handleIndexCreationException(PostgresIndex index, Throwable e) {
+ if (e instanceof DataAccessException && e.getMessage().contains(String.format("\"%s\" already exists", index.getName()))) {
+ return Mono.empty();
+ }
+ LOGGER.error("Error while creating index {}", index.getName(), e);
+ return Mono.error(e);
+ }
+
+}
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/RowLevelSecurity.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/RowLevelSecurity.java
new file mode 100644
index 00000000000..2f806b6c74e
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/RowLevelSecurity.java
@@ -0,0 +1,35 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+// Whether Postgres row level security is enabled for James tables.
+public enum RowLevelSecurity {
+ ENABLED(true),
+ DISABLED(false);
+
+ // final: enum constants are shared singletons, so the flag must be immutable.
+ private final boolean rowLevelSecurityEnabled;
+
+ RowLevelSecurity(boolean rowLevelSecurityEnabled) {
+ this.rowLevelSecurityEnabled = rowLevelSecurityEnabled;
+ }
+
+ public boolean isRowLevelSecurityEnabled() {
+ return rowLevelSecurityEnabled;
+ }
+}
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/quota/PostgresQuotaCurrentValueDAO.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/quota/PostgresQuotaCurrentValueDAO.java
new file mode 100644
index 00000000000..531f58d8e27
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/quota/PostgresQuotaCurrentValueDAO.java
@@ -0,0 +1,158 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres.quota;
+
+import static org.apache.james.backends.postgres.quota.PostgresQuotaModule.PostgresQuotaCurrentValueTable.COMPONENT;
+import static org.apache.james.backends.postgres.quota.PostgresQuotaModule.PostgresQuotaCurrentValueTable.CURRENT_VALUE;
+import static org.apache.james.backends.postgres.quota.PostgresQuotaModule.PostgresQuotaCurrentValueTable.IDENTIFIER;
+import static org.apache.james.backends.postgres.quota.PostgresQuotaModule.PostgresQuotaCurrentValueTable.PRIMARY_KEY_CONSTRAINT_NAME;
+import static org.apache.james.backends.postgres.quota.PostgresQuotaModule.PostgresQuotaCurrentValueTable.TABLE_NAME;
+import static org.apache.james.backends.postgres.quota.PostgresQuotaModule.PostgresQuotaCurrentValueTable.TYPE;
+import static org.apache.james.backends.postgres.utils.PostgresExecutor.DEFAULT_INJECT;
+
+import java.util.function.Function;
+
+import jakarta.inject.Inject;
+import jakarta.inject.Named;
+
+import org.apache.james.backends.postgres.utils.PostgresExecutor;
+import org.apache.james.core.quota.QuotaComponent;
+import org.apache.james.core.quota.QuotaCurrentValue;
+import org.apache.james.core.quota.QuotaType;
+import org.jooq.Record;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+public class PostgresQuotaCurrentValueDAO {
+    private static final Logger LOGGER = LoggerFactory.getLogger(PostgresQuotaCurrentValueDAO.class);
+    private static final boolean IS_INCREASE = true;
+
+    private final PostgresExecutor postgresExecutor;
+
+    @Inject
+    public PostgresQuotaCurrentValueDAO(@Named(DEFAULT_INJECT) PostgresExecutor postgresExecutor) {
+        this.postgresExecutor = postgresExecutor;
+    }
+
+    public Mono<Void> increase(QuotaCurrentValue.Key quotaKey, long amount) {
+        return postgresExecutor.executeVoid(dslContext -> Mono.from(dslContext.insertInto(TABLE_NAME)
+                .set(IDENTIFIER, quotaKey.getIdentifier())
+                .set(COMPONENT, quotaKey.getQuotaComponent().getValue())
+                .set(TYPE, quotaKey.getQuotaType().getValue())
+                .set(CURRENT_VALUE, amount)
+                .onConflictOnConstraint(PRIMARY_KEY_CONSTRAINT_NAME)
+                .doUpdate()
+                .set(CURRENT_VALUE, CURRENT_VALUE.plus(amount))))
+            .onErrorResume(ex -> {
+                LOGGER.warn("Failure when increasing {} {} quota for {}. Quota current value is thus not updated and needs re-computation",
+                    quotaKey.getQuotaComponent().getValue(), quotaKey.getQuotaType().getValue(), quotaKey.getIdentifier(), ex);
+                return Mono.empty();
+            });
+    }
+
+    public Mono<Long> upsert(QuotaCurrentValue.Key quotaKey, long newCurrentValue) {
+        return update(quotaKey, newCurrentValue)
+            .switchIfEmpty(Mono.defer(() -> insert(quotaKey, newCurrentValue, IS_INCREASE)));
+    }
+
+    public Mono<Long> update(QuotaCurrentValue.Key quotaKey, long newCurrentValue) {
+        return postgresExecutor.executeRow(dslContext -> Mono.from(dslContext.update(TABLE_NAME)
+                .set(CURRENT_VALUE, newCurrentValue)
+                .where(IDENTIFIER.eq(quotaKey.getIdentifier()),
+                    COMPONENT.eq(quotaKey.getQuotaComponent().getValue()),
+                    TYPE.eq(quotaKey.getQuotaType().getValue()))
+                .returning(CURRENT_VALUE)))
+            .map(record -> record.get(CURRENT_VALUE));
+    }
+
+    public Mono<Long> insert(QuotaCurrentValue.Key quotaKey, long amount, boolean isIncrease) {
+        return postgresExecutor.executeRow(dslContext -> Mono.from(dslContext.insertInto(TABLE_NAME)
+                .set(IDENTIFIER, quotaKey.getIdentifier())
+                .set(COMPONENT, quotaKey.getQuotaComponent().getValue())
+                .set(TYPE, quotaKey.getQuotaType().getValue())
+                .set(CURRENT_VALUE, newCurrentValue(amount, isIncrease))
+                .returning(CURRENT_VALUE)))
+            .map(record -> record.get(CURRENT_VALUE));
+    }
+
+    private Long newCurrentValue(long amount, boolean isIncrease) {
+        if (isIncrease) {
+            return amount;
+        }
+        return -amount;
+    }
+
+    public Mono<Void> decrease(QuotaCurrentValue.Key quotaKey, long amount) {
+        return postgresExecutor.executeVoid(dslContext -> Mono.from(dslContext.insertInto(TABLE_NAME)
+                .set(IDENTIFIER, quotaKey.getIdentifier())
+                .set(COMPONENT, quotaKey.getQuotaComponent().getValue())
+                .set(TYPE, quotaKey.getQuotaType().getValue())
+                .set(CURRENT_VALUE, -amount)
+                .onConflictOnConstraint(PRIMARY_KEY_CONSTRAINT_NAME)
+                .doUpdate()
+                .set(CURRENT_VALUE, CURRENT_VALUE.minus(amount))))
+            .onErrorResume(ex -> {
+                LOGGER.warn("Failure when decreasing {} {} quota for {}. Quota current value is thus not updated and needs re-computation",
+                    quotaKey.getQuotaComponent().getValue(), quotaKey.getQuotaType().getValue(), quotaKey.getIdentifier(), ex);
+                return Mono.empty();
+            });
+    }
+
+    public Mono<QuotaCurrentValue> getQuotaCurrentValue(QuotaCurrentValue.Key quotaKey) {
+        return postgresExecutor.executeRow(dslContext -> Mono.from(dslContext.select(CURRENT_VALUE)
+                .from(TABLE_NAME)
+                .where(IDENTIFIER.eq(quotaKey.getIdentifier()),
+                    COMPONENT.eq(quotaKey.getQuotaComponent().getValue()),
+                    TYPE.eq(quotaKey.getQuotaType().getValue()))))
+            .map(toQuotaCurrentValue(quotaKey));
+    }
+
+    public Mono<Void> deleteQuotaCurrentValue(QuotaCurrentValue.Key quotaKey) {
+        return postgresExecutor.executeVoid(dslContext -> Mono.from(dslContext.deleteFrom(TABLE_NAME)
+            .where(IDENTIFIER.eq(quotaKey.getIdentifier()),
+                COMPONENT.eq(quotaKey.getQuotaComponent().getValue()),
+                TYPE.eq(quotaKey.getQuotaType().getValue()))));
+    }
+
+    public Flux<QuotaCurrentValue> getQuotaCurrentValues(QuotaComponent quotaComponent, String identifier) {
+        return postgresExecutor.executeRows(dslContext -> Flux.from(dslContext.select(TYPE, CURRENT_VALUE)
+                .from(TABLE_NAME)
+                .where(IDENTIFIER.eq(identifier),
+                    COMPONENT.eq(quotaComponent.getValue()))))
+            .map(toQuotaCurrentValue(quotaComponent, identifier));
+    }
+
+    private static Function<Record, QuotaCurrentValue> toQuotaCurrentValue(QuotaCurrentValue.Key quotaKey) {
+        return record -> QuotaCurrentValue.builder().quotaComponent(quotaKey.getQuotaComponent())
+            .identifier(quotaKey.getIdentifier())
+            .quotaType(quotaKey.getQuotaType())
+            .currentValue(record.get(CURRENT_VALUE)).build();
+    }
+
+    private static Function<Record, QuotaCurrentValue> toQuotaCurrentValue(QuotaComponent quotaComponent, String identifier) {
+        return record -> QuotaCurrentValue.builder().quotaComponent(quotaComponent)
+            .identifier(identifier)
+            .quotaType(QuotaType.of(record.get(TYPE)))
+            .currentValue(record.get(CURRENT_VALUE)).build();
+    }
+}
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/quota/PostgresQuotaLimitDAO.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/quota/PostgresQuotaLimitDAO.java
new file mode 100644
index 00000000000..02523bae40b
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/quota/PostgresQuotaLimitDAO.java
@@ -0,0 +1,100 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres.quota;
+
+import static org.apache.james.backends.postgres.quota.PostgresQuotaModule.PostgresQuotaLimitTable.IDENTIFIER;
+import static org.apache.james.backends.postgres.quota.PostgresQuotaModule.PostgresQuotaLimitTable.PK_CONSTRAINT_NAME;
+import static org.apache.james.backends.postgres.quota.PostgresQuotaModule.PostgresQuotaLimitTable.QUOTA_COMPONENT;
+import static org.apache.james.backends.postgres.quota.PostgresQuotaModule.PostgresQuotaLimitTable.QUOTA_LIMIT;
+import static org.apache.james.backends.postgres.quota.PostgresQuotaModule.PostgresQuotaLimitTable.QUOTA_SCOPE;
+import static org.apache.james.backends.postgres.quota.PostgresQuotaModule.PostgresQuotaLimitTable.QUOTA_TYPE;
+import static org.apache.james.backends.postgres.quota.PostgresQuotaModule.PostgresQuotaLimitTable.TABLE_NAME;
+import static org.apache.james.backends.postgres.utils.PostgresExecutor.DEFAULT_INJECT;
+
+import jakarta.inject.Inject;
+import jakarta.inject.Named;
+
+import org.apache.james.backends.postgres.utils.PostgresExecutor;
+import org.apache.james.core.quota.QuotaComponent;
+import org.apache.james.core.quota.QuotaLimit;
+import org.apache.james.core.quota.QuotaScope;
+import org.apache.james.core.quota.QuotaType;
+import org.jooq.Record;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+public class PostgresQuotaLimitDAO {
+    private static final Long EMPTY_QUOTA_LIMIT = null; // unlimited quota is stored as NULL in quota_limit
+
+    private final PostgresExecutor postgresExecutor;
+
+    @Inject
+    public PostgresQuotaLimitDAO(@Named(DEFAULT_INJECT) PostgresExecutor postgresExecutor) {
+        this.postgresExecutor = postgresExecutor;
+    }
+
+    public Mono<QuotaLimit> getQuotaLimit(QuotaLimit.QuotaLimitKey quotaKey) {
+        return postgresExecutor.executeRow(dsl -> Mono.from(dsl.selectFrom(TABLE_NAME)
+                .where(QUOTA_COMPONENT.eq(quotaKey.getQuotaComponent().getValue()))
+                .and(QUOTA_SCOPE.eq(quotaKey.getQuotaScope().getValue()))
+                .and(IDENTIFIER.eq(quotaKey.getIdentifier()))
+                .and(QUOTA_TYPE.eq(quotaKey.getQuotaType().getValue()))))
+            .map(this::asQuotaLimit);
+    }
+
+    public Flux<QuotaLimit> getQuotaLimits(QuotaComponent quotaComponent, QuotaScope quotaScope, String identifier) {
+        return postgresExecutor.executeRows(dsl -> Flux.from(dsl.selectFrom(TABLE_NAME)
+                .where(QUOTA_COMPONENT.eq(quotaComponent.getValue()))
+                .and(QUOTA_SCOPE.eq(quotaScope.getValue()))
+                .and(IDENTIFIER.eq(identifier))))
+            .map(this::asQuotaLimit);
+    }
+
+    public Mono<Void> setQuotaLimit(QuotaLimit quotaLimit) {
+        return postgresExecutor.executeVoid(dslContext ->
+            Mono.from(dslContext.insertInto(TABLE_NAME, QUOTA_SCOPE, IDENTIFIER, QUOTA_COMPONENT, QUOTA_TYPE, QUOTA_LIMIT)
+                .values(quotaLimit.getQuotaScope().getValue(),
+                    quotaLimit.getIdentifier(),
+                    quotaLimit.getQuotaComponent().getValue(),
+                    quotaLimit.getQuotaType().getValue(),
+                    quotaLimit.getQuotaLimit().orElse(EMPTY_QUOTA_LIMIT))
+                .onConflictOnConstraint(PK_CONSTRAINT_NAME)
+                .doUpdate()
+                .set(QUOTA_LIMIT, quotaLimit.getQuotaLimit().orElse(EMPTY_QUOTA_LIMIT))));
+    }
+
+    public Mono<Void> deleteQuotaLimit(QuotaLimit.QuotaLimitKey quotaKey) {
+        return postgresExecutor.executeVoid(dsl -> Mono.from(dsl.deleteFrom(TABLE_NAME)
+            .where(QUOTA_COMPONENT.eq(quotaKey.getQuotaComponent().getValue()))
+            .and(QUOTA_SCOPE.eq(quotaKey.getQuotaScope().getValue()))
+            .and(IDENTIFIER.eq(quotaKey.getIdentifier()))
+            .and(QUOTA_TYPE.eq(quotaKey.getQuotaType().getValue()))));
+    }
+
+    private QuotaLimit asQuotaLimit(Record record) {
+        return QuotaLimit.builder().quotaComponent(QuotaComponent.of(record.get(QUOTA_COMPONENT)))
+            .quotaScope(QuotaScope.of(record.get(QUOTA_SCOPE)))
+            .identifier(record.get(IDENTIFIER))
+            .quotaType(QuotaType.of(record.get(QUOTA_TYPE)))
+            .quotaLimit(record.get(QUOTA_LIMIT))
+            .build();
+    }
+}
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/quota/PostgresQuotaModule.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/quota/PostgresQuotaModule.java
new file mode 100644
index 00000000000..b0e5c814c56
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/quota/PostgresQuotaModule.java
@@ -0,0 +1,84 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres.quota;
+
+import static org.jooq.impl.DSL.name;
+import static org.jooq.impl.SQLDataType.BIGINT;
+
+import org.apache.james.backends.postgres.PostgresModule;
+import org.apache.james.backends.postgres.PostgresTable;
+import org.jooq.Field;
+import org.jooq.Name;
+import org.jooq.Record;
+import org.jooq.Table;
+import org.jooq.impl.DSL;
+import org.jooq.impl.SQLDataType;
+
+public interface PostgresQuotaModule {
+    interface PostgresQuotaCurrentValueTable {
+        Table<Record> TABLE_NAME = DSL.table("quota_current_value");
+
+        Field<String> IDENTIFIER = DSL.field("identifier", SQLDataType.VARCHAR.notNull());
+        Field<String> COMPONENT = DSL.field("component", SQLDataType.VARCHAR.notNull());
+        Field<String> TYPE = DSL.field("type", SQLDataType.VARCHAR.notNull());
+        Field<Long> CURRENT_VALUE = DSL.field(name(TABLE_NAME.getName(), "current_value"), BIGINT.notNull());
+
+        Name PRIMARY_KEY_CONSTRAINT_NAME = DSL.name("quota_current_value_primary_key");
+
+        PostgresTable TABLE = PostgresTable.name(TABLE_NAME.getName())
+            .createTableStep(((dsl, tableName) -> dsl.createTableIfNotExists(tableName)
+                .column(IDENTIFIER)
+                .column(COMPONENT)
+                .column(TYPE)
+                .column(CURRENT_VALUE)
+                .constraint(DSL.constraint(PRIMARY_KEY_CONSTRAINT_NAME)
+                    .primaryKey(IDENTIFIER, COMPONENT, TYPE))))
+            .disableRowLevelSecurity()
+            .build();
+    }
+
+    interface PostgresQuotaLimitTable {
+        Table<Record> TABLE_NAME = DSL.table("quota_limit");
+
+        Field<String> QUOTA_SCOPE = DSL.field("quota_scope", SQLDataType.VARCHAR.notNull());
+        Field<String> IDENTIFIER = DSL.field("identifier", SQLDataType.VARCHAR.notNull());
+        Field<String> QUOTA_COMPONENT = DSL.field("quota_component", SQLDataType.VARCHAR.notNull());
+        Field<String> QUOTA_TYPE = DSL.field("quota_type", SQLDataType.VARCHAR.notNull());
+        Field<Long> QUOTA_LIMIT = DSL.field("quota_limit", SQLDataType.BIGINT); // nullable: NULL means unlimited
+
+        Name PK_CONSTRAINT_NAME = DSL.name("quota_limit_pkey");
+
+        PostgresTable TABLE = PostgresTable.name(TABLE_NAME.getName())
+            .createTableStep(((dsl, tableName) -> dsl.createTableIfNotExists(tableName)
+                .column(QUOTA_SCOPE)
+                .column(IDENTIFIER)
+                .column(QUOTA_COMPONENT)
+                .column(QUOTA_TYPE)
+                .column(QUOTA_LIMIT)
+                .constraint(DSL.constraint(PK_CONSTRAINT_NAME).primaryKey(QUOTA_SCOPE, IDENTIFIER, QUOTA_COMPONENT, QUOTA_TYPE))))
+            .disableRowLevelSecurity()
+            .build();
+    }
+
+    PostgresModule MODULE = PostgresModule.builder()
+        .addTable(PostgresQuotaCurrentValueTable.TABLE)
+        .addTable(PostgresQuotaLimitTable.TABLE)
+        .build();
+}
\ No newline at end of file
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/JamesPostgresConnectionFactory.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/JamesPostgresConnectionFactory.java
new file mode 100644
index 00000000000..e1b74faf817
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/JamesPostgresConnectionFactory.java
@@ -0,0 +1,38 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres.utils;
+
+import org.apache.james.core.Domain;
+
+import io.r2dbc.spi.Connection;
+import reactor.core.publisher.Mono;
+
+public interface JamesPostgresConnectionFactory {
+    String DOMAIN_ATTRIBUTE = "app.current_domain";
+    String BY_PASS_RLS_INJECT = "by_pass_rls";
+
+    Mono<Connection> getConnection(Domain domain);
+
+    Mono<Connection> getConnection();
+
+    Mono<Void> closeConnection(Connection connection);
+
+    Mono<Void> close();
+}
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/PoolBackedPostgresConnectionFactory.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/PoolBackedPostgresConnectionFactory.java
new file mode 100644
index 00000000000..465f93a1c38
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/PoolBackedPostgresConnectionFactory.java
@@ -0,0 +1,85 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres.utils;
+
+import org.apache.james.backends.postgres.RowLevelSecurity;
+import org.apache.james.core.Domain;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.r2dbc.pool.ConnectionPool;
+import io.r2dbc.pool.ConnectionPoolConfiguration;
+import io.r2dbc.spi.Connection;
+import io.r2dbc.spi.ConnectionFactory;
+import reactor.core.publisher.Mono;
+
+public class PoolBackedPostgresConnectionFactory implements JamesPostgresConnectionFactory {
+    private static final Logger LOGGER = LoggerFactory.getLogger(PoolBackedPostgresConnectionFactory.class);
+    private static final int DEFAULT_INITIAL_SIZE = 10;
+    private static final int DEFAULT_MAX_SIZE = 20;
+
+    private final RowLevelSecurity rowLevelSecurity;
+    private final ConnectionPool pool;
+
+    public PoolBackedPostgresConnectionFactory(RowLevelSecurity rowLevelSecurity, int initialSize, int maxSize, ConnectionFactory connectionFactory) {
+        this.rowLevelSecurity = rowLevelSecurity;
+        ConnectionPoolConfiguration configuration = ConnectionPoolConfiguration.builder(connectionFactory)
+            .initialSize(initialSize)
+            .maxSize(maxSize)
+            .build();
+        LOGGER.info("Creating new postgres ConnectionPool with initialSize {} and maxSize {}", initialSize, maxSize);
+        pool = new ConnectionPool(configuration);
+    }
+
+    public PoolBackedPostgresConnectionFactory(RowLevelSecurity rowLevelSecurity, ConnectionFactory connectionFactory) {
+        this(rowLevelSecurity, DEFAULT_INITIAL_SIZE, DEFAULT_MAX_SIZE, connectionFactory);
+    }
+
+    @Override
+    public Mono<Connection> getConnection(Domain domain) {
+        if (rowLevelSecurity.isRowLevelSecurityEnabled()) {
+            return pool.create().flatMap(connection -> setDomainAttributeForConnection(domain.asString(), connection));
+        } else {
+            return pool.create();
+        }
+    }
+
+    @Override
+    public Mono<Connection> getConnection() {
+        return pool.create();
+    }
+
+    @Override
+    public Mono<Void> closeConnection(Connection connection) {
+        return Mono.from(connection.close());
+    }
+
+    @Override
+    public Mono<Void> close() {
+        return pool.close();
+    }
+
+    private Mono<Connection> setDomainAttributeForConnection(String domainAttribute, Connection connection) {
+        return Mono.from(connection.createStatement("SET " + DOMAIN_ATTRIBUTE + " TO '" + domainAttribute.replace("'", "''") + "'") // bind parameters are unsupported for SET; single quotes are doubled to keep the literal safe
+            .execute())
+            .doOnError(e -> LOGGER.error("Error while setting domain attribute for domain {}", domainAttribute, e))
+            .then(Mono.just(connection));
+    }
+}
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/PostgresConnectionClosure.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/PostgresConnectionClosure.java
new file mode 100644
index 00000000000..0815177f2e7
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/PostgresConnectionClosure.java
@@ -0,0 +1,45 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres.utils;
+
+import jakarta.annotation.PreDestroy;
+import jakarta.inject.Inject;
+import jakarta.inject.Named;
+
+import org.apache.james.lifecycle.api.Disposable;
+
+public class PostgresConnectionClosure implements Disposable {
+    private final JamesPostgresConnectionFactory factory;
+    private final JamesPostgresConnectionFactory byPassRLSFactory;
+
+    @Inject
+    public PostgresConnectionClosure(JamesPostgresConnectionFactory factory,
+                                     @Named(JamesPostgresConnectionFactory.BY_PASS_RLS_INJECT) JamesPostgresConnectionFactory byPassRLSFactory) {
+        this.factory = factory;
+        this.byPassRLSFactory = byPassRLSFactory;
+    }
+
+    @PreDestroy
+    @Override
+    public void dispose() {
+        try { factory.close().block(); }
+        finally { byPassRLSFactory.close().block(); } // second factory is closed even when the first close fails
+    }
+}
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/PostgresExecutor.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/PostgresExecutor.java
new file mode 100644
index 00000000000..aaa3fadf614
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/PostgresExecutor.java
@@ -0,0 +1,222 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres.utils;
+
+import static org.jooq.impl.DSL.exists;
+import static org.jooq.impl.DSL.field;
+
+import java.time.Duration;
+import java.util.Optional;
+import java.util.concurrent.TimeoutException;
+import java.util.function.Function;
+import java.util.function.Predicate;
+
+import jakarta.inject.Inject;
+
+import org.apache.james.backends.postgres.PostgresConfiguration;
+import org.apache.james.core.Domain;
+import org.apache.james.metrics.api.MetricFactory;
+import org.jooq.DSLContext;
+import org.jooq.DeleteResultStep;
+import org.jooq.Record;
+import org.jooq.Record1;
+import org.jooq.SQLDialect;
+import org.jooq.SelectConditionStep;
+import org.jooq.conf.Settings;
+import org.jooq.conf.StatementType;
+import org.jooq.impl.DSL;
+import org.reactivestreams.Publisher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import io.r2dbc.spi.Connection;
+import io.r2dbc.spi.R2dbcBadGrammarException;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+import reactor.util.retry.Retry;
+
+public class PostgresExecutor {
+
+    public static final String DEFAULT_INJECT = "default";
+    public static final String BY_PASS_RLS_INJECT = "by_pass_rls";
+    public static final int MAX_RETRY_ATTEMPTS = 5;
+    public static final Duration MIN_BACKOFF = Duration.ofMillis(1);
+    private static final Logger LOGGER = LoggerFactory.getLogger(PostgresExecutor.class);
+    private static final String JOOQ_TIMEOUT_ERROR_LOG = "Time out executing Postgres query. May need to check either jOOQ reactive issue or Postgres DB performance.";
+
+    public static class Factory {
+
+        private final JamesPostgresConnectionFactory jamesPostgresConnectionFactory;
+        private final PostgresConfiguration postgresConfiguration;
+        private final MetricFactory metricFactory;
+
+        @Inject
+        public Factory(JamesPostgresConnectionFactory jamesPostgresConnectionFactory,
+                       PostgresConfiguration postgresConfiguration,
+                       MetricFactory metricFactory) {
+            this.jamesPostgresConnectionFactory = jamesPostgresConnectionFactory;
+            this.postgresConfiguration = postgresConfiguration;
+            this.metricFactory = metricFactory;
+        }
+
+        public PostgresExecutor create(Optional<Domain> domain) {
+            return new PostgresExecutor(domain, jamesPostgresConnectionFactory, postgresConfiguration, metricFactory);
+        }
+
+        public PostgresExecutor create() {
+            return create(Optional.empty());
+        }
+    }
+
+    private static final SQLDialect PGSQL_DIALECT = SQLDialect.POSTGRES;
+    private static final Settings SETTINGS = new Settings()
+        .withRenderFormatted(true)
+        .withStatementType(StatementType.PREPARED_STATEMENT);
+
+    private final Optional<Domain> domain;
+    private final JamesPostgresConnectionFactory jamesPostgresConnectionFactory;
+    private final PostgresConfiguration postgresConfiguration;
+    private final MetricFactory metricFactory;
+
+    private PostgresExecutor(Optional<Domain> domain,
+                             JamesPostgresConnectionFactory jamesPostgresConnectionFactory,
+                             PostgresConfiguration postgresConfiguration,
+                             MetricFactory metricFactory) {
+        this.domain = domain;
+        this.jamesPostgresConnectionFactory = jamesPostgresConnectionFactory;
+        this.postgresConfiguration = postgresConfiguration;
+        this.metricFactory = metricFactory;
+    }
+
+    public Mono<DSLContext> dslContext(Connection connection) {
+        return Mono.fromCallable(() -> DSL.using(connection, PGSQL_DIALECT, SETTINGS));
+    }
+
+    public Mono<Void> executeVoid(Function<DSLContext, Mono<?>> queryFunction) {
+        return Mono.from(metricFactory.decoratePublisherWithTimerMetric("postgres-execution",
+            Mono.usingWhen(getConnection(domain),
+                connection -> dslContext(connection)
+                    .flatMap(queryFunction)
+                    .timeout(postgresConfiguration.getJooqReactiveTimeout())
+                    .doOnError(TimeoutException.class, e -> LOGGER.error(JOOQ_TIMEOUT_ERROR_LOG, e))
+                    .retryWhen(Retry.backoff(MAX_RETRY_ATTEMPTS, MIN_BACKOFF)
+                        .filter(preparedStatementConflictException()))
+                    .then(),
+                jamesPostgresConnectionFactory::closeConnection)));
+    }
+
+    public Flux<Record> executeRows(Function<DSLContext, Flux<Record>> queryFunction) {
+        return Flux.from(metricFactory.decoratePublisherWithTimerMetric("postgres-execution",
+            Flux.usingWhen(getConnection(domain),
+                connection -> dslContext(connection)
+                    .flatMapMany(queryFunction)
+                    .timeout(postgresConfiguration.getJooqReactiveTimeout())
+                    .doOnError(TimeoutException.class, e -> LOGGER.error(JOOQ_TIMEOUT_ERROR_LOG, e))
+                    .collectList()
+                    .flatMapIterable(list -> list) // Mitigation fix for https://github.com/jOOQ/jOOQ/issues/16556
+                    .retryWhen(Retry.backoff(MAX_RETRY_ATTEMPTS, MIN_BACKOFF)
+                        .filter(preparedStatementConflictException())),
+                jamesPostgresConnectionFactory::closeConnection)));
+    }
+
+    public Flux<Record> executeDeleteAndReturnList(Function<DSLContext, DeleteResultStep<Record>> queryFunction) {
+        return Flux.from(metricFactory.decoratePublisherWithTimerMetric("postgres-execution",
+            Flux.usingWhen(getConnection(domain),
+                connection -> dslContext(connection)
+                    .flatMapMany(queryFunction)
+                    .timeout(postgresConfiguration.getJooqReactiveTimeout())
+                    .doOnError(TimeoutException.class, e -> LOGGER.error(JOOQ_TIMEOUT_ERROR_LOG, e))
+                    .collectList()
+                    .flatMapIterable(list -> list) // The convert Flux -> Mono -> Flux to avoid a hanging issue. See: https://github.com/jOOQ/jOOQ/issues/16055
+                    .retryWhen(Retry.backoff(MAX_RETRY_ATTEMPTS, MIN_BACKOFF)
+                        .filter(preparedStatementConflictException())),
+                jamesPostgresConnectionFactory::closeConnection)));
+    }
+
+    public Mono<Record> executeRow(Function<DSLContext, Publisher<Record>> queryFunction) {
+        return Mono.from(metricFactory.decoratePublisherWithTimerMetric("postgres-execution",
+            Mono.usingWhen(getConnection(domain),
+                connection -> dslContext(connection)
+                    .flatMap(queryFunction.andThen(Mono::from))
+                    .timeout(postgresConfiguration.getJooqReactiveTimeout())
+                    .doOnError(TimeoutException.class, e -> LOGGER.error(JOOQ_TIMEOUT_ERROR_LOG, e))
+                    .retryWhen(Retry.backoff(MAX_RETRY_ATTEMPTS, MIN_BACKOFF)
+                        .filter(preparedStatementConflictException())),
+                jamesPostgresConnectionFactory::closeConnection)));
+    }
+
+    public Mono<Optional<Record>> executeSingleRowOptional(Function<DSLContext, Publisher<Record>> queryFunction) {
+        return executeRow(queryFunction)
+            .map(Optional::ofNullable)
+            .switchIfEmpty(Mono.just(Optional.empty()));
+    }
+
+    public Mono<Integer> executeCount(Function<DSLContext, Mono<Record1<Integer>>> queryFunction) {
+        return Mono.from(metricFactory.decoratePublisherWithTimerMetric("postgres-execution",
+            Mono.usingWhen(getConnection(domain),
+                connection -> dslContext(connection)
+                    .flatMap(queryFunction)
+                    .timeout(postgresConfiguration.getJooqReactiveTimeout())
+                    .doOnError(TimeoutException.class, e -> LOGGER.error(JOOQ_TIMEOUT_ERROR_LOG, e))
+                    .retryWhen(Retry.backoff(MAX_RETRY_ATTEMPTS, MIN_BACKOFF)
+                        .filter(preparedStatementConflictException()))
+                    .map(Record1::value1),
+                jamesPostgresConnectionFactory::closeConnection)));
+    }
+
+    public Mono<Boolean> executeExists(Function<DSLContext, SelectConditionStep<?>> queryFunction) {
+        return executeRow(dslContext -> Mono.from(dslContext.select(field(exists(queryFunction.apply(dslContext))))))
+            .map(record -> record.get(0, Boolean.class));
+    }
+
+    public Mono<Integer> executeReturnAffectedRowsCount(Function<DSLContext, Mono<Integer>> queryFunction) {
+        return Mono.from(metricFactory.decoratePublisherWithTimerMetric("postgres-execution",
+            Mono.usingWhen(getConnection(domain),
+                connection -> dslContext(connection)
+                    .flatMap(queryFunction)
+                    .timeout(postgresConfiguration.getJooqReactiveTimeout())
+                    .doOnError(TimeoutException.class, e -> LOGGER.error(JOOQ_TIMEOUT_ERROR_LOG, e))
+                    .retryWhen(Retry.backoff(MAX_RETRY_ATTEMPTS, MIN_BACKOFF)
+                        .filter(preparedStatementConflictException())),
+                jamesPostgresConnectionFactory::closeConnection)));
+    }
+
+    public JamesPostgresConnectionFactory connectionFactory() {
+        return jamesPostgresConnectionFactory;
+    }
+
+    @VisibleForTesting
+    public void dispose() {
+        jamesPostgresConnectionFactory.close().block();
+    }
+
+    private Predicate<Throwable> preparedStatementConflictException() {
+        return throwable -> throwable.getCause() instanceof R2dbcBadGrammarException
+            && throwable.getMessage().contains("prepared statement")
+            && throwable.getMessage().contains("already exists");
+    }
+
+    private Mono<Connection> getConnection(Optional<Domain> maybeDomain) {
+        return maybeDomain.map(jamesPostgresConnectionFactory::getConnection)
+            .orElseGet(jamesPostgresConnectionFactory::getConnection);
+    }
+}
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/PostgresHealthCheck.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/PostgresHealthCheck.java
new file mode 100644
index 00000000000..2774c3bc79d
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/PostgresHealthCheck.java
@@ -0,0 +1,55 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres.utils;
+
+import java.time.Duration;
+
+import jakarta.inject.Inject;
+
+import org.apache.james.core.healthcheck.ComponentName;
+import org.apache.james.core.healthcheck.HealthCheck;
+import org.apache.james.core.healthcheck.Result;
+import org.jooq.impl.DSL;
+import org.reactivestreams.Publisher;
+
+import reactor.core.publisher.Mono;
+
+/**
+ * Health check probing Postgres connectivity by running a trivial {@code SELECT now()}
+ * through the {@link PostgresExecutor}. Reports healthy when the query answers within
+ * {@link #CHECK_TIMEOUT}, unhealthy otherwise (never propagates the error).
+ */
+public class PostgresHealthCheck implements HealthCheck {
+ public static final ComponentName COMPONENT_NAME = new ComponentName("Postgres");
+
+ // Upper bound for the probe query; a slower answer marks the component unhealthy
+ // instead of letting the health endpoint hang.
+ private static final Duration CHECK_TIMEOUT = Duration.ofSeconds(5);
+
+ private final PostgresExecutor postgresExecutor;
+
+ @Inject
+ public PostgresHealthCheck(PostgresExecutor postgresExecutor) {
+ this.postgresExecutor = postgresExecutor;
+ }
+
+ @Override
+ public ComponentName componentName() {
+ return COMPONENT_NAME;
+ }
+
+ @Override
+ public Publisher check() {
+ return postgresExecutor.executeRow(context -> Mono.from(context.select(DSL.now())))
+ .timeout(CHECK_TIMEOUT)
+ .map(any -> Result.healthy(COMPONENT_NAME))
+ .onErrorResume(e -> Mono.just(Result.unhealthy(COMPONENT_NAME, "Failed to execute request against postgres", e)));
+ }
+}
diff --git a/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/PostgresUtils.java b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/PostgresUtils.java
new file mode 100644
index 00000000000..9f8b075c14a
--- /dev/null
+++ b/backends-common/postgres/src/main/java/org/apache/james/backends/postgres/utils/PostgresUtils.java
@@ -0,0 +1,31 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres.utils;
+
+import java.util.function.Predicate;
+
+import org.jooq.exception.DataAccessException;
+
+public class PostgresUtils {
+ private static final String UNIQUE_CONSTRAINT_VIOLATION_MESSAGE = "duplicate key value violates unique constraint";
+
+ public static final Predicate UNIQUE_CONSTRAINT_VIOLATION_PREDICATE =
+ throwable -> throwable instanceof DataAccessException && throwable.getMessage().contains(UNIQUE_CONSTRAINT_VIOLATION_MESSAGE);
+}
diff --git a/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/DockerPostgresSingleton.java b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/DockerPostgresSingleton.java
new file mode 100644
index 00000000000..d51fa296752
--- /dev/null
+++ b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/DockerPostgresSingleton.java
@@ -0,0 +1,39 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testcontainers.containers.PostgreSQLContainer;
+import org.testcontainers.containers.output.OutputFrame;
+
+/**
+ * JVM-wide shared Postgres test container. Started once in the static initializer and
+ * reused by every test referencing {@link #SINGLETON}; Testcontainers reaps it when the
+ * JVM exits. Holder class — not meant to be instantiated.
+ */
+public class DockerPostgresSingleton {
+ private static final Logger LOGGER = LoggerFactory.getLogger(DockerPostgresSingleton.class);
+
+ public static final PostgreSQLContainer> SINGLETON = PostgresFixture.PG_CONTAINER.get()
+ .withLogConsumer(DockerPostgresSingleton::displayDockerLog);
+
+ static {
+ SINGLETON.start();
+ }
+
+ // Forward container stdout/stderr into the test logs to ease debugging of CI failures.
+ private static void displayDockerLog(OutputFrame outputFrame) {
+ LOGGER.info(outputFrame.getUtf8String().trim());
+ }
+
+ private DockerPostgresSingleton() {
+ }
+}
diff --git a/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/JamesPostgresConnectionFactoryTest.java b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/JamesPostgresConnectionFactoryTest.java
new file mode 100644
index 00000000000..6d27f26ca9d
--- /dev/null
+++ b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/JamesPostgresConnectionFactoryTest.java
@@ -0,0 +1,78 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import org.apache.james.backends.postgres.utils.JamesPostgresConnectionFactory;
+import org.apache.james.core.Domain;
+import org.junit.jupiter.api.Test;
+
+import com.google.common.collect.ImmutableList;
+
+import io.r2dbc.spi.Connection;
+import reactor.core.publisher.Flux;
+
+// Contract test for JamesPostgresConnectionFactory implementations: subclasses supply the
+// concrete factory and inherit these checks (plain connection, domain-scoped connection,
+// and the session attribute carrying the current domain for row-level security).
+// NOTE(review): connections obtained here are never closed — presumably acceptable because
+// the factory/pool is torn down per test class; confirm before reusing this pattern.
+public abstract class JamesPostgresConnectionFactoryTest {
+
+ abstract JamesPostgresConnectionFactory jamesPostgresConnectionFactory();
+
+ @Test
+ void getConnectionShouldWork() {
+ Connection connection = jamesPostgresConnectionFactory().getConnection().block();
+ String actual = Flux.from(connection.createStatement("SELECT 1")
+ .execute())
+ .flatMap(result -> result.map((row, rowMetadata) -> row.get(0, String.class)))
+ .collect(ImmutableList.toImmutableList())
+ .block().get(0);
+
+ assertThat(actual).isEqualTo("1");
+ }
+
+ @Test
+ void getConnectionWithDomainShouldWork() {
+ Connection connection = jamesPostgresConnectionFactory().getConnection(Domain.of("james")).block();
+ String actual = Flux.from(connection.createStatement("SELECT 1")
+ .execute())
+ .flatMap(result -> result.map((row, rowMetadata) -> row.get(0, String.class)))
+ .collect(ImmutableList.toImmutableList())
+ .block().get(0);
+
+ assertThat(actual).isEqualTo("1");
+ }
+
+ @Test
+ void getConnectionShouldSetCurrentDomainAttribute() {
+ Domain domain = Domain.of("james");
+ Connection connection = jamesPostgresConnectionFactory().getConnection(domain).block();
+ String actual = getDomainAttributeValue(connection);
+
+ assertThat(actual).isEqualTo(domain.asString());
+ }
+
+ // Reads the Postgres session variable (via SHOW) that the factory is expected to set
+ // with the current domain.
+ String getDomainAttributeValue(Connection connection) {
+ return Flux.from(connection.createStatement("show " + JamesPostgresConnectionFactory.DOMAIN_ATTRIBUTE)
+ .execute())
+ .flatMap(result -> result.map((row, rowMetadata) -> row.get(0, String.class)))
+ .collect(ImmutableList.toImmutableList())
+ .block().get(0);
+ }
+
+}
diff --git a/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PoolBackedPostgresConnectionFactoryTest.java b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PoolBackedPostgresConnectionFactoryTest.java
new file mode 100644
index 00000000000..4e4cb45b7f0
--- /dev/null
+++ b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PoolBackedPostgresConnectionFactoryTest.java
@@ -0,0 +1,34 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+import org.apache.james.backends.postgres.utils.JamesPostgresConnectionFactory;
+import org.apache.james.backends.postgres.utils.PoolBackedPostgresConnectionFactory;
+import org.junit.jupiter.api.extension.RegisterExtension;
+
+// Runs the shared JamesPostgresConnectionFactoryTest contract against the pool-backed
+// implementation, with row level security enabled, on a throwaway Postgres container.
+public class PoolBackedPostgresConnectionFactoryTest extends JamesPostgresConnectionFactoryTest {
+ @RegisterExtension
+ static PostgresExtension postgresExtension = PostgresExtension.empty();
+
+ @Override
+ JamesPostgresConnectionFactory jamesPostgresConnectionFactory() {
+ return new PoolBackedPostgresConnectionFactory(RowLevelSecurity.ENABLED, postgresExtension.getConnectionFactory());
+ }
+}
diff --git a/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresConfigurationTest.java b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresConfigurationTest.java
new file mode 100644
index 00000000000..08d76a23569
--- /dev/null
+++ b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresConfigurationTest.java
@@ -0,0 +1,125 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import org.junit.jupiter.api.Test;
+
+import io.r2dbc.postgresql.client.SSLMode;
+
+// Unit tests for PostgresConfiguration's builder: explicit values are propagated, defaults
+// are applied, and missing mandatory settings fail fast with a descriptive message.
+class PostgresConfigurationTest {
+
+ @Test
+ void shouldReturnCorrespondingProperties() {
+ PostgresConfiguration configuration = PostgresConfiguration.builder()
+ .host("1.1.1.1")
+ .port(1111)
+ .databaseName("db")
+ .databaseSchema("sc")
+ .username("james")
+ .password("1")
+ .byPassRLSUser("bypassrlsjames")
+ .byPassRLSPassword("2")
+ .rowLevelSecurityEnabled()
+ .sslMode("require")
+ .build();
+
+ assertThat(configuration.getHost()).isEqualTo("1.1.1.1");
+ assertThat(configuration.getPort()).isEqualTo(1111);
+ assertThat(configuration.getDatabaseName()).isEqualTo("db");
+ assertThat(configuration.getDatabaseSchema()).isEqualTo("sc");
+ assertThat(configuration.getDefaultCredential().getUsername()).isEqualTo("james");
+ assertThat(configuration.getDefaultCredential().getPassword()).isEqualTo("1");
+ assertThat(configuration.getByPassRLSCredential().getUsername()).isEqualTo("bypassrlsjames");
+ assertThat(configuration.getByPassRLSCredential().getPassword()).isEqualTo("2");
+ assertThat(configuration.getRowLevelSecurity()).isEqualTo(RowLevelSecurity.ENABLED);
+ assertThat(configuration.getSslMode()).isEqualTo(SSLMode.REQUIRE);
+ }
+
+ // When no bypass-RLS credential is given, the default credential doubles as the bypass one.
+ @Test
+ void shouldUseDefaultValues() {
+ PostgresConfiguration configuration = PostgresConfiguration.builder()
+ .username("james")
+ .password("1")
+ .build();
+
+ assertThat(configuration.getHost()).isEqualTo(PostgresConfiguration.HOST_DEFAULT_VALUE);
+ assertThat(configuration.getPort()).isEqualTo(PostgresConfiguration.PORT_DEFAULT_VALUE);
+ assertThat(configuration.getDatabaseName()).isEqualTo(PostgresConfiguration.DATABASE_NAME_DEFAULT_VALUE);
+ assertThat(configuration.getDatabaseSchema()).isEqualTo(PostgresConfiguration.DATABASE_SCHEMA_DEFAULT_VALUE);
+ assertThat(configuration.getByPassRLSCredential().getUsername()).isEqualTo("james");
+ assertThat(configuration.getByPassRLSCredential().getPassword()).isEqualTo("1");
+ assertThat(configuration.getRowLevelSecurity()).isEqualTo(RowLevelSecurity.DISABLED);
+ assertThat(configuration.getSslMode()).isEqualTo(SSLMode.ALLOW);
+ }
+
+ @Test
+ void shouldThrowWhenMissingUsername() {
+ assertThatThrownBy(() -> PostgresConfiguration.builder()
+ .build())
+ .isInstanceOf(IllegalArgumentException.class)
+ .hasMessage("You need to specify username");
+ }
+
+ @Test
+ void shouldThrowWhenMissingPassword() {
+ assertThatThrownBy(() -> PostgresConfiguration.builder()
+ .username("james")
+ .build())
+ .isInstanceOf(IllegalArgumentException.class)
+ .hasMessage("You need to specify password");
+ }
+
+ // Bypass-RLS credentials become mandatory once row level security is enabled.
+ @Test
+ void shouldThrowWhenMissingByPassRLSUserAndRLSIsEnabled() {
+ assertThatThrownBy(() -> PostgresConfiguration.builder()
+ .username("james")
+ .password("1")
+ .rowLevelSecurityEnabled()
+ .build())
+ .isInstanceOf(IllegalArgumentException.class)
+ .hasMessage("You need to specify byPassRLSUser");
+ }
+
+ @Test
+ void shouldThrowWhenMissingByPassRLSPasswordAndRLSIsEnabled() {
+ assertThatThrownBy(() -> PostgresConfiguration.builder()
+ .username("james")
+ .password("1")
+ .byPassRLSUser("bypassrlsjames")
+ .rowLevelSecurityEnabled()
+ .build())
+ .isInstanceOf(IllegalArgumentException.class)
+ .hasMessage("You need to specify byPassRLSPassword");
+ }
+
+ @Test
+ void shouldThrowWhenInvalidSslMode() {
+ assertThatThrownBy(() -> PostgresConfiguration.builder()
+ .username("james")
+ .password("1")
+ .sslMode("invalid")
+ .build())
+ .isInstanceOf(IllegalArgumentException.class)
+ .hasMessage("Invalid ssl mode value: invalid");
+ }
+}
diff --git a/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresExecutorThreadSafetyTest.java b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresExecutorThreadSafetyTest.java
new file mode 100644
index 00000000000..da1ada6db15
--- /dev/null
+++ b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresExecutorThreadSafetyTest.java
@@ -0,0 +1,202 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.time.Duration;
+import java.util.List;
+import java.util.Set;
+import java.util.Vector;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Function;
+import java.util.stream.Stream;
+
+import org.apache.james.backends.postgres.utils.PostgresExecutor;
+import org.apache.james.util.concurrency.ConcurrentTestRunner;
+import org.jooq.Record;
+import org.jooq.impl.DSL;
+import org.jooq.impl.SQLDataType;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
+
+import com.google.common.collect.ImmutableSet;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+// Exercises PostgresExecutor from many concurrent threads against a real container:
+// concurrent selects, concurrent inserts, duplicate-key races (exactly one winner per key),
+// and a mixed select/insert workload. Each test shares one executor across all threads.
+class PostgresExecutorThreadSafetyTest {
+ static final int NUMBER_OF_THREAD = 100;
+
+ @RegisterExtension
+ static PostgresExtension postgresExtension = PostgresExtension.empty();
+
+ private static PostgresExecutor postgresExecutor;
+
+ @BeforeAll
+ static void beforeAll() {
+ postgresExecutor = postgresExtension.getDefaultPostgresExecutor();
+ }
+
+ // Creates the scratch "person" table; "name" is UNIQUE so duplicate inserts conflict.
+ @BeforeEach
+ void beforeEach() {
+ postgresExecutor.executeVoid(dslContext -> Mono.from(dslContext.createTableIfNotExists("person")
+ .column("id", SQLDataType.INTEGER.identity(true))
+ .column("name", SQLDataType.VARCHAR(50).nullable(false))
+ .constraints(DSL.constraint().primaryKey("id"))
+ .unique("name")))
+ .block();
+ }
+
+ @AfterEach
+ void afterEach() {
+ postgresExecutor.executeVoid(dslContext -> Mono.from(dslContext.dropTableIfExists("person")))
+ .block();
+ }
+
+ @Test
+ void postgresExecutorShouldWorkWellWhenItIsUsedByMultipleThreadsAndAllQueriesAreSelect() throws Exception {
+ provisionData(NUMBER_OF_THREAD);
+
+ // Vector: results are collected concurrently from NUMBER_OF_THREAD threads.
+ List actual = new Vector<>();
+ ConcurrentTestRunner.builder()
+ .reactorOperation((threadNumber, step) -> getData(threadNumber)
+ .doOnNext(actual::add)
+ .then())
+ .threadCount(NUMBER_OF_THREAD)
+ .operationCount(1)
+ .runSuccessfullyWithin(Duration.ofMinutes(1));
+
+ Set expected = Stream.iterate(0, i -> i + 1).limit(NUMBER_OF_THREAD).map(i -> i + "|Peter" + i).collect(ImmutableSet.toImmutableSet());
+
+ assertThat(actual).containsExactlyInAnyOrderElementsOf(expected);
+ }
+
+ @Test
+ void postgresExecutorShouldWorkWellWhenItIsUsedByMultipleThreadsAndAllQueriesAreInsert() throws Exception {
+ ConcurrentTestRunner.builder()
+ .reactorOperation((threadNumber, step) -> createData(threadNumber))
+ .threadCount(NUMBER_OF_THREAD)
+ .operationCount(1)
+ .runSuccessfullyWithin(Duration.ofMinutes(1));
+
+ List actual = getData(0, NUMBER_OF_THREAD);
+ Set expected = Stream.iterate(0, i -> i + 1).limit(NUMBER_OF_THREAD).map(i -> i + "|Peter" + i).collect(ImmutableSet.toImmutableSet());
+
+ assertThat(actual).containsExactlyInAnyOrderElementsOf(expected);
+ }
+
+ // 100 threads race over only 10 distinct keys (threadNumber % 10): exactly one insert
+ // per key should win, the 90 losers should fail on the unique constraint.
+ @Test
+ void postgresExecutorShouldWorkWellWhenItIsUsedByMultipleThreadsAndInsertQueriesAreDuplicated() throws Exception {
+ AtomicInteger numberOfSuccess = new AtomicInteger(0);
+ AtomicInteger numberOfFail = new AtomicInteger(0);
+ ConcurrentTestRunner.builder()
+ .reactorOperation((threadNumber, step) -> createData(threadNumber % 10)
+ .then(Mono.fromCallable(numberOfSuccess::incrementAndGet))
+ .then()
+ .onErrorResume(throwable -> {
+ if (throwable.getMessage().contains("duplicate key value violates unique constraint")) {
+ numberOfFail.incrementAndGet();
+ }
+ return Mono.empty();
+ }))
+ .threadCount(100)
+ .operationCount(1)
+ .runSuccessfullyWithin(Duration.ofMinutes(1));
+
+ List actual = getData(0, 100);
+ Set expected = Stream.iterate(0, i -> i + 1).limit(10).map(i -> i + "|Peter" + i).collect(ImmutableSet.toImmutableSet());
+
+ assertThat(actual).containsExactlyInAnyOrderElementsOf(expected);
+ assertThat(numberOfSuccess.get()).isEqualTo(10);
+ assertThat(numberOfFail.get()).isEqualTo(90);
+ }
+
+ // First half of the threads read pre-provisioned rows 0..49 while the second half
+ // inserts rows 50..99 — readers and writers share the same executor.
+ @Test
+ void postgresExecutorShouldWorkWellWhenItIsUsedByMultipleThreadsAndQueriesIncludeBothSelectAndInsert() throws Exception {
+ provisionData(50);
+
+ List actualSelect = new Vector<>();
+ ConcurrentTestRunner.builder()
+ .reactorOperation((threadNumber, step) -> {
+ if (threadNumber < 50) {
+ return getData(threadNumber)
+ .doOnNext(actualSelect::add)
+ .then();
+ } else {
+ return createData(threadNumber);
+ }
+ })
+ .threadCount(NUMBER_OF_THREAD)
+ .operationCount(1)
+ .runSuccessfullyWithin(Duration.ofMinutes(1));
+
+ List actualInsert = getData(50, 100);
+
+ Set expectedSelect = Stream.iterate(0, i -> i + 1).limit(50).map(i -> i + "|Peter" + i).collect(ImmutableSet.toImmutableSet());
+ Set expectedInsert = Stream.iterate(50, i -> i + 1).limit(50).map(i -> i + "|Peter" + i).collect(ImmutableSet.toImmutableSet());
+
+ assertThat(actualSelect).containsExactlyInAnyOrderElementsOf(expectedSelect);
+ assertThat(actualInsert).containsExactlyInAnyOrderElementsOf(expectedInsert);
+ }
+
+ // Streams the single "id|name" row whose id equals threadNumber.
+ public Flux getData(int threadNumber) {
+ return postgresExecutor.executeRows(dslContext -> Flux.from(dslContext
+ .select(DSL.field("id"), DSL.field("name"))
+ .from(DSL.table("person"))
+ .where(DSL.field("id").eq(threadNumber))))
+ .map(recordToString());
+ }
+
+ public Mono createData(int threadNumber) {
+ return postgresExecutor.executeVoid(dslContext -> Mono.from(dslContext
+ .insertInto(DSL.table("person"), DSL.field("id"), DSL.field("name"))
+ .values(threadNumber, "Peter" + threadNumber)));
+ }
+
+ // Blocking range read [lowerBound, upperBound), formatted as "id|name".
+ private List getData(int lowerBound, int upperBound) {
+ return postgresExecutor.executeRows(dslContext -> Flux.from(dslContext
+ .select(DSL.field("id"), DSL.field("name"))
+ .from(DSL.table("person"))
+ .where(DSL.field("id").greaterOrEqual(lowerBound).and(DSL.field("id").lessThan(upperBound)))))
+ .map(recordToString())
+ .collectList()
+ .block();
+ }
+
+ private void provisionData(int upperBound) {
+ Flux.range(0, upperBound)
+ .flatMap(i -> insertPerson(i, "Peter" + i))
+ .then()
+ .block();
+ }
+
+ private Mono insertPerson(int id, String name) {
+ return postgresExecutor.executeVoid(dslContext -> Mono.from(dslContext.insertInto(DSL.table("person"), DSL.field("id"), DSL.field("name"))
+ .values(id, name)));
+ }
+
+ private Function recordToString() {
+ return record -> record.get(DSL.field("id", Long.class)) + "|" + record.get(DSL.field("name", String.class));
+ }
+}
diff --git a/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresExtension.java b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresExtension.java
new file mode 100644
index 00000000000..dc304746f61
--- /dev/null
+++ b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresExtension.java
@@ -0,0 +1,292 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+import static org.apache.james.backends.postgres.PostgresFixture.Database.DEFAULT_DATABASE;
+import static org.apache.james.backends.postgres.PostgresFixture.Database.ROW_LEVEL_SECURITY_DATABASE;
+
+import java.io.IOException;
+import java.time.Duration;
+import java.util.List;
+import java.util.Optional;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import org.apache.james.GuiceModuleTestExtension;
+import org.apache.james.backends.postgres.utils.JamesPostgresConnectionFactory;
+import org.apache.james.backends.postgres.utils.PoolBackedPostgresConnectionFactory;
+import org.apache.james.backends.postgres.utils.PostgresExecutor;
+import org.apache.james.metrics.tests.RecordingMetricFactory;
+import org.junit.jupiter.api.extension.ExtensionContext;
+import org.testcontainers.containers.PostgreSQLContainer;
+
+import com.github.fge.lambdas.Throwing;
+import com.google.inject.Module;
+import com.google.inject.util.Modules;
+
+import io.r2dbc.postgresql.PostgresqlConnectionConfiguration;
+import io.r2dbc.postgresql.PostgresqlConnectionFactory;
+import io.r2dbc.spi.Connection;
+import io.r2dbc.spi.ConnectionFactory;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+// JUnit 5 extension provisioning a Postgres session for tests against the shared docker
+// container: optional row-level security (separate database/user), a default executor, a
+// bypass-RLS executor, table/index creation before each test and schema reset after.
+public class PostgresExtension implements GuiceModuleTestExtension {
+ // Connection pool bounds (min/max connections) used by the pool-backed factory.
+ public enum PoolSize {
+ SMALL(1, 2),
+ LARGE(10, 20);
+
+ private final int min;
+ private final int max;
+
+ PoolSize(int min, int max) {
+ this.min = min;
+ this.max = max;
+ }
+
+ public int getMin() {
+ return min;
+ }
+
+ public int getMax() {
+ return max;
+ }
+ }
+
+ public static PostgresExtension withRowLevelSecurity(PostgresModule module) {
+ return new PostgresExtension(module, RowLevelSecurity.ENABLED);
+ }
+
+ public static PostgresExtension withoutRowLevelSecurity(PostgresModule module) {
+ return withoutRowLevelSecurity(module, PoolSize.SMALL);
+ }
+
+ public static PostgresExtension withoutRowLevelSecurity(PostgresModule module, PoolSize poolSize) {
+ return new PostgresExtension(module, RowLevelSecurity.DISABLED, Optional.of(poolSize));
+ }
+
+ // No tables/indexes to provision; useful for tests that create their own schema.
+ public static PostgresExtension empty() {
+ return withoutRowLevelSecurity(PostgresModule.EMPTY_MODULE);
+ }
+
+ public static final PoolSize DEFAULT_POOL_SIZE = PoolSize.SMALL;
+ public static PostgreSQLContainer> PG_CONTAINER = DockerPostgresSingleton.SINGLETON;
+ private final PostgresModule postgresModule;
+ private final RowLevelSecurity rowLevelSecurity;
+ private final PostgresFixture.Database selectedDatabase;
+ private PoolSize poolSize;
+ private PostgresConfiguration postgresConfiguration;
+ private PostgresExecutor defaultPostgresExecutor;
+ private PostgresExecutor byPassRLSPostgresExecutor;
+ private PostgresqlConnectionFactory connectionFactory;
+ private Connection defaultConnection;
+ private PostgresExecutor.Factory executorFactory;
+ private PostgresTableManager postgresTableManager;
+
+ // Freezes the container process (docker pause) to simulate an unreachable database.
+ public void pause() {
+ PG_CONTAINER.getDockerClient().pauseContainerCmd(PG_CONTAINER.getContainerId())
+ .exec();
+ }
+
+ public void unpause() {
+ PG_CONTAINER.getDockerClient().unpauseContainerCmd(PG_CONTAINER.getContainerId())
+ .exec();
+ }
+
+ private PostgresExtension(PostgresModule postgresModule, RowLevelSecurity rowLevelSecurity) {
+ this(postgresModule, rowLevelSecurity, Optional.empty());
+ }
+
+ private PostgresExtension(PostgresModule postgresModule, RowLevelSecurity rowLevelSecurity, Optional maybePoolSize) {
+ this.postgresModule = postgresModule;
+ this.rowLevelSecurity = rowLevelSecurity;
+ // RLS tests run against a dedicated database owned by a non-superuser so policies apply.
+ if (rowLevelSecurity.isRowLevelSecurityEnabled()) {
+ this.selectedDatabase = PostgresFixture.Database.ROW_LEVEL_SECURITY_DATABASE;
+ } else {
+ this.selectedDatabase = DEFAULT_DATABASE;
+ }
+ this.poolSize = maybePoolSize.orElse(DEFAULT_POOL_SIZE);
+ }
+
+ @Override
+ public void beforeAll(ExtensionContext extensionContext) throws Exception {
+ if (!PG_CONTAINER.isRunning()) {
+ PG_CONTAINER.start();
+ }
+ querySettingRowLevelSecurityIfNeed();
+ querySettingExtension();
+ initPostgresSession();
+ }
+
+ // Provisions the RLS user/database/schema via psql inside the container. Statements are
+ // not idempotent ("create user"/"create database") but rely on the shared container being
+ // provisioned once; errors from re-runs are surfaced by execInContainer.
+ private void querySettingRowLevelSecurityIfNeed() {
+ if (rowLevelSecurity.isRowLevelSecurityEnabled()) {
+ Throwing.runnable(() -> {
+ PG_CONTAINER.execInContainer("psql", "-U", DEFAULT_DATABASE.dbUser(), "-c", "create user " + ROW_LEVEL_SECURITY_DATABASE.dbUser() + " WITH PASSWORD '" + ROW_LEVEL_SECURITY_DATABASE.dbPassword() + "';");
+ PG_CONTAINER.execInContainer("psql", "-U", DEFAULT_DATABASE.dbUser(), "-c", "create database " + ROW_LEVEL_SECURITY_DATABASE.dbName() + ";");
+ PG_CONTAINER.execInContainer("psql", "-U", DEFAULT_DATABASE.dbUser(), "-c", "grant all privileges on database " + ROW_LEVEL_SECURITY_DATABASE.dbName() + " to " + ROW_LEVEL_SECURITY_DATABASE.dbUser() + ";");
+ PG_CONTAINER.execInContainer("psql", "-U", ROW_LEVEL_SECURITY_DATABASE.dbUser(), "-d", ROW_LEVEL_SECURITY_DATABASE.dbName(), "-c", "create schema if not exists " + ROW_LEVEL_SECURITY_DATABASE.schema() + ";");
+ }).sneakyThrow().run();
+ }
+ }
+
+ // hstore is required by some James tables; installed into the selected schema.
+ private void querySettingExtension() throws IOException, InterruptedException {
+ PG_CONTAINER.execInContainer("psql", "-U", selectedDatabase.dbUser(), selectedDatabase.dbName(), "-c", String.format("CREATE EXTENSION IF NOT EXISTS hstore SCHEMA %s;", selectedDatabase.schema()));
+ }
+
+ // Builds configuration, connection factories, executors and the table manager for the
+ // selected database. Called from beforeAll and again after restartContainer.
+ private void initPostgresSession() {
+ postgresConfiguration = PostgresConfiguration.builder()
+ .databaseName(selectedDatabase.dbName())
+ .databaseSchema(selectedDatabase.schema())
+ .host(getHost())
+ .port(getMappedPort())
+ .username(selectedDatabase.dbUser())
+ .password(selectedDatabase.dbPassword())
+ .byPassRLSUser(DEFAULT_DATABASE.dbUser())
+ .byPassRLSPassword(DEFAULT_DATABASE.dbPassword())
+ .rowLevelSecurityEnabled(rowLevelSecurity.isRowLevelSecurityEnabled())
+ .jooqReactiveTimeout(Optional.of(Duration.ofSeconds(20L)))
+ .build();
+
+ Function postgresqlConnectionConfigurationFunction = credential ->
+ PostgresqlConnectionConfiguration.builder()
+ .host(postgresConfiguration.getHost())
+ .port(postgresConfiguration.getPort())
+ .database(postgresConfiguration.getDatabaseName())
+ .schema(postgresConfiguration.getDatabaseSchema())
+ .username(credential.getUsername())
+ .password(credential.getPassword())
+ .build();
+
+ RecordingMetricFactory metricFactory = new RecordingMetricFactory();
+
+ connectionFactory = new PostgresqlConnectionFactory(postgresqlConnectionConfigurationFunction.apply(postgresConfiguration.getDefaultCredential()));
+ defaultConnection = connectionFactory.create().block();
+ executorFactory = new PostgresExecutor.Factory(
+ getJamesPostgresConnectionFactory(rowLevelSecurity, connectionFactory),
+ postgresConfiguration,
+ metricFactory);
+
+ defaultPostgresExecutor = executorFactory.create();
+
+ // Second executor using the superuser credential, so tests can read across RLS policies.
+ PostgresqlConnectionFactory byPassRLSConnectionFactory = new PostgresqlConnectionFactory(postgresqlConnectionConfigurationFunction.apply(postgresConfiguration.getByPassRLSCredential()));
+
+ byPassRLSPostgresExecutor = new PostgresExecutor.Factory(
+ getJamesPostgresConnectionFactory(RowLevelSecurity.DISABLED, byPassRLSConnectionFactory),
+ postgresConfiguration,
+ metricFactory)
+ .create();
+
+ this.postgresTableManager = new PostgresTableManager(defaultPostgresExecutor, postgresModule, rowLevelSecurity);
+ }
+
+ @Override
+ public void afterAll(ExtensionContext extensionContext) {
+ disposePostgresSession();
+ }
+
+ private void disposePostgresSession() {
+ defaultPostgresExecutor.dispose();
+ byPassRLSPostgresExecutor.dispose();
+ // NOTE(review): fire-and-forget close — presumably deliberate to avoid blocking teardown,
+ // but the connection may outlive afterAll; consider .block() if leaks show up.
+ Mono.from(defaultConnection.close()).subscribe();
+ }
+
+ @Override
+ public void beforeEach(ExtensionContext extensionContext) {
+ initTablesAndIndexes();
+ }
+
+ @Override
+ public void afterEach(ExtensionContext extensionContext) {
+ resetSchema();
+ }
+
+ // Restarts the shared container and rebuilds the session. Affects every test using
+ // the singleton container, not just this extension instance.
+ public void restartContainer() {
+ PG_CONTAINER.stop();
+ PG_CONTAINER.start();
+ initPostgresSession();
+ }
+
+ @Override
+ public Module getModule() {
+ return Modules.combine(binder -> binder.bind(PostgresConfiguration.class)
+ .toInstance(postgresConfiguration));
+ }
+
+ public String getHost() {
+ return PG_CONTAINER.getHost();
+ }
+
+ public Integer getMappedPort() {
+ return PG_CONTAINER.getMappedPort(PostgresFixture.PORT);
+ }
+
+ public Mono getConnection() {
+ return Mono.just(defaultConnection);
+ }
+
+ public PostgresExecutor getDefaultPostgresExecutor() {
+ return defaultPostgresExecutor;
+ }
+
+ public PostgresExecutor getByPassRLSPostgresExecutor() {
+ return byPassRLSPostgresExecutor;
+ }
+
+ public ConnectionFactory getConnectionFactory() {
+ return connectionFactory;
+ }
+
+ public PostgresExecutor.Factory getExecutorFactory() {
+ return executorFactory;
+ }
+
+ public PostgresConfiguration getPostgresConfiguration() {
+ return postgresConfiguration;
+ }
+
+ private void initTablesAndIndexes() {
+ postgresTableManager.initializeTables().block();
+ postgresTableManager.initializeTableIndexes().block();
+ }
+
+ // Drops every table the manager knows about so each test starts from a clean schema.
+ private void resetSchema() {
+ List tables = postgresTableManager.listExistTables().block();
+ dropTables(tables);
+ }
+
+ private void dropTables(List tables) {
+ String tablesToDelete = tables.stream()
+ .map(tableName -> "\"" + tableName + "\"")
+ .collect(Collectors.joining(", "));
+
+ Flux.from(defaultConnection.createStatement(String.format("DROP table if exists %s cascade;", tablesToDelete))
+ .execute())
+ .then()
+ .block();
+ }
+
+ private JamesPostgresConnectionFactory getJamesPostgresConnectionFactory(RowLevelSecurity rowLevelSecurity, PostgresqlConnectionFactory connectionFactory) {
+ return new PoolBackedPostgresConnectionFactory(
+ rowLevelSecurity,
+ poolSize.getMin(),
+ poolSize.getMax(),
+ connectionFactory);
+ }
+}
diff --git a/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresExtensionTest.java b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresExtensionTest.java
new file mode 100644
index 00000000000..619899ed179
--- /dev/null
+++ b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresExtensionTest.java
@@ -0,0 +1,104 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.List;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.jooq.impl.DSL;
+import org.jooq.impl.SQLDataType;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+class PostgresExtensionTest {
+ static PostgresTable TABLE_1 = PostgresTable.name("table1")
+ .createTableStep((dslContext, tableName) -> dslContext.createTable(tableName)
+ .column("column1", SQLDataType.UUID.notNull())
+ .column("column2", SQLDataType.INTEGER)
+ .column("column3", SQLDataType.VARCHAR(255).notNull()))
+ .disableRowLevelSecurity()
+ .build();
+
+ static PostgresIndex INDEX_1 = PostgresIndex.name("index1")
+ .createIndexStep((dslContext, indexName) -> dslContext.createIndex(indexName)
+ .on(DSL.table("table1"), DSL.field("column1").asc()));
+
+ static PostgresTable TABLE_2 = PostgresTable.name("table2")
+ .createTableStep((dslContext, tableName) -> dslContext.createTable(tableName)
+ .column("column1", SQLDataType.INTEGER))
+ .disableRowLevelSecurity()
+ .build();
+
+ static PostgresIndex INDEX_2 = PostgresIndex.name("index2")
+ .createIndexStep((dslContext, indexName) -> dslContext.createIndex(indexName)
+ .on(DSL.table("table2"), DSL.field("column1").desc()));
+
+ static PostgresModule POSTGRES_MODULE = PostgresModule.builder()
+ .addTable(TABLE_1, TABLE_2)
+ .addIndex(INDEX_1, INDEX_2)
+ .build();
+
+ @RegisterExtension
+ static PostgresExtension postgresExtension = PostgresExtension.withoutRowLevelSecurity(POSTGRES_MODULE);
+
+ @Test
+ void postgresExtensionShouldProvisionTablesAndIndexes() {
+ assertThat(getColumnNameAndDataType("table1"))
+ .containsExactlyInAnyOrder(
+ Pair.of("column1", "uuid"),
+ Pair.of("column2", "integer"),
+ Pair.of("column3", "character varying"));
+
+ assertThat(getColumnNameAndDataType("table2"))
+ .containsExactlyInAnyOrder(Pair.of("column1", "integer"));
+
+ assertThat(listIndexToTableMappings())
+ .contains(
+ Pair.of("index1", "table1"),
+ Pair.of("index2", "table2"));
+ }
+
+ private List<Pair<String, String>> getColumnNameAndDataType(String tableName) {
+ return postgresExtension.getConnection()
+ .flatMapMany(connection -> Flux.from(Mono.from(connection.createStatement("SELECT table_name, column_name, data_type FROM information_schema.columns WHERE table_name = $1;")
+ .bind("$1", tableName)
+ .execute())
+ .flatMapMany(result -> result.map((row, rowMetadata) ->
+ Pair.of(row.get("column_name", String.class), row.get("data_type", String.class))))))
+ .collectList()
+ .block();
+ }
+
+ private List<Pair<String, String>> listIndexToTableMappings() {
+ return postgresExtension.getConnection()
+ .flatMapMany(connection -> Mono.from(connection.createStatement("SELECT indexname, tablename FROM pg_indexes;")
+ .execute())
+ .flatMapMany(result ->
+ result.map((row, rowMetadata) ->
+ Pair.of(row.get("indexname", String.class), row.get("tablename", String.class)))))
+ .collectList()
+ .block();
+ }
+}
diff --git a/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresFixture.java b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresFixture.java
new file mode 100644
index 00000000000..c0c28758e75
--- /dev/null
+++ b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresFixture.java
@@ -0,0 +1,100 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+import static java.util.Collections.singletonMap;
+import static org.apache.james.backends.postgres.PostgresFixture.Database.DEFAULT_DATABASE;
+import static org.testcontainers.containers.PostgreSQLContainer.POSTGRESQL_PORT;
+
+import java.util.UUID;
+import java.util.function.Supplier;
+
+import org.testcontainers.containers.PostgreSQLContainer;
+
+public interface PostgresFixture {
+
+ interface Database {
+
+ Database DEFAULT_DATABASE = new DefaultDatabase();
+ Database ROW_LEVEL_SECURITY_DATABASE = new RowLevelSecurityDatabase();
+
+ String dbUser();
+
+ String dbPassword();
+
+ String dbName();
+
+ String schema();
+
+
+ class DefaultDatabase implements Database {
+ @Override
+ public String dbUser() {
+ return "james";
+ }
+
+ @Override
+ public String dbPassword() {
+ return "secret1";
+ }
+
+ @Override
+ public String dbName() {
+ return "james";
+ }
+
+ @Override
+ public String schema() {
+ return "public";
+ }
+ }
+
+ class RowLevelSecurityDatabase implements Database {
+ @Override
+ public String dbUser() {
+ return "rlsuser";
+ }
+
+ @Override
+ public String dbPassword() {
+ return "secret1";
+ }
+
+ @Override
+ public String dbName() {
+ return "rlsdb";
+ }
+
+ @Override
+ public String schema() {
+ return "rlsschema";
+ }
+ }
+ }
+
+ String IMAGE = "postgres:16.3";
+ Integer PORT = POSTGRESQL_PORT;
+ Supplier<PostgreSQLContainer<?>> PG_CONTAINER = () -> new PostgreSQLContainer<>(IMAGE)
+ .withDatabaseName(DEFAULT_DATABASE.dbName())
+ .withUsername(DEFAULT_DATABASE.dbUser())
+ .withPassword(DEFAULT_DATABASE.dbPassword())
+ .withCreateContainerCmdModifier(cmd -> cmd.withName("james-postgres-test-" + UUID.randomUUID()))
+ .withTmpFs(singletonMap("/var/lib/postgresql/data", "rw"));
+}
diff --git a/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresTableManagerTest.java b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresTableManagerTest.java
new file mode 100644
index 00000000000..2980885fd8b
--- /dev/null
+++ b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/PostgresTableManagerTest.java
@@ -0,0 +1,492 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatCode;
+
+import java.util.List;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.jooq.impl.DSL;
+import org.jooq.impl.SQLDataType;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+class PostgresTableManagerTest {
+
+ @RegisterExtension
+ static PostgresExtension postgresExtension = PostgresExtension.withRowLevelSecurity(PostgresModule.EMPTY_MODULE);
+
+ Function<PostgresModule, PostgresTableManager> tableManagerFactory =
+ module -> new PostgresTableManager(postgresExtension.getDefaultPostgresExecutor(), module, RowLevelSecurity.ENABLED);
+
+ @Test
+ void initializeTableShouldSuccessWhenModuleHasSingleTable() {
+ String tableName = "tablename1";
+
+ PostgresTable table = PostgresTable.name(tableName)
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("colum1", SQLDataType.UUID.notNull())
+ .column("colum2", SQLDataType.INTEGER)
+ .column("colum3", SQLDataType.VARCHAR(255).notNull()))
+ .disableRowLevelSecurity()
+ .build();
+
+ PostgresModule module = PostgresModule.table(table);
+
+ PostgresTableManager testee = tableManagerFactory.apply(module);
+
+ testee.initializeTables()
+ .block();
+
+ assertThat(getColumnNameAndDataType(tableName))
+ .containsExactlyInAnyOrder(
+ Pair.of("colum1", "uuid"),
+ Pair.of("colum2", "integer"),
+ Pair.of("colum3", "character varying"));
+ }
+
+ @Test
+ void initializeTableShouldSuccessWhenModuleHasMultiTables() {
+ String tableName1 = "tablename1";
+
+ PostgresTable table1 = PostgresTable.name(tableName1)
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("columA", SQLDataType.UUID.notNull())).disableRowLevelSecurity()
+ .build();
+
+ String tableName2 = "tablename2";
+ PostgresTable table2 = PostgresTable.name(tableName2)
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("columB", SQLDataType.INTEGER)).disableRowLevelSecurity()
+ .build();
+
+ PostgresTableManager testee = tableManagerFactory.apply(PostgresModule.table(table1, table2));
+
+ testee.initializeTables()
+ .block();
+
+ assertThat(getColumnNameAndDataType(tableName1))
+ .containsExactlyInAnyOrder(
+ Pair.of("columA", "uuid"));
+ assertThat(getColumnNameAndDataType(tableName2))
+ .containsExactlyInAnyOrder(
+ Pair.of("columB", "integer"));
+ }
+
+ @Test
+ void initializeTableShouldNotThrowWhenTableExists() {
+ String tableName1 = "tablename1";
+
+ PostgresTable table1 = PostgresTable.name(tableName1)
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("columA", SQLDataType.UUID.notNull())).disableRowLevelSecurity()
+ .build();
+
+ PostgresTableManager testee = tableManagerFactory.apply(PostgresModule.table(table1));
+
+ testee.initializeTables()
+ .block();
+
+ assertThatCode(() -> testee.initializeTables().block())
+ .doesNotThrowAnyException();
+ }
+
+ @Test
+ void initializeTableShouldNotChangeTableStructureOfExistTable() {
+ String tableName1 = "tablename1";
+ PostgresTable table1 = PostgresTable.name(tableName1)
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("columA", SQLDataType.UUID.notNull())).disableRowLevelSecurity()
+ .build();
+
+ tableManagerFactory.apply(PostgresModule.table(table1))
+ .initializeTables()
+ .block();
+
+ PostgresTable table1Changed = PostgresTable.name(tableName1)
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("columB", SQLDataType.INTEGER)).disableRowLevelSecurity()
+ .build();
+
+ tableManagerFactory.apply(PostgresModule.table(table1Changed))
+ .initializeTables()
+ .block();
+
+ assertThat(getColumnNameAndDataType(tableName1))
+ .containsExactlyInAnyOrder(
+ Pair.of("columA", "uuid"));
+ }
+
+ @Test
+ void initializeIndexShouldSuccessWhenModuleHasSingleIndex() {
+ String tableName = "tb_test_1";
+
+ PostgresTable table = PostgresTable.name(tableName)
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("colum1", SQLDataType.UUID.notNull())
+ .column("colum2", SQLDataType.INTEGER)
+ .column("colum3", SQLDataType.VARCHAR(255).notNull()))
+ .disableRowLevelSecurity()
+ .build();
+
+ String indexName = "idx_test_1";
+ PostgresIndex index = PostgresIndex.name(indexName)
+ .createIndexStep((dsl, idn) -> dsl.createIndex(idn)
+ .on(DSL.table(tableName), DSL.field("colum1").asc()));
+
+ PostgresModule module = PostgresModule.builder()
+ .addTable(table)
+ .addIndex(index)
+ .build();
+
+ PostgresTableManager testee = tableManagerFactory.apply(module);
+
+ testee.initializeTables().block();
+
+ testee.initializeTableIndexes().block();
+
+ List<Pair<String, String>> listIndexes = listIndexToTableMappings();
+
+ assertThat(listIndexes)
+ .contains(Pair.of(indexName, tableName));
+ }
+
+ @Test
+ void initializeIndexShouldSuccessWhenModuleHasMultiIndexes() {
+ String tableName = "tb_test_1";
+
+ PostgresTable table = PostgresTable.name(tableName)
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("colum1", SQLDataType.UUID.notNull())
+ .column("colum2", SQLDataType.INTEGER)
+ .column("colum3", SQLDataType.VARCHAR(255).notNull()))
+ .disableRowLevelSecurity()
+ .build();
+
+ String indexName1 = "idx_test_1";
+ PostgresIndex index1 = PostgresIndex.name(indexName1)
+ .createIndexStep((dsl, idn) -> dsl.createIndex(idn)
+ .on(DSL.table(tableName), DSL.field("colum1").asc()));
+
+ String indexName2 = "idx_test_2";
+ PostgresIndex index2 = PostgresIndex.name(indexName2)
+ .createIndexStep((dsl, idn) -> dsl.createIndex(idn)
+ .on(DSL.table(tableName), DSL.field("colum2").desc()));
+
+ PostgresModule module = PostgresModule.builder()
+ .addTable(table)
+ .addIndex(index1, index2)
+ .build();
+
+ PostgresTableManager testee = tableManagerFactory.apply(module);
+
+ testee.initializeTables().block();
+
+ testee.initializeTableIndexes().block();
+
+ List<Pair<String, String>> listIndexes = listIndexToTableMappings();
+
+ assertThat(listIndexes)
+ .contains(Pair.of(indexName1, tableName), Pair.of(indexName2, tableName));
+ }
+
+ @Test
+ void initializeIndexShouldNotThrowWhenIndexExists() {
+ String tableName = "tb_test_1";
+
+ PostgresTable table = PostgresTable.name(tableName)
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("colum1", SQLDataType.UUID.notNull())
+ .column("colum2", SQLDataType.INTEGER)
+ .column("colum3", SQLDataType.VARCHAR(255).notNull()))
+ .disableRowLevelSecurity()
+ .build();
+
+ String indexName = "idx_test_1";
+ PostgresIndex index = PostgresIndex.name(indexName)
+ .createIndexStep((dsl, idn) -> dsl.createIndex(idn)
+ .on(DSL.table(tableName), DSL.field("colum1").asc()));
+
+ PostgresModule module = PostgresModule.builder()
+ .addTable(table)
+ .addIndex(index)
+ .build();
+
+ PostgresTableManager testee = tableManagerFactory.apply(module);
+
+ testee.initializeTables().block();
+
+ testee.initializeTableIndexes().block();
+
+ assertThatCode(() -> testee.initializeTableIndexes().block())
+ .doesNotThrowAnyException();
+ }
+
+ @Test
+ void truncateShouldEmptyTableData() {
+ // Given table tbn1
+ String tableName1 = "tbn1";
+ PostgresTable table1 = PostgresTable.name(tableName1)
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("column1", SQLDataType.INTEGER.notNull())).disableRowLevelSecurity()
+ .build();
+
+ PostgresTableManager testee = tableManagerFactory.apply(PostgresModule.table(table1));
+ testee.initializeTables()
+ .block();
+
+ // insert data
+ postgresExtension.getConnection()
+ .flatMapMany(connection -> Flux.range(0, 10)
+ .flatMap(i -> Mono.from(connection.createStatement("INSERT INTO " + tableName1 + " (column1) VALUES ($1);")
+ .bind("$1", i)
+ .execute())
+ .flatMap(result -> Mono.from(result.getRowsUpdated())))
+ .last())
+ .collectList()
+ .block();
+
+ Supplier<Long> getTotalRecordInDB = () -> postgresExtension.getConnection()
+ .flatMapMany(connection -> Mono.from(connection.createStatement("select count(*) FROM " + tableName1)
+ .execute())
+ .flatMapMany(result ->
+ result.map((row, rowMetadata) -> row.get("count", Long.class))))
+ .last()
+ .block();
+
+ assertThat(getTotalRecordInDB.get()).isEqualTo(10L);
+
+ // When truncate table
+ testee.truncate().block();
+
+ // Then table is empty
+ assertThat(getTotalRecordInDB.get()).isEqualTo(0L);
+ }
+
+ @Test
+ void createTableShouldCreateRlsColumnWhenEnableRLS() {
+ String tableName = "tbn1";
+
+ PostgresTable table = PostgresTable.name(tableName)
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("clm1", SQLDataType.UUID.notNull())
+ .column("clm2", SQLDataType.VARCHAR(255).notNull()))
+ .supportsRowLevelSecurity()
+ .build();
+
+ PostgresModule module = PostgresModule.table(table);
+
+ PostgresTableManager testee = tableManagerFactory.apply(module);
+
+ testee.initializeTables()
+ .block();
+
+ assertThat(getColumnNameAndDataType(tableName))
+ .containsExactlyInAnyOrder(
+ Pair.of("clm1", "uuid"),
+ Pair.of("clm2", "character varying"),
+ Pair.of("domain", "character varying"));
+
+ List<Pair<String, Boolean>> pgClassCheckResult = postgresExtension.getConnection()
+ .flatMapMany(connection -> Mono.from(connection.createStatement("select relname, relrowsecurity " +
+ "from pg_class " +
+ "where oid = 'tbn1'::regclass;;")
+ .execute())
+ .flatMapMany(result ->
+ result.map((row, rowMetadata) ->
+ Pair.of(row.get("relname", String.class),
+ row.get("relrowsecurity", Boolean.class)))))
+ .collectList()
+ .block();
+
+ assertThat(pgClassCheckResult)
+ .containsExactlyInAnyOrder(
+ Pair.of("tbn1", true));
+ }
+
+ @Test
+ void createTableShouldNotCreateRlsColumnWhenDisableRLS() {
+ String tableName = "tbn1";
+
+ PostgresTable table = PostgresTable.name(tableName)
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("clm1", SQLDataType.UUID.notNull())
+ .column("clm2", SQLDataType.VARCHAR(255).notNull()))
+ .supportsRowLevelSecurity()
+ .build();
+
+ PostgresModule module = PostgresModule.table(table);
+ PostgresTableManager testee = new PostgresTableManager(postgresExtension.getDefaultPostgresExecutor(), module, RowLevelSecurity.DISABLED);
+
+ testee.initializeTables()
+ .block();
+
+ Pair<String, String> rlsColumn = Pair.of("domain", "character varying");
+ assertThat(getColumnNameAndDataType(tableName))
+ .doesNotContain(rlsColumn);
+ }
+
+ @Test
+ void recreateRLSColumnWhenExistedShouldNotFail() {
+ String tableName = "tablename1";
+
+ PostgresTable rlsTable = PostgresTable.name(tableName)
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("colum1", SQLDataType.UUID.notNull()))
+ .supportsRowLevelSecurity()
+ .build();
+
+ PostgresModule module = PostgresModule.table(rlsTable);
+
+ PostgresTableManager testee = tableManagerFactory.apply(module);
+ testee.initializeTables().block();
+
+ assertThatCode(() -> testee.initializeTables().block())
+ .doesNotThrowAnyException();
+ }
+
+ @Test
+ void additionalAlterQueryToCreateConstraintShouldSucceed() {
+ String constraintName = "exclude_constraint";
+ PostgresTable table = PostgresTable.name("tbn1")
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("clm1", SQLDataType.UUID.notNull())
+ .column("clm2", SQLDataType.VARCHAR(255).notNull()))
+ .disableRowLevelSecurity()
+ .addAdditionalAlterQueries("ALTER TABLE tbn1 ADD CONSTRAINT " + constraintName + " EXCLUDE (clm2 WITH =)")
+ .build();
+ PostgresModule module = PostgresModule.table(table);
+ PostgresTableManager testee = new PostgresTableManager(postgresExtension.getDefaultPostgresExecutor(), module, RowLevelSecurity.DISABLED);
+
+ testee.initializeTables().block();
+
+ boolean constraintExists = postgresExtension.getConnection()
+ .flatMapMany(connection -> connection.createStatement("SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_constraint WHERE conname = $1) AS constraint_exists;")
+ .bind("$1", constraintName)
+ .execute())
+ .flatMap(result -> result.map((row, rowMetaData) -> row.get("constraint_exists", Boolean.class)))
+ .single()
+ .block();
+
+ assertThat(constraintExists).isTrue();
+ }
+
+ @Test
+ void additionalAlterQueryToCreateConstraintShouldSucceedWhenSupportCaseIsNonRLSAndRLSIsDisabled() {
+ String constraintName = "exclude_constraint";
+ PostgresTable table = PostgresTable.name("tbn1")
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("clm1", SQLDataType.UUID.notNull())
+ .column("clm2", SQLDataType.VARCHAR(255).notNull()))
+ .disableRowLevelSecurity()
+ .addAdditionalAlterQueries(new PostgresTable.NonRLSOnlyAdditionalAlterQuery("ALTER TABLE tbn1 ADD CONSTRAINT " + constraintName + " EXCLUDE (clm2 WITH =)"))
+ .build();
+ PostgresModule module = PostgresModule.table(table);
+ PostgresTableManager testee = new PostgresTableManager(postgresExtension.getDefaultPostgresExecutor(), module, RowLevelSecurity.DISABLED);
+
+ testee.initializeTables().block();
+
+ boolean constraintExists = postgresExtension.getConnection()
+ .flatMapMany(connection -> connection.createStatement("SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_constraint WHERE conname = $1) AS constraint_exists;")
+ .bind("$1", constraintName)
+ .execute())
+ .flatMap(result -> result.map((row, rowMetaData) -> row.get("constraint_exists", Boolean.class)))
+ .single()
+ .block();
+
+ assertThat(constraintExists).isTrue();
+ }
+
+ @Test
+ void additionalAlterQueryToCreateConstraintShouldNotBeExecutedWhenSupportCaseIsNonRLSAndRLSIsEnabled() {
+ String constraintName = "exclude_constraint";
+ PostgresTable table = PostgresTable.name("tbn1")
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("clm1", SQLDataType.UUID.notNull())
+ .column("clm2", SQLDataType.VARCHAR(255).notNull()))
+ .disableRowLevelSecurity()
+ .addAdditionalAlterQueries(new PostgresTable.NonRLSOnlyAdditionalAlterQuery("ALTER TABLE tbn1 ADD CONSTRAINT " + constraintName + " EXCLUDE (clm2 WITH =)"))
+ .build();
+ PostgresModule module = PostgresModule.table(table);
+ PostgresTableManager testee = new PostgresTableManager(postgresExtension.getDefaultPostgresExecutor(), module, RowLevelSecurity.ENABLED);
+
+ testee.initializeTables().block();
+
+ boolean constraintExists = postgresExtension.getConnection()
+ .flatMapMany(connection -> connection.createStatement("SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_constraint WHERE conname = $1) AS constraint_exists;")
+ .bind("$1", constraintName)
+ .execute())
+ .flatMap(result -> result.map((row, rowMetaData) -> row.get("constraint_exists", Boolean.class)))
+ .single()
+ .block();
+
+ assertThat(constraintExists).isFalse();
+ }
+
+ @Test
+ void additionalAlterQueryToReCreateConstraintShouldNotThrow() {
+ String constraintName = "exclude_constraint";
+ PostgresTable table = PostgresTable.name("tbn1")
+ .createTableStep((dsl, tbn) -> dsl.createTable(tbn)
+ .column("clm1", SQLDataType.UUID.notNull())
+ .column("clm2", SQLDataType.VARCHAR(255).notNull()))
+ .disableRowLevelSecurity()
+ .addAdditionalAlterQueries("ALTER TABLE tbn1 ADD CONSTRAINT " + constraintName + " EXCLUDE (clm2 WITH =)")
+ .build();
+ PostgresModule module = PostgresModule.table(table);
+ PostgresTableManager testee = new PostgresTableManager(postgresExtension.getDefaultPostgresExecutor(), module, RowLevelSecurity.DISABLED);
+
+ testee.initializeTables().block();
+
+ assertThatCode(() -> testee.initializeTables().block())
+ .doesNotThrowAnyException();
+ }
+
+ private List<Pair<String, String>> getColumnNameAndDataType(String tableName) {
+ return postgresExtension.getConnection()
+ .flatMapMany(connection -> Flux.from(Mono.from(connection.createStatement("SELECT table_name, column_name, data_type FROM information_schema.columns WHERE table_name = $1;")
+ .bind("$1", tableName)
+ .execute())
+ .flatMapMany(result -> result.map((row, rowMetadata) ->
+ Pair.of(row.get("column_name", String.class), row.get("data_type", String.class))))))
+ .collectList()
+ .block();
+ }
+
+ // return list of Pair<indexName, tableName>
+ private List<Pair<String, String>> listIndexToTableMappings() {
+ return postgresExtension.getConnection()
+ .flatMapMany(connection -> Mono.from(connection.createStatement("SELECT indexname, tablename FROM pg_indexes;")
+ .execute())
+ .flatMapMany(result ->
+ result.map((row, rowMetadata) ->
+ Pair.of(row.get("indexname", String.class), row.get("tablename", String.class)))))
+ .collectList()
+ .block();
+ }
+
+}
diff --git a/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/quota/PostgresQuotaCurrentValueDAOTest.java b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/quota/PostgresQuotaCurrentValueDAOTest.java
new file mode 100644
index 00000000000..0fc87c8d579
--- /dev/null
+++ b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/quota/PostgresQuotaCurrentValueDAOTest.java
@@ -0,0 +1,147 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres.quota;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.List;
+
+import org.apache.james.backends.postgres.PostgresExtension;
+import org.apache.james.core.quota.QuotaComponent;
+import org.apache.james.core.quota.QuotaCurrentValue;
+import org.apache.james.core.quota.QuotaType;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
+
+class PostgresQuotaCurrentValueDAOTest {
+ @RegisterExtension
+ static PostgresExtension postgresExtension = PostgresExtension.withoutRowLevelSecurity(PostgresQuotaModule.MODULE);
+
+ private static final QuotaCurrentValue.Key QUOTA_KEY = QuotaCurrentValue.Key.of(QuotaComponent.MAILBOX, "james@abc.com", QuotaType.SIZE);
+
+ private PostgresQuotaCurrentValueDAO postgresQuotaCurrentValueDAO;
+
+ @BeforeEach
+ void setup() {
+ postgresQuotaCurrentValueDAO = new PostgresQuotaCurrentValueDAO(postgresExtension.getDefaultPostgresExecutor());
+ }
+
+ @Test
+ void increaseQuotaCurrentValueShouldCreateNewRowSuccessfully() {
+ postgresQuotaCurrentValueDAO.increase(QUOTA_KEY, 100L).block();
+
+ assertThat(postgresQuotaCurrentValueDAO.getQuotaCurrentValue(QUOTA_KEY).block().getCurrentValue())
+ .isEqualTo(100L);
+ }
+
+ @Test
+ void increaseQuotaCurrentValueShouldCreateNewRowSuccessfullyWhenIncreaseAmountIsZero() {
+ postgresQuotaCurrentValueDAO.increase(QUOTA_KEY, 0L).block();
+
+ assertThat(postgresQuotaCurrentValueDAO.getQuotaCurrentValue(QUOTA_KEY).block().getCurrentValue())
+ .isZero();
+ }
+
+ @Test
+ void increaseQuotaCurrentValueShouldIncreaseValueSuccessfully() {
+ assertThat(postgresQuotaCurrentValueDAO.getQuotaCurrentValue(QUOTA_KEY).block()).isNull();
+
+ postgresQuotaCurrentValueDAO.increase(QUOTA_KEY, 100L).block();
+ postgresQuotaCurrentValueDAO.increase(QUOTA_KEY, 100L).block();
+
+ assertThat(postgresQuotaCurrentValueDAO.getQuotaCurrentValue(QUOTA_KEY).block().getCurrentValue())
+ .isEqualTo(200L);
+ }
+
+ @Test
+ void increaseQuotaCurrentValueShouldDecreaseValueSuccessfullyWhenIncreaseAmountIsNegative() {
+ postgresQuotaCurrentValueDAO.increase(QUOTA_KEY, 200L).block();
+ postgresQuotaCurrentValueDAO.increase(QUOTA_KEY, -100L).block();
+
+ assertThat(postgresQuotaCurrentValueDAO.getQuotaCurrentValue(QUOTA_KEY).block().getCurrentValue())
+ .isEqualTo(100L);
+ }
+
+ @Test
+ void decreaseQuotaCurrentValueShouldDecreaseValueSuccessfully() {
+ postgresQuotaCurrentValueDAO.increase(QUOTA_KEY, 200L).block();
+ postgresQuotaCurrentValueDAO.decrease(QUOTA_KEY, 100L).block();
+
+ assertThat(postgresQuotaCurrentValueDAO.getQuotaCurrentValue(QUOTA_KEY).block().getCurrentValue())
+ .isEqualTo(100L);
+ }
+
+ @Test
+ void decreaseQuotaCurrentValueDownToNegativeShouldAllowNegativeValue() {
+ postgresQuotaCurrentValueDAO.increase(QUOTA_KEY, 100L).block();
+ postgresQuotaCurrentValueDAO.decrease(QUOTA_KEY, 1000L).block();
+
+ assertThat(postgresQuotaCurrentValueDAO.getQuotaCurrentValue(QUOTA_KEY).block().getCurrentValue())
+ .isEqualTo(-900L);
+ }
+
+ @Test
+ void decreaseQuotaCurrentValueWhenNoRecordYetShouldNotFail() {
+ postgresQuotaCurrentValueDAO.decrease(QUOTA_KEY, 1000L).block();
+
+ assertThat(postgresQuotaCurrentValueDAO.getQuotaCurrentValue(QUOTA_KEY).block().getCurrentValue())
+ .isEqualTo(-1000L);
+ }
+
+ @Test
+ void deleteQuotaCurrentValueShouldDeleteSuccessfully() {
+ QuotaCurrentValue.Key quotaKey = QuotaCurrentValue.Key.of(QuotaComponent.MAILBOX, "andre@abc.com", QuotaType.SIZE);
+ postgresQuotaCurrentValueDAO.increase(quotaKey, 100L).block();
+ postgresQuotaCurrentValueDAO.deleteQuotaCurrentValue(quotaKey).block();
+
+ assertThat(postgresQuotaCurrentValueDAO.getQuotaCurrentValue(quotaKey).block())
+ .isNull();
+ }
+
+ @Test
+ void deleteQuotaCurrentValueShouldResetCounterForever() {
+ postgresQuotaCurrentValueDAO.increase(QUOTA_KEY, 100L).block();
+ postgresQuotaCurrentValueDAO.deleteQuotaCurrentValue(QUOTA_KEY).block();
+ postgresQuotaCurrentValueDAO.increase(QUOTA_KEY, 100L).block();
+
+ assertThat(postgresQuotaCurrentValueDAO.getQuotaCurrentValue(QUOTA_KEY).block().getCurrentValue())
+ .isEqualTo(100L);
+ }
+
+ @Test
+ void getQuotasByComponentShouldGetAllQuotaTypesSuccessfully() {
+ QuotaCurrentValue.Key countQuotaKey = QuotaCurrentValue.Key.of(QuotaComponent.MAILBOX, "james@abc.com", QuotaType.COUNT);
+
+ QuotaCurrentValue expectedQuotaSize = QuotaCurrentValue.builder().quotaComponent(QUOTA_KEY.getQuotaComponent())
+ .identifier(QUOTA_KEY.getIdentifier()).quotaType(QUOTA_KEY.getQuotaType()).currentValue(100L).build();
+ QuotaCurrentValue expectedQuotaCount = QuotaCurrentValue.builder().quotaComponent(countQuotaKey.getQuotaComponent())
+ .identifier(countQuotaKey.getIdentifier()).quotaType(countQuotaKey.getQuotaType()).currentValue(56L).build();
+
+ postgresQuotaCurrentValueDAO.increase(QUOTA_KEY, 100L).block();
+ postgresQuotaCurrentValueDAO.increase(countQuotaKey, 56L).block();
+
+ List<QuotaCurrentValue> actual = postgresQuotaCurrentValueDAO.getQuotaCurrentValues(QUOTA_KEY.getQuotaComponent(), QUOTA_KEY.getIdentifier())
+ .collectList()
+ .block();
+
+ assertThat(actual).containsExactlyInAnyOrder(expectedQuotaSize, expectedQuotaCount);
+ }
+}
diff --git a/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/quota/PostgresQuotaLimitDaoTest.java b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/quota/PostgresQuotaLimitDaoTest.java
new file mode 100644
index 00000000000..b489c194e9e
--- /dev/null
+++ b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/quota/PostgresQuotaLimitDaoTest.java
@@ -0,0 +1,84 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres.quota;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import org.apache.james.backends.postgres.PostgresExtension;
+import org.apache.james.core.quota.QuotaComponent;
+import org.apache.james.core.quota.QuotaLimit;
+import org.apache.james.core.quota.QuotaScope;
+import org.apache.james.core.quota.QuotaType;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
+
+public class PostgresQuotaLimitDaoTest {
+
+ private PostgresQuotaLimitDAO postgresQuotaLimitDao;
+
+ @RegisterExtension
+ static PostgresExtension postgresExtension = PostgresExtension.withoutRowLevelSecurity(PostgresQuotaModule.MODULE);
+
+ @BeforeEach
+ void setup() {
+ postgresQuotaLimitDao = new PostgresQuotaLimitDAO(postgresExtension.getDefaultPostgresExecutor());
+ }
+
+ @Test
+ void getQuotaLimitsShouldGetSomeQuotaLimitsSuccessfully() {
+ QuotaLimit expectedOne = QuotaLimit.builder().quotaComponent(QuotaComponent.MAILBOX).quotaScope(QuotaScope.DOMAIN).identifier("A").quotaType(QuotaType.COUNT).quotaLimit(200L).build();
+ QuotaLimit expectedTwo = QuotaLimit.builder().quotaComponent(QuotaComponent.MAILBOX).quotaScope(QuotaScope.DOMAIN).identifier("A").quotaType(QuotaType.SIZE).quotaLimit(100L).build();
+ postgresQuotaLimitDao.setQuotaLimit(expectedOne).block();
+ postgresQuotaLimitDao.setQuotaLimit(expectedTwo).block();
+
+ assertThat(postgresQuotaLimitDao.getQuotaLimits(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, "A").collectList().block())
+ .containsExactlyInAnyOrder(expectedOne, expectedTwo);
+ }
+
+ @Test
+ void setQuotaLimitShouldSaveObjectSuccessfully() {
+ QuotaLimit expected = QuotaLimit.builder().quotaComponent(QuotaComponent.MAILBOX).quotaScope(QuotaScope.DOMAIN).identifier("A").quotaType(QuotaType.COUNT).quotaLimit(100L).build();
+ postgresQuotaLimitDao.setQuotaLimit(expected).block();
+
+ assertThat(postgresQuotaLimitDao.getQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, "A", QuotaType.COUNT)).block())
+ .isEqualTo(expected);
+ }
+
+ @Test
+ void setQuotaLimitShouldSaveObjectSuccessfullyWhenLimitIsMinusOne() {
+ QuotaLimit expected = QuotaLimit.builder().quotaComponent(QuotaComponent.MAILBOX).quotaScope(QuotaScope.DOMAIN).identifier("A").quotaType(QuotaType.COUNT).quotaLimit(-1L).build();
+ postgresQuotaLimitDao.setQuotaLimit(expected).block();
+
+ assertThat(postgresQuotaLimitDao.getQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, "A", QuotaType.COUNT)).block())
+ .isEqualTo(expected);
+ }
+
+ @Test
+ void deleteQuotaLimitShouldDeleteObjectSuccessfully() {
+ QuotaLimit quotaLimit = QuotaLimit.builder().quotaComponent(QuotaComponent.MAILBOX).quotaScope(QuotaScope.DOMAIN).identifier("A").quotaType(QuotaType.COUNT).quotaLimit(100L).build();
+ postgresQuotaLimitDao.setQuotaLimit(quotaLimit).block();
+ postgresQuotaLimitDao.deleteQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, "A", QuotaType.COUNT)).block();
+
+ assertThat(postgresQuotaLimitDao.getQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, "A", QuotaType.COUNT)).block())
+ .isNull();
+ }
+
+}
\ No newline at end of file
diff --git a/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/utils/PostgresHealthCheckTest.java b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/utils/PostgresHealthCheckTest.java
new file mode 100644
index 00000000000..f48f8d5b8c2
--- /dev/null
+++ b/backends-common/postgres/src/test/java/org/apache/james/backends/postgres/utils/PostgresHealthCheckTest.java
@@ -0,0 +1,61 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.backends.postgres.utils;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import org.apache.james.backends.postgres.PostgresExtension;
+import org.apache.james.backends.postgres.quota.PostgresQuotaModule;
+import org.apache.james.core.healthcheck.Result;
+import org.apache.james.core.healthcheck.ResultStatus;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
+
+import reactor.core.publisher.Mono;
+
+public class PostgresHealthCheckTest {
+ private PostgresHealthCheck testee;
+
+ @RegisterExtension
+ static PostgresExtension postgresExtension = PostgresExtension.withoutRowLevelSecurity(PostgresQuotaModule.MODULE);
+
+ @BeforeEach
+ void setup() {
+ testee = new PostgresHealthCheck(postgresExtension.getDefaultPostgresExecutor());
+ }
+
+ @Test
+ void shouldBeHealthy() {
+ Result result = Mono.from(testee.check()).block();
+ assertThat(result.getStatus()).isEqualTo(ResultStatus.HEALTHY);
+ }
+
+ @Test
+ void shouldBeUnhealthyWhenPaused() {
+ try {
+ postgresExtension.pause();
+ Result result = Mono.from(testee.check()).block();
+ assertThat(result.getStatus()).isEqualTo(ResultStatus.UNHEALTHY);
+ } finally {
+ postgresExtension.unpause();
+ }
+ }
+}
\ No newline at end of file
diff --git a/core/src/main/java/org/apache/james/core/quota/QuotaCurrentValue.java b/core/src/main/java/org/apache/james/core/quota/QuotaCurrentValue.java
index 682f10c7bcb..c1b38bb819f 100644
--- a/core/src/main/java/org/apache/james/core/quota/QuotaCurrentValue.java
+++ b/core/src/main/java/org/apache/james/core/quota/QuotaCurrentValue.java
@@ -26,6 +26,59 @@
public class QuotaCurrentValue {
+ public static class Key {
+
+ public static Key of(QuotaComponent component, String identifier, QuotaType quotaType) {
+ return new Key(component, identifier, quotaType);
+ }
+
+ private final QuotaComponent quotaComponent;
+ private final String identifier;
+ private final QuotaType quotaType;
+
+ public QuotaComponent getQuotaComponent() {
+ return quotaComponent;
+ }
+
+ public String getIdentifier() {
+ return identifier;
+ }
+
+ public QuotaType getQuotaType() {
+ return quotaType;
+ }
+
+ private Key(QuotaComponent quotaComponent, String identifier, QuotaType quotaType) {
+ this.quotaComponent = quotaComponent;
+ this.identifier = identifier;
+ this.quotaType = quotaType;
+ }
+
+ @Override
+ public final int hashCode() {
+ return Objects.hash(quotaComponent, identifier, quotaType);
+ }
+
+ @Override
+ public final boolean equals(Object o) {
+ if (o instanceof Key) {
+ Key other = (Key) o;
+ return Objects.equals(quotaComponent, other.quotaComponent)
+ && Objects.equals(identifier, other.identifier)
+ && Objects.equals(quotaType, other.quotaType);
+ }
+ return false;
+ }
+
+ public String toString() {
+ return MoreObjects.toStringHelper(this)
+ .add("quotaComponent", quotaComponent)
+ .add("identifier", identifier)
+ .add("quotaType", quotaType)
+ .toString();
+ }
+ }
+
public static class Builder {
private QuotaComponent quotaComponent;
private String identifier;
diff --git a/core/src/main/java/org/apache/james/core/quota/QuotaLimit.java b/core/src/main/java/org/apache/james/core/quota/QuotaLimit.java
index 5d49216be7d..0f371a5d051 100644
--- a/core/src/main/java/org/apache/james/core/quota/QuotaLimit.java
+++ b/core/src/main/java/org/apache/james/core/quota/QuotaLimit.java
@@ -26,6 +26,65 @@
import com.google.common.base.Preconditions;
public class QuotaLimit {
+ public static class QuotaLimitKey {
+ public static QuotaLimitKey of(QuotaComponent component, QuotaScope scope, String identifier, QuotaType quotaType) {
+ return new QuotaLimitKey(component, scope, identifier, quotaType);
+ }
+
+ private final QuotaComponent quotaComponent;
+ private final QuotaScope quotaScope;
+ private final String identifier;
+ private final QuotaType quotaType;
+
+ public QuotaComponent getQuotaComponent() {
+ return quotaComponent;
+ }
+
+ public QuotaScope getQuotaScope() {
+ return quotaScope;
+ }
+
+ public String getIdentifier() {
+ return identifier;
+ }
+
+ public QuotaType getQuotaType() {
+ return quotaType;
+ }
+
+ private QuotaLimitKey(QuotaComponent quotaComponent, QuotaScope quotaScope, String identifier, QuotaType quotaType) {
+ this.quotaComponent = quotaComponent;
+ this.quotaScope = quotaScope;
+ this.identifier = identifier;
+ this.quotaType = quotaType;
+ }
+
+ @Override
+ public final int hashCode() {
+ return Objects.hash(quotaComponent, quotaScope, identifier, quotaType);
+ }
+
+ @Override
+ public final boolean equals(Object o) {
+ if (o instanceof QuotaLimitKey) {
+ QuotaLimitKey other = (QuotaLimitKey) o;
+ return Objects.equals(quotaComponent, other.quotaComponent)
+ && Objects.equals(quotaScope, other.quotaScope)
+ && Objects.equals(identifier, other.identifier)
+ && Objects.equals(quotaType, other.quotaType);
+ }
+ return false;
+ }
+
+ public String toString() {
+ return MoreObjects.toStringHelper(this)
+ .add("quotaComponent", quotaComponent)
+ .add("quotaScope", quotaScope)
+ .add("identifier", identifier)
+ .add("quotaType", quotaType)
+ .toString();
+ }
+ }
public static class Builder {
private QuotaComponent quotaComponent;
diff --git a/docs/modules/servers/assets/images/james-imap-base-performance-distributed.png b/docs/modules/servers/assets/images/james-imap-base-performance-distributed.png
new file mode 100644
index 00000000000..aa693982a6f
Binary files /dev/null and b/docs/modules/servers/assets/images/james-imap-base-performance-distributed.png differ
diff --git a/docs/modules/servers/assets/images/james-imap-base-performance-postgres.png b/docs/modules/servers/assets/images/james-imap-base-performance-postgres.png
new file mode 100644
index 00000000000..47bb0eb2c96
Binary files /dev/null and b/docs/modules/servers/assets/images/james-imap-base-performance-postgres.png differ
diff --git a/docs/modules/servers/assets/images/james-imap-base-performance.png b/docs/modules/servers/assets/images/james-imap-base-performance.png
deleted file mode 100644
index 1caa11dc496..00000000000
Binary files a/docs/modules/servers/assets/images/james-imap-base-performance.png and /dev/null differ
diff --git a/docs/modules/servers/assets/images/postgres_pg_stat_statements.png b/docs/modules/servers/assets/images/postgres_pg_stat_statements.png
new file mode 100644
index 00000000000..4cc1e46989d
Binary files /dev/null and b/docs/modules/servers/assets/images/postgres_pg_stat_statements.png differ
diff --git a/docs/modules/servers/assets/images/specialized-instances.png b/docs/modules/servers/assets/images/specialized-instances-distributed.png
similarity index 100%
rename from docs/modules/servers/assets/images/specialized-instances.png
rename to docs/modules/servers/assets/images/specialized-instances-distributed.png
diff --git a/docs/modules/servers/assets/images/specialized-instances-postgres.png b/docs/modules/servers/assets/images/specialized-instances-postgres.png
new file mode 100644
index 00000000000..9b1d226257c
Binary files /dev/null and b/docs/modules/servers/assets/images/specialized-instances-postgres.png differ
diff --git a/docs/modules/servers/assets/images/storage.png b/docs/modules/servers/assets/images/storage_james_distributed.png
similarity index 100%
rename from docs/modules/servers/assets/images/storage.png
rename to docs/modules/servers/assets/images/storage_james_distributed.png
diff --git a/docs/modules/servers/assets/images/storage_james_postgres.png b/docs/modules/servers/assets/images/storage_james_postgres.png
new file mode 100644
index 00000000000..e846fa4d9c9
Binary files /dev/null and b/docs/modules/servers/assets/images/storage_james_postgres.png differ
diff --git a/docs/modules/servers/assets/images/storage_james_postgres.svg b/docs/modules/servers/assets/images/storage_james_postgres.svg
new file mode 100644
index 00000000000..bc8203ce1f0
--- /dev/null
+++ b/docs/modules/servers/assets/images/storage_james_postgres.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/modules/servers/nav.adoc b/docs/modules/servers/nav.adoc
index 7fdb1f8bc13..52a9cda02b8 100644
--- a/docs/modules/servers/nav.adoc
+++ b/docs/modules/servers/nav.adoc
@@ -77,4 +77,56 @@
*** xref:distributed/benchmark/index.adoc[Performance benchmark]
**** xref:distributed/benchmark/db-benchmark.adoc[]
**** xref:distributed/benchmark/james-benchmark.adoc[]
+** xref:postgres/index.adoc[]
+*** xref:postgres/objectives.adoc[]
+*** xref:postgres/architecture/index.adoc[]
+**** xref:postgres/architecture/implemented-standards.adoc[]
+**** xref:postgres/architecture/consistency-model.adoc[]
+**** xref:postgres/architecture/specialized-instances.adoc[]
+*** xref:postgres/run/index.adoc[]
+**** xref:postgres/run/run-java.adoc[Run with Java]
+**** xref:postgres/run/run-docker.adoc[Run with Docker]
+*** xref:postgres/configure/index.adoc[]
+**** Protocols
+***** xref:postgres/configure/imap.adoc[imapserver.xml]
+***** xref:postgres/configure/jmap.adoc[jmap.properties]
+***** xref:postgres/configure/jmx.adoc[jmx.properties]
+***** xref:postgres/configure/smtp.adoc[smtpserver.xml & lmtpserver.xml]
+***** xref:postgres/configure/smtp-hooks.adoc[Packaged SMTP hooks]
+***** xref:postgres/configure/pop3.adoc[pop3server.xml]
+***** xref:postgres/configure/webadmin.adoc[webadmin.properties]
+***** xref:postgres/configure/ssl.adoc[SSL & TLS]
+***** xref:postgres/configure/sieve.adoc[Sieve & ManageSieve]
+**** Storage dependencies
+***** xref:postgres/configure/blobstore.adoc[blobstore.properties]
+***** xref:postgres/configure/opensearch.adoc[opensearch.properties]
+***** xref:postgres/configure/rabbitmq.adoc[rabbitmq.properties]
+***** xref:postgres/configure/redis.adoc[redis.properties]
+***** xref:postgres/configure/tika.adoc[tika.properties]
+**** Core components
+***** xref:postgres/configure/batchsizes.adoc[batchsizes.properties]
+***** xref:postgres/configure/dns.adoc[dnsservice.xml]
+***** xref:postgres/configure/domainlist.adoc[domainlist.xml]
+***** xref:postgres/configure/droplists.adoc[DropLists]
+***** xref:postgres/configure/healthcheck.adoc[healthcheck.properties]
+***** xref:postgres/configure/mailetcontainer.adoc[mailetcontainer.xml]
+***** xref:postgres/configure/mailets.adoc[Packaged Mailets]
+***** xref:postgres/configure/matchers.adoc[Packaged Matchers]
+***** xref:postgres/configure/mailrepositorystore.adoc[mailrepositorystore.xml]
+***** xref:postgres/configure/recipientrewritetable.adoc[recipientrewritetable.xml]
+***** xref:postgres/configure/search.adoc[search.properties]
+***** xref:postgres/configure/usersrepository.adoc[usersrepository.xml]
+*** xref:postgres/operate/index.adoc[Operate]
+**** xref:postgres/operate/guide.adoc[]
+**** xref:postgres/operate/performanceChecklist.adoc[]
+**** xref:postgres/operate/logging.adoc[]
+**** xref:postgres/operate/webadmin.adoc[]
+**** xref:postgres/operate/metrics.adoc[]
+**** xref:postgres/operate/migrating.adoc[]
+**** xref:postgres/operate/cli.adoc[]
+**** xref:postgres/operate/security.adoc[]
+*** xref:postgres/extending/index.adoc[]
+*** xref:postgres/benchmark/index.adoc[]
+**** xref:postgres/benchmark/db-benchmark.adoc[]
+**** xref:postgres/benchmark/james-benchmark.adoc[]
** xref:test.adoc[]
diff --git a/docs/modules/servers/pages/distributed/architecture/consistency-model.adoc b/docs/modules/servers/pages/distributed/architecture/consistency-model.adoc
index 53a951fb181..af72b7d810b 100644
--- a/docs/modules/servers/pages/distributed/architecture/consistency-model.adoc
+++ b/docs/modules/servers/pages/distributed/architecture/consistency-model.adoc
@@ -1,84 +1,14 @@
= Distributed James Server — Consistency Model
:navtitle: Consistency Model
-This page presents the consistency model used by the Distributed Server and
-points to the tools built around it.
+:backend-name: cassandra
+:backend-name-cap: Cassandra
+:server-name: Distributed James Server
+:mailet-repository-path-prefix: cassandra
+:xref-base: distributed
+:data_replication_extend: servers:distributed/architecture/consistency_model_data_replication_extend.adoc
-== Data Replication
-
-The Distributed Server relies on different storage technologies, all having their own
-consistency models.
-
-These data stores replicate data in order to enforce some level of availability.
-
-By consistency, we mean the ability for all replica to hold the same data.
-
-By availability, we mean the ability for a replica to answer a request.
-
-In distributed systems, link:https://en.wikipedia.org/wiki/CAP_theorem[according to the CAP theorem],
-as we will necessarily encounter network partitions, then trade-offs need to be made between
-consistency and availability.
-
-This section details this trade-off for data stores used by the Distributed Server.
-
-=== Cassandra consistency model
-
-link:https://cassandra.apache.org/[Cassandra] is an
-link:https://en.wikipedia.org/wiki/Eventual_consistency[eventually consistent] data store.
-This means that replica can hold diverging data, but are guaranteed to converge over time.
-
-Several mechanisms are built in Cassandra to enforce this convergence, and need to be
-leveraged by *Distributed Server Administrator*. Namely
-link:https://docs.datastax.com/en/dse/5.1/dse-admin/datastax_enterprise/tools/nodetool/toolsRepair.html[nodetool repair],
-link:https://cassandra.apache.org/doc/latest/operating/hints.html[Hinted hand-off] and
-link:https://cassandra.apache.org/doc/latest/operating/read_repair.html[Read repair].
-
-The Distributed Server tries to mitigate inconsistencies by relying on
-link:https://docs.datastax.com/en/archived/cassandra/3.0/cassandra/dml/dmlConfigConsistency.html[QUORUM] read and write levels.
-This means that a majority of replica are needed for read and write operations to be performed. This guaranty is needed
-as the Mailbox is a complex datamodel with several layers of metadata, and needs "read-your-writes" guaranties that QUORUM
-read+writes delivers.
-
-Critical business operations, like UID allocation, rely on strong consistency mechanisms brought by
-link:https://www.datastax.com/blog/2013/07/lightweight-transactions-cassandra-20[lightweight transaction].
-
-==== About multi data-center setups
-
-As strong consistency is required for some operation, especially regarding IMAP monotic UID and MODSEQ generation,
-and as lightweight transactions are slow across data centers, running James with a
-link:https://docs.datastax.com/en/ddac/doc/datastax_enterprise/production/DDACmultiDCperWorkloadType.html[multi data-center]
-Cassandra setup is discouraged.
-
-However, xref:distributed/configure/cassandra.adoc[this page] enables setting alternative read level,
-which could be acceptable regarding limited requirements. `LOCAL_QUORUM` coupled with `LOCAL_SERIAL`
-is likely the only scalable setup. Some options were added to turn off SERIAL consistency usage for message
-and mailbox management. However, the use of Lightweight Transaction cannot be disabled for UIDs and ModSeqs.
-
-Running the Distributed Server IMAP server in a multi datacenter setup will likely result either in data loss,
-or very slow operations - as we rely on monotic UID generation, without strong consistency, UIDs could be allocated
-several times.
-
-We did wire a multi-DC friendly distributed, POP3 only server that leverages acceptable performance while staying
-consistent. This is achieved by having a reduced feature set - supporting only the POP3 server and using messageIds as
-identifiers (generated without synchronisation using TimeUUIDs). You can find this application
-link:https://github.com/apache/james-project/tree/master/server/apps/distributed-pop3-app[on GitHub]. In the future,
-JMAP support could be added, but requires followup developments as some components critically depends on UIDs
-(for instance the search).
-
-=== OpenSearch consistency model
-
-OpenSearch relies on link:https://www.elastic.co/blog/a-new-era-for-cluster-coordination-in-elasticsearch[strong consistency]
-with home grown algorithm.
-
-The 6.x release line, that the distributed server is using is known to be slow to recover from failures.
-
-Be aware that data is asynchronously indexed in OpenSearch, changes will be eventually visible.
-
-=== RabbitMQ consistency model
-
-The Distributed Server can be set up to rely on a RabbitMQ cluster. All queues can be set up in an high availability
-fashion using link:https://www.rabbitmq.com/docs/quorum-queues[quorum queues] - those are replicated queues using the link:https://raft.github.io/[RAFT] consensus protocol and thus are
-strongly consistent.
+include::partial$architecture/consistency-model.adoc[]
== Denormalization
@@ -91,45 +21,11 @@ level across denormalization tables.
We write to a "table of truth" first, then duplicate the data to denormalization tables.
-The Distributed server offers several mechanisms to mitigate these inconsistencies:
+The {server-name} offers several mechanisms to mitigate these inconsistencies:
- - Writes to denormalization tables are retried.
- - Some xref:distributed/operate/guide.adoc#_solving_cassandra_inconsistencies[SolveInconsistencies tasks] are exposed and are able to heal a given denormalization table.
+- Writes to denormalization tables are retried.
+- Some xref:{xref-base}/operate/guide.adoc#_solving_cassandra_inconsistencies[SolveInconsistencies tasks] are exposed and are able to heal a given denormalization table.
They reset the "deduplication tables" content to the "table of truth" content.
- - link:https://github.com/apache/james-project/blob/master/src/adr/0042-applicative-read-repairs.md[Read repairs],
+- link:https://github.com/apache/james-project/blob/master/src/adr/0042-applicative-read-repairs.md[Read repairs],
when implemented for a given denormalization, enables auto-healing. When an inconsistency is detected, They reset the
"deduplication tables" entry to the "table of truth" entry.
-
-== Consistency across data stores
-
-The Distributed Server leverages several data stores:
-
- - Cassandra is used for metadata storage
- - OpenSearch for search
- - Object Storage for large object storage
-
-Thus the Distributed Server also offers mechanisms to enforce consistency across data stores.
-
-=== Write path organisation
-
-The primary data stores are composed of Cassandra for metadata and Object storage for binary data.
-
-To ensure the data referenced in Cassandra is pointing to a valid object in the object store, we write
-the object store payload first, then write the corresponding metadata in Cassandra.
-
-Similarly, metadata is destroyed first before the corresponding object is deleted.
-
-Such a procedure avoids metadata pointing to unexisting blobs, however might lead to some unreferenced
-blobs.
-
-=== Cassandra <=> OpenSearch
-
-After being written to the primary stores (namely Cassandra & Object Storage), email content is
-asynchronously indexed into OpenSearch.
-
-This process, called the EventBus, which retries temporary errors, and stores transient errors for
-later admin-triggered retries is described further xref:distributed/operate/guide.adoc#_mailbox_event_bus[here].
-His role is to spread load and limit inconsistencies.
-
-Furthermore, some xref:distributed/operate/guide.adoc#_usual_troubleshooting_procedures[re-indexing tasks]
-enables to re-synchronise OpenSearch content with the primary data stores
diff --git a/docs/modules/servers/pages/distributed/architecture/consistency_model_data_replication_extend.adoc b/docs/modules/servers/pages/distributed/architecture/consistency_model_data_replication_extend.adoc
new file mode 100644
index 00000000000..08ac3316c5a
--- /dev/null
+++ b/docs/modules/servers/pages/distributed/architecture/consistency_model_data_replication_extend.adoc
@@ -0,0 +1,43 @@
+=== Cassandra consistency model
+
+link:https://cassandra.apache.org/[Cassandra] is an
+link:https://en.wikipedia.org/wiki/Eventual_consistency[eventually consistent] data store.
+This means that replicas can hold diverging data, but are guaranteed to converge over time.
+
+Several mechanisms are built in Cassandra to enforce this convergence, and need to be
+leveraged by *Distributed Server Administrator*. Namely
+link:https://docs.datastax.com/en/dse/5.1/dse-admin/datastax_enterprise/tools/nodetool/toolsRepair.html[nodetool repair],
+link:https://cassandra.apache.org/doc/latest/operating/hints.html[Hinted hand-off] and
+link:https://cassandra.apache.org/doc/latest/operating/read_repair.html[Read repair].
+
+The {server-name} tries to mitigate inconsistencies by relying on
+link:https://docs.datastax.com/en/archived/cassandra/3.0/cassandra/dml/dmlConfigConsistency.html[QUORUM] read and write levels.
+This means that a majority of replicas are needed for read and write operations to be performed. This guarantee is needed
+as the Mailbox is a complex datamodel with several layers of metadata, and needs "read-your-writes" guarantees that QUORUM
+read+writes delivers.
+
+Critical business operations, like UID allocation, rely on strong consistency mechanisms brought by
+link:https://www.datastax.com/blog/2013/07/lightweight-transactions-cassandra-20[lightweight transaction].
+
+==== About multi data-center setups
+
+As strong consistency is required for some operations, especially regarding IMAP monotonic UID and MODSEQ generation,
+and as lightweight transactions are slow across data centers, running James with a
+link:https://docs.datastax.com/en/ddac/doc/datastax_enterprise/production/DDACmultiDCperWorkloadType.html[multi data-center]
+Cassandra setup is discouraged.
+
+However, xref:{xref-base}/configure/cassandra.adoc[this page] enables setting alternative read level,
+which could be acceptable regarding limited requirements. `LOCAL_QUORUM` coupled with `LOCAL_SERIAL`
+is likely the only scalable setup. Some options were added to turn off SERIAL consistency usage for message
+and mailbox management. However, the use of Lightweight Transaction cannot be disabled for UIDs and ModSeqs.
+
+Running the {server-name} IMAP server in a multi datacenter setup will likely result either in data loss,
+or very slow operations - as we rely on monotonic UID generation, without strong consistency, UIDs could be allocated
+several times.
+
+We did wire a multi-DC friendly distributed, POP3 only server that leverages acceptable performance while staying
+consistent. This is achieved by having a reduced feature set - supporting only the POP3 server and using messageIds as
+identifiers (generated without synchronisation using TimeUUIDs). You can find this application
+link:https://github.com/apache/james-project/tree/master/server/apps/distributed-pop3-app[on GitHub]. In the future,
+JMAP support could be added, but requires followup developments as some components critically depend on UIDs
+(for instance the search).
diff --git a/docs/modules/servers/pages/distributed/architecture/implemented-standards.adoc b/docs/modules/servers/pages/distributed/architecture/implemented-standards.adoc
index 3c5e1472ea4..82f085c438b 100644
--- a/docs/modules/servers/pages/distributed/architecture/implemented-standards.adoc
+++ b/docs/modules/servers/pages/distributed/architecture/implemented-standards.adoc
@@ -1,121 +1,6 @@
= Distributed James Server — Implemented standards
:navtitle: Implemented standards
-This page details standards implemented by the distributed server.
-
-== Message formats
-
- - link:https://datatracker.ietf.org/doc/html/rfc5322[RFC-5322] Internet Message Format (MIME)
- - link:https://datatracker.ietf.org/doc/html/rfc2045[RFC-2045] Format of Internet Message Bodies
- - link:https://datatracker.ietf.org/doc/html/rfc3464[RFC-3464] An Extensible Message Format for Delivery Status Notifications
- - James allow emmit DSNs from the mailet container.
- - link:https://datatracker.ietf.org/doc/html/rfc8098[RFC-8098] Message Disposition Notification
-
-== TLS & authentication
-
-- link:https://datatracker.ietf.org/doc/html/rfc2595.html[RFC-2595] TLS for IMAP, POP3, SMTP (StartTLS)
-- link:https://datatracker.ietf.org/doc/html/rfc8314.html[RFC-8314] Implicit TLS
-- link:https://www.rfc-editor.org/rfc/rfc4959.html[RFC-4959] SASL IR: Initial client response
-- link:https://datatracker.ietf.org/doc/html/rfc4616[RFC-4616] SASL plain authentication
-- link:https://datatracker.ietf.org/doc/html/rfc8314.html[RFC-7628] SASL for OAUTH
-- Implemented for IMAP and SMTP
-- Support for OIDC standard only.
-
-== SMTP
-
-- link:https://datatracker.ietf.org/doc/html/rfc5321[RFC-5321] SMTP Protocol
-- link:https://datatracker.ietf.org/doc/html/rfc974[RFC-974] MAIL ROUTING AND THE DOMAIN SYSTEM
-- link:https://www.rfc-editor.org/rfc/rfc3461[RFC-3461] Simple Mail Transfer Protocol (SMTP) Service Extension for Delivery Status Notifications (DSNs)
- - Requires extra configuration.
-- link:https://datatracker.ietf.org/doc/html/rfc1652[RFC-1652] SMTP Service Extension for 8bit-MIME transport
-- link:https://datatracker.ietf.org/doc/html/rfc1830[RFC-1830] SMTP Service Extensions for Transmission of Large and Binary MIME Messages
-- link:https://datatracker.ietf.org/doc/html/rfc1869[RFC-1869] SMTP Service Extensions
-- link:https://datatracker.ietf.org/doc/html/rfc1870[RFC-1870] SMTP Service Extension for Message Size Declaration
-- link:https://datatracker.ietf.org/doc/html/rfc1891[RFC-1891] SMTP Service Extension for Delivery Status Notifications
-- link:https://datatracker.ietf.org/doc/html/rfc1893[RFC-1893] Enhanced Mail System Status Codes
-- link:https://datatracker.ietf.org/doc/html/rfc2034[RFC-2034] SMTP Service Extension for Returning Enhanced Error Codes
-- link:https://datatracker.ietf.org/doc/html/rfc2142[RFC-2142] Mailbox Names For Common Services, Roles And Functions
-- link:https://datatracker.ietf.org/doc/html/rfc2197[RFC-2197] SMTP Service Extension for Command Pipelining
-- link:https://datatracker.ietf.org/doc/html/rfc2554[RFC-2554] ESMTP Service Extension for Authentication
-- link:https://datatracker.ietf.org/doc/html/rfc1893[RFC-1893] Enhanced Mail System Status Codes
-- link:https://datatracker.ietf.org/doc/rfc6710/[RFC-6710] SMTP Extension for Message Transfer Priorities
-
-== LMTP
-
- - link:https://james.apache.org/server/rfclist/lmtp/rfc2033.txt[RFC-2033] LMTP Local Mail Transfer Protocol
-
-== IMAP
-
-The following IMAP specifications are implemented:
-
- - link:https://datatracker.ietf.org/doc/html/rfc3501.html[RFC-3501] INTERNET MESSAGE ACCESS PROTOCOL - VERSION 4rev1
- - link:https://datatracker.ietf.org/doc/html/rfc2177.html[RFC-2177] IMAP IDLE (mailbox scoped push notifications)
- - link:https://www.rfc-editor.org/rfc/rfc9208.html[RFC-9208] IMAP QUOTA Extension
- - link:https://datatracker.ietf.org/doc/html/rfc2342.html[RFC-2342] IMAP namespace
- - link:https://datatracker.ietf.org/doc/html/rfc2088.html[RFC-2088] IMAP non synchronized literals
- - link:https://datatracker.ietf.org/doc/html/rfc4315.html[RFC-4315] IMAP UIDPLUS
- - link:https://datatracker.ietf.org/doc/html/rfc5464.html[RFC-5464] IMAP Metadata (annotations on mailboxes)
- - link:https://datatracker.ietf.org/doc/html/rfc4551.html[RFC-4551] IMAP Condstore
- - link:https://datatracker.ietf.org/doc/html/rfc5162.html[RFC-5162] IMAP QRESYNC (synchronisation semantic for deleted messages)
- - We don't store a log of deleted modseq thus clients should rely on known sequences mechanism to optimize exchanges.
- - link:https://datatracker.ietf.org/doc/html/rfc4978.html[RFC-4978] IMAP Compress (optional)
- - link:https://datatracker.ietf.org/doc/html/rfc5161.html[RFC-5161] IMAP ENABLE
- - link:https://datatracker.ietf.org/doc/html/rfc6851.html[RFC-6851] IMAP MOVE command
- - link:https://datatracker.ietf.org/doc/html/rfc5182.html[RFC-5182] IMAP Extension for Referencing the Last SEARCH Result
- - link:https://datatracker.ietf.org/doc/html/rfc5032.html[RFC-5032] IMAP WITHIN (for relative date search semantic)
- - link:https://datatracker.ietf.org/doc/html/rfc4731.html[RFC-4731] IMAP ESEARCH: extentions for IMAP search: new options like min, max, count.
- - link:https://datatracker.ietf.org/doc/html/rfc3348.html[RFC-3348] IMAP Child Mailbox Extension
- - link:https://www.rfc-editor.org/rfc/rfc8508.html[RFC-8508] IMAP Replace Extension
- - link:https://www.rfc-editor.org/rfc/rfc7889.html[RFC-7889] IMAP Extension for APPENDLIMIT
- - link:https://www.rfc-editor.org/rfc/rfc8474.html[RFC-8474] IMAP Extension for Object Identifiers
- - link:https://datatracker.ietf.org/doc/html/rfc2971.html[RFC-2971] IMAP ID Extension
- - link:https://datatracker.ietf.org/doc/html/rfc8438.html[RFC-8438] IMAP Extension for STATUS=SIZE
- - link:https://www.rfc-editor.org/rfc/rfc5258.html[RFC-5258] IMAP LIST Command Extensions
- - link:https://www.rfc-editor.org/rfc/rfc5819.html[RFC-5819] IMAP4 Extension for Returning STATUS Information in Extended LIST
- - link:https://www.rfc-editor.org/rfc/rfc8440.html[RFC-8440] IMAP4 Extension for Returning MYRIGHTS Information in Extended LIST
- - link:https://www.rfc-editor.org/rfc/rfc8440.html[RFC-6154] IMAP LIST Extension for Special-Use Mailboxes
- - link:https://www.rfc-editor.org/rfc/rfc8514.html[RFC-8514] IMAP SAVEDATE Extension
- - link:https://www.rfc-editor.org/rfc/rfc8514.html[RFC-9394] IMAP PARTIAL Extension for Paged SEARCH and FETCH
-
-Partially implemented specifications:
-
- - link:https://datatracker.ietf.org/doc/html/rfc4314.html[RFC-4314] IMAP ACL
- - ACLs can be created and managed but mailbox not belonging to one account cannot, as of today, be accessed in IMAP.
-
-== JMAP
-
- - link:https://datatracker.ietf.org/doc/html/rfc8620[RFC-8620] Json Metadata Application Protocol (JMAP)
- - link:https://datatracker.ietf.org/doc/html/rfc8621[RFC-8621] JMAP for emails
- - link:https://datatracker.ietf.org/doc/html/rfc8887[RFC-8887] JMAP over websockets
- - link:https://datatracker.ietf.org/doc/html/rfc9007.html[RFC-9007] Message Delivery Notifications with JMAP.
- - link:https://datatracker.ietf.org/doc/html/rfc8030.html[RFC-8030] Web PUSH: JMAP enable sending push notifications through a push gateway.
-
-https://jmap.io/[JMAP] is intended to be a new standard for email clients to connect to mail
-stores. It therefore intends to primarily replace IMAP + SMTP submission. It is also designed to be more
-generic. It does not replace MTA-to-MTA SMTP transmission.
-
-The link:https://github.com/apache/james-project/tree/master/server/protocols/jmap-rfc-8621/doc/specs/spec[annotated documentation]
-presents the limits of the JMAP RFC-8621 implementation part of the Apache James project.
-
-Some methods / types are not yet implemented, some implementations are naive, and the PUSH is not supported yet.
-
-Users are invited to read these limitations before using actively the JMAP RFC-8621 implementation, and should ensure their
-client applications only uses supported operations.
-
-== POP3
-
- - link:https://www.ietf.org/rfc/rfc1939.txt[RFC-1939] Post Office Protocol - Version 3
-
-== ManageSieve
-
-Support for manageSieve is experimental.
-
- - link:https://datatracker.ietf.org/doc/html/rfc5804[RFC-5804] A Protocol for Remotely Managing Sieve Scripts
-
-== Sieve
-
- - link:https://datatracker.ietf.org/doc/html/rfc5228[RFC-5228] Sieve: An Email Filtering Language
- - link:https://datatracker.ietf.org/doc/html/rfc5173[RFC-5173] Sieve Email Filtering: Body Extension
- - link:https://datatracker.ietf.org/doc/html/rfc5230[RFC-5230] Sieve Email Filtering: Vacation Extension
-
+:server-name: Distributed James Server
+include::partial$architecture/implemented-standards.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/architecture/index.adoc b/docs/modules/servers/pages/distributed/architecture/index.adoc
index ba5c25541a5..a36103cc8c8 100644
--- a/docs/modules/servers/pages/distributed/architecture/index.adoc
+++ b/docs/modules/servers/pages/distributed/architecture/index.adoc
@@ -1,308 +1,13 @@
= Distributed James Server — Architecture
:navtitle: Architecture
-This sections presents the Distributed Server architecture.
-
-== Storage
-
-In order to deliver its promises, the Distributed Server leverages the following storage strategies:
-
-image::storage.png[Storage responsibilities for the Distributed Server]
-
- * *Cassandra* is used for metadata storage. Cassandra is efficient for a very high workload of small queries following
-a known pattern.
- * The *blob store* storage interface is responsible of storing potentially large binary data. For instance
- email bodies, headers or attachments. Different technologies can be used: *Cassandra*, or S3 compatible *Object Storage*
-(S3 or Swift).
- * *OpenSearch* component empowers full text search on emails. It also enables querying data with unplanned access
-patterns. OpenSearch throughput do not however match the one of Cassandra thus its use is avoided upon regular workloads.
- * *RabbitMQ* enables James nodes of a same cluster to collaborate together. It is used to implement connected protocols,
-notification patterns as well as distributed resilient work queues and mail queue.
- * *Tika* (optional) enables text extraction from attachments, thus improving full text search results.
- * *link:https://spamassassin.apache.org/[SpamAssassin] or link:https://rspamd.com/[Rspamd]* (optional) can be used for Spam detection and user feedback is supported.
-
-xref:distributed/architecture/consistency-model.adoc[This page] further details Distributed James consistency model.
-
-== Protocols
-
-The following protocols are supported and can be used to interact with the Distributed Server:
-
-* *SMTP*
-* *IMAP*
-* xref:distributed/operate/webadmin.adoc[WebAdmin] REST Administration API
-* *LMTP*
-* *POP3*
-
-The following protocols should be considered experimental:
-
-* *JMAP* (RFC-8620 &RFC-8621 specifications and known limitations of the James implementation are defined link:https://github.com/apache/james-project/tree/master/server/protocols/jmap-rfc-8621/doc[here])
-* *ManagedSieve*
-
-Read more on xref:distributed/architecture/implemented-standards.adoc[implemented standards].
-
-== Topology
-
-While it is perfectly possible to deploy homogeneous James instances, with the same configuration and thus the same
-protocols and the same responsibilities one might want to investigate in
-xref:distributed/architecture/specialized-instances.adoc['Specialized instances'].
-
-== Components
-
-This section presents the various components of the Distributed server, providing context about
-their interactions, and about their implementations.
-
-=== High level view
-
-Here is a high level view of the various server components and their interactions:
-
-image::server-components.png[Server components mobilized for SMTP & IMAP]
-
- - The SMTP protocol receives a mail, and enqueue it on the MailQueue
- - The MailetContainer will start processing the mail Asynchronously and will take business decisions like storing the
- email locally in a user mailbox. The behaviour of the MailetContainer is highly customizable thanks to the Mailets and
- the Matcher composibility.
- - The Mailbox component is responsible of storing a user's mails.
- - The user can use the IMAP or the JMAP protocol to retrieve and read his mails.
-
-These components will be presented more in depth below.
-
-=== Mail processing
-
-Mail processing allows to take asynchronously business decisions on
-received emails.
-
-Here are its components:
-
-* The `spooler` takes mail out of the mailQueue and executes mail
-processing within the `mailet container`.
-* The `mailet container` synchronously executes the user defined logic.
-This `logic' is written through the use of `mailet`, `matcher` and
-`processor`.
-* A `mailet` represents an action: mail modification, envelop
-modification, a side effect, or stop processing.
-* A `matcher` represents a condition to execute a mailet.
-* A `processor` is a flow of pair of `matcher` and `mailet` executed
-sequentially. The `ToProcessor` mailet is a `goto` instruction to start
-executing another `processor`
-* A `mail repository` allows storage of a mail as part of its
-processing. Standard configuration relies on the following mail
-repository:
-** `cassandra://var/mail/error/` : unexpected errors that occurred
-during mail processing. Emails impacted by performance related
-exceptions, or logical bug within James code are typically stored here.
-These mails could be reprocessed once the cause of the error is fixed.
-The `Mail.error` field can help diagnose the issue. Correlation with
-logs can be achieved via the use of the `Mail.name` field.
-** `cassandra://var/mail/address-error/` : mail addressed to a
-non-existing recipient of a handled local domain. These mails could be
-reprocessed once the user is created, for instance.
-** `cassandra://var/mail/relay-denied/` : mail for whom relay was
-denied: missing authentication can, for instance, be a cause. In
-addition to prevent disasters upon miss configuration, an email review
-of this mail repository can help refine a host spammer blacklist.
-** `cassandra://var/mail/rrt-error/` : runtime error upon Recipient
-Rewriting occurred. This is typically due to a loop.
-
-=== Mail Queue
-
-An email queue is a mandatory component of SMTP servers. It is a system
-that creates a queue of emails that are waiting to be processed for
-delivery. Email queuing is a form of Message Queuing – an asynchronous
-service-to-service communication. A message queue is meant to decouple a
-producing process from a consuming one. An email queue decouples email
-reception from email processing. It allows them to communicate without
-being connected. As such, the queued emails wait for processing until
-the recipient is available to receive them. As James is an Email Server,
-it also supports mail queue as well.
-
-==== Why Mail Queue is necessary
-
-You might often need to check mail queue to make sure all emails are
-delivered properly. At first, you need to know why email queues get
-clogged. Here are the two core reasons for that:
-
-* Exceeded volume of emails
-
-Some mailbox providers enforce email rate limits on IP addresses. The
-limits are based on the sender reputation. If you exceeded this rate and
-queued too many emails, the delivery speed will decrease.
-
-* Spam-related issues
-
-Another common reason is that your email has been busted by spam
-filters. The filters will let the emails gradually pass to analyze how
-the rest of the recipients react to the message. If there is slow
-progress, it’s okay. Your email campaign is being observed and assessed.
-If it’s stuck, there could be different reasons including the blockage
-of your IP address.
-
-==== Why combining Cassandra, RabbitMQ and Object storage for MailQueue
-
-* RabbitMQ ensures the messaging function, and avoids polling.
-* Cassandra enables administrative operations such as browsing, deleting
-using a time series which might require fine performance tuning (see
-http://cassandra.apache.org/doc/latest/operating/index.html[Operating
-Casandra documentation]).
-* Object Storage stores potentially large binary payload.
-
-However the current design do not implement delays. Delays allow to
-define the time a mail have to be living in the mailqueue before being
-dequeued and is used for example for exponential wait delays upon remote
-delivery retries, or
-
-=== Mailbox
-
-Storage for emails belonging for users.
-
-Metadata are stored in Cassandra while headers, bodies and attachments are stored
-within the xref:#_blobstore[BlobStore].
-
-==== Search index
-
-Emails are indexed asynchronously in OpenSearch via the xref:#_event_bus[EventBus]
-in order to empower advanced and fast email full text search.
-
-Text extraction can be set up using link:https://tika.apache.org/[Tika], allowing
-to extract the text from attachment, allowing to search your emails based on the attachment
-textual content. In such case, the OpenSearch indexer will call a Tika server prior
-indexing.
-
-==== Quotas
-
-Current Quotas of users are hold in a Cassandra projection. Limitations can be defined via
-user, domain or globally.
-
-==== Event Bus
-
-Distributed James relies on an event bus system to enrich mailbox capabilities. Each
-operation performed on the mailbox will trigger related events, that can
-be processed asynchronously by potentially any James node on a
-distributed system.
-
-Many different kind of events can be triggered during a mailbox
-operation, such as:
-
-* `MailboxEvent`: event related to an operation regarding a mailbox:
-** `MailboxDeletion`: a mailbox has been deleted
-** `MailboxAdded`: a mailbox has been added
-** `MailboxRenamed`: a mailbox has been renamed
-** `MailboxACLUpdated`: a mailbox got its rights and permissions updated
-* `MessageEvent`: event related to an operation regarding a message:
-** `Added`: messages have been added to a mailbox
-** `Expunged`: messages have been expunged from a mailbox
-** `FlagsUpdated`: messages had their flags updated
-** `MessageMoveEvent`: messages have been moved from a mailbox to an
-other
-* `QuotaUsageUpdatedEvent`: event related to quota update
-
-Mailbox listeners can register themselves on this event bus system to be
-called when an event is fired, allowing to do different kind of extra
-operations on the system, like:
-
-* Current quota calculation
-* Message indexation with OpenSearch
-* Mailbox annotations cleanup
-* Ham/spam reporting to Spam filtering system
-* …
-
-==== Deleted Messages Vault
-
-Deleted Messages Vault is an interesting feature that will help James
-users have a chance to:
-
-* retain users deleted messages for some time.
-* restore & export deleted messages by various criteria.
-* permanently delete some retained messages.
-
-If the Deleted Messages Vault is enabled when users delete their mails,
-and by that we mean when they try to definitely delete them by emptying
-the trash, James will retain these mails into the Deleted Messages
-Vault, before an email or a mailbox is going to be deleted. And only
-administrators can interact with this component via
-wref:webadmin.adoc#_deleted-messages-vault[WebAdmin] REST APIs].
-
-However, mails are not retained forever as you have to configure a
-retention period before using it (with one-year retention by default if
-not defined). It’s also possible to permanently delete a mail if needed.
-
-=== Data
-
-Storage for domains and users.
-
-Domains are persisted in Cassandra.
-
-Users can be managed in Cassandra, or via a LDAP (read only).
-
-=== Recipient rewrite tables
-
-Storage of Recipients Rewriting rules, in Cassandra.
-
-==== Mapping types
-
-James allows using various mapping types for better expressing the intent of your address rewriting logic:
-
-* *Domain mapping*: Rewrites the domain of mail addresses. Use it for technical purposes, user will not
-be allowed to use the source in their FROM address headers. Domain mappings can be managed via the CLI and
-added via xref:distributed/operate/webadmin.adoc#_domain_mappings[WebAdmin]
-* *Domain aliases*: Rewrites the domain of mail addresses. Express the idea that both domains can be used
-inter-changeably. User will be allowed to use the source in their FROM address headers. Domain aliases can
-be managed via xref:distributed/operate/webadmin.adoc#_get_the_list_of_aliases_for_a_domain[WebAdmin]
-* *Forwards*: Replaces the source address by another one. Vehicles the intent of forwarding incoming mails
-to other users. Listing the forward source in the forward destinations keeps a local copy. User will not be
-allowed to use the source in their FROM address headers. Forward can
-be managed via xref:distributed/operate/webadmin.adoc#_address_forwards[WebAdmin]
-* *Groups*: Replaces the source address by another one. Vehicles the intent of a group registration: group
-address will be swapped by group member addresses (Feature poor mailing list). User will not be
-allowed to use the source in their FROM address headers. Groups can
-be managed via xref:distributed/operate/webadmin.adoc#_address_group[WebAdmin]
-* *Aliases*: Replaces the source address by another one. Represents user owned mail address, with which
-he can interact as if it was his main mail address. User will be allowed to use the source in their FROM
-address headers. Aliases can be managed via xref:distributed/operate/webadmin.adoc#_address_aliases[WebAdmin]
-* *Address mappings*: Replaces the source address by another one. Use for technical purposes, this mapping type do
-not hold specific intent. Prefer using one of the above mapping types... User will not be allowed to use the source
-in their FROM address headers. Address mappings can be managed via the CLI or via
-xref:distributed/operate/webadmin.adoc#_address_mappings[WebAdmin]
-* *Regex mappings*: Applies the regex on the supplied address. User will not be allowed to use the source
-in their FROM address headers. Regex mappings can be managed via the CLI or via
-xref:distributed/operate/webadmin.adoc#_regex_mapping[WebAdmin]
-* *Error*: Throws an error upon processing. User will not be allowed to use the source
-in their FROM address headers. Errors can be managed via the CLI
-
-=== BlobStore
-
-Stores potentially large binary data.
-
-Mailbox component, Mail Queue component, Deleted Message Vault
-component relies on it.
-
-Supported backends include S3 compatible ObjectStorage (link:https://wiki.openstack.org/wiki/Swift[Swift], S3 API).
-
-Encryption can be configured on top of ObjectStorage.
-
-Blobs can currently be deduplicated in order to reduce storage space. This means that two blobs with
-the same content will be stored one once.
-
-The downside is that deletion is more complicated, and a garbage collection needs to be run. A first implementation
-based on bloom filters can be used and triggered using the WebAdmin REST API.
-
-=== Task Manager
-
-Allows to control and schedule long running tasks run by other
-components. Among other it enables scheduling, progress monitoring,
-cancellation of long running tasks.
-
-Distributed James leverage a task manager using Event Sourcing and RabbitMQ for messaging.
-
-=== Event sourcing
-
-link:https://martinfowler.com/eaaDev/EventSourcing.html[Event sourcing] implementation
-for the Distributed server stores events in Cassandra. It enables components
-to rely on event sourcing technics for taking decisions.
-
-A short list of usage are:
-
-* Data leak prevention storage
-* JMAP filtering rules storage
-* Validation of the MailQueue configuration
-* Sending email warnings to user close to their quota
-* Implementation of the TaskManager
+:backend-name: cassandra
+:server-name: Distributed James Server
+:backend-storage-introduce: Cassandra is used for metadata storage. Cassandra is efficient for a very high workload of small queries following a known pattern.
+:storage-picture-file-name: storage_james_distributed.png
+:mailet-repository-path-prefix: cassandra
+:xref-base: distributed
+:mailqueue-combined-extend-backend: , Cassandra
+:mailqueue-combined-extend: servers:distributed/architecture/mailqueue_combined_extend.adoc
+
+include::partial$architecture/index.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/architecture/mailqueue_combined_extend.adoc b/docs/modules/servers/pages/distributed/architecture/mailqueue_combined_extend.adoc
new file mode 100644
index 00000000000..2e381417e5b
--- /dev/null
+++ b/docs/modules/servers/pages/distributed/architecture/mailqueue_combined_extend.adoc
@@ -0,0 +1,4 @@
+* Cassandra enables administrative operations such as browsing, deleting
+using a time series which might require fine performance tuning (see
+http://cassandra.apache.org/doc/latest/operating/index.html[Operating
+Cassandra documentation]).
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/architecture/specialized-instances.adoc b/docs/modules/servers/pages/distributed/architecture/specialized-instances.adoc
index 5c7365da4ba..03a412f0e2f 100644
--- a/docs/modules/servers/pages/distributed/architecture/specialized-instances.adoc
+++ b/docs/modules/servers/pages/distributed/architecture/specialized-instances.adoc
@@ -1,39 +1,7 @@
= Distributed James Server — Specialized instances
:navtitle: Specialized instances
-While it is perfectly possible to deploy homogeneous James instances, with the same configuration and thus the same
-protocols and the same responsibilities one might want to investigate in 'Specialized instances'.
+:server-name: Distributed James Server
+:specialized-instances-file-name: specialized-instances-distributed.png
-This deployment topology consists of Distributed James servers with heterogeneous configurations on top of shared
-data-bases. Groups of James servers will thus handle various protocols and have different responsibilities.
-
-This approach limits cascading failures across protocols and services. Think of *OutOfMemoryErrors*, Cassandra driver
-queue overuse, CPUs starvation, etc.
-
-However, we can't speak of micro-services here: all James instances runs the same code, James is still a monolith, and
-databases need to be shared across instances.
-
-image::specialized-instances.png[Example of Specialized instances topology]
-
-We speak of:
-
- - **Front-line servers** serves protocols. James enables to easily turn protocols on and off. Typically, each protocol would
- be isolated in its own group of James instances: james-imap, james-jmap, james-smtp, james-webadmin, etc... Refer to
- protocols configuration files to learn more.
-
- - **Back-office servers** handles other services like:
-
- - Mail processing.
- - Remote delivery.
- - Event processing.
- - Task execution.
-
-Front-line servers will likely not handle back office responsibilities (but be sure to have back-office servers that do!).
-
- - xref:distributed/configure/mailetcontainer.adoc[Mail processing can be switched off].
- - xref:distributed/configure/listeners.adoc[Mailbox event processing can be switched off].
- - xref:distributed/configure/rabbitmq.adoc[Task execution can be switched off].
- - Remote Delivery service is not started if the RemoteDelivery mailet is not positioned in mailetcontainer.xml.
-
-Of course, the above instances can be collocated at will, to reach some intermediate deployments with fewer
-instances to mitigate costs.
\ No newline at end of file
+include::partial$architecture/specialized-instances.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/benchmark/benchmark_prepare.adoc b/docs/modules/servers/pages/distributed/benchmark/benchmark_prepare.adoc
new file mode 100644
index 00000000000..ab0c01417a7
--- /dev/null
+++ b/docs/modules/servers/pages/distributed/benchmark/benchmark_prepare.adoc
@@ -0,0 +1 @@
+//
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/benchmark/db-benchmark.adoc b/docs/modules/servers/pages/distributed/benchmark/db-benchmark.adoc
index f2172186346..50b91bfeb4a 100644
--- a/docs/modules/servers/pages/distributed/benchmark/db-benchmark.adoc
+++ b/docs/modules/servers/pages/distributed/benchmark/db-benchmark.adoc
@@ -1,32 +1,12 @@
= Distributed James Server -- Database benchmarks
:navtitle: Database benchmarks
-This document provides basic performance of Distributed James' databases, benchmark methodologies as a basis for a James administrator who
-can test and evaluate if his Distributed James databases are performing well.
+:backend-name: cassandra
+:backend-name-cap: Cassandra
+:server-name: Distributed James Server
+:backend-database-extend-sample: Apache Cassandra 4 as main database: 3 nodes, each node has 8 OVH vCores CPU and 30 GB memory limit (OVH b2-30 instance).
-It includes:
-
-* A sample deployment topology
-* Propose benchmark methodology and base performance for each database. This aims to help operators to quickly identify
-performance issues and compliance of their databases.
-
-== Sample deployment topology
-
-We deploy a sample topology of Distributed James with these following databases:
-
-- Apache Cassandra 4 as main database: 3 nodes, each node has 8 OVH vCores CPU and 30 GB memory limit (OVH b2-30 instance).
-- OpenDistro 1.13.1 as search engine: 3 nodes, each node has 8 OVH vCores CPU and 30 GB memory limit (OVH b2-30 instance).
-- RabbitMQ 3.8.17 as message queue: 3 Kubernetes pods, each pod has 0.6 OVH vCore CPU and 2 GB memory limit.
-- OVH Swift S3 as an object storage
-
-With the above system, our email service operates stably with valuable performance.
-For a more details, it can handle a load throughput up to about 1000 JMAP requests per second with 99th percentile latency is 400ms.
-
-== Benchmark methodologies and base performances
-We are willing to share the benchmark methodologies and the result to you as a reference to evaluate your Distributed James' performance.
-Other evaluation methods are welcome, as long as your databases exhibit similar or even better performance than ours.
-It is up to your business needs. If your databases shows results that fall far from our baseline performance, there's a good chance that
-there are problems with your system, and you need to check it out thoroughly.
+include::partial$benchmark/db-benchmark.adoc[]
=== Benchmark Cassandra
@@ -118,350 +98,3 @@ https://www.datastax.com/blog/improved-cassandra-21-stress-tool-benchmark-any-sc
https://www.instaclustr.com/deep-diving-cassandra-stress-part-3-using-yaml-profiles/[Deep Diving cassandra-stress – Part 3 (Using YAML Profiles)]
-=== Benchmark OpenSearch
-
-==== Benchmark methodology
-
-===== Benchmark tool
-We use https://github.com/opensearch-project/opensearch-benchmark[opensearch-benchmark] - an official OpenSearch benchmarking tool.
-It provides the following features:
-
-- Automatically create OpenSearch clusters, stress tests them, and delete them.
-- Manage stress testing data and solutions by OpenSearch version.
-- Present stress testing data in a comprehensive way, allowing you to compare and analyze the data of different stress tests and store the data on a particular OpenSearch instance for secondary analysis.
-- Collect Java Virtual Machine (JVM) details, such as memory and garbage collection (GC) data, to locate performance problems.
-
-===== How to benchmark
-To install the `opensearch-benchmark` tool, you need Python 3.8+ including pip3 first, then run:
-```
-python3 -m pip install opensearch-benchmark
-```
-
-If you have any trouble or need more detailed instructions, please look in the https://github.com/opensearch-project/OpenSearch-Benchmark/blob/main/DEVELOPER_GUIDE.md[detailed installation guide].
-
-Let's see which workloads (simulation profiles) `opensearch-benchmark` provides: ```opensearch-benchmark list workloads```.
-For our James use case, we are interested in ```pmc``` workload: ```Full-text benchmark with academic papers from PMC```.
-
-Run the below script to benchmark against your OpenSearch cluster:
-
-[source,bash]
-----
-opensearch-benchmark execute_test --pipeline=benchmark-only --workload=[workload-name] --target-host=[ip_node1:port_node1],[ip_node2:port_node2],[ip_node3:port_node3] --client-options="use_ssl:false,verify_certs:false,basic_auth_user:'[user]',basic_auth_password:'[password]'"
-----
-
-In there:
-
-* --pipeline=benchmark-only: benchmark against a running cluster
-* workload-name: the workload you want to benchmark
-* ip:port: OpenSearch node's socket
-* user/password: OpenSearch authentication credentials
-
-==== Sample benchmark result
-===== PMC workload
-
-[source]
-----
-| Metric | Task | Value | Unit |
-|---------------------------------------------------------------:|------------------------------:|------------:|--------:|
-| Min Throughput | index-append | 734.63 | docs/s |
-| Mean Throughput | index-append | 763.16 | docs/s |
-| Median Throughput | index-append | 746.5 | docs/s |
-| Max Throughput | index-append | 833.51 | docs/s |
-| 50th percentile latency | index-append | 4738.57 | ms |
-| 90th percentile latency | index-append | 8129.1 | ms |
-| 99th percentile latency | index-append | 11734.5 | ms |
-| 100th percentile latency | index-append | 14662.9 | ms |
-| 50th percentile service time | index-append | 4738.57 | ms |
-| 90th percentile service time | index-append | 8129.1 | ms |
-| 99th percentile service time | index-append | 11734.5 | ms |
-| 100th percentile service time | index-append | 14662.9 | ms |
-| error rate | index-append | 0 | % |
-| Min Throughput | default | 19.94 | ops/s |
-| Mean Throughput | default | 19.95 | ops/s |
-| Median Throughput | default | 19.95 | ops/s |
-| Max Throughput | default | 19.96 | ops/s |
-| 50th percentile latency | default | 23.1322 | ms |
-| 90th percentile latency | default | 25.4129 | ms |
-| 99th percentile latency | default | 29.1382 | ms |
-| 100th percentile latency | default | 29.4762 | ms |
-| 50th percentile service time | default | 21.4895 | ms |
-| 90th percentile service time | default | 23.589 | ms |
-| 99th percentile service time | default | 26.6134 | ms |
-| 100th percentile service time | default | 27.9068 | ms |
-| error rate | default | 0 | % |
-| Min Throughput | term | 19.93 | ops/s |
-| Mean Throughput | term | 19.94 | ops/s |
-| Median Throughput | term | 19.94 | ops/s |
-| Max Throughput | term | 19.95 | ops/s |
-| 50th percentile latency | term | 31.0684 | ms |
-| 90th percentile latency | term | 34.1419 | ms |
-| 99th percentile latency | term | 74.7904 | ms |
-| 100th percentile latency | term | 103.663 | ms |
-| 50th percentile service time | term | 29.6775 | ms |
-| 90th percentile service time | term | 32.4288 | ms |
-| 99th percentile service time | term | 36.013 | ms |
-| 100th percentile service time | term | 102.193 | ms |
-| error rate | term | 0 | % |
-| Min Throughput | phrase | 19.94 | ops/s |
-| Mean Throughput | phrase | 19.95 | ops/s |
-| Median Throughput | phrase | 19.95 | ops/s |
-| Max Throughput | phrase | 19.95 | ops/s |
-| 50th percentile latency | phrase | 23.0255 | ms |
-| 90th percentile latency | phrase | 26.1607 | ms |
-| 99th percentile latency | phrase | 31.2094 | ms |
-| 100th percentile latency | phrase | 45.5012 | ms |
-| 50th percentile service time | phrase | 21.5109 | ms |
-| 90th percentile service time | phrase | 24.4144 | ms |
-| 99th percentile service time | phrase | 26.1865 | ms |
-| 100th percentile service time | phrase | 43.5122 | ms |
-| error rate | phrase | 0 | % |
-
-----------------------------------
-[INFO] SUCCESS (took 1772 seconds)
-----------------------------------
-----
-
-===== PMC custom workload
-We customized the PMC workload by increasing search throughput target to figure out our OpenSearch cluster limit.
-
-The result is that with 25-30 request/s we have a 99th percentile latency of 1s.
-
-==== References
-The `opensearch-benchmark` tool seems to be a fork of the official benchmark tool https://github.com/elastic/rally[EsRally] of Elasticsearch.
-The `opensearch-benchmark` tool is not adopted widely yet, so we believe some EsRally references could help as well:
-
-- https://www.alibabacloud.com/blog/esrally-official-stress-testing-tool-for-elasticsearch_597102[esrally: Official Stress Testing Tool for Elasticsearch]
-
-- https://esrally.readthedocs.io/en/latest/adding_tracks.html[Create a custom EsRally track]
-
-- https://discuss.elastic.co/t/why-the-percentile-latency-is-several-times-more-than-service-time/69630[Why the percentile latency is several times more than service time]
-
-=== Benchmark RabbitMQ
-
-==== Benchmark methodology
-
-===== Benchmark tool
-We use https://github.com/rabbitmq/rabbitmq-perf-test[rabbitmq-perf-test] tool.
-
-===== How to benchmark
-Using PerfTestMulti is more user-friendly:
-
-- Provide input scenario from a single file
-- Provide output result as a single file. Can be visualized result file by the chart (graph WebUI)
-
-Run a command like below:
-
-[source,bash]
-----
-bin/runjava com.rabbitmq.perf.PerfTestMulti [scenario-file] [result-file]
-----
-
-In order to visualize the result, copy [result-file] to ```/html/examples/[result-file]```.
-Start webserver to view graph by the command:
-
-[source,bash]
-----
-bin/runjava com.rabbitmq.perf.WebServer
-----
-Then browse: http://localhost:8080/examples/sample.html
-
-==== Sample benchmark result
-- Scenario file:
-
-[source]
-----
-[{'name': 'consume', 'type': 'simple',
-'uri': 'amqp://james:eeN7Auquaeng@localhost:5677',
-'params':
- [{'time-limit': 30, 'producer-count': 2, 'consumer-count': 4}]}]
-----
-
-- Result file:
-
-[source,json]
-----
-{
- "consume": {
- "send-bytes-rate": 0,
- "recv-msg-rate": 4330.225080385852,
- "avg-latency": 18975254,
- "send-msg-rate": 455161.3183279743,
- "recv-bytes-rate": 0,
- "samples": [{
- "elapsed": 15086,
- "send-bytes-rate": 0,
- "recv-msg-rate": 0,
- "send-msg-rate": 0.06628662335940608,
- "recv-bytes-rate": 0
- },
- {
- "elapsed": 16086,
- "send-bytes-rate": 0,
- "recv-msg-rate": 1579,
- "max-latency": 928296,
- "min-latency": 278765,
- "avg-latency": 725508,
- "send-msg-rate": 388994,
- "recv-bytes-rate": 0
- },
- {
- "elapsed": 48184,
- "send-bytes-rate": 0,
- "recv-msg-rate": 3768.4918347742555,
- "max-latency": 32969370,
- "min-latency": 31852685,
- "avg-latency": 32385432,
- "send-msg-rate": 0,
- "recv-bytes-rate": 0
- },
- {
- "elapsed": 49186,
- "send-bytes-rate": 0,
- "recv-msg-rate": 4416.167664670658,
- "max-latency": 33953465,
- "min-latency": 32854771,
- "avg-latency": 33373113,
- "send-msg-rate": 0,
- "recv-bytes-rate": 0
- }]
- }
-}
-----
-
-- Key result points:
-
-|===
-|Metrics |Unit |Result
-
-|Publisher throughput (the sending rate)
-|messages / second
-|3111
-
-|Consumer throughput (the receiving rate)
-|messages / second
-|4404
-|===
-
-=== Benchmark S3 storage
-
-==== Benchmark methodology
-
-===== Benchmark tool
-We use https://github.com/dvassallo/s3-benchmark[s3-benchmark] tool.
-
-===== How to benchmark
-1. Make sure you set up appropriate S3 credentials with `awscli`.
-2. If you are using a compatible S3 storage of cloud providers like OVH, you would need to configure
-`awscli-plugin-endpoint`. E.g: https://docs.ovh.com/au/en/storage/getting_started_with_the_swift_S3_API/[Getting started with the OVH Swift S3 API]
-3. Install `s3-benchmark` tool and run the command:
-
-[source,bash]
-----
-./s3-benchmark -endpoint=[endpoint] -region=[region] -bucket-name=[bucket-name] -payloads-min=[payload-min] -payloads-max=[payload-max] threads-max=[threads-max]
-----
-
-==== Sample benchmark result
-We did S3 performance testing with representative email object sizes: 4 KB, 128 KB, 1 MB, 8 MB.
-
-Result:
-
-[source,bash]
-----
---- SETUP --------------------------------------------------------------------------------------------------------------------
-
-Uploading 4 KB objects
- 100% |████████████████████████████████████████| [4s:0s]
-Uploading 128 KB objects
- 100% |████████████████████████████████████████| [9s:0s]
-Uploading 1 MB objects
- 100% |████████████████████████████████████████| [8s:0s]
-Uploading 8 MB objects
- 100% |████████████████████████████████████████| [10s:0s]
-
---- BENCHMARK ----------------------------------------------------------------------------------------------------------------
-
-Download performance with 4 KB objects (b2-30)
- +-------------------------------------------------------------------------------------------------+
- | Time to First Byte (ms) | Time to Last Byte (ms) |
-+---------+----------------+------------------------------------------------+------------------------------------------------+
-| Threads | Throughput | avg min p25 p50 p75 p90 p99 max | avg min p25 p50 p75 p90 p99 max |
-+---------+----------------+------------------------------------------------+------------------------------------------------+
-| 8 | 0.6 MB/s | 36 10 17 22 36 57 233 249 | 37 10 17 22 36 57 233 249 |
-| 9 | 0.6 MB/s | 30 10 15 21 33 45 82 234 | 30 10 15 21 33 45 83 235 |
-| 10 | 0.2 MB/s | 55 11 18 22 28 52 248 1075 | 55 11 18 22 28 52 249 1075 |
-| 11 | 0.3 MB/s | 66 11 18 23 45 233 293 683 | 67 11 19 23 45 233 293 683 |
-| 12 | 0.6 MB/s | 35 12 19 22 43 55 67 235 | 35 12 19 22 43 56 67 235 |
-| 13 | 0.2 MB/s | 68 11 19 26 58 79 279 1037 | 68 11 19 26 58 80 279 1037 |
-| 14 | 0.6 MB/s | 43 17 20 24 52 56 230 236 | 43 17 20 25 52 56 230 236 |
-| 15 | 0.2 MB/s | 69 11 16 23 50 66 274 1299 | 69 11 16 24 50 66 274 1299 |
-| 16 | 0.5 MB/s | 52 9 19 31 81 95 228 237 | 53 9 19 31 81 95 229 237 |
-+---------+----------------+------------------------------------------------+------------------------------------------------+
-
-Download performance with 128 KB objects (b2-30)
- +-------------------------------------------------------------------------------------------------+
- | Time to First Byte (ms) | Time to Last Byte (ms) |
-+---------+----------------+------------------------------------------------+------------------------------------------------+
-| Threads | Throughput | avg min p25 p50 p75 p90 p99 max | avg min p25 p50 p75 p90 p99 max |
-+---------+----------------+------------------------------------------------+------------------------------------------------+
-| 8 | 3.3 MB/s | 71 16 22 28 39 66 232 1768 | 73 16 23 29 43 67 233 1769 |
-| 9 | 3.6 MB/s | 74 9 19 23 34 58 239 1646 | 75 10 20 24 37 59 240 1647 |
-| 10 | 2.9 MB/s | 97 16 21 24 48 89 656 2034 | 99 17 21 26 49 92 657 2035 |
-| 11 | 3.0 MB/s | 100 10 21 26 39 64 1049 2029 | 101 11 21 27 40 65 1050 2030 |
-| 12 | 3.0 MB/s | 76 12 19 24 44 56 256 2012 | 77 13 20 25 48 69 258 2013 |
-| 13 | 6.1 MB/s | 73 10 13 20 43 223 505 1026 | 74 10 15 21 43 224 506 1027 |
-| 14 | 5.5 MB/s | 81 11 15 23 51 240 666 1060 | 82 12 16 23 54 241 667 1060 |
-| 15 | 2.7 MB/s | 80 10 19 28 43 59 234 2222 | 84 11 25 34 47 60 236 2224 |
-| 16 | 18.6 MB/s | 58 10 19 26 61 224 248 266 | 61 10 22 29 65 224 249 267 |
-+---------+----------------+------------------------------------------------+------------------------------------------------+
-
-Download performance with 1 MB objects (b2-30)
- +-------------------------------------------------------------------------------------------------+
- | Time to First Byte (ms) | Time to Last Byte (ms) |
-+---------+----------------+------------------------------------------------+------------------------------------------------+
-| Threads | Throughput | avg min p25 p50 p75 p90 p99 max | avg min p25 p50 p75 p90 p99 max |
-+---------+----------------+------------------------------------------------+------------------------------------------------+
-| 8 | 56.4 MB/s | 41 12 26 34 43 57 94 235 | 136 30 69 100 161 284 345 396 |
-| 9 | 55.2 MB/s | 53 19 32 39 50 69 238 247 | 149 26 84 117 164 245 324 655 |
-| 10 | 33.9 MB/s | 74 17 27 37 50 77 456 1060 | 177 29 97 134 205 273 484 1076 |
-| 11 | 57.3 MB/s | 56 26 35 44 57 71 251 298 | 185 40 93 129 216 329 546 871 |
-| 12 | 37.7 MB/s | 66 21 33 43 58 73 102 1024 | 202 24 81 125 205 427 839 1222 |
-| 13 | 57.6 MB/s | 59 24 35 40 58 71 275 289 | 215 40 94 181 288 393 500 674 |
-| 14 | 47.1 MB/s | 73 18 46 56 66 75 475 519 | 229 30 116 221 272 441 603 686 |
-| 15 | 58.2 MB/s | 65 11 40 51 63 75 260 294 | 243 29 132 174 265 485 831 849 |
-| 16 | 23.1 MB/s | 96 14 46 55 62 80 124 2022 | 278 31 124 187 249 634 827 2028 |
-+---------+----------------+------------------------------------------------+------------------------------------------------+
-
-Download performance with 8 MB objects (b2-30)
- +-------------------------------------------------------------------------------------------------+
- | Time to First Byte (ms) | Time to Last Byte (ms) |
-+---------+----------------+------------------------------------------------+------------------------------------------------+
-| Threads | Throughput | avg min p25 p50 p75 p90 p99 max | avg min p25 p50 p75 p90 p99 max |
-+---------+----------------+------------------------------------------------+------------------------------------------------+
-| 8 | 58.4 MB/s | 88 35 65 79 88 96 288 307 | 1063 458 564 759 928 1151 4967 6841 |
-| 9 | 50.4 MB/s | 137 32 52 69 145 286 509 1404 | 1212 160 471 581 1720 2873 3744 4871 |
-| 10 | 58.2 MB/s | 77 46 54 66 77 98 275 285 | 1319 377 432 962 1264 3232 4266 6151 |
-| 11 | 58.4 MB/s | 97 32 63 72 80 91 323 707 | 1429 325 593 722 1648 3020 6172 6370 |
-| 12 | 58.5 MB/s | 108 26 65 81 91 261 301 519 | 1569 472 696 1101 1915 3175 4066 5110 |
-| 13 | 56.1 MB/s | 115 35 69 83 93 125 329 1092 | 1712 458 801 1165 2354 3559 3865 5945 |
-| 14 | 58.6 MB/s | 103 26 70 78 88 112 309 656 | 1807 789 999 1269 1998 3258 5201 6651 |
-| 15 | 58.3 MB/s | 113 31 55 67 79 134 276 1490 | 1947 497 1081 1756 2730 3557 3799 3974 |
-| 16 | 58.0 MB/s | 99 35 67 79 96 146 282 513 | 2091 531 882 1136 2161 6034 6686 6702 |
-+---------+----------------+------------------------------------------------+------------------------------------------------+
-----
-
-We believe that the actual OVH Swift S3's throughput should be at least about 100 MB/s. This was not fully achieved due to
-network limitations of the client machine performing the benchmark.
-
-=== Benchmark Redis
-
-==== Benchmark methodology
-
-We can use the built-in https://redis.io/docs/latest/operate/oss_and_stack/management/optimization/benchmarks/[redis-benchmark utility].
-
-The tool is easy to use with good documentation. Just to be sure that you specify the redis-benchmark to use multi-thread if it runs against a multi-thread Redis instance.
-
-Example:
-```
-redis-benchmark -n 1000000 --threads 4
-```
-
diff --git a/docs/modules/servers/pages/distributed/benchmark/index.adoc b/docs/modules/servers/pages/distributed/benchmark/index.adoc
index e94aba0a08a..0c299967fe0 100644
--- a/docs/modules/servers/pages/distributed/benchmark/index.adoc
+++ b/docs/modules/servers/pages/distributed/benchmark/index.adoc
@@ -1,10 +1,7 @@
-= Distributed James Server — Performance testing the Distributed server
+= Distributed James Server — Performance testing
:navtitle: Performance testing the Distributed server
-The following pages detail how to do performance testing for the Distributed server also its database.
+:xref-base: distributed
+:server-name: Distributed James Server
-Once you have a Distributed James server up and running you then need to ensure it operates correctly and has a decent performance.
-You may need to do performance testings periodically to make sure your James performs well.
-
-We introduced xref:distributed/benchmark/james-benchmark.adoc[tools and base benchmark result for Distributed James] also xref:distributed/benchmark/db-benchmark.adoc[James database's base performance and how to benchmark them]
-to cover this topic.
\ No newline at end of file
+include::partial$benchmark/index.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/benchmark/james-benchmark.adoc b/docs/modules/servers/pages/distributed/benchmark/james-benchmark.adoc
index 07040bb90fa..fe5d0b7579e 100644
--- a/docs/modules/servers/pages/distributed/benchmark/james-benchmark.adoc
+++ b/docs/modules/servers/pages/distributed/benchmark/james-benchmark.adoc
@@ -1,100 +1,10 @@
= Distributed James Server benchmark
:navtitle: James benchmarks
-This document provides benchmark methodology and basic performance of Distributed James as a basis for a James administrator who
-can test and evaluate if his Distributed James is performing well.
-
-It includes:
-
-* A sample Distributed James deployment topology
-* Propose benchmark methodology
-* Sample performance results
-
-This aims to help operators quickly identify performance issues.
-
-== Sample deployment topology
-
-We deploy a sample topology of Distributed James with these following components:
-
-- Distributed James: 3 Kubernetes pods, each pod has 2 OVH vCore CPU and 4 GB memory limit.
-- Apache Cassandra 4 as main database: 3 nodes, each node has 8 OVH vCores CPU and 30 GB memory limit (OVH b2-30 instance).
-- OpenDistro 1.13.1 as search engine: 3 nodes, each node has 8 OVH vCores CPU and 30 GB memory limit (OVH b2-30 instance).
-- RabbitMQ 3.8.17 as message queue: 3 Kubernetes pods, each pod has 0.6 OVH vCore CPU and 2 GB memory limit.
-- OVH Swift S3 as an object storage
-
-== Benchmark methodology and base performance
-
-=== Provision testing data
-
-Before doing the performance test, you should make sure you have a Distributed James up and running with some provisioned testing
-data so that it is representative of reality.
-
-Please follow these steps to provision testing data:
-
-* Prepare James with a custom `mailetcontainer.xml` having Random storing mailet. This help us easily setting a good amount of
-provisioned emails.
-
-Add this under transport processor
-----
-
-----
-
-* Modify https://github.com/apache/james-project/tree/master/docs/modules/servers/pages/distributed/benchmark/provision.sh[provision.sh]
-upon your need (number of users, mailboxes, emails to be provisioned).
-
-Currently, this script provisions 10 users, 15 mailboxes and hundreds of emails for example. Normally to make the performance test representative, you
-should provision thousands of users, thousands of mailboxes and millions of emails.
-
-* Add the permission to execute the script:
-----
-chmod +x provision.sh
-----
-
-* Install postfix (to get the smtp-source command):
-----
-sudo apt-get install postfix
-----
-
-* Run the provision script:
-----
-./provision.sh
-----
-
-After provisioning once, you should remove the Random storing mailet and move on to performance testing phase.
-
-=== Provide performance testing method
-
-We introduce the tailored https://github.com/linagora/james-gatling[James Gatling] which is based on https://gatling.io/[Gatling - Load testing framework]
-for performance testing against IMAP/JMAP servers. Other testing methods are welcome as long as you feel they are appropriate.
-
-Here are steps to do performance testing with James Gatling:
-
-* Setup James Gatling with `sbt` build tool
-
-* Configure the `Configuration.scala` to point to your Distributed James IMAP/JMAP server(s). For more configuration details, please read
-https://github.com/linagora/james-gatling#readme[James Gatling Readme].
-
-* Run the performance testing simulation:
-----
-$ sbt
-> gatling:testOnly SIMULATION_FQDN
-----
-
-In there: `SIMULATION_FQDN` is fully qualified class name of a performance test simulation.
-
-We did provide a lot of simulations in `org.apache.james.gatling.simulation` path. You can have a look and choose the suitable simulation.
-`sbt gatling:testOnly org.apache.james.gatling.simulation.imap.PlatformValidationSimulation` is a good starting point. Or you can even customize your simulation also!
-
-Some symbolic simulations we often use:
-
-* IMAP: `org.apache.james.gatling.simulation.imap.PlatformValidationSimulation`
-* JMAP rfc8621: `org.apache.james.gatling.simulation.jmap.rfc8621.PushPlatformValidationSimulation`
-
-=== Base performance result
-
-A sample IMAP performance testing result (PlatformValidationSimulation):
-
-image::james-imap-base-performance.png[]
-
-If you get an IMAP performance far below this base performance, you should consider investigating for performance issues.
+:server-name: Distributed James Server
+:backend-database-extend-sample: Apache Cassandra 4 as main database: 3 nodes, each node has 8 OVH vCores CPU and 30 GB memory limit (OVH b2-30 instance).
+:provision_file_url: https://github.com/apache/james-project/tree/master/docs/modules/servers/pages/distributed/benchmark/provision.sh
+:benchmark_prepare_extend: servers:distributed/benchmark/benchmark_prepare.adoc
+:james-imap-base-performance-picture: james-imap-base-performance-distributed.png
+include::partial$benchmark/james-benchmark.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/batchsizes.adoc b/docs/modules/servers/pages/distributed/configure/batchsizes.adoc
index 4d6123e468e..be7e6bfb1c2 100644
--- a/docs/modules/servers/pages/distributed/configure/batchsizes.adoc
+++ b/docs/modules/servers/pages/distributed/configure/batchsizes.adoc
@@ -1,34 +1,5 @@
= Distributed James Server — batchsizes.properties
:navtitle: batchsizes.properties
-This files allow to define the amount of data that should be fetched 'at once' when interacting with the mailbox. This is
-needed as IMAP can generate some potentially large requests.
-
-Increasing these values tend to fasten individual requests, at the cost of enabling potential higher load.
-
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/batchsizes.properties[example]
-to get some examples and hints.
-
-.batchsizes.properties content
-|===
-| Property name | explanation
-
-| fetch.metadata
-| Optional, defaults to 200. How many messages should be read in a batch when using FetchType.MetaData
-
-| fetch.headers
-| Optional, defaults to 200. How many messages should be read in a batch when using FetchType.Header
-
-| fetch.body
-| Optional, defaults to 100. How many messages should be read in a batch when using FetchType.Body
-
-| fetch.full
-| Optional, defaults to 50. How many messages should be read in a batch when using FetchType.Full
-
-| copy
-| Optional, defaults to 200. How many messages should be copied in a batch.
-
-| move
-| Optional, defaults to 200. How many messages should be moved in a batch.
-
-|===
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+include::partial$configure/batchsizes.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/blobstore.adoc b/docs/modules/servers/pages/distributed/configure/blobstore.adoc
index 0ebcf516d5d..84673e86b45 100644
--- a/docs/modules/servers/pages/distributed/configure/blobstore.adoc
+++ b/docs/modules/servers/pages/distributed/configure/blobstore.adoc
@@ -1,6 +1,9 @@
= Distributed James Server — blobstore.properties
:navtitle: blobstore.properties
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+
== BlobStore
This file is optional. If omitted, the *cassandra* blob store will be used.
@@ -12,7 +15,7 @@ You can choose the underlying implementation of BlobStore to fit with your James
It could be the implementation on top of Cassandra or file storage service S3 compatible like Openstack Swift and AWS S3.
-Consult link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/blob.properties[blob.properties]
+Consult link:{sample-configuration-prefix-url}/blob.properties[blob.properties]
in GIT to get some examples and hints.
=== Implementation choice
@@ -22,7 +25,7 @@ in GIT to get some examples and hints.
* cassandra: use cassandra based BlobStore
* objectstorage: use Swift/AWS S3 based BlobStore
* file: (experimental) use directly the file system. Useful for legacy architecture based on shared ISCI SANs and/or
- distributed file system with no object store available.
+distributed file system with no object store available.
WARNING: JAMES-3591 Cassandra is not made to store large binary content, its use will be suboptimal compared to
Alternatives (namely S3 compatible BlobStores backed by for instance S3, MinIO or Ozone)
@@ -41,7 +44,7 @@ NOTE: If you are upgrading from James 3.5 or older, the deduplication was enable
Deduplication requires a garbage collector mechanism to effectively drop blobs. A first implementation
based on bloom filters can be used and triggered using the WebAdmin REST API. See
-xref:distributed/operate/webadmin.adoc#_running_blob_garbage_collection[Running blob garbage collection].
+xref:{pages-path}/operate/webadmin.adoc#_running_blob_garbage_collection[Running blob garbage collection].
In order to avoid concurrency issues upon garbage collection, we slice the blobs in generation, the two more recent
generations are not garbage collected.
@@ -52,54 +55,6 @@ but deleted blobs will live longer. Duration, defaults on 30 days, the default u
*deduplication.gc.generation.family*: Every time the duration is changed, this integer counter must be incremented to avoid
conflicts. Defaults to 1.
-=== Encryption choice
-
-Data can be optionally encrypted with a symmetric key using AES before being stored in the blobStore. As many user relies
-on third party for object storage, a compromised third party will not escalate to a data disclosure. Of course, a
-performance price have to be paid, as encryption takes resources.
-
-*encryption.aes.enable* : Optional boolean, defaults to false.
-
-If AES encryption is enabled, then the following properties MUST be present:
-
- - *encryption.aes.password* : String
- - *encryption.aes.salt* : Hexadecimal string
-
-The following properties CAN be supplied:
-
- - *encryption.aes.private.key.algorithm* : String, defaulting to PBKDF2WithHmacSHA512. Previously was
-PBKDF2WithHmacSHA1.
-
-WARNING: Once chosen this choice can not be reverted, all the data is either clear or encrypted. Mixed encryption
-is not supported.
-
-Here is an example of how you can generate the above values (be mindful to customize the byte lengths in order to add
-enough entropy.
-
-....
-# Password generation
-openssl rand -base64 64
-
-# Salt generation
-generate salt with : openssl rand -hex 16
-....
-
-AES blob store supports the following system properties that could be configured in `jvm.properties`:
-
-....
-# Threshold from which we should buffer the blob to a file upon encrypting
-# Unit supported: K, M, G, default to no unit
-james.blob.aes.file.threshold.encrypt=100K
-
-# Threshold from which we should buffer the blob to a file upon decrypting
-# Unit supported: K, M, G, default to no unit
-james.blob.aes.file.threshold.decrypt=256K
-
-# Maximum size of a blob. Larger blobs will be rejected.
-# Unit supported: K, M, G, default to no unit
-james.blob.aes.blob.max.size=100M
-....
-
=== Cassandra BlobStore Cache
A Cassandra cache can be enabled to reduce latency when reading small blobs frequently.
@@ -124,127 +79,4 @@ Supported units: bytes, Kib, MiB, GiB, TiB
Maximum size of stored objects expressed in bytes.
|===
-=== Object storage configuration
-
-==== AWS S3 Configuration
-
-.blobstore.properties S3 related properties
-|===
-| Property name | explanation
-
-| objectstorage.s3.endPoint
-| S3 service endpoint
-
-| objectstorage.s3.region
-| S3 region
-
-| objectstorage.s3.accessKeyId
-| https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys[S3 access key id]
-
-| objectstorage.s3.secretKey
-| https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys[S3 access key secret]
-
-| objectstorage.s3.http.concurrency
-| Allow setting the number of concurrent HTTP requests allowed by the Netty driver.
-
-| objectstorage.s3.truststore.path
-| optional: Verify the S3 server certificate against this trust store file.
-
-| objectstorage.s3.truststore.type
-| optional: Specify the type of the trust store, e.g. JKS, PKCS12
-
-| objectstorage.s3.truststore.secret
-| optional: Use this secret/password to access the trust store; default none
-
-| objectstorage.s3.truststore.algorithm
-| optional: Use this specific trust store algorithm; default SunX509
-
-| objectstorage.s3.trustall
-| optional: boolean. Defaults to false. Cannot be set to true with other truststore options. Whether James should validate
-S3 endpoint SSL certificates.
-
-| objectstorage.s3.read.timeout
-| optional: HTTP read timeout. duration, default value being second. Leaving it empty relies on S3 driver defaults.
-
-| objectstorage.s3.write.timeout
-| optional: HTTP write timeout. duration, default value being second. Leaving it empty relies on S3 driver defaults.
-
-| objectstorage.s3.connection.timeout
-| optional: HTTP connection timeout. duration, default value being second. Leaving it empty relies on S3 driver defaults.
-
-| objectstorage.s3.in.read.limit
-| optional: Object read in memory will be rejected if they exceed the size limit exposed here. Size, example `100M`.
-Supported units: K, M, G, defaults to B if no unit is specified. If unspecified, big object won't be prevented
-from being loaded in memory. This settings complements protocol limits.
-
-| objectstorage.s3.upload.retry.maxAttempts
-| optional: Integer. Default is zero. This property specifies the maximum number of retry attempts allowed for failed upload operations.
-
-| objectstorage.s3.upload.retry.backoffDurationMillis
-| optional: Long (Milliseconds). Default is 10 (milliseconds).
-Only takes effect when the "objectstorage.s3.upload.retry.maxAttempts" property is declared.
-This property determines the duration (in milliseconds) to wait between retry attempts for failed upload operations.
-This delay is known as backoff. The jitter factor is 0.5
-
-|===
-
-==== Buckets Configuration
-
-.Bucket configuration
-|===
-| Property name | explanation
-
-| objectstorage.bucketPrefix
-| Bucket is a concept in James and similar to Containers in Swift or Buckets in AWS S3.
-BucketPrefix is the prefix of bucket names in James BlobStore
-
-| objectstorage.namespace
-| BlobStore default bucket name. Most of blobs storing in BlobStore are inside the default bucket.
-Unless a special case like storing blobs of deleted messages.
-|===
-
-== Blob Export
-
-Blob Exporting is the mechanism to help James export a blob from a user to another user.
-It is commonly used to export deleted messages (consult configuring deleted messages vault).
-The deleted messages are transformed into a blob and James will export that blob to the target user.
-
-This configuration helps you choose the blob exporting mechanism fit with your James setup and it is only applicable with Guice products.
-
-Consult https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/blob.properties[blob.properties]
-in GIT to get some examples and hints.
-
-Configuration for exporting blob content:
-
-.blobstore.properties content
-|===
-| blob.export.implementation
-
-| localFile: Local File Exporting Mechanism (explained below). Default: localFile
-
-| linshare: LinShare Exporting Mechanism (explained below)
-|===
-
-=== Local File Blob Export Configuration
-
-For each request, this mechanism retrieves the content of a blob and save it to a distinct local file, then send an email containing the absolute path of that file to the target mail address.
-
-Note: that absolute file path is the file location on James server. Therefore, if there are two or more James servers connected, it should not be considered an option.
-
-*blob.export.localFile.directory*: The directory URL to store exported blob data in files, and the URL following
-http://james.apache.org/server/3/apidocs/org/apache/james/filesystem/api/FileSystem.html[James File System scheme].
-Default: file://var/blobExporting
-
-=== LinShare Blob Export Configuration
-
-Instead of exporting blobs in local file system, using https://www.linshare.org[LinShare]
-helps you upload your blobs and people you have been shared to can access those blobs by accessing to
-LinShare server and download them.
-
-This helps you share across the whole network, as long as recipients can access the LinShare server.
-
-To get an example or details explained, visit https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/blob.properties[blob.properties]
-
-*blob.export.linshare.url*: The URL to connect to LinShare
-
-*blob.export.linshare.token*: The authentication token to connect to LinShare
+include::partial$configure/blobstore.adoc[]
diff --git a/docs/modules/servers/pages/distributed/configure/collecting-contacts.adoc b/docs/modules/servers/pages/distributed/configure/collecting-contacts.adoc
index ed00b04d243..418700ad921 100644
--- a/docs/modules/servers/pages/distributed/configure/collecting-contacts.adoc
+++ b/docs/modules/servers/pages/distributed/configure/collecting-contacts.adoc
@@ -1,39 +1,4 @@
= Contact collection
-== Motivation
-
-Many modern applications combines email and contacts.
-
-We want recipients of emails sent by a user to automatically be added to this user contacts, for convenience. This
-should even be performed when a user sends emails via SMTP for example using thunderbird.
-
-== Design
-
-The idea is to send AMQP messages holding information about mail envelope for a traitment via a tierce application.
-
-== Configuration
-
-We can achieve this goal by combining simple mailets building blocks.
-
-Here is a sample pipeline achieving aforementioned objectives :
-
-....
-
- extractedContacts
-
-
- amqp://${env:JAMES_AMQP_USERNAME}:${env:JAMES_AMQP_PASSWORD}@${env:JAMES_AMQP_HOST}:${env:JAMES_AMQP_PORT}
- collector:email
- extractedContacts
-
-
-....
-
-A sample message looks like:
-
-....
-{
- "userEmail": "sender@james.org",
- "emails": ["to@james.org"]
-}
-....
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+include::partial$configure/collecting-contacts.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/collecting-events.adoc b/docs/modules/servers/pages/distributed/configure/collecting-events.adoc
index f103a76a23d..0d8532bf178 100644
--- a/docs/modules/servers/pages/distributed/configure/collecting-events.adoc
+++ b/docs/modules/servers/pages/distributed/configure/collecting-events.adoc
@@ -1,69 +1,4 @@
= Event collection
-== Motivation
-
-Many calendar application do add events invitation received by email directly in ones calendar.
-
-Such behaviours requires the calendar application to be aware of the ICalendar related emails a user received.
-
-== Design
-
-The idea is to write a portion of mailet pipeline extracting Icalendar attachments and to hold them as attachments that
-can later be sent to other applications over AMQP to be treated in an asynchronous, decoupled fashion.
-
-== Configuration
-
-We can achieve this goal by combining simple mailets building blocks.
-
-Here is a sample pipeline achieving aforementioned objectives :
-
-....
-
-
- text/calendar
- rawIcalendar
-
-
- rawIcalendar
-
-
- rawIcalendar
- icalendar
-
-
- icalendar
-
-
-
- icalendarAsJson
- rawIcalendar
-
-
- amqp://${env:JAMES_AMQP_USERNAME}:${env:JAMES_AMQP_PASSWORD}@${env:JAMES_AMQP_HOST}:${env:JAMES_AMQP_PORT}
- james:events
- icalendarAsJson
-
-
-....
-
-A sample message looks like:
-
-....
-{
- "ical": "RAW_DATA_AS_TEXT_FOLLOWING_ICS_FORMAT",
- "sender": "other@james.apache.org",
- "recipient": "any@james2.apache.org",
- "replyTo": "other@james.apache.org",
- "uid": "f1514f44bf39311568d640727cff54e819573448d09d2e5677987ff29caa01a9e047feb2aab16e43439a608f28671ab7c10e754ce92be513f8e04ae9ff15e65a9819cf285a6962bc",
- "dtstamp": "20170106T115036Z",
- "method": "REQUEST",
- "sequence": "0",
- "recurrence-id": null
-}
-....
-
-The following pipeline positions the X-MEETING-UID in the Header in order for mail user agent to correlate events with this mail.
-The sample look like:
-```
-X-MEETING-UID: f1514f44bf39311568d640727cff54e819573448d09d2e5677987ff29caa01a9e047feb2aab16e43439a608f28671ab7c10e754ce92be513f8e04ae9ff15e65a9819cf285a6962bc
-```
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+include::partial$configure/collecting-events.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/dns.adoc b/docs/modules/servers/pages/distributed/configure/dns.adoc
index ecc0c80ce38..1954a4b6b35 100644
--- a/docs/modules/servers/pages/distributed/configure/dns.adoc
+++ b/docs/modules/servers/pages/distributed/configure/dns.adoc
@@ -1,55 +1,5 @@
= Distributed James Server — dnsservice.xml
:navtitle: dnsservice.xml
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/dnsservice.xml[example]
-to get some examples and hints.
-
-Specifies DNS Server information for use by various components inside Apache James Server.
-
-DNS Transport services are controlled by a configuration block in
-the dnsservice.xml. This block affects SMTP remote delivery.
-
-The dnsservice tag defines the boundaries of the configuration
-block. It encloses all the relevant configuration for the DNS server.
-The behavior of the DNS service is controlled by the attributes and
-children of this tag.
-
-.dnsservice.xml content
-|===
-| Property name | explanation
-
-| servers
-| Information includes a list of DNS Servers to be used by James. These are
-specified by the server elements, each of which is a child element of the
-servers element. Each server element is the IP address of a single DNS server.
-The server elements can have multiple server children. Enter ip address of your DNS server, one IP address per server
-element. If no DNS servers are found and you have not specified any below, 127.0.0.1 will be used
-
-| autodiscover
-| true or false - If you use autodiscover and add DNS servers manually a combination of all the DNS servers will be used.
-If autodiscover is true, James will attempt to autodiscover the DNS servers configured on your underlying system.
-Currently, this works if the OS has a unix-like /etc/resolv.xml,
-or the system is Windows based with ipconfig or winipcfg. Change autodiscover to false if you would like to turn off autodiscovery
-and set the DNS servers manually in the servers section
-
-| authoritative
-| *true/false* - This tag specifies whether or not
-to require authoritative (non-cached) DNS records; to only accept DNS responses that are
-authoritative for the domain. It is primarily useful in an intranet/extranet environment.
-This should always be *false* unless you understand the implications.
-
-| maxcachesize
-| Maximum number of entries to maintain in the DNS cache (typically 50000)
-
-| negativeCacheTTL
-| Sets the maximum length of time that negative records will be stored in the DNS negative cache in
-seconds (a negative record means the name has not been found in the DNS). Values for this cache
-can be positive meaning the time in seconds before retrying to resolve the name, zero meaning no
-cache or a negative value meaning infinite caching.
-
-| singleIPperMX
-| true or false (default) - Specifies if Apache James Server must try a single server for each multihomed mx host
-
-| verbose
-| Turn on general debugging statements
-|===
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+include::partial$configure/dns.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/domainlist.adoc b/docs/modules/servers/pages/distributed/configure/domainlist.adoc
index 53b9b0f4c46..ad5cbafffea 100644
--- a/docs/modules/servers/pages/distributed/configure/domainlist.adoc
+++ b/docs/modules/servers/pages/distributed/configure/domainlist.adoc
@@ -1,45 +1,5 @@
= Distributed James Server — domainlist.xml
:navtitle: domainlist.xml
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/domainlist.xml[example]
-to get some examples and hints.
-
-This configuration block is defined by the *domainlist* tag.
-
-.domainlist.xml content
-|===
-| Property name | explanation
-
-| domainnames
-| Domainnames identifies the DNS namespace served by this instance of James.
-These domainnames are used for both matcher/mailet processing and SMTP auth
-to determine when a mail is intended for local delivery - Only applicable for XMLDomainList. The entries mentionned here will be created upon start.
-
-|autodetect
-|true or false - If autodetect is true, James wil attempt to discover its own host name AND
-use any explicitly specified servernames.
-If autodetect is false, James will use only the specified domainnames. Defaults to false.
-
-|autodetectIP
-|true or false - If autodetectIP is not false, James will also allow add the IP address for each servername.
-The automatic IP detection is to support RFC 2821, Sec 4.1.3, address literals. Defaults to false.
-
-|defaultDomain
-|Set the default domain which will be used if an email is send to a recipient without a domain part.
-If no defaultdomain is set the first domain of the DomainList gets used. If the default is not yet contained by the Domain List, the domain will be created upon start.
-
-|read.cache.enable
-|Experimental. Boolean, defaults to false.
-Whether or not to cache domainlist.contains calls. Enable a faster execution however writes will take time
-to propagate.
-
-|read.cache.expiracy
-|Experimental. String (duration), defaults to 10 seconds (10s). Supported units are ms, s, m, h, d, w, month, y.
-Expiracy of the cache. Longer means less reads are performed to the backend but writes will take longer to propagate.
-Low values (a few seconds) are advised.
-
-
-|===
-
-To override autodetected domainnames simply add explicit domainname elements.
-In most cases this will be necessary. By default, the domainname 'localhost' is specified. This can be removed, if required.
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+include::partial$configure/domainlist.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/droplists.adoc b/docs/modules/servers/pages/distributed/configure/droplists.adoc
index 375b6156b7f..500aee7a5df 100644
--- a/docs/modules/servers/pages/distributed/configure/droplists.adoc
+++ b/docs/modules/servers/pages/distributed/configure/droplists.adoc
@@ -1,32 +1,6 @@
= Distributed James Server — DropLists
:navtitle: DropLists
-The DropList, also known as the mail blacklist, is a collection of
-domains and email addresses that are denied from sending emails within the system.
-It is disabled by default.
-To enable it, modify the `droplists.properties` file and include the `IsInDropList` matcher in the `mailetcontainer.xml`.
-To disable it, adjust the `droplists.properties` file and remove the `IsInDropList` matcher from the `mailetcontainer.xml`.
-
-.droplists.properties content
-|===
-| Property name | explanation
-
-| enabled
-| Boolean. Governs whether DropLists should be enabled. Defaults to `false`.
-|===
-
-== Enabling Matcher
-
-Plug the `IsInDropList` matcher within `mailetcontainer.xml` :
-
-....
-
- transport
-
-....
-
-== DropList management
-
-DropList management, including adding and deleting entries, is performed through the WebAdmin REST API.
-
-See xref:distributed/operate/webadmin.adoc#_administrating_droplists[WebAdmin DropLists].
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+include::partial$configure/droplists.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/dsn.adoc b/docs/modules/servers/pages/distributed/configure/dsn.adoc
index 714324b6405..8085aaa0dab 100644
--- a/docs/modules/servers/pages/distributed/configure/dsn.adoc
+++ b/docs/modules/servers/pages/distributed/configure/dsn.adoc
@@ -1,218 +1,7 @@
= Distributed James Server — Delivery Submission Notifications
:navtitle: ESMTP DSN setup
-DSN introduced in link:https://tools.ietf.org/html/rfc3461[RFC-3461] allows a SMTP sender to demand status messages,
-defined in link:https://tools.ietf.org/html/rfc3464[RFC-3464] to be sent back to the `Return-Path` upon delivery
-progress.
-
-DSN support is not enabled by default, as it needs specific configuration of the
-xref:distributed/configure/mailetcontainer.adoc[mailetcontainer.xml] to be specification compliant.
-
-To enable it you need to:
-
-- Add DSN SMTP hooks as part of the SMTP server stack
-- Configure xref:distributed/configure/mailetcontainer.adoc[mailetcontainer.xml] to generate DSN bounces when needed
-
-== Enabling DSN in SMTP server stack
-
-For this simply add the `DSN hooks` in the handler chain in `smtpserver.xml` :
-
-....
-
- <...>
-
-
-
-
-
- <...>
-
-
-
-....
-
-== Enabling DSN generation as part of mail processing
-
-For the below conditions to be matched we assume you follow
-xref:distributed/configure/remote-delivery-error-handling.adoc[RemoteDelivery error handling for MXs], which is a
-requirement for detailed RemoteDelivery error and delay handling on top of the Distributed server.
-
-Here is a sample xref:distributed/configure/mailetcontainer.adoc[mailetcontainer.xml] achieving the following DSN generation:
-
-- Generate a generic `delivered` notification if LocalDelivery succeeded, if requested
-- Generate a generic `failed` notification in case of local errors, if requested
-- Generate a specific `failed` notification in case of a non existing local user, if requested
-- Generate a specific `failed` notification in case of an address rewriting loop, if requested
-- Generate a `failed` notification in case of remote permanent errors, if requested. We blame the remote server...
-- Generate a `delayed` notification in case of temporary remote errors we are about to retry, if requested. We blame the remote server...
-- Generate a `failed` notification in case of temporary remote errors we are not going to retry (failed too many time), if requested. We blame the remote server...
-
-....
-
-
-
-
- \
-
-
-
-
-
-
-
-
-
- [FAILED]
- true
- Hi. This is the James mail server at [machine].
-I'm afraid I wasn't able to deliver your message to the following addresses.
-This is a permanent error; I've given up. Sorry it didn't work out. Below
-I include the list of recipients, and the reason why I was unable to deliver
-your message.
- failed
- 5.0.0
-
-
- cassandra://var/mail/error/
-
-
-
-
-
-
-
- false
-
-
-
- [SUCCESS]
- true
- Hi. This is the James mail server at [machine].
-I successfully delivered your message to the following addresses.
-Note that it indicates your recipients received the message but do
-not imply they read it.
- delivered
- 2.0.0
-
-
-
-
-
-
-
- outgoing
- 0
- 0
- 10
- true
-
- remote-delivery-error
-
-
-
- [FAILED]
- true
- Hi. This is the James mail server at [machine].
-I'm afraid I wasn't able to deliver your message to the following addresses.
-This is a permanent error; I've given up. Sorry it didn't work out.
-The remote server we should relay this mail to keep on failing.
-Below I include the list of recipients, and the reason why I was unable to deliver
-your message.
- failed
- 5.0.0
-
-
- cassandra://var/mail/error/remote-delivery/permanent/
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- [FAILED]
- true
- Hi. This is the James mail server at [machine].
-I'm afraid I wasn't able to deliver your message to the following addresses.
-This is a permanent error; I've given up. Sorry it didn't work out.
-The remote server we should relay this mail to returns a permanent error.
-Below I include the list of recipients, and the reason why I was unable to deliver
-your message.
- failed
- 5.0.0
-
-
-
- [DELAYED]
- true
- Hi. This is the James mail server at [machine].
-I'm afraid I wasn't able to deliver your message to the following addresses yet.
-This is a temporary error: I will keep on trying.
-Below I include the list of recipients, and the reason why I was unable to deliver
-your message.
- delayed
- 4.0.0
-
-
-
-
-
-
-
- [FAILED]
- true
- Hi. This is the James mail server at [machine].
-I'm afraid I wasn't able to deliver your message to the following addresses.
-This is a permanent error; I've given up. Sorry it didn't work out.
-The following addresses do not exist here. Sorry.
- failed
- 5.0.0
-
-
- cassandra://var/mail/address-error/
-
-
-
-
-
-
- cassandra://var/mail/relay-denied/
- Warning: You are sending an e-mail to a remote server. You must be authenticated to perform such an operation
-
-
-
-
-
- cassandra://var/mail/rrt-error/
- true
-
-
-
- [FAILED]
- true
- Hi. This is the James mail server at [machine].
-I'm afraid I wasn't able to deliver your message to the following addresses.
-This is a permanent error; I've given up. Sorry it didn't work out.
-The following addresses is caught in a rewriting loop. An admin should come and fix it (you likely want to report it).
-Once resolved the admin should be able to resume the processing of your email.
-Below I include the list of recipients, and the reason why I was unable to deliver
-your message.
- failed
- 5.1.6/defaultStatus>
-
-
-
-
-....
-
-== Limitations
-
-The out of the box tooling do not allow generating `relayed` DSN notification as RemoteDelivery misses a success
-callback.
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+:mailet-repository-path-prefix: cassandra
+include::partial$configure/dsn.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/extensions.adoc b/docs/modules/servers/pages/distributed/configure/extensions.adoc
index a2b496a4453..95f754529c2 100644
--- a/docs/modules/servers/pages/distributed/configure/extensions.adoc
+++ b/docs/modules/servers/pages/distributed/configure/extensions.adoc
@@ -1,61 +1,6 @@
= Distributed James Server — extensions.properties
:navtitle: extensions.properties
-This files enables an operator to define additional bindings used to instantiate others extensions
-
-*guice.extension.module*: come separated list of fully qualified class name. These classes need to implement Guice modules.
-
-Here is an example of such a class :
-
-....
-public class MyServiceModule extends AbstractModule {
- @Override
- protected void configure() {
- bind(MyServiceImpl.class).in(Scopes.SINGLETON);
- bind(MyService.class).to(MyServiceImpl.class);
- }
-}
-....
-
-Recording it in extensions.properties :
-
-....
-guice.extension.module=com.project.MyServiceModule
-....
-
-Enables to inject MyService into your extensions.
-
-
-*guice.extension.tasks*: come separated list of fully qualified class name.
-
-The extension can rely on the Task manager to supervise long-running task execution (progress, await, cancellation, scheduling...).
-These extensions need to implement Task extension modules.
-
-Here is an example of such a class :
-
-....
-public class RspamdTaskExtensionModule implements TaskExtensionModule {
-
- @Inject
- public RspamdTaskExtensionModule() {
- }
-
- @Override
- public Set> taskDTOModules() {
- return Set.of(...);
- }
-
- @Override
- public Set> taskAdditionalInformationDTOModules() {
- return Set.of(...);
- }
-}
-....
-
-Recording it in extensions.properties :
-
-....
-guice.extension.tasks=com.project.RspamdTaskExtensionModule
-....
-
-Read xref:customization:index.adoc#_defining_custom_injections_for_your_extensions[this page] for more details.
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+include::partial$configure/extensions.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/healthcheck.adoc b/docs/modules/servers/pages/distributed/configure/healthcheck.adoc
index 37c01f8c818..82a147ea2c6 100644
--- a/docs/modules/servers/pages/distributed/configure/healthcheck.adoc
+++ b/docs/modules/servers/pages/distributed/configure/healthcheck.adoc
@@ -1,25 +1,5 @@
= Distributed James Server — healthcheck.properties
:navtitle: healthcheck.properties
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/healthcheck.properties[example]
-to get some examples and hints.
-
-Use this configuration to define the initial delay and period for the PeriodicalHealthChecks. It is only applicable with Guice products.
-
-.healthcheck.properties content
-|===
-| Property name | explanation
-
-| healthcheck.period
-| Define the period between two periodical health checks (default: 60s). Units supported are (ms - millisecond, s - second, m - minute, h - hour, d - day). Default unit is millisecond.
-
-| reception.check.user
-| User to be using for running the "mail reception" health check. The user must exist.
-If not specified, the mail reception check is a noop.
-
-| reception.check.timeout
-| Period after which mail reception is considered faulty. Defaults to one minute.
-
-| additional.healthchecks
-| List of fully qualified HealthCheck class names in addition to James' default healthchecks. Default to empty list.
-|===
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+include::partial$configure/healthcheck.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/imap.adoc b/docs/modules/servers/pages/distributed/configure/imap.adoc
index 96ac8c43af6..79c6a9d93a3 100644
--- a/docs/modules/servers/pages/distributed/configure/imap.adoc
+++ b/docs/modules/servers/pages/distributed/configure/imap.adoc
@@ -1,182 +1,6 @@
= Distributed James Server — imapserver.xml
:navtitle: imapserver.xml
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/imapserver.xml[example]
-to get some examples and hints.
-
-The IMAP4 service is controlled by a configuration block in the imap4server.xml.
-The imap4server tag defines the boundaries of the configuration block. It encloses
-all the relevant configuration for the IMAP4 server. The behavior of the IMAP4 service is
-controlled by the attributes and children of this tag.
-
-This tag has an optional boolean attribute - *enabled* - that defines whether the service is active or not.
-The value defaults to "true" if not present.
-
-The standard children of the imapserver tag are:
-
-.imapserver.xml content
-|===
-| Property name | explanation
-
-| bind
-| Configure this to bind to a specific inetaddress. This is an optional integer value. This value is the port on which this IMAP4 server is configured
-to listen. If the tag or value is absent then the service
-will bind to all network interfaces for the machine If the tag or value is omitted, the value will default to the standard IMAP4 port
-port 143 is the well-known/IANA registered port for IMAP
-port 993 is the well-known/IANA registered port for IMAPS ie over SSL/TLS
-
-| connectionBacklog
-| Number of connection backlog of the server (maximum number of queued connection requests)
-
-| compress
-| true or false - Use or don't use COMPRESS extension. Defaults to false.
-
-| maxLineLength
-| Maximal allowed line-length before a BAD response will get returned to the client
-This should be set with caution as a to high value can make the server a target for DOS (Denial of Service)!
-
-| inMemorySizeLimit
-| Optional. Size limit before we will start to stream to a temporary file.
-Defaults to 10MB. Must be a positive integer, optionally with a unit: B, K, M, G.
-
-| literalSizeLimit
-| Optional. Maximum size of a literal (IMAP APPEND).
-Defaults to 0 (unlimited). Must be a positive integer, optionally with a unit: B, K, M, G.
-
-| plainAuthDisallowed
-| Deprecated. Should use `auth.plainAuthEnabled`, `auth.requireSSL` instead.
-Whether to enable Authentication PLAIN if the connection is not encrypted via SSL or STARTTLS. Defaults to `true`.
-
-| auth.plainAuthEnabled
-| Whether to enable Authentication PLAIN/ LOGIN command. Defaults to `true`.
-
-| auth.requireSSL
-| true or false. Defaults to `true`. Whether to require SSL to authenticate. If this is required, the IMAP server will disable authentication on unencrypted channels.
-
-| auth.oidc.oidcConfigurationURL
-| Provide OIDC url address for information to user. Only configure this when you want to authenticate IMAP server using a OIDC provider.
-
-| auth.oidc.jwksURL
-| Provide url to get OIDC's JSON Web Key Set to validate user token. Only configure this when you want to authenticate IMAP server using a OIDC provider.
-
-| auth.oidc.claim
-| Claim string uses to identify user. E.g: "email_address". Only configure this when you want to authenticate IMAP server using a OIDC provider.
-
-| auth.oidc.scope
-| An OAuth scope that is valid to access the service (RF: RFC7628). Only configure this when you want to authenticate IMAP server using a OIDC provider.
-
-| timeout
-| Default to 30 minutes. After this time, inactive channels that have not performed read, write, or both operation for a while
-will be closed. Negative value disable this behaviour.
-
-| enableIdle
-| Default to true. If enabled IDLE commands will generate a server heartbeat on a regular period.
-
-| idleTimeInterval
-| Defaults to 120. Needs to be a strictly positive integer.
-
-| idleTimeIntervalUnit
-| Default to SECONDS. Needs to be a parseable TimeUnit.
-
-| disabledCaps
-| Implemented server capabilities NOT to advertise to the client. Coma separated list. Defaults to no disabled capabilities.
-
-| jmxName
-| The name given to the configuration
-
-| tls
-| Set to true to support STARTTLS or SSL for the Socket.
-To use this you need to copy sunjce_provider.jar to /path/james/lib directory. To create a new keystore execute:
-`keytool -genkey -alias james -keyalg RSA -storetype PKCS12 -keystore /path/to/james/conf/keystore`.
-Please note that each IMAP server exposed on different port can specify its own keystore, independently from any other
-TLS based protocols.
-
-| handler.helloName
-| This is the name used by the server to identify itself in the IMAP4
-protocol. If autodetect is TRUE, the server will discover its
-own host name and use that in the protocol. If discovery fails,
-the value of 'localhost' is used. If autodetect is FALSE, James
-will use the specified value.
-
-| connectiontimeout
-| Connection timeout in seconds
-
-| connectionLimit
-| Set the maximum simultaneous incoming connections for this service
-
-| connectionLimitPerIP
-| Set the maximum simultaneous incoming connections per IP for this service
-
-| concurrentRequests
-| Maximum number of IMAP requests executed simultaneously. Past that limit requests are queued. Defaults to 20.
-Negative values deactivate this feature, leading to unbounded concurrency.
-
-| maxQueueSize
-| Upper bound to the IMAP throttler queue. Upon burst, requests that cannot be queued are rejected and not executed.
-Integer, defaults to 4096, must be positive, 0 means no queue.
-
-| proxyRequired
-| Enables proxy support for this service for incoming connections. HAProxy's protocol
-(https://www.haproxy.org/download/2.7/doc/proxy-protocol.txt) is used and might be compatible
-with other proxies (e.g. traefik). If enabled, it is *required* to initiate the connection
-using HAProxy's proxy protocol.
-
-| bossWorkerCount
-| Set the maximum count of boss threads. Boss threads are responsible for accepting incoming IMAP connections
-and initializing associated resources. Optional integer, by default, boss threads are not used and this responsibility is being dealt with
-by IO threads.
-
-| ioWorkerCount
-| Set the maximum count of IO threads. IO threads are responsible for receiving incoming IMAP messages and framing them
-(split line by line). IO threads also take care of compression and SSL encryption. Their tasks are short-lived and non-blocking.
-Optional integer, defaults to 2 times the count of CPUs.
-
-| ignoreIDLEUponProcessing
-| true or false - Allow disabling the heartbeat handler. Defaults to true.
-
-| useEpoll
-| true or false - If true uses native EPOLL implementation for Netty otherwise uses NIO. Defaults to false.
-
-| gracefulShutdown
-| true or false - If true attempts a graceful shutdown, which is safer but can take time. Defaults to true.
-
-| highWriteBufferWaterMark
-| Netty's write buffer high watermark configuration. Unit supported: none, K, M. Netty defaults applied.
-
-| lowWriteBufferWaterMark
-| Netty's write buffer low watermark configuration. Unit supported: none, K, M. Netty defaults applied.
-|===
-
-== OIDC setup
-James IMAP support XOAUTH2 authentication mechanism which allow authenticating against a OIDC providers.
-Please configure `auth.oidc` part to use this.
-
-We do supply an link:https://github.com/apache/james-project/tree/master/examples/oidc[example] of such a setup.
-It uses the Keycloak OIDC provider, but usage of similar technologies is definitely doable.
-
-== Extending IMAP
-
-IMAP decoders, processors and encoder can be customized. xref:customization:imap.adoc[Read more].
-
-Check this link:https://github.com/apache/james-project/tree/master/examples/custom-imap[example].
-
-The following configuration properties are available for extensions:
-
-.imapserver.xml content
-|===
-| Property name | explanation
-
-| imapPackages
-| Configure (union) of IMAP packages. IMAP packages bundles decoders (parsing IMAP commands) processors and encoders,
-thus enable implementing new IMAP commands or replace existing IMAP processors. List of FQDNs, which can be located in
-James extensions.
-
-| additionalConnectionChecks
-| Configure (union) of additional connection checks. ConnectionCheck will check if the connection IP is secure or not.
-| customProperties
-| Properties for custom extension. Each tag is a property entry, and holds a string under the form key=value.
-|===
-
-== Mail user agents auto-configuration
-
-Check this example on link:https://github.com/apache/james-project/tree/master/examples/imap-autoconf[Mail user agents auto-configuration].
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+include::partial$configure/imap.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/index.adoc b/docs/modules/servers/pages/distributed/configure/index.adoc
index 8a99ac9a4d3..76c4453c387 100644
--- a/docs/modules/servers/pages/distributed/configure/index.adoc
+++ b/docs/modules/servers/pages/distributed/configure/index.adoc
@@ -9,85 +9,15 @@ or rely on reasonable defaults.
The following configuration files are exposed:
-== For protocols
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:xref-base: distributed/configure
+:server-name: Distributed James Server
-By omitting these files, the underlying protocols will be disabled.
+include::partial$configure/forProtocolsPartial.adoc[]
-** xref:distributed/configure/imap.adoc[*imapserver.xml*] allows configuration for the IMAP protocol link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/imapserver.xml[example]
-** xref:distributed/configure/jmap.adoc[*jmap.properties*] allows to configure the JMAP protocol link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/jmap.properties[example]
-** xref:distributed/configure/jmx.adoc[*jmx.properties*] allows configuration of JMX being used by the Command Line Interface link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/jmx.properties[example]
-** xref:distributed/configure/smtp.adoc#_lmtp_configuration[*lmtpserver.xml*] allows configuring the LMTP protocol link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/lmtpserver.xml[example]
-** *managesieveserver.xml* allows configuration for ManagedSieve (unsupported) link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/managesieveserver.xml[example]
-** xref:distributed/configure/pop3.adoc[*pop3server.xml*] allows configuration for the POP3 protocol (experimental) link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/pop3server.xml[example]
-** xref:distributed/configure/smtp.adoc[*smtpserver.xml*] allows configuration for the SMTP protocol link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/smtpserver.xml[example]
-*** xref:distributed/configure/smtp-hooks.adoc[This page] list SMTP hooks that can be used out of the box with the Distributed Server.
-** xref:distributed/configure/webadmin.adoc[*webadmin.properties*] enables configuration for the WebAdmin protocol link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/webadmin.properties[example]
-** xref:distributed/configure/ssl.adoc[This page] details SSL & TLS configuration.
-** xref:distributed/configure/sieve.adoc[This page] details Sieve setup and how to enable ManageSieve.
+include::partial$configure/forStorageDependenciesPartial.adoc[]
+** xref:distributed/configure/cassandra.adoc[*cassandra.properties*] allows to configure the Cassandra driver link:{sample-configuration-prefix-url}/cassandra.properties[example]
-== For storage dependencies
-
-Except specific documented cases, these files are required, at least to establish a connection with the storage components.
-
-** xref:distributed/configure/blobstore.adoc[*blobstore.properties*] allows to configure the BlobStore link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/blob.properties[example]
-** xref:distributed/configure/cassandra.adoc[*cassandra.properties*] allows to configure the Cassandra driver link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/cassandra.properties[example]
-** xref:distributed/configure/opensearch.adoc[*opensearch.properties*] allows to configure OpenSearch driver link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/opensearch.properties[example]
-** xref:distributed/configure/rabbitmq.adoc[*rabbitmq.properties*] allows configuration for the RabbitMQ driver link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/rabbitmq.properties[example]
-** xref:distributed/configure/redis.adoc[*redis.properties*] allows configuration for the Redis driver link:https://github.com/apache/james-project/blob/fabfdf4874da3aebb04e6fe4a7277322a395536a/server/mailet/rate-limiter-redis/redis.properties[example], that is used by optional
-distributed rate limiting component.
-** xref:distributed/configure/tika.adoc[*tika.properties*] allows configuring Tika as a backend for text extraction link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/tika.properties[example]
-
-== For core components
-
-By omitting these files, sane default values are used.
-
-** xref:distributed/configure/batchsizes.adoc[*batchsizes.properties*] allows to configure mailbox read batch sizes link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/batchsizes.properties[example]
-** xref:distributed/configure/dns.adoc[*dnsservice.xml*] allows to configure DNS resolution link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/dnsservice.xml[example]
-** xref:distributed/configure/domainlist.adoc[*domainlist.xml*] allows to configure Domain storage link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/domainlist.xml[example]
-** xref:distributed/configure/healthcheck.adoc[*healthcheck.properties*] allows to configure periodical healthchecks link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/healthcheck.properties[example]
-** xref:distributed/configure/mailetcontainer.adoc[*mailetcontainer.xml*] allows configuring mail processing link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/mailetcontainer.xml[example]
-*** xref:distributed/configure/mailets.adoc[This page] list matchers that can be used out of the box with the Distributed Server.
-*** xref:distributed/configure/matchers.adoc[This page] list matchers that can be used out of the box with the Distributed Server.
-** xref:distributed/configure/mailrepositorystore.adoc[*mailrepositorystore.xml*] enables registration of allowed MailRepository protcols and link them to MailRepository implementations link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/mailrepositorystore.xml[example]
-** xref:distributed/configure/recipientrewritetable.adoc[*recipientrewritetable.xml*] enables advanced configuration for the Recipient Rewrite Table component link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/recipientrewritetable.xml[example]
-*** xref:distributed/configure/matchers.adoc[This page] allows choosing the indexing technology.
-** xref:distributed/configure/usersrepository.adoc[*usersrepository.xml*] allows configuration of user storage link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/usersrepository.xml[example]
-
-== For extensions
-
-By omitting these files, no extra behaviour is added.
-
-** xref:distributed/configure/vault.adoc[*deletedMessageVault.properties*] allows to configure the DeletedMessageVault link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/deletedMessageVault.properties[example]
-** xref:distributed/configure/listeners.adoc[*listeners.xml*] enables configuration of Mailbox Listeners link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/listeners.xml[example]
-** xref:distributed/configure/extensions.adoc[*extensions.properties*] allows to extend James behaviour by loading your extensions in it link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/extensions.properties[example]
-** xref:distributed/configure/jvm.adoc[*jvm.properties*] lets you specify additional system properties without cluttering your command line
-** xref:distributed/configure/spam.adoc[This page] documents Anti-Spam setup with SpamAssassin, Rspamd.
-** xref:distributed/configure/remote-delivery-error-handling.adoc[This page] proposes a simple strategy for RemoteDelivery error handling.
-** xref:distributed/configure/collecting-contacts.adoc[This page] documents contact collection
-** xref:distributed/configure/collecting-events.adoc[This page] documents event collection
-** xref:distributed/configure/dsn.adoc[this page] specified how to support SMTP Delivery Submission Notification (link:https://tools.ietf.org/html/rfc3461[RFC-3461])
-** xref:distributed/configure/droplists.adoc[This page] allows configuring drop lists.
-
-== System properties
-
-Some tuning can be done via system properties. This includes:
-
-.System properties
-|===
-| Property name | explanation
-
-| james.message.memory.threshold
-| (Optional). String (size, integer + size units, example: `12 KIB`, supported units are bytes KIB MIB GIB TIB). Defaults to 100KIB.
-This governs the threshold MimeMessageInputStreamSource relies on for storing MimeMessage content on disk.
-Below, data is stored in memory. Above data is stored on disk.
-Lower values will lead to longer processing time but will minimize heap memory usage. Modern SSD hardware
-should however support a high throughput. Higher values will lead to faster single mail processing at the cost
-of higher heap usage.
-
-
-| james.message.usememorycopy
-|Optional. Boolean. Defaults to false. Recommended value is false.
-Should MimeMessageWrapper use a copy of the message in memory? Or should bigger message exceeding james.message.memory.threshold
-be copied to temporary files?
-
-|===
\ No newline at end of file
+include::partial$configure/forCoreComponentsPartial.adoc[]
+include::partial$configure/forExtensionsPartial.adoc[]
+include::partial$configure/systemPropertiesPartial.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/jmap.adoc b/docs/modules/servers/pages/distributed/configure/jmap.adoc
index 9d7611ba130..65fb94ab6ef 100644
--- a/docs/modules/servers/pages/distributed/configure/jmap.adoc
+++ b/docs/modules/servers/pages/distributed/configure/jmap.adoc
@@ -1,184 +1,7 @@
= Distributed James Server — jmap.properties
:navtitle: jmap.properties
-https://jmap.io/[JMAP] is intended to be a new standard for email clients to connect to mail
-stores. It therefore intends to primarily replace IMAP + SMTP submission. It is also designed to be more
-generic. It does not replace MTA-to-MTA SMTP transmission.
-
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/jmap.properties[example]
-to get some examples and hints.
-
-.jmap.properties content
-|===
-| Property name | explanation
-
-| enabled
-| true/false. Governs whether JMAP should be enabled
-
-| jmap.port
-| Optional. Defaults to 80. The port this server will be listening on. This value must be a valid
-port, ranging between 1 and 65535 (inclusive)
-
-| tls.keystoreURL
-| Keystore to be used for generating authentication tokens for password authentication mechanism.
-This should not be the same keystore than the ones used by TLS based protocols.
-
-| tls.secret
-| Password used to read the keystore
-
-| jwt.publickeypem.url
-| Optional. Coma separated list of RSA public keys URLs to validate JWT tokens allowing requests to bypass authentication.
-Defaults to an empty list.
-
-| url.prefix
-| Optional. Configuration urlPrefix for JMAP routes. Default value: http://localhost.
-
-| websocket.url.prefix
-| Optional. URL for JMAP WebSocket route. Default value: ws://localhost
-
-| email.send.max.size
-| Optional. Configuration max size for message created in RFC-8621.
-Default value: None. Supported units are B (bytes) K (KB) M (MB) G (GB).
-
-| max.size.attachments.per.mail
-| Optional. Defaults to 20MB. RFC-8621 `maxSizeAttachmentsPerEmail` advertised to JMAP client as part of the
-`urn:ietf:params:jmap:mail` capability. This needs to be at least 33% lower than `email.send.max.size` property
-(in order to account for text body, headers, base64 encoding and MIME structures).
-JMAP clients would use this property in order not to create too big emails.
-Default value: None. Supported units are B (bytes) K (KB) M (MB) G (GB).
-
-| upload.max.size
-| Optional. Configuration max size for each upload file in new JMAP-RFC-8621.
-Default value: 30M. Supported units are B (bytes) K (KB) M (MB) G (GB).
-
-| upload.quota.limit
-| Optional. Configure JMAP upload quota for total existing uploads' size per user. User exceeding the upload quota would result in old uploads being cleaned up.
-Default value: 200M. Supported units are B (bytes) K (KB) M (MB) G (GB).
-
-| view.email.query.enabled
-| Optional boolean. Defaults to false. Should simple Email/query be resolved against a Cassandra projection, or should we resolve them against OpenSearch?
-This enables a higher resilience, but the projection needs to be correctly populated.
-
-| user.provisioning.enabled
-| Optional boolean. Defaults to true. Governs whether authenticated users that do not exist locally should be created in the users repository.
-
-| authentication.strategy.rfc8621
-| Optional List[String] with delimiter `,` . Specify which authentication strategies system admin want to use for JMAP RFC-8621 server.
-The implicit package name is `org.apache.james.jmap.http`. If you have a custom authentication strategy outside this package, you have to specify its FQDN.
-If no authentication strategy is specified, JMAP RFC-8621 server will fallback to default strategies:
-`JWTAuthenticationStrategy`, `BasicAuthenticationStrategy`.
-
-| jmap.version.default
-| Optional string. Defaults to `rfc-8621`. Allowed values: rfc-8621
-Which version of the JMAP protocol should be served when none supplied in the Accept header.
-
-| dynamic.jmap.prefix.resolution.enabled
-| Optional boolean. Defaults to false. Supported Jmap session endpoint returns dynamic prefix in response.
-When its config is true, and the HTTP request to Jmap session endpoint has a `X-JMAP-PREFIX` header with the value `http://new-domain/prefix`,
-then `apiUrl, downloadUrl, uploadUrl, eventSourceUrl, webSocketUrl` in response will be changed with a new prefix. Example: The `apiUrl` will be "http://new-domain/prefix/jmap".
-If the HTTP request to Jmap session endpoint has the `X-JMAP-WEBSOCKET-PREFIX` header with the value `ws://new-domain/prefix`,
-then `capabilities."urn:ietf:params:jmap:websocket".url` in response will be "ws://new-domain/prefix/jmap/ws".
-
-| webpush.prevent.server.side.request.forgery
-| Optional boolean. Prevent server side request forgery by preventing calls to the private network ranges. Defaults to true, can be disabled for testing.
-
-| cassandra.filter.projection.activated
-|Optional boolean. Defaults to false. Casandra backends only. Whether to use or not the Cassandra projection
-for JMAP filters. This projection optimizes reads, but needs to be correctly populated. Turning it on on
-systems with filters already defined would result in those filters to be not read.
-
-| delay.sends.enabled
-| Optional boolean. Defaults to false. Whether to support or not the delay send with JMAP protocol.
-
-| disabled.capabilities
-| Optional, defaults to empty. Coma separated list of JMAP capabilities to reject.
-This allows to prevent users from using some specific JMAP extensions.
-
-| email.get.full.max.size
-| Optional, default value is 5. The max number of items for EmailGet full reads.
-
-| get.max.size
-| Optional, default value is 500. The max number of items for /get methods.
-
-| set.max.size
-| Optional, default value is 500. The max number of items for /set methods.
-|===
-
-== Wire tapping
-
-Enabling *TRACE* on `org.apache.james.jmap.wire` enables reactor-netty wiretap, logging of
-all incoming and outgoing requests, outgoing requests. This will log also potentially sensible information
-like authentication credentials.
-
-== OIDC set up
-
-The use of `XUserAuthenticationStrategy` allow delegating the authentication responsibility to a third party system,
-which could be used to set up authentication against an OIDC provider.
-
-We do supply an link:https://github.com[example] of such a setup. It combines the link:https://www.keycloak.org/[Keycloack]
-OIDC provider with the link:https://www.krakend.io/[Krackend] API gateway, but usage of similar technologies is definitely doable.
-
-== Generating a JWT key pair
-
-Apache James can alternatively be configured to check the validity of JWT tokens itself. No revocation mechanism is
-supported in such a setup, and the `sub` claim is used to identify the user. The key configuration is static.
-
-This requires the `JWTAuthenticationStrategy` authentication strategy to be used.
-
-The Distributed server enforces the use of RSA-SHA-256.
-
-One can use OpenSSL to generate a JWT key pair :
-
- # private key
- openssl genrsa -out rs256-4096-private.rsa 4096
- # public key
- openssl rsa -in rs256-4096-private.rsa -pubout > rs256-4096-public.pem
-
-The private key can be used to generate JWT tokens, for instance
-using link:https://github.com/vandium-io/jwtgen[jwtgen]:
-
- jwtgen -a RS256 -p rs256-4096-private.rsa 4096 -c "sub=bob@domain.tld" -e 3600 -V
-
-This token can then be passed as `Bearer` of the `Authorization` header :
-
- curl -H "Authorization: Bearer $token" -XPOST http://127.0.0.1:80/jmap -d '...'
-
-The public key can be referenced as `jwt.publickeypem.url` of the `jmap.properties` configuration file.
-
-== Annotated specification
-
-The [annotated documentation](https://github.com/apache/james-project/tree/master/server/protocols/jmap-rfc-8621/doc/specs/spec)
-presents the limits of the JMAP RFC-8621 implementation part of the Apache James project. We furthermore implement
-[JSON Meta Application Protocol (JMAP) Subprotocol for WebSocket](https://tools.ietf.org/html/rfc8887).
-
-Some methods / types are not yet implemented, some implementations are naive, and the PUSH is not supported yet.
-
-Users are invited to read these limitations before using actively the JMAP RFC-8621 implementation, and should ensure their
-client applications only uses supported operations.
-
-Contributions enhancing support are furthermore welcomed.
-
-The list of tested JMAP clients are:
-
- - Experiments had been run on top of [LTT.RS](https://github.com/iNPUTmice/lttrs-android). Version in the Accept
- headers needs to be explicitly set to `rfc-8621`. [Read more](https://github.com/linagora/james-project/pull/4089).
-
-== JMAP auto-configuration
-
-link:https://datatracker.ietf.org/doc/html/rfc8620[RFC-8620] defining JMAP core RFC defines precisely service location.
-
-James already redirects `http://jmap.domain.tld/.well-known/jmap` to the JMAP session.
-
-You can further help your clients by publishing extra SRV records.
-
-Eg:
-
-----
-_jmap._tcp.domain.tld. 3600 IN SRV 0 1 443 jmap.domain.tld.
-----
-
-== JMAP reverse-proxy set up
-
-James implementation adds the value of `X-Real-IP` header as part of the logging MDC.
-
-This allows for reverse proxies to cary other the IP address of the client down to the JMAP server for diagnostic purpose.
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:server-name: Distributed James Server
+:backend-name: Cassandra
+include::partial$configure/jmap.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/jmx.adoc b/docs/modules/servers/pages/distributed/configure/jmx.adoc
index 04e88db20ce..486a90ca727 100644
--- a/docs/modules/servers/pages/distributed/configure/jmx.adoc
+++ b/docs/modules/servers/pages/distributed/configure/jmx.adoc
@@ -1,67 +1,5 @@
= Distributed James Server — jmx.properties
:navtitle: jmx.properties
-== Disclaimer
-
-JMX poses several security concerns and had been leveraged to conduct arbitrary code execution.
-This threat is mitigated by not allowing remote connections to JMX, setting up authentication and pre-authentication filters.
-However, we recommend to either run James in isolation (docker / own virtual machine) or disable JMX altogether.
-
-James JMX endpoint provides command line utilities and exposes a few metrics, also available on the metric endpoint.
-
-== Configuration
-
-This is used to configure the JMX MBean server via which all management is achieved.
-
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/jmx.properties[example]
-in GIT to get some examples and hints.
-
-.jmx.properties content
-|===
-| Property name | explanation
-
-| jmx.enabled
-| Boolean. Should the JMX server be enabled? Defaults to `true`.
-
-| jmx.address
-|The IP address (host name) the MBean Server will bind/listen to.
-
-| jmx.port
-| The port number the MBean Server will bind/listen to.
-|===
-
-To access from a remote location, it has been reported that `-Dcom.sun.management.jmxremote.ssl=false` is needed as
-a JVM argument.
-
-== JMX Security
-
-In order to set up JMX authentication, we need to put `jmxremote.password` and `jmxremote.access` file
-to `/conf` directory.
-
-- `jmxremote.password`: define the username and password, that will be used by the client (here is james-cli)
-
-File's content example:
-```
-james-admin pass1
-```
-
-- `jmxremote.access`: define the pair of username and access permission
-
-File's content example:
-```
-james-admin readwrite
-```
-
-When James runs with option `-Djames.jmx.credential.generation=true`, James will automatically generate `jmxremote.password` if the file does not exist.
-Then the default username is `james-admin` and a random password. This option defaults to true.
-
-=== James-cli
-
-When the JMX server starts with authentication configuration, it will require the client need provide username/password for bypass.
-To do that, we need set arguments `-username` and `-password` for the command request.
-
-Command example:
-```
-james-cli -h 127.0.0.1 -p 9999 -username james-admin -password pass1 listdomains
-```
-
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+include::partial$configure/jmx.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/jvm.adoc b/docs/modules/servers/pages/distributed/configure/jvm.adoc
index 170869594b9..cbb3998dc41 100644
--- a/docs/modules/servers/pages/distributed/configure/jvm.adoc
+++ b/docs/modules/servers/pages/distributed/configure/jvm.adoc
@@ -1,105 +1,5 @@
= Distributed James Server — jvm.properties
:navtitle: jvm.properties
-This file may contain any additional system properties for tweaking JVM execution. When you normally would add a command line option `-Dmy.property=whatever`, you can put it in this file as `my.property=whatever` instead. These properties will be added as system properties on server start.
-
-Note that in some rare cases this might not work,
-when a property affects very early JVM start behaviour.
-
-For testing purposes, you may specify a different file path via the command line option `-Dextra.props=/some/other/jvm.properties`.
-
-== Control the threshold memory
-This governs the threshold MimeMessageInputStreamSource relies on for storing MimeMessage content on disk.
-
-In `jvm.properties`
-----
-james.message.memory.threshold=12K
-----
-
-(Optional). String (size, integer + size units, example: `12 KIB`, supported units are bytes KIB MIB GIB TIB). Defaults to 100KIB.
-
-== Enable the copy of message in memory
-Should MimeMessageWrapper use a copy of the message in memory? Or should bigger message exceeding james.message.memory.threshold
-be copied to temporary files?
-
-----
-james.message.usememorycopy=true
-----
-
-Optional. Boolean. Defaults to false. Recommended value is false.
-
-== Running resource leak detection
-It is used to detect a resource not be disposed of before it's garbage-collected.
-
-In `jvm.properties`
-----
-james.lifecycle.leak.detection.mode=advanced
-----
-
-Allowed mode values are: none, simple, advanced, testing
-
-The purpose of each mode is introduced in `config-system.xml`
-
-== Disabling host information in protocol MDC logging context
-
-Should we add the host in the MDC logging context for incoming IMAP, SMTP, POP3? Doing so, a DNS resolution
-is attempted for each incoming connection, which can be costly. Remote IP is always added to the logging context.
-
-
-In `jvm.properties`
-----
-james.protocols.mdc.hostname=false
-----
-
-Optional. Boolean. Defaults to true.
-
-== Change the encoding type used for the blobId
-
-By default, the blobId is encoded in base64 url. The property `james.blob.id.hash.encoding` allows to change the encoding type.
-The support value are: base16, hex, base32, base32Hex, base64, base64Url.
-
-Ex in `jvm.properties`
-----
-james.blob.id.hash.encoding=base16
-----
-
-Optional. String. Defaults to base64Url.
-
-== JMAP Quota draft compatibility
-
-Some JMAP clients depend on the JMAP Quota draft specifications. The property `james.jmap.quota.draft.compatibility` allows
-to enable JMAP Quota draft compatibility for those clients and allow them a time window to adapt to the RFC-9245 JMAP Quota.
-
-Optional. Boolean. Default to false.
-
-Ex in `jvm.properties`
-----
-james.jmap.quota.draft.compatibility=true
-----
-To enable the compatibility.
-
-== Enable S3 metrics
-
-James supports extracting some S3 client-level metrics e.g. number of connections being used, time to acquire an S3 connection, total time to finish a S3 request...
-
-The property `james.s3.metrics.enabled` allows to enable S3 metrics collection. Please pay attention that enable this
-would impact a bit on S3 performance.
-
-Optional. Boolean. Default to true.
-
-Ex in `jvm.properties`
-----
-james.s3.metrics.enabled=false
-----
-To disable the S3 metrics.
-
-== Reactor Stream Prefetch
-
-Prefetch to use in Reactor to stream convertions (S3 => InputStream). Default to 1.
-Higher values will tend to block less often at the price of higher memory consumptions.
-
-Ex in `jvm.properties`
-----
-# james.reactor.inputstream.prefetch=4
-----
-
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+include::partial$configure/jvm.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/listeners.adoc b/docs/modules/servers/pages/distributed/configure/listeners.adoc
index 57d7772fba3..d0cf02d8482 100644
--- a/docs/modules/servers/pages/distributed/configure/listeners.adoc
+++ b/docs/modules/servers/pages/distributed/configure/listeners.adoc
@@ -1,77 +1,9 @@
= Distributed James Server — listeners.xml
:navtitle: listeners.xml
-Distributed James relies on an event bus system to enrich mailbox capabilities. Each
-operation performed on the mailbox will trigger related events, that can
-be processed asynchronously by potentially any James node on a
-distributed system.
-
-Mailbox listeners can register themselves on this event bus system to be
-called when an event is fired, allowing to do different kind of extra
-operations on the system.
-
-Distributed James allows the user to register potentially user defined additional mailbox listeners.
-
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/listener.xml[example]
-to get some examples and hints.
-
-== Configuration
-
-The controls whether to launch group mailbox listener consumption. Defaults to true. Use with caution:
-never disable on standalone james servers, and ensure at least some instances do consume group mailbox listeners within a
-clustered topology.
-
-Mailbox listener configuration is under the XML element .
-
-Some MailboxListener allows you to specify if you want to run them synchronously or asynchronously. To do so,
-for MailboxListener that supports this, you can use the *async* attribute (optional, per mailet default) to govern the execution mode.
-If *true* the execution will be scheduled in a reactor elastic scheduler. If *false*, the execution is synchronous.
-
-Already provided additional listeners are documented below.
-
-=== SpamAssassinListener
-
-Provides per user real-time HAM/SPAM feedback to a SpamAssassin server depending on user actions.
-
-This mailet is asynchronous by default, but this behaviour can be overridden by the *async*
-configuration property.
-
-This MailboxListener is supported.
-
-Example:
-
-....
-
-
-
- org.apache.james.mailbox.spamassassin.SpamAssassinListener
-
-
-....
-
-Please note that a `spamassassin.properties` file is needed. Read also
-xref:distributed/configure/spam.adoc[this page] for extra configuration required to support this feature.
-
-=== RspamdListener
-
-Provides HAM/SPAM feedback to a Rspamd server depending on user actions.
-
-This MailboxListener is supported.
-
-Example:
-
-....
-
-
-
- org.apache.james.rspamd.RspamdListener
-
-
-....
-
-Please note that a `rspamd.properties` file is needed. Read also
-xref:distributed/configure/spam.adoc[this page] for extra configuration required to support this feature.
-
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:server-name: Distributed James Server
+include::partial$configure/listeners.adoc[]
=== MailboxOperationLoggingListener
@@ -81,6 +13,7 @@ This MailboxListener is supported.
Example:
+[source,xml]
....
@@ -89,85 +22,3 @@ Example:
....
-
-=== QuotaThresholdCrossingListener
-
-Sends emails to users exceeding 80% and 99% of their quota to warn them (for instance).
-
-Here are the following properties you can configure:
-
-.QuotaThresholdCrossingListener configuration properties
-|===
-| Property name | explanation
-
-| name
-| Useful when configuring several time this listener. You might want to do so to use different rendering templates for
-different occupation thresholds.
-
-| gracePeriod
-| Period during which no more email for a given threshold should be sent.
-
-| subjectTemplate
-| Mustache template for rendering the subject of the warning email.
-
-| bodyTemplate
-| Mustache template for rendering the body of the warning email.
-
-| thresholds
-| Floating number between 0 and 1 representing the threshold of quota occupation from which a mail should be sent.
-Configuring several thresholds is supported.
-
-|===
-
-Example:
-
-....
-
-
-
- org.apache.james.mailbox.quota.mailing.listeners.QuotaThresholdCrossingListener
- QuotaThresholdCrossingListener-upper-threshold
-
-
-
- 0.8
-
-
- thirst
- conf://templates/QuotaThresholdMailSubject.mustache
- conf://templates/QuotaThresholdMailBody.mustache
- 1week/
-
-
-
-....
-
-Here are examples of templates you can use:
-
-* For subject template: `conf://templates/QuotaThresholdMailSubject.mustache`
-
-....
-Warning: Your email usage just exceeded a configured threshold
-....
-
-* For body template: `conf://templates/QuotaThresholdMailBody.mustache`
-
-....
-You receive this email because you recently exceeded a threshold related to the quotas of your email account.
-
-{{#hasExceededSizeThreshold}}
-You currently occupy more than {{sizeThreshold}} % of the total size allocated to you.
-You currently occupy {{usedSize}}{{#hasSizeLimit}} on a total of {{limitSize}} allocated to you{{/hasSizeLimit}}.
-
-{{/hasExceededSizeThreshold}}
-{{#hasExceededCountThreshold}}
-You currently occupy more than {{countThreshold}} % of the total message count allocated to you.
-You currently have {{usedCount}} messages{{#hasCountLimit}} on a total of {{limitCount}} allowed for you{{/hasCountLimit}}.
-
-{{/hasExceededCountThreshold}}
-You need to be aware that actions leading to exceeded quotas will be denied. This will result in a degraded service.
-To mitigate this issue you might reach your administrator in order to increase your configured quota. You might also delete some non important emails.
-....
-
-This MailboxListener is supported.
-
diff --git a/docs/modules/servers/pages/distributed/configure/mailetcontainer.adoc b/docs/modules/servers/pages/distributed/configure/mailetcontainer.adoc
index f9e1722d7fb..e996c276805 100644
--- a/docs/modules/servers/pages/distributed/configure/mailetcontainer.adoc
+++ b/docs/modules/servers/pages/distributed/configure/mailetcontainer.adoc
@@ -1,96 +1,6 @@
= Distributed James Server — mailetcontainer.xml
:navtitle: mailetcontainer.xml
-This document explains how to configure Mail processing. Mails pass through the MailetContainer. The
-MailetContainer is a Matchers (condition for executing a mailet) and Mailets (execution units that perform
-actions based on incoming mail) pipeline arranged into processors (List of mailet/matcher pairs allowing
-better logical organisation). You can read more about these concepts on
-xref:distributed/architecture/index.adoc#_mail_processing[the mailet container feature description].
-
-Apache James Server includes a number of xref:distributed/configure/mailets.adoc[Packaged Mailets] and
-xref:distributed/configure/matchers.adoc[Packaged Matchers].
-
-Furthermore, you can write and use with James xref:customization:mail-processing.adoc[your own mailet and matchers].
-
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/mailetcontainer.xml[example]
-to get some examples and hints.
-
-.mailetcontainer.xml content
-|===
-| Property name | explanation
-
-| context.postmaster
-| The body of this element is the address that the server
-will consider its postmaster address. This address will be listed as the sender address
-of all error messages that originate from James. Also, all messages addressed to
-postmaster@<domain>, where <domain> is one of the domain names whose
-mail is being handled by James, will be redirected to this email address.
-Set this to the appropriate email address for error reports.
-If this is set to a non-local email address, the mail server
-will still function, but will generate a warning on startup.
-
-| spooler.threads
-| Number of simultaneous threads used to spool the mails. Set to zero, it disables mail processing - use with
-caution.
-
-| spooler.errorRepository
-| Mail repository to store email in after several unrecoverable errors. Mails failing processing, for which
-the Mailet Container could not handle Error, will be stored there after their processing had been attempted
-5 times. Note that if standard java Exception occurs, *Error handling* section below will be applied
-instead.
-|===
-
-== The Mailet Tag
-
-Consider the following simple *mailet* tag:
-
-....
-
- spam
-
-....
-
-The mailet tag has two required attributes, *match* and *class*.
-
-The *match* attribute is set to the value of the specific Matcher class to be instantiated with an
-optional argument. If present, the argument is separated from the Matcher class name by an '='. Semantic
-interpretation of the argument is left to the particular mailet.
-
-The *class* attribute is set to the value of the Mailet class that is to be instantiated.
-
-Finally, the children of the *mailet* tag define the configuration that is passed to the Mailet. The
-tags used in this section should have no attributes or children. The names and bodies of the elements will be passed to
-the mailet as (name, value) pairs.
-
-So in the example above, a Matcher instance of RemoteAddrNotInNetwork would be instantiated, and the value "127.0.0.1"
-would be passed to the matcher. The Mailet of the pair will be an instance of ToProcessor, and it will be passed the (name, value)
-pair of ("processor", "spam").
-
-== Error handling
-
-If an exception is encountered during the execution of a mailet or a matcher, the default behaviour is to
-process the mail using the *error* processor.
-
-The *onMailetException* property allows you to override this behaviour. You can specify another
-processor than the *error* one for handling the errors of this mailet.
-
-The *ignore* special value also allows to continue processing and ignore the error.
-
-The *propagate* special value causes the mailet container to rethrow the
-exception, propagating it to the execution context. In an SMTP execution context, the spooler will then requeue
-the item and automatic retries will be set up - note that attempts will be made for each recipient. In LMTP
-(if LMTP is configured to execute the mailetContainer), the entire mail transaction is reported as failed to the caller.
-
-Moreover, the *onMatcherException* allows you to override matcher error handling. You can
-specify another processor than the *error* one for handling the errors of this mailet. The *matchall*
-special value also allows you to match all recipients when there is an error. The *nomatch*
-special value also allows you to match no recipients when there is an error.
-
-Here is a short example to illustrate this:
-
-....
-
- deliveryError
- nomatch
-
-....
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+include::partial$configure/mailetcontainer.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/mailets.adoc b/docs/modules/servers/pages/distributed/configure/mailets.adoc
index cf19932da09..2426eae0657 100644
--- a/docs/modules/servers/pages/distributed/configure/mailets.adoc
+++ b/docs/modules/servers/pages/distributed/configure/mailets.adoc
@@ -1,151 +1,6 @@
= Distributed James Server — Mailets
:navtitle: Mailets
-This documentation page lists and documents Mailet that can be used within the
-Distributed Server MailetContainer in order to write your own mail processing logic with out-of-the-box components.
-
-== Supported mailets
-
-include::partial$AddDeliveredToHeader.adoc[]
-
-include::partial$AddFooter.adoc[]
-
-include::partial$AddSubjectPrefix.adoc[]
-
-include::partial$AmqpForwardAttribute.adoc[]
-
-include::partial$Bounce.adoc[]
-
-include::partial$ContactExtractor.adoc[]
-
-include::partial$ConvertTo7Bit.adoc[]
-
-include::partial$DeconnectionRight.adoc[]
-
-include::partial$DKIMSign.adoc[]
-
-include::partial$DKIMVerify.adoc[]
-
-include::partial$DSNBounce.adoc[]
-
-include::partial$Expires.adoc[]
-
-include::partial$ExtractMDNOriginalJMAPMessageId.adoc[]
-
-include::partial$Forward.adoc[]
-
-include::partial$ICalendarParser.adoc[]
-
-include::partial$ICALToHeader.adoc[]
-
-include::partial$ICALToJsonAttribute.adoc[]
-
-include::partial$ICSSanitizer.adoc[]
-
-include::partial$LocalDelivery.adoc[]
-
-include::partial$LDAPMatchers.adoc[]
-
-include::partial$LogMessage.adoc[]
-
-include::partial$MailAttributesListToMimeHeaders.adoc[]
-
-include::partial$MailAttributesToMimeHeaders.adoc[]
-
-include::partial$MetricsMailet.adoc[]
-
-include::partial$MimeDecodingMailet.adoc[]
-
-include::partial$NotifyPostmaster.adoc[]
-
-include::partial$NotifySender.adoc[]
-
-include::partial$Null.adoc[]
-
-include::partial$PostmasterAlias.adoc[]
-
-include::partial$RandomStoring.adoc[]
-
-include::partial$RecipientRewriteTable.adoc[]
-
-include::partial$RecipientToLowerCase.adoc[]
-
-include::partial$Redirect.adoc[]
-
-include::partial$RemoteDelivery.adoc[]
-
-include::partial$RemoveAllMailAttributes.adoc[]
-
-include::partial$RemoveMailAttribute.adoc[]
-
-include::partial$RemoveMimeHeader.adoc[]
-
-include::partial$RemoveMimeHeaderByPrefix.adoc[]
-
-include::partial$ReplaceContent.adoc[]
-
-include::partial$Resend.adoc[]
-
-include::partial$SetMailAttribute.adoc[]
-
-include::partial$SetMimeHeader.adoc[]
-
-include::partial$Sieve.adoc[]
-
-include::partial$Sign.adoc[]
-
-include::partial$SMIMECheckSignature.adoc[]
-
-include::partial$SMIMEDecrypt.adoc[]
-
-include::partial$SMIMESign.adoc[]
-
-include::partial$SpamAssassin.adoc[]
-
-include::partial$StripAttachment.adoc[]
-
-include::partial$TextCalendarBodyToAttachment.adoc[]
-
-include::partial$ToProcessor.adoc[]
-
-include::partial$ToRepository.adoc[]
-
-include::partial$ToSenderDomainRepository.adoc[]
-
-include::partial$VacationMailet.adoc[]
-
-include::partial$WithPriority.adoc[]
-
-include::partial$WithStorageDirective.adoc[]
-
-== Experimental mailets
-
-include::partial$ClamAVScan.adoc[]
-
-include::partial$ClassifyBounce.adoc[]
-
-include::partial$FromRepository.adoc[]
-
-include::partial$HeadersToHTTP.adoc[]
-
-include::partial$OnlyText.adoc[]
-
-include::partial$ManageSieveMailet.adoc[]
-
-include::partial$RecoverAttachment.adoc[]
-
-include::partial$SerialiseToHTTP.adoc[]
-
-include::partial$ServerTime.adoc[]
-
-include::partial$SPF.adoc[]
-
-include::partial$ToPlainText.adoc[]
-
-include::partial$ToSenderFolder.adoc[]
-
-include::partial$UnwrapText.adoc[]
-
-include::partial$UseHeaderRecipients.adoc[]
-
-include::partial$WrapText.adoc[]
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:server-name: Distributed James Server
+include::partial$configure/mailets.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/mailrepositorystore.adoc b/docs/modules/servers/pages/distributed/configure/mailrepositorystore.adoc
index b897530eacc..6968de99ba6 100644
--- a/docs/modules/servers/pages/distributed/configure/mailrepositorystore.adoc
+++ b/docs/modules/servers/pages/distributed/configure/mailrepositorystore.adoc
@@ -1,35 +1,9 @@
= Distributed James Server — mailrepositorystore.xml
-A `mail repository` allows storage of a mail as part of its
-processing. Standard configuration relies on the following mail
-repository.
-
-A mail repository is identified by its *url*, constituted of a *protocol* and a *path*.
-
-For instance in the url `cassandra://var/mail/error/` `cassandra` is the protocol and `var/mail/error` the path.
-
-The *mailrepositorystore.xml* file allows registration of available protocols, and their binding to actual MailRepository
-implementation. Note that extension developers can write their own MailRepository implementations, load them via the
-`extensions-jars` mechanism as documented in xref:customization:index.adoc['writing your own extensions'], and finally
-associated to a protocol in *mailrepositorystore.xml* for a usage in *mailetcontainer.xml*.
-
-== Configuration
-
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/mailrepositorystore.xml[example]
-to get some examples and hints.
-
-....
-
- cassandra
-
-
-
- cassandra
-
-
-
-
-....
-
-Only the *CassandraMailRepository* is available by default for the Distributed Server. Mails metadata are stored in
-Cassandra while the headers and bodies are stored within the xref:distributed/architecture/index.adoc#_blobstore[BlobStore].
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+:server-name: Distributed James Server
+:mailet-repository-path-prefix: cassandra
+:mail-repository-protocol: cassandra
+:mail-repository-class: org.apache.james.mailrepository.cassandra.CassandraMailRepository
+include::partial$configure/mailrepositorystore.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/matchers.adoc b/docs/modules/servers/pages/distributed/configure/matchers.adoc
index 2d85fc3465c..944b9e46a7a 100644
--- a/docs/modules/servers/pages/distributed/configure/matchers.adoc
+++ b/docs/modules/servers/pages/distributed/configure/matchers.adoc
@@ -1,166 +1,7 @@
= Distributed James Server — Matchers
:navtitle: Matchers
-This documentation page lists and documents Matchers that can be used within the
-Distributed Server MailetContainer in order to write your own mail processing logic with out-of-the-box components.
-
-== Supported matchers
-
-include::partial$All.adoc[]
-
-include::partial$AtLeastPriority.adoc[]
-
-include::partial$AtMost.adoc[]
-
-include::partial$AtMostPriority.adoc[]
-
-include::partial$DLP.adoc[]
-
-include::partial$FetchedFrom.adoc[]
-
-include::partial$HasAttachment.adoc[]
-
-include::partial$HasException.adoc[]
-
-include::partial$HasHeader.adoc[]
-
-include::partial$HasHeaderWithPrefix.adoc[]
-
-include::partial$HasMailAttribute.adoc[]
-
-include::partial$HasMailAttributeWithValue.adoc[]
-
-include::partial$HasMailAttributeWithValueRegex.adoc[]
-
-include::partial$HasMimeType.adoc[]
-
-include::partial$HasMimeTypeParameter.adoc[]
-
-include::partial$HasPriority.adoc[]
-
-include::partial$HostIs.adoc[]
-
-include::partial$HostIsLocal.adoc[]
-
-include::partial$IsMarkedAsSpam.adoc[]
-
-include::partial$IsOverQuota.adoc[]
-
-include::partial$IsRemoteDeliveryPermanentError.adoc[]
-
-include::partial$IsRemoteDeliveryTemporaryError.adoc[]
-
-include::partial$IsSenderInRRTLoop.adoc[]
-
-include::partial$IsSingleRecipient.adoc[]
-
-include::partial$IsSMIMEEncrypted.adoc[]
-
-include::partial$IsSMIMESigned.adoc[]
-
-include::partial$IsX509CertificateSubject.adoc[]
-
-include::partial$RecipientDomainIs.adoc[]
-
-include::partial$RecipientIs.adoc[]
-
-include::partial$RecipientIsLocal.adoc[]
-
-include::partial$RecipientIsRegex.adoc[]
-
-include::partial$RelayLimit.adoc[]
-
-include::partial$RemoteAddrInNetwork.adoc[]
-
-include::partial$RemoteAddrNotInNetwork.adoc[]
-
-include::partial$RemoteDeliveryFailedWithSMTPCode.adoc[]
-
-include::partial$SenderDomainIs.adoc[]
-
-include::partial$SenderHostIs.adoc[]
-
-include::partial$SenderIs.adoc[]
-
-include::partial$SenderIsLocal.adoc[]
-
-include::partial$SenderIsNull.adoc[]
-
-include::partial$SenderIsRegex.adoc[]
-
-include::partial$SentByJmap.adoc[]
-
-include::partial$SentByMailet.adoc[]
-
-include::partial$SizeGreaterThan.adoc[]
-
-include::partial$SMTPAuthSuccessful.adoc[]
-
-include::partial$SMTPAuthUserIs.adoc[]
-
-include::partial$SMTPIsAuthNetwork.adoc[]
-
-include::partial$SubjectIs.adoc[]
-
-include::partial$SubjectStartsWith.adoc[]
-
-include::partial$TooManyRecipients.adoc[]
-
-include::partial$UserIs.adoc[]
-
-include::partial$XOriginatingIpInNetwork.adoc[]
-
-== Experimental matchers
-
-include::partial$AttachmentFileNameIs.adoc[]
-
-include::partial$CommandForListserv.adoc[]
-
-include::partial$CommandListservMatcher.adoc[]
-
-include::partial$CompareNumericHeaderValue.adoc[]
-
-include::partial$FileRegexMatcher.adoc[]
-
-include::partial$InSpammerBlacklist.adoc[]
-
-include::partial$NESSpamCheck.adoc[]
-
-include::partial$SenderInFakeDomain.adoc[]
-
-== Composite matchers
-
-It is possible to combine together matchers in order to create a composite matcher, thus simplifying your
-Mailet Container logic.
-
-Here are the available logical operations:
-
-* *And* : This matcher performs And conjunction between the two matchers: recipients needs to match both matcher in order to
-match the composite matcher.
-* *Or* : This matcher performs Or conjunction between the two matchers: consider it to be a union of the results.
-It returns recipients from the Or composition results of the child matchers.
-* *Not* : It returns recipients from the negated composition of the child Matcher(s). Consider what wasn't
-in the result set of each child matcher. Of course it is easier to understand if it only
-includes one matcher in the composition, the normal recommended use.
-* *Xor* : It returns Recipients from the Xor composition of the child matchers. Consider it to be the inequality
-operator for recipients. If any recipients match other matcher results
-then the result does not include that recipient.
-
-Here is the syntax to adopt in *mailetcontainer.xml*:
-
-....
-
-
-
-
-
-
-
-
-
-
-
- relay
-
-
-....
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+:server-name: Distributed James Server
+include::partial$configure/matchers.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/opensearch.adoc b/docs/modules/servers/pages/distributed/configure/opensearch.adoc
index c46cd31e86f..2144b928508 100644
--- a/docs/modules/servers/pages/distributed/configure/opensearch.adoc
+++ b/docs/modules/servers/pages/distributed/configure/opensearch.adoc
@@ -1,320 +1,8 @@
= Distributed James Server — opensearch.properties
:navtitle: opensearch.properties
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/opensearch.properties[example]
-to get some examples and hints.
-
-If you want more explanation about OpenSearch configuration, you should visit the dedicated https://opensearch.org/[documentation].
-
-== OpenSearch Configuration
-
-This file section is used to configure the connection to an OpenSearch cluster.
-
-Here are the properties allowing to do so :
-
-.opensearch.properties content
-|===
-| Property name | explanation
-
-| opensearch.clusterName
-| Is the name of the cluster used by James.
-
-| opensearch.nb.shards
-| Number of shards for index provisioned by James
-
-| opensearch.nb.replica
-| Number of replicas for index provisioned by James (default: 0)
-
-| opensearch.index.waitForActiveShards
-| Wait for a certain number of active shard copies before proceeding with the operation. Defaults to 1.
-You may consult the https://www.elastic.co/guide/en/elasticsearch/reference/7.10/docs-index_.html#active-shards[documentation] for more information.
-
-| opensearch.retryConnection.maxRetries
-| Number of retries when connecting the cluster
-
-| opensearch.retryConnection.minDelay
-| Minimum delay between connection attempts
-
-| opensearch.max.connections
-| Maximum count of HTTP connections allowed for the OpenSearch driver. Optional integer, if unspecified driver defaults
-applies (30 connections).
-
-| opensearch.max.connections.per.hosts
-| Maximum count of HTTP connections per host allowed for the OpenSearch driver. Optional integer, if unspecified driver defaults
-applies (10 connections).
-
-|===
-
-=== Mailbox search
-
-The main use of OpenSearch within the Distributed Server is indexing the mailbox content of users in order to enable
-powerful and efficient full-text search of the mailbox content.
-
-Data indexing is performed asynchronously in a reliable fashion via a MailboxListener.
-
-Here are the properties related to the use of OpenSearch for Mailbox Search:
-
-.opensearch.properties content
-|===
-| Property name | explanation
-
-| opensearch.index.mailbox.name
-| Name of the mailbox index backed by the alias. It will be created if missing.
-
-| opensearch.index.name
-| *Deprecated* Use *opensearch.index.mailbox.name* instead.
-Name of the mailbox index backed by the alias. It will be created if missing.
-
-| opensearch.alias.read.mailbox.name
-| Name of the alias to use by Apache James for mailbox reads. It will be created if missing.
-The target of the alias is the index name configured above.
-
-| opensearch.alias.read.name
-| *Deprecated* Use *opensearch.alias.read.mailbox.name* instead.
-Name of the alias to use by Apache James for mailbox reads. It will be created if missing.
-The target of the alias is the index name configured above.
-
-| opensearch.alias.write.mailbox.name
-| Name of the alias to use by Apache James for mailbox writes. It will be created if missing.
-The target of the alias is the index name configured above.
-
-| opensearch.alias.write.name
-| *Deprecated* Use *opensearch.alias.write.mailbox.name* instead.
-Name of the alias to use by Apache James for mailbox writes. It will be created if missing.
-The target of the alias is the index name configured above.
-
-| opensearch.indexAttachments
-| Indicates if you wish to index attachments or not (default: true).
-
-| opensearch.indexHeaders
-| Indicates if you wish to index headers or not (default: true). Note that specific headers
-(From, To, Cc, Bcc, Subject, Message-Id, Date, Content-Type) are still indexed in their dedicated type.
-Header indexing is expensive as each header currently need to be stored as a nested document but
-turning off headers indexing result in non-strict compliance with the IMAP / JMAP standards.
-
-| opensearch.message.index.optimize.move
-| When set to true, James will attempt to reindex from the indexed message when moved.
-If the message is not found, it will fall back to the old behavior (The message will be indexed from the blobStore source)
-Default to false.
-
-| opensearch.text.fuzziness.search
-| Use fuzziness on text searches. This option helps to correct user typing mistakes and makes the result a bit more flexible.
-
-Default to false.
-
-| opensearch.indexBody
-| Indicates if you wish to index body or not (default: true). This can be used to decrease the performance cost associated with indexing.
-
-| opensearch.indexUser
-| Indicates if you wish to index user or not (default: false). This can be used to have per user reports in OpenSearch Dashboards.
-
-|===
-
-=== Quota search
-
-Users are indexed by quota usage, allowing operators a quick audit of users quota occupation.
-
-Users quota are asynchronously indexed upon quota changes via a dedicated MailboxListener.
-
-The following properties affect quota search :
-
-.opensearch.properties content
-|===
-| Property name | explanation
-
-| opensearch.index.quota.ratio.name
-| Specify the OpenSearch alias name used for quotas
-
-| opensearch.alias.read.quota.ratio.name
-| Specify the OpenSearch alias name used for reading quotas
-
-| opensearch.alias.write.quota.ratio.name
-| Specify the OpenSearch alias name used for writing quotas
-|===
-
-=== Disabling OpenSearch
-
-OpenSearch component can be disabled but consider it would make search feature to not work. In particular it will break JMAP protocol and the IMAP SEARCH command in a nondeterministic way.
-This is controlled in the `search.properties` file via the `implementation` property (defaults
-to `OpenSearch`). Setting this configuration parameter to `scanning` will effectively disable OpenSearch, no
-further indexation will be done however searches will rely on the scrolling search, leading to expensive and longer
-searches. Disabling OpenSearch requires no extra action, however
-xref:distributed/operate/webadmin.adoc#_reindexing_all_mails[a full re-indexing] needs to be carried out when enabling OpenSearch.
-
-== SSL Trusting Configuration
-
-By default, James will use the system TrustStore to validate https server certificates, if the certificate on
-ES side is already in the system TrustStore, you can leave the sslValidationStrategy property empty or set it to default.
-
-.opensearch.properties content
-|===
-| Property name | explanation
-
-| opensearch.hostScheme.https.sslValidationStrategy
-| Optional. Accept only *default*, *ignore*, *override*. Default is *default*. default: Use the default SSL TrustStore of the system.
-ignore: Ignore SSL Validation check (not recommended).
-override: Override the SSL Context to use a custom TrustStore containing ES server's certificate.
-
-|===
-
-In some cases, you want to secure the connection from clients to ES by setting up a *https* protocol
-with a self signed certificate. And you prefer to leave the system ca-certificates untouched.
-There are possible solutions to let the ES RestHighLevelClient trust your self signed certificate.
-
-Second solution: importing a TrustStore containing the certificate into SSL context.
-A certificate normally contains two parts: a public part in .crt file, another private part in .key file.
-To trust the server, the client needs to be acknowledged that the server's certificate is in the list of
-client's TrustStore. Basically, you can create a local TrustStore file containing the public part of a remote server
-by execute this command:
-
-....
-keytool -import -v -trustcacerts -file certificatePublicFile.crt -keystore trustStoreFileName.jks -keypass fillThePassword -storepass fillThePassword
-....
-
-When there is a TrustStore file and the password to read, fill two options *trustStorePath*
-and *trustStorePassword* with the TrustStore location and the password. ES client will accept
-the certificate of ES service.
-
-.opensearch.properties content
-|===
-| Property name | explanation
-
-| opensearch.hostScheme.https.trustStorePath
-| Optional. Use it when https is configured in opensearch.hostScheme, and sslValidationStrategy is *override*
-Configure OpenSearch rest client to use this trustStore file to recognize nginx's ssl certificate.
-Once you chose *override*, you need to specify both trustStorePath and trustStorePassword.
-
-| opensearch.hostScheme.https.trustStorePassword
-| Optional. Use it when https is configured in opensearch.hostScheme, and sslValidationStrategy is *override*
-Configure OpenSearch rest client to use this trustStore file with the specified password.
-Once you chose *override*, you need to specify both trustStorePath and trustStorePassword.
-
-|===
-
-During SSL handshaking, the client can determine whether accept or reject connecting to a remote server by its hostname.
-You can configure to use which HostNameVerifier in the client.
-
-.opensearch.properties content
-|===
-| Property name | explanation
-
-| opensearch.hostScheme.https.hostNameVerifier
-| Optional. Default is *default*. default: using the default hostname verifier provided by apache http client.
-accept_any_hostname: accept any host (not recommended).
-
-|===
-
-== Search overrides
-
-*Search overrides* allow resolution of predefined search queries against alternative sources of data
-and allow bypassing OpenSearch. This is useful to handle most resynchronisation queries that
-are simple enough to be resolved against Cassandra.
-
-Possible values are:
- - `org.apache.james.mailbox.cassandra.search.AllSearchOverride` Some IMAP clients uses SEARCH ALL to fully list messages in
- a mailbox and detect deletions. This is typically done by clients not supporting QRESYNC and from an IMAP perspective
- is considered an optimisation as less data is transmitted compared to a FETCH command. Resolving such requests against
- Cassandra is enabled by this search override and likely desirable.
- - `org.apache.james.mailbox.cassandra.search.UidSearchOverride`. Same as above but restricted by ranges.
- - `org.apache.james.mailbox.cassandra.search.DeletedSearchOverride`. Find deleted messages by looking up in the relevant Cassandra
- table.
- - `org.apache.james.mailbox.cassandra.search.DeletedWithRangeSearchOverride`. Same as above but limited by ranges.
- - `org.apache.james.mailbox.cassandra.search.NotDeletedWithRangeSearchOverride`. List non deleted messages in a given range.
- Lists all messages and filters out deleted message thus this is based on the following heuristic: most messages are not marked as deleted.
- - `org.apache.james.mailbox.cassandra.search.UnseenSearchOverride`. List unseen messages in the corresponding cassandra projection.
-
-Please note that custom overrides can be defined here. `opensearch.search.overrides` allow specifying search overrides and is a
-coma separated list of search override FQDNs. Default to none.
-
-EG:
-
-----
-opensearch.search.overrides=org.apache.james.mailbox.cassandra.search.AllSearchOverride,org.apache.james.mailbox.cassandra.search.DeletedSearchOverride, org.apache.james.mailbox.cassandra.search.DeletedWithRangeSearchOverride,org.apache.james.mailbox.cassandra.search.NotDeletedWithRangeSearchOverride,org.apache.james.mailbox.cassandra.search.UidSearchOverride,org.apache.james.mailbox.cassandra.search.UnseenSearchOverride
-----
-
-== Configure dedicated language analyzers for mailbox index
-
-OpenSearch supports various language analyzers out of the box: https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-lang-analyzer.html.
-
-James could utilize this to improve the user searching experience upon his language.
-
-While one could modify mailbox index mapping programmatically to customize this behavior, here we should just document a manual way to achieve this without breaking our common index' mapping code.
-
-The idea is modifying mailbox index mappings with the target language analyzer as a JSON file, then submit it directly
-to OpenSearch via cURL command to create the mailbox index before James start. Let's adapt dedicated language analyzers
-where appropriate for the following fields:
-
-.Language analyzers propose change
-|===
-| Field | Analyzer change
-
-| from.name
-| `keep_mail_and_url` analyzer -> `keep_mail_and_url_language_a` analyzer
-
-| subject
-| `keep_mail_and_url` analyzer -> `keep_mail_and_url_language_a` analyzer
-
-| to.name
-| `keep_mail_and_url` analyzer -> `keep_mail_and_url_language_a` analyzer
-
-| cc.name
-| `keep_mail_and_url` analyzer -> `keep_mail_and_url_language_a` analyzer
-
-| bcc.name
-| `keep_mail_and_url` analyzer -> `keep_mail_and_url_language_a` analyzer
-
-| textBody
-| `standard` analyzer -> `language_a` analyzer
-
-| htmlBody
-| `standard` analyzer -> `language_a` analyzer
-
-| attachments.fileName
-| `standard` analyzer -> `language_a` analyzer
-
-| attachments.textContent
-| `standard` analyzer -> `language_a` analyzer
-
-|===
-
-In there:
-
- - `keep_mail_and_url` and `standard` are our current analyzers for mailbox index.
- - `language_a` analyzer: the built-in analyzer of OpenSearch. EG: `french`
- - `keep_mail_and_url_language_a` analyzer: a custom of `keep_mail_and_url` analyzer with some language filters. Every language has
-its own filters so please have a look at the filters which your language needs to add. EG which need to be added for French:
-----
-"filter": {
- "french_elision": {
- "type": "elision",
- "articles_case": true,
- "articles": [
- "l", "m", "t", "qu", "n", "s",
- "j", "d", "c", "jusqu", "quoiqu",
- "lorsqu", "puisqu"
- ]
- },
- "french_stop": {
- "type": "stop",
- "stopwords": "_french_"
- },
- "french_stemmer": {
- "type": "stemmer",
- "language": "light_french"
- }
-}
-----
-
-After modifying above proposed change, you should have a JSON file that contains new setting and mapping of mailbox index. Here
-we provide https://github.com/apache/james-project/blob/master/mailbox/opensearch/example_french_index.json[a sample JSON for French language].
-If you want to customize that JSON file for your own language need, please make these modifications:
-
- - Replace the `french` analyzer with your built-in language (have a look at https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-lang-analyzer.html[built-in language analyzers])
- - Modify `keep_mail_and_url_french` analyzer' filters with your language filters, and customize the analyzer' name.
-
-Please change also `number_of_shards`, `number_of_replicas` and `index.write.wait_for_active_shards` values in the sample file according to your need.
-
-Run this cURL command with above JSON file to create `mailbox_v1` (Mailbox index' default name) index before James start:
-----
-curl -X PUT ES_IP:ES_PORT/mailbox_v1 -H "Content-Type: application/json" -d @example_french_index.json
-----
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+:server-name: Distributed James Server
+:package-tag: cassandra
+include::partial$configure/opensearch.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/pop3.adoc b/docs/modules/servers/pages/distributed/configure/pop3.adoc
index 43db960b86f..1179dadf079 100644
--- a/docs/modules/servers/pages/distributed/configure/pop3.adoc
+++ b/docs/modules/servers/pages/distributed/configure/pop3.adoc
@@ -1,77 +1,7 @@
= Distributed James Server — pop3server.xml
:navtitle: pop3server.xml
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/pop3server.xml[example]
-to get some examples and hints.
-
-The POP3 service is controlled by a configuration block in the pop3server.xml.
-The pop3server tag defines the boundaries of the configuration block. It encloses
-all the relevant configuration for the POP3 server. The behavior of the POP service is
-controlled by the attributes and children of this tag.
-
-This tag has an optional boolean attribute - *enabled* - that defines whether the service is active or not.
-The value defaults to "true" if not present.
-
-The standard children of the pop3server tag are:
-
-.jmx.properties content
-|===
-| Property name | explanation
-
-| bind
-| Configure this to bind to a specific inetaddress. This is an optional integer value.
-This value is the port on which this POP3 server is configured
-to listen. If the tag or value is absent then the service
-will bind to all network interfaces for the machine. If the tag or value is omitted,
-the value will default to the standard POP3 port, 110.
-port 995 is the well-known/IANA registered port for POP3S ie over SSL/TLS
-port 110 is the well-known/IANA registered port for Standard POP3
-
-| connectionBacklog
-|
-
-| tls
-| Set to true to support STARTTLS or SSL for the Socket.
-To create a new keystore execute:
-`keytool -genkey -alias james -keyalg RSA -storetype PKCS12 -keystore /path/to/james/conf/keystore`
-Please note that each POP3 server exposed on different port can specify its own keystore, independently from any other
-TLS based protocols. Read xref:distributed/configure/ssl.adoc[SSL configuration page] for more information.
-
-| handler.helloName
-| This is the name used by the server to identify itself in the POP3
-protocol. If autodetect is TRUE, the server will discover its
-own host name and use that in the protocol. If discovery fails,
-the value of 'localhost' is used. If autodetect is FALSE, James
-will use the specified value.
-
-| handler.connectiontimeout
-| Connection timeout in seconds
-
-| handler.connectionLimit
-| Set the maximum simultaneous incoming connections for this service
-
-| handler.connectionLimitPerIP
-| Set the maximum simultaneous incoming connections per IP for this service
-
-| handler.handlerchain
-| This loads the core CommandHandlers. Only remove this if you really know what you are doing.
-
-| bossWorkerCount
-| Set the maximum count of boss threads. Boss threads are responsible for accepting incoming POP3 connections
-and initializing associated resources. Optional integer, by default, boss threads are not used and this responsibility is being dealt with
-by IO threads.
-
-| ioWorkerCount
-| Set the maximum count of IO threads. IO threads are responsible for receiving incoming POP3 messages and framing them
-(split line by line). IO threads also take care of compression and SSL encryption. Their tasks are short-lived and non-blocking.
-Optional integer, defaults to 2 times the count of CPUs.
-
-| maxExecutorCount
-| Set the maximum count of worker threads. Worker threads take care of potentially blocking tasks like executing POP3 requests. Optional integer, defaults to 16.
-
-| useEpoll
-| true or false - If true uses native EPOLL implementation for Netty otherwise uses NIO. Defaults to false.
-
-| gracefulShutdown
-| true or false - If true attempts a graceful shutdown, which is safer but can take time. Defaults to true.
-|===
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+:server-name: Distributed James Server
+include::partial$configure/pop3.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/queue.adoc b/docs/modules/servers/pages/distributed/configure/queue.adoc
index ce2dfe2bff5..e9907a090a2 100644
--- a/docs/modules/servers/pages/distributed/configure/queue.adoc
+++ b/docs/modules/servers/pages/distributed/configure/queue.adoc
@@ -1,19 +1,5 @@
= Distributed James Server — queue.properties
:navtitle: queue.properties
-This configuration helps you configure mail queue you want to select.
-
-== Queue Configuration
-
-.queue.properties content
-|===
-| Property name | explanation
-
-| mail.queue.choice
-| Mail queue can be implemented by many type of message brokers: Pulsar, RabbitMQ,... This property will choose which mail queue you want, defaulting to RABBITMQ
-|===
-
-`mail.queue.choice` supports the following options:
-
-* You can specify the `RABBITMQ` if you want to choose RabbitMQ mail queue
-* You can specify the `PULSAR` if you want to choose Pulsar mail queue
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+include::partial$configure/queue.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/rabbitmq.adoc b/docs/modules/servers/pages/distributed/configure/rabbitmq.adoc
index f0871e0d5d1..3f183ed4684 100644
--- a/docs/modules/servers/pages/distributed/configure/rabbitmq.adoc
+++ b/docs/modules/servers/pages/distributed/configure/rabbitmq.adoc
@@ -1,137 +1,8 @@
= Distributed James Server — rabbitmq.properties
:navtitle: rabbitmq.properties
-This configuration helps you configure components using RabbitMQ.
-
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/rabbitmq.properties[example]
-to get some examples and hints.
-
-== RabbitMQ Configuration
-
-.rabbitmq.properties content
-|===
-| Property name | explanation
-
-| uri
-| the amqp URI pointing to RabbitMQ server. If you use a vhost, specify it as well at the end of the URI.
-Details about amqp URI format is in https://www.rabbitmq.com/uri-spec.html[RabbitMQ URI Specification]
-
-| management.uri
-| the URI pointing to RabbitMQ Management Service. James need to retrieve some information about listing queues
-from this service in runtime.
-Details about URI format is in https://www.rabbitmq.com/management.html#usage-ui[RabbitMQ Management URI]
-
-| management.user
-| username used to access management service
-
-| management.password
-| password used to access management service
-
-| connection.pool.retries
-| Configure retries count to retrieve a connection. Exponential backoff is performed between each retries.
-Optional integer, defaults to 10
-
-| connection.pool.min.delay.ms
-| Configure initial duration (in ms) between two connection retries. Exponential backoff is performed between each retries.
-Optional integer, defaults to 100
-
-| channel.pool.retries
-| Configure retries count to retrieve a channel. Exponential backoff is performed between each retries.
-Optional integer, defaults to 3
-
-| channel.pool.max.delay.ms
-| Configure timeout duration (in ms) to obtain a rabbitmq channel. Defaults to 30 seconds.
-Optional integer, defaults to 30 seconds.
-
-| channel.pool.size
-| Configure the size of the channel pool.
-Optional integer, defaults to 3
-
-| driver.network.recovery.interval
-| Optional, non-negative integer, default to 100ms. The interval (in ms) that RabbitMQ driver will automatic recovery wait before attempting to reconnect. See https://www.rabbitmq.com/client-libraries/java-api-guide#connection-recovery
-
-| ssl.enabled
-| Is using ssl enabled
-Optional boolean, defaults to false
-
-| ssl.management.enabled
-| Is using ssl on management api enabled
-Optional boolean, defaults to false
-
-| ssl.validation.strategy
-| Configure the validation strategy used for rabbitmq connections. Possible values are default, ignore and override.
-Optional string, defaults to using systemwide ssl configuration
-
-| ssl.truststore
-| Points to the truststore (PKCS12) used for verifying rabbitmq connection. If configured then "ssl.truststore.password" must also be configured,
-Optional string, defaults to systemwide truststore. "ssl.validation.strategy: override" must be configured if you want to use this
-
-| ssl.truststore.password
-| Configure the truststore password. If configured then "ssl.truststore" must also be configured,
-Optional string, defaults to empty string. "ssl.validation.strategy: override" must be configured if you want to use this
-
-| ssl.hostname.verifier
-| Configure host name verification. Possible options are default and accept_any_hostname
-Optional string, defaults to subject alternative name host verifier
-
-| ssl.keystore
-| Points to the keystore(PKCS12) used for client certificate authentication. If configured then "ssl.keystore.password" must also be configured,
-Optional string, defaults to empty string
-
-| ssl.keystore.password
-| Configure the keystore password. If configured then "ssl.keystore" must also be configured,
-Optional string, defaults to empty string
-
-| quorum.queues.enable
-| Boolean. Whether to activate Quorum queue usage for all queues.
-Quorum queues enables high availability.
-False (default value) results in the usage of classic queues.
-
-| quorum.queues.replication.factor
-| Strictly positive integer. The replication factor to use when creating quorum queues.
-
-| quorum.queues.delivery.limit
-| Strictly positive integer. Value for x-delivery-limit queue parameter, default to none. Setting a delivery limit can
-prevent RabbitMQ outage if message processing fails. Read https://www.rabbitmq.com/docs/quorum-queues#poison-message-handling
-
-| hosts
-| Optional, default to the host specified as part of the URI.
-Allow creating cluster aware connections.
-A comma-separated list of hosts, for example: hosts=ip1:5672,ip2:5672
-
-| mailqueue.publish.confirm.enabled
-| Whether or not to enable publish confirms for the mail queue. Optional boolean, defaults to true.
-
-| event.bus.publish.confirm.enabled
-| Whether or not to enable publish confirms for the event bus. Optional boolean, defaults to true.
-
-| event.bus.notification.durability.enabled
-| Whether or not the queue backing notifications should be durable. Optional boolean, defaults to true.
-
-| event.bus.propagate.dispatch.error
-| Whether to propagate errors back to the callers when eventbus fails to dispatch group events to RabbitMQ (then store the failed events in the event dead letters).
-Optional boolean, defaults to true.
-
-| vhost
-| Optional string. This parameter is only a workaround to support invalid URIs containing character like '_'.
-You still need to specify the vhost in the uri parameter.
-
-|===
-
-== Tuning RabbitMQ for quorum queue use
-
-While quorum queues are great at preserving your data and enabling High Availability, they demand more resources and
-a greater care than regular RabbitMQ queues.
-
-See link:https://www.rabbitmq.com/docs/quorum-queues#performance-tuning[this section of RabbitMQ documentation regarding RabbitMQ quorum queue performance tuning].
-
- - Provide decent amount of RAM memory to RabbitMQ. 4GB is a good start.
- - Setting a delivery limit is advised as looping messages can cause extreme memory consumptions onto quorum queues.
- - Set up Raft for small messages:
-
-....
-raft.segment_max_entries = 32768
-....
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+include::partial$configure/rabbitmq.adoc[]
== RabbitMQ MailQueue Configuration
@@ -153,19 +24,19 @@ Not necessarily needed for MDA deployments, mail queue management adds significa
| mailqueue.view.sliceWindow
| James divides the view into slices, each slice contains data for a given period, sliceWindow parameter controls this period.
This dividing of periods allows faster browsing of the mail queue. Tips for choosing sliceWindow are explained in
-https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/rabbitmq.properties[rabbitmq.properties]
+{sample-configuration-prefix-url}/rabbitmq.properties[rabbitmq.properties]
| mailqueue.view.bucketCount
| Mails in a mail queue are distributed across the underlying storage service.
BucketCount describes how to be distributing mails to fit with your James setup
Tips for choosing bucketCount are explained in
-https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/rabbitmq.properties[rabbitmq.properties]
+{sample-configuration-prefix-url}/rabbitmq.properties[rabbitmq.properties]
| mailqueue.view.updateBrowseStartPace
| To browse, James needs a starting point and to continuously update that point in runtime.
UpdateBrowseStartPace describes the probability to update the starting point.
Tips for choosing updateBrowseStartPace are explained in
-https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/rabbitmq.properties[rabbitmq.properties]
+{sample-configuration-prefix-url}/rabbitmq.properties[rabbitmq.properties]
| mailqueue.size.metricsEnabled
| By default, the metrics are disabled for the mail queue size.
@@ -173,7 +44,7 @@ As computing the size of the mail queue is currently implemented on top of brows
sometimes it can get too big, making it impossible for the ES reporter to handle it correctly without crashing.
It can be useful then to disable it.
Tips for choosing metricsEnabled are explained in
-https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/rabbitmq.properties[rabbitmq.properties]
+{sample-configuration-prefix-url}/rabbitmq.properties[rabbitmq.properties]
| notification.queue.ttl
| Configure queue ttl (in ms). References: https://www.rabbitmq.com/ttl.html#queue-ttl.
@@ -181,34 +52,3 @@ This is used only on queues used to share notification patterns, are exclusive t
Optional integer, defaults is 3600000.
|===
-
-== RabbitMQ Tasks Configuration
-
-Tasks are WebAdmin triggered long running jobs. RabbitMQ is used to organise their execution in a work queue,
-with an exclusive consumer.
-
-.rabbitmq.properties content
-|===
-| Property name | explanation
-
-| task.consumption.enabled
-| Whether to enable task consumption on this node.
-Disable with caution (this only makes sense in a distributed setup where other nodes consume tasks).
-Defaults to true.
-
-Limitation: Sometimes, some tasks running on James can be very heavy and take a couple of hours to complete.
-If other tasks are being triggered meanwhile on WebAdmin, they go on the TaskManagerWorkQueue and James unack them,
-telling RabbitMQ it will consume them later. If they don't get consumed before the consumer timeout setup in
-RabbitMQ (default being 30 minutes), RabbitMQ closes the channel on an exception. It is thus advised to declare a
-longer timeout in rabbitmq.conf. More https://www.rabbitmq.com/consumers.html#acknowledgement-timeout[here].
-
-| task.queue.consumer.timeout
-| Task queue consumer timeout.
-
-Optional. Duration (support multiple time units cf `DurationParser`), defaults to 1 day.
-
-Required at least RabbitMQ version 3.12 to have effect.
-This is used to avoid the task queue consumer (which could run very long tasks) being disconnected by RabbitMQ after the default acknowledgement timeout 30 minutes.
-References: https://www.rabbitmq.com/consumers.html#acknowledgement-timeout.
-
-|===
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/recipientrewritetable.adoc b/docs/modules/servers/pages/distributed/configure/recipientrewritetable.adoc
index 108e09e56fc..983756ca61c 100644
--- a/docs/modules/servers/pages/distributed/configure/recipientrewritetable.adoc
+++ b/docs/modules/servers/pages/distributed/configure/recipientrewritetable.adoc
@@ -1,18 +1,7 @@
= Distributed James Server — recipientrewritetable.xml
:navtitle: recipientrewritetable.xml
-Here are explanations on the different kinds about xref:distributed/architecture/index.adoc#_recipient_rewrite_tables[recipient rewriting].
-
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/recipientrewritetable.xml[example]
-to get some examples and hints.
-
-.recipientrewritetable.xml
-|===
-| Property name | explanation
-
-| recursiveMapping
-| If set to false only the first mapping will get processed - Default true.
-
-| mappingLimit
-|By setting the mappingLimit you can specify how much mapping will get processed before a bounce will send. This avoids infinity loops. Default 10.
-|===
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+:server-name: Distributed James Server
+include::partial$configure/recipientrewritetable.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/redis.adoc b/docs/modules/servers/pages/distributed/configure/redis.adoc
index 0d318b89cee..659ca53b354 100644
--- a/docs/modules/servers/pages/distributed/configure/redis.adoc
+++ b/docs/modules/servers/pages/distributed/configure/redis.adoc
@@ -1,47 +1,5 @@
= Distributed James Server — redis.properties
:navtitle: redis.properties
-This configuration helps you configure components using Redis. This so far only includes optional rate limiting component.
-
-Consult this link:https://github.com/apache/james-project/blob/fabfdf4874da3aebb04e6fe4a7277322a395536a/server/mailet/rate-limiter-redis/redis.properties[example]
-to get some examples and hints.
-
-== Redis Configuration
-
-.redis.properties content
-|===
-| Property name | explanation
-
-| redisURL
-| the Redis URI pointing to Redis server. Compulsory.
-
-| redis.topology
-| Redis server topology. Defaults to standalone. Possible values: standalone, cluster, master-replica
-
-| redis.readFrom
-| The property to determine how Lettuce routes read operations to Redis server with topologies other than standalone. Defaults to master. Possible values: master, masterPreferred, replica, replicaPreferred, any
-
-Reference: https://github.com/redis/lettuce/wiki/ReadFrom-Settings
-
-| redis.ioThreads
-| IO threads to be using for the underlying Netty networking resources. If unspecified driver defaults applies.
-
-| redis.workerThreads
-| Worker threads to be using for the underlying driver. If unspecified driver defaults applies.
-|===
-
-== Enabling Multithreading in Redis
-
-Redis 6 and later versions support multithreading, but by default, Redis operates as a single-threaded process.
-
-On a virtual machine with multiple CPU cores, you can enhance Redis performance by enabling multithreading. This can significantly improve I/O operations, particularly for workloads with high concurrency or large data volumes.
-
-See link:https://redis.io/docs/latest/operate/oss_and_stack/management/config-file/[THREADED I/O section].
-
-Example if you have a 4 cores CPU, you can enable the following lines in the `redis.conf` file:
-....
-io-threads 3
-io-threads-do-reads yes
-....
-
-However, if your machine has only 1 CPU core or your Redis usage is not intensive, you will not benefit from this.
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+include::partial$configure/redis.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/remote-delivery-error-handling.adoc b/docs/modules/servers/pages/distributed/configure/remote-delivery-error-handling.adoc
index 55764e7a5d6..68efbdb38f6 100644
--- a/docs/modules/servers/pages/distributed/configure/remote-delivery-error-handling.adoc
+++ b/docs/modules/servers/pages/distributed/configure/remote-delivery-error-handling.adoc
@@ -1,117 +1,8 @@
= Distributed James Server — About RemoteDelivery error handling
:navtitle: About RemoteDelivery error handling
-The advanced server mailQueue implemented by combining RabbitMQ for messaging and Cassandra for administrative operation
-does not support delays.
-
-Delays are an important feature for Mail Exchange servers, allowing to defer in time the retries, potentially letting the
-time for the remote server to recover. Furthermore, they enable implementation of advanced features like throttling and
-rate limiting of emails sent to a given domain.
-
-As such, the use of the distributed server as a Mail Exchange server is currently discouraged.
-
-However, for operators willing to inter-operate with a limited set of well-identified, trusted remote mail servers, such
-limitation can be reconsidered. The main concern then become error handling for remote mail server failures. The following
-document will present a well tested strategy for Remote Delivery error handling leveraging standards Mail Processing components
-and mechanisms.
-
-== Expectations
-
-Such a solution should:
-
-- Attempt delivery a single time
-- Store transient and permanent failure in different mail repositories
-- After a given number of tries, transient failures should be considered permanent
-
-== Design
-
-image::remote-delivery-error-handling.png[Schema detailing the proposed solution]
-
-- Remote Delivery is configured for performing a single retry.
-- Remote Delivery attaches the error code and if the failure is permanent/temporary when transferring failed emails to the
-bounce processor.
-- The specified bounce processor will categorise the failure, and store temporary and permanent failures in different
-mail repositories.
-- A reprocessing of the temporary delivery errors mailRepository needs to be scheduled in a recurring basis. For
-instance via a CRON job calling the right webadmin endpoint.
-- A counter ensures that a configured number of delivery tries is not exceeded.
-
-=== Limitation
-
-MailRepositories are not meant for transient data storage, and thus are prone to tombstone issues.
-
-This might be acceptable if you need to send mail to well-known peers. For instance handling your mail gateway failures.
-However a Mail Exchange server doing relay on the internet would quickly hit this limitation.
-
-Also note that external triggering of the retry process is needed.
-
-== Operation
-
-Here is an example of configuration achieving the proposed solution:
-
-....
-
-
-
- outgoing
- 0
- 0
- 10
- true
-
- remote-delivery-error
-
-
-
- cassandra://var/mail/error/remote-delivery/permanent/
-
-
-
-
-
-
- cassandra://var/mail/error/remote-delivery/temporary/
-
-
-
- cassandra://var/mail/error/remote-delivery/permanent/
-
-
-
- cassandra://var/mail/error/
-
-
-....
-
-Note:
-
-- The *relay* processor holds a RemoteDelivery mailet configured to do a single try, at most 5 times (see the AtMost matcher).
-Mails exceeding the AtMost condition are considered as permanent delivery errors. Delivery errors are sent to the
-*remote-delivery-error* processor.
-- The *remote-delivery-error* stores temporary and permanent errors.
-- Permanent relay errors are stored in `cassandra://var/mail/error/remote-delivery/permanent/`.
-- Temporary relay errors are stored in `cassandra://var/mail/error/remote-delivery/temporary/`.
-
-In order to retry the relay of temporary failed emails, operators will have to configure a cron job for reprocessing
-emails from *cassandra://var/mail/error/remote-delivery/temporary/* mailRepository into the *relay* processor.
-
-This can be achieved via the following webAdmin call :
-
-....
-curl -XPATCH 'http://ip:8000/mailRepositories/cassandra%3A%2F%2Fvar%2Fmail%2Ferror%2Fremote-delivery%2Ftemporary%2F/mails?action=reprocess&processor=relay'
-....
-
-See xref:distributed/operate/webadmin.adoc#_reprocessing_mails_from_a_mail_repository[the documentation].
-
-Administrators need to keep a close eye on permanent errors (that might require audit, and potentially contacting the remote
-service supplier).
-
-To do so, one should regularly audit the content of *cassandra://var/mail/error/remote-delivery/permanent/*. This can be done
-via webAdmin calls:
-
-....
-curl -XGET 'http://ip:8000/mailRepositories/cassandra%3A%2F%2Fvar%2Fmail%2Ferror%2Fremote-delivery%2Ftemporary%2F/mails'
-....
-
-See xref:distributed/operate/webadmin.adoc#_listing_mails_contained_in_a_mail_repository[the documentation].
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+:server-name: Distributed James Server
+:mailet-repository-path-prefix: cassandra
+include::partial$configure/remote-delivery-error-handling.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/search.adoc b/docs/modules/servers/pages/distributed/configure/search.adoc
index 735b843bfa9..f4d5b156716 100644
--- a/docs/modules/servers/pages/distributed/configure/search.adoc
+++ b/docs/modules/servers/pages/distributed/configure/search.adoc
@@ -1,18 +1,5 @@
= Distributed James Server — Search configuration
:navtitle: Search configuration
-This configuration helps you configure the components used to back search.
-
-.search.properties content
-|===
-| Property name | explanation
-
-| implementation
-| The implementation to be used for search. Should be one of:
- - *opensearch* : Index and search mails into OpenSearch.
- - *scanning* : Do not index documents and perform scanning search, scrolling mailbox for matching contents.
- This implementation can have a prohibitive cost.
- - *opensearch-disabled* : Saves events to index into event dead letter. Make searches fails.
- This is useful to start James without OpenSearch while still tracking messages to index for later recovery. This
- can be used in order to ease delays for disaster recovery action plans.
-|===
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+include::partial$configure/search.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/sieve.adoc b/docs/modules/servers/pages/distributed/configure/sieve.adoc
index 3874f3c6c47..b3b3c4f16fa 100644
--- a/docs/modules/servers/pages/distributed/configure/sieve.adoc
+++ b/docs/modules/servers/pages/distributed/configure/sieve.adoc
@@ -1,92 +1,7 @@
= Sieve
:navtitle: Sieve
-James servers are able to evaluate and execute Sieve scripts.
-
-Sieve is an extensible mail filtering language. It's limited
-expressiveness (no loops or variables, no tests with side
-effects) allows user created scripts to be run safely on email
-servers. Sieve is targeted at the final delivery phase (where
-an incoming email is transferred to a user's mailbox).
-
-The following Sieve capabilities are supported by Apache James:
-
- - link:https://www.ietf.org/rfc/rfc2234.txt[RFC 2234 ABNF]
- - link:https://www.ietf.org/rfc/rfc2244.txt[RFC 2244 ACAP]
- - link:https://www.ietf.org/rfc/rfc2298.txt[RFC 2298 MDN]
- - link:https://tools.ietf.org/html/rfc5228[RFC 5228 Sieve]
- - link:https://tools.ietf.org/html/rfc4790[RFC 4790 IAPCR]
- - link:https://tools.ietf.org/html/rfc5173[RFC 5173 Body Extension]
- - link:https://datatracker.ietf.org/doc/html/rfc5230[RFC 5230 Vacations]
-
-To be correctly executed, please note that the *Sieve* mailet is required to be positioned prior the
-*LocalDelivery* mailet.
-
-== Managing Sieve scripts
-
-A user willing to manage his Sieve scripts on the server can do so via several means:
-
-He can ask an admin to upload his script via the xref:distributed/operate/cli.adoc[CLI]
-
-As James supports ManageSieve (link:https://datatracker.ietf.org/doc/html/rfc5804[RFC-5804]) a user
-can thus use compatible software to manage his Sieve scripts.
-
-== ManageSieve protocol
-
-*WARNING*: ManageSieve protocol should be considered experimental.
-
-Consult link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/managesieveserver.xml[managesieveserver.xml]
-in GIT to get some examples and hints.
-
-The service is controlled by a configuration block in the managesieveserver.xml.
-The managesieveserver tag defines the boundaries of the configuration block. It encloses
-all the relevant configuration for the ManageSieve server. The behavior of the ManageSieve service is
-controlled by the attributes and children of this tag.
-
-This tag has an optional boolean attribute - *enabled* - that defines whether the service is active or not.
-The value defaults to "false" if
-not present.
-
-The standard children of the managesieveserver tag are:
-
-.managesieveserver.xml content
-|===
-| Property name | explanation
-
-| bind
-| Configure this to bind to a specific inetaddress. This is an optional integer value. This value is the port on which this ManageSieve server is configured to listen. If the tag or value is absent then the service
-will bind to all network interfaces for the machine If the tag or value is omitted, the value will default to the standard ManageSieve port (port 4190 is the well-known/IANA registered port for ManageSieve.)
-
-| tls
-| Set to true to support STARTTLS or SSL for the Socket.
-To use this you need to copy sunjce_provider.jar to /path/james/lib directory. To create a new keystore execute:
-`keytool -genkey -alias james -keyalg RSA -storetype PKCS12 -keystore /path/to/james/conf/keystore`.
-Please note that each ManageSieve server exposed on different port can specify its own keystore, independently from any other
-TLS based protocols.
-
-| connectionBacklog
-| Number of connection backlog of the server (maximum number of queued connection requests)
-
-| connectiontimeout
-| Connection timeout in seconds
-
-| connectionLimit
-| Set the maximum simultaneous incoming connections for this service
-
-| connectionLimitPerIP
-| Set the maximum simultaneous incoming connections per IP for this service
-
-| bossWorkerCount
-| Set the maximum count of boss threads. Boss threads are responsible for accepting incoming ManageSieve connections
-and initializing associated resources. Optional integer, by default, boss threads are not used and this responsibility is being dealt with
-by IO threads.
-
-| ioWorkerCount
-| Set the maximum count of IO threads. IO threads are responsible for receiving incoming ManageSieve messages and framing them
-(split line by line). IO threads also take care of compression and SSL encryption. Their tasks are short-lived and non-blocking.
-Optional integer, defaults to 2 times the count of CPUs.
-
-| maxExecutorCount
-| Set the maximum count of worker threads. Worker threads take care of potentially blocking tasks like executing ManageSieve commands.
-Optional integer, defaults to 16.
-|===
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+:server-name: Distributed James Server
+include::partial$configure/sieve.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/smtp-hooks.adoc b/docs/modules/servers/pages/distributed/configure/smtp-hooks.adoc
index 5dd48b0edce..45051231326 100644
--- a/docs/modules/servers/pages/distributed/configure/smtp-hooks.adoc
+++ b/docs/modules/servers/pages/distributed/configure/smtp-hooks.adoc
@@ -1,370 +1,7 @@
= Distributed James Server — SMTP Hooks
:navtitle: SMTP Hooks
-This documentation page lists and documents SMTP hooks that can be used within the
-Distributed Server SMTP protocol stack in order to customize the way your SMTP server
-behaves without of the box components.
-
-== DNSRBLHandler
-
-This command handler check against https://www.wikiwand.com/en/Domain_Name_System-based_Blackhole_List[RBL-Lists]
-(Real-time Blackhole List).
-
-If getDetail is set to true it try to retrieve information from TXT Record
-why the ip was blocked. Default to false.
-
-before you enable out the DNS RBL handler documented as an example below,
-please take a moment to review each block in the list.
-We have included some that various JAMES committers use,
-but you must decide which, if any, are appropriate
-for your environment.
-
-The mail servers hosting
-@apache.org mailing lists, for example, use a
-slightly different list than we have included below.
-And it is likely that most JAMES committers also have
-slightly different sets of lists.
-
-The SpamAssassin user's list would be one good place to discuss the
-measured quality of various block lists.
-
-NOTA BENE: the domain names, below, are terminated
-with '.' to ensure that they are absolute names in
-DNS lookups. Under some circumstances, names that
-are not explicitly absolute could be treated as
-relative names, leading to incorrect results. This
-has been observed on *nix and MS-Windows platforms
-by users of multiple mail servers, and is not JAMES
-specific. If you are unsure what this means for you,
-please speak with your local system/network admins.
-
-This handler should be considered experimental.
-
-Example configuration:
-
-....
-
-
-
- false
-
- query.bondedsender.org.
- sbl-xbl.spamhaus.org.
- dul.dnsbl.sorbs.net.
- list.dsbl.org.
-
-
-
-....
-
-== DSN hooks
-
-The Distributed server has optional support for DSN (link:https://tools.ietf.org/html/rfc3461[RFC-3461])
-
-Please read carefully xref:distributed/configure/dsn.adoc[this page].
-
-....
-
- <...>
-
-
-
-
-
- <...>
-
-
-
-....
-
-Note that a specific configuration of xref:distributed/configure/mailetcontainer.adoc[mailetcontainer.xml] is
-required as well to be spec compliant.
-
-== MailPriorityHandler
-
-This handler can add a hint to the mail which tells the MailQueue which email should get processed first.
-
-Normally the MailQueue will just handle Mails in FIFO manner.
-
-Valid priority values are 1,5,9 where 9 is the highest.
-
-This handler should be considered experimental.
-
-Example configuration:
-
-....
-
-
-
-
-
- yourdomain1
- 1
-
-
- yourdomain2
- 9
-
-
-
-
-....
-
-== MaxRcptHandler
-If activated you can limit the maximal recipients.
-
-This handler should be considered experimental.
-
-Example configuration:
-
-....
-
-
-
- 10
-
-
-....
-
-== POP3BeforeSMTPHandler
-
-This connect handler can be used to enable POP3 before SMTP support.
-
-Please note that only the ip get stored to identify an authenticated client.
-
-The expireTime is the time after which an ipAddress is handled as expired.
-
-This handler should be considered as unsupported.
-
-Example configuration:
-
-....
-
-
-
- 1 hour
-
-
-....
-
-== ResolvableEhloHeloHandler
-
-Checks for resolvable HELO/EHLO before accept the HELO/EHLO.
-
-If checkAuthNetworks is set to true sender domain will be checked also for clients that
-are allowed to relay. Default is false.
-
-This handler should be considered experimental.
-
-Example configuration:
-
-....
-
-
-
-
-....
-
-== ReverseEqualsEhloHeloHandler
-
-Checks HELO/EHLO is equal the reverse of the connecting client before accept it
-If checkAuthNetworks is set to true sender domain will be checked also for clients that
-are allowed to relay. Default is false.
-
-This handler should be considered experimental.
-
-Example configuration:
-
-....
-
-
-
-
-....
-
-== SetMimeHeaderHandler
-
-This handler allows you to add mime headers to the processed mails.
-
-This handler should be considered experimental.
-
-Example configuration:
-
-....
-
-
-
- SPF-test
- passed
-
-
-....
-
-== SpamAssassinHandler
-
-This MessageHandler could be used to check message against spamd before
-accept the email. So it's possible to reject a message on smtplevel if a
-configured hits amount is reached.
-
-This handler should be considered experimental.
-
-Example configuration:
-
-....
-
-
-
- 127.0.0.1
- 783
- 10
-
-
-....
-
-== SPFHandler
-
-This command handler can be used to reject emails with not match the SPF record of the sender domain.
-
-If checkAuthNetworks is set to true sender domain will be checked also for clients that
-are allowed to relay. Default is false.
-
-This handler should be considered experimental.
-
-Example configuration:
-
-....
-
-
-
- false
- true
-
-
-....
-
-== URIRBLHandler
-
-This MessageHandler could be used to extract domain out of the message and check
-this domains against uriRbllists. See http://www.surbl.org for more information.
-The message get rejected if a domain matched.
-
-This handler should be considered experimental.
-
-Example configuration:
-
-....
-
-
-
- reject
- true
-
- multi.surbl.org
-
-
-
-....
-
-== ValidRcptHandler
-
-With ValidRcptHandler, all email will get rejected which has no valid user.
-
-You need to add the recipient to the validRecipient list if you want
-to accept email for a recipient which not exist on the server.
-
-If you want James to act as a spamtrap or honeypot, you may comment ValidRcptHandler
-and implement the needed processors in spoolmanager.xml.
-
-This handler should be considered stable.
-
-Example configuration:
-
-....
-
-
-
-
-....
-
-== ValidSenderDomainHandler
-
-If activated mail is only accepted if the sender contains
-a resolvable domain having a valid MX Record or A Record associated!
-
-If checkAuthNetworks is set to true sender domain will be checked also for clients that
-are allowed to relay. Default is false.
-
-Example configuration:
-
-....
-
-
-
-
-....
-
-== FUTURERELEASE hooks
-
-The Distributed server has optional support for FUTURERELEASE (link:https://www.rfc-editor.org/rfc/rfc4865.html[RFC-4865])
-
-....
-
- <...>
-
-
-
-
-
-
-....
-
-== Message Transfer Priorities hooks
-
-The Distributed server has optional support for SMTP Extension for Message Transfer Priorities (link:https://www.rfc-editor.org/rfc/rfc6710.html[RFC-6710])
-
-The SMTP server does not allow positive priorities from unauthorized sources and sets the priority to the default value (0).
-
-....
-
- <...>
-
-
-
-
-
-
-
-....
-
-== DKIM checks hooks
-
-Hook for verifying DKIM signatures of incoming mails.
-
-This hook can be restricted to specific sender domains and authenticate those emails against
-their DKIM signature. Given a signed outgoing traffic this hook can use operators to accept legitimate
-emails emitted by their infrastructure but redirected without envelope changes to there own domains by
-some intermediate third parties. See link:https://issues.apache.org/jira/browse/JAMES-4032[JAMES-4032].
-
-Supported configuration elements:
-
-- *forceCRLF*: Should CRLF be forced when computing body hashes.
-- *onlyForSenderDomain*: If specified, the DKIM checks are applied just for the emails whose MAIL FROM specifies this domain. If unspecified, all emails are checked (default).
-- *signatureRequired*: If DKIM signature is checked, the absence of signature will generate failure. Defaults to false.
-- *expectedDToken*: If DKIM signature is checked, the body should contain at least one DKIM signature with this d token. If unspecified, all d tokens are considered valid (default).
-
-Example handlerchain configuration for `smtpserver.xml`:
-
-....
-
-
- true
- apache.org
- true
- apache.org
-
-
-
-....
-
-Would allow emails using `apache.org` as a MAIL FROM domain if, and only if they contain a
-valid DKIM signature for the `apache.org` domain.
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+:server-name: Distributed James Server
+include::partial$configure/smtp-hooks.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/smtp.adoc b/docs/modules/servers/pages/distributed/configure/smtp.adoc
index 34e22887143..85f0845c48a 100644
--- a/docs/modules/servers/pages/distributed/configure/smtp.adoc
+++ b/docs/modules/servers/pages/distributed/configure/smtp.adoc
@@ -1,316 +1,7 @@
= Distributed James Server — smtpserver.xml
:navtitle: smtpserver.xml
-== Incoming SMTP
-
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/smtpserver.xml[example]
-to get some examples and hints.
-
-The SMTP service is controlled by a configuration block in the smptserver.xml.
-The smtpserver tag defines the boundaries of the configuration block. It encloses
-all the relevant configuration for the SMTP server. The behavior of the SMTP service is
-controlled by the attributes and children of this tag.
-
-This tag has an optional boolean attribute - *enabled* - that defines whether the service is active or not. The value defaults to "true" if
-not present.
-
-The standard children of the smtpserver tag are:
-
-.smtpserver.xml content
-|===
-| Property name | explanation
-
-| bind
-| A list of address:port separed by comma - This is an optional value. If present, this value is a string describing
-the IP address to which this service should be bound. If the tag or value is absent then the service
-will bind to all network interfaces for the machine on port 25. Port 25 is the well-known/IANA registered port for SMTP.
-Port 465 is the well-known/IANA registered port for SMTP over TLS.
-
-| connectBacklog
-|The IP address (host name) the MBean Server will bind/listen to.
-
-| tls
-| Set to true to support STARTTLS or SSL for the Socket.
-To use this you need to copy sunjce_provider.jar to /path/james/lib directory. To create a new keystore execute:
-`keytool -genkey -alias james -keyalg RSA -storetype PKCS12 -keystore /path/to/james/conf/keystore`.
-The algorithm is optional and only needs to be specified when using something other
-than the Sun JCE provider - You could use IbmX509 with IBM Java runtime.
-Please note that each SMTP/LMTP server exposed on different port can specify its own keystore, independently from any other
-TLS based protocols.
-
-| helloName
-| This is a required tag with an optional body that defines the server name
-used in the initial service greeting. The tag may have an optional attribute - *autodetect*. If
-the autodetect attribute is present and true, the service will use the local hostname
-returned by the Java libraries. If autodetect is absent or false, the body of the tag will be used. In
-this case, if nobody is present, the value "localhost" will be used.
-
-| connectionTimeout
-| This is an optional tag with a non-negative integer body. Connection timeout in seconds.
-
-| connectionLimit
-| Set the maximum simultaneous incoming connections for this service.
-
-| connectionLimitPerIP
-| Set the maximum simultaneous incoming connections per IP for this service.
-
-| proxyRequired
-| Enables proxy support for this service for incoming connections. HAProxy's protocol
-(https://www.haproxy.org/download/2.7/doc/proxy-protocol.txt) is used and might be compatible
-with other proxies (e.g. traefik). If enabled, it is *required* to initiate the connection
-using HAProxy's proxy protocol.
-
-| authRequired
-| (deprecated) use auth.announce instead.
-
-This is an optional tag with a boolean body. If true, then the server will
-announce authentication after HELO command. If this tag is absent, or the value
-is false then the client will not be prompted for authentication. Only simple user/password authentication is
-supported at this time. Supported values:
-
- * true: announced only to not authorizedAddresses
-
- * false: don't announce AUTH. If absent, *authorizedAddresses* are set to a wildcard to accept all remote hosts.
-
- * announce: like true, but always announce AUTH capability to clients
-
-Please note that emails are only relayed if, and only if, the user did authenticate, or is in an authorized network,
-regardless of this option.
-
-| auth.announce
-| This is an optional tag. Possible values are:
-
-* never: Don't announce auth.
-
-* always: always announce AUTH capability to clients.
-
-* forUnauthorizedAddresses: announced only to not authorizedAddresses
-
-Please note that emails are only relayed if, and only if, the user did authenticate, or is in an authorized network,
-regardless of this option.
-
-| auth.requireSSL
-| This is an optional tag, defaults to true. If true, authentication is not advertised via capabilities on unencrypted
-channels.
-
-| auth.plainAuthEnabled
-| This is an optional tag, defaults to true. If false, AUTH PLAIN and AUTH LOGIN will not be exposed. This setting
-can be used to enforce strong authentication mechanisms.
-
-| auth.oidc.oidcConfigurationURL
-| Provide OIDC url address for information to user. Only configure this when you want to authenticate SMTP server using a OIDC provider.
-
-| auth.oidc.jwksURL
-| Provide url to get OIDC's JSON Web Key Set to validate user token. Only configure this when you want to authenticate SMTP server using a OIDC provider.
-
-| auth.oidc.claim
-| Claim string uses to identify user. E.g: "email_address". Only configure this when you want to authenticate SMTP server using a OIDC provider.
-
-| auth.oidc.scope
-| An OAuth scope that is valid to access the service (RF: RFC7628). Only configure this when you want to authenticate SMTP server using a OIDC provider.
-
-| auth.oidc.introspection.url
-| Optional. An OAuth introspection token URL will be called to validate the token (RF: RFC7662).
-Only configure this when you want to validate the revocation token by the OIDC provider.
-Note that James always verifies the signature of the token even whether this configuration is provided or not.
-
-| auth.oidc.introspection.auth
-| Optional. Provide Authorization in header request when introspecting token.
-Eg: `Basic xyz`
-
-| auth.oidc.userinfo.url
-| Optional. An Userinfo URL will be called to validate the token (RF: OpenId.Core https://openid.net/specs/openid-connect-core-1_0.html).
-Only configure this when you want to validate the revocation token by the OIDC provider.
-Note that James always verifies the signature of the token even whether this configuration is provided or not.
-James will ignore check token by userInfo if the `auth.oidc.introspection.url` is already configured
-
-| authorizedAddresses
-| Authorize specific addresses/networks.
-
-If you use SMTP AUTH, addresses that match those specified here will
-be permitted to relay without SMTP AUTH. If you do not use SMTP
-AUTH, and you specify addresses here, then only addresses that match
-those specified will be permitted to relay.
-
-Addresses may be specified as a IP address or domain name, with an
-optional netmask, e.g.,
-
-127.*, 127.0.0.0/8, 127.0.0.0/255.0.0.0, and localhost/8 are all the same
-
-See also the RemoteAddrNotInNetwork matcher in the transport processor.
-You would generally use one OR the other approach.
-
-| verifyIdentity
-| This is an optional tag. This options governs MAIL FROM verifications, and prevents spoofing of the MAIL FROM
-envelop field.
-
-The following values are supported:
-
- - `strict`: use of a local domain in MAIL FROM requires the SMTP client to be authenticated with a matching user or one
- of its aliases. It will verify that the sender address matches the address of the user or one of its alias (from user or domain aliases).
- This prevents a user of your mail server from acting as someone else
- - `disabled`: no check is performed and third party are free to send emails as local users. Note that relaying emails will
- need third party to be authenticated thus preventing open relays.
- - `relaxed`: Based on a simple heuristic to determine if the SMTP client is a MUA or a MX (use of a valid domain in EHLO),
- we do act as `strict` for MUAs thus prompting them early for the need of authentication, but accept use of local MAIL FROM for
- MX. Authentication can then be delayed to later, eg after DATA transaction with the DKIMHook which might allow email looping through
- third party domains via mail redirection, effectively enforcing that the mail originates from our servers. See
- link:https://issues.apache.org/jira/browse/JAMES-4032[JAMES-4032] for detailed explanation.
-
-Backward compatibility is provided and thus the following values are supported:
-
- - `true`: act as `strict`
- - `false`: act as `disabled`
-
-| maxmessagesize
-| This is an optional tag with a non-negative integer body. It specifies the maximum
-size, in kbytes, of any message that will be transmitted by this SMTP server. It is a service-wide, as opposed to
-a per user, limit. If the value is zero then there is no limit. If the tag isn't specified, the service will
-default to an unlimited message size. Must be a positive integer, optionally with a unit: B, K, M, G.
-
-| heloEhloEnforcement
-| This sets whether to enforce the use of HELO/EHLO salutation before a
-MAIL command is accepted. If unspecified, the value defaults to true.
-
-| smtpGreeting
-| This sets the SMTPGreeting which will be used when connect to the smtpserver
-If none is specified a default is generated
-
-| handlerchain
-| The configuration handler chain. See xref:distributed/configure/smtp-hooks.adoc[this page] for configuring out-of the
-box extra SMTP handlers and hooks.
-
-| bossWorkerCount
-| Set the maximum count of boss threads. Boss threads are responsible for accepting incoming SMTP connections
-and initializing associated resources. Optional integer, by default, boss threads are not used and this responsibility is being dealt with
-by IO threads.
-
-| ioWorkerCount
-| Set the maximum count of IO threads. IO threads are responsible for receiving incoming SMTP messages and framing them
-(split line by line). IO threads also take care of compression and SSL encryption. Their tasks are short-lived and non-blocking.
-Optional integer, defaults to 2 times the count of CPUs.
-
-| maxExecutorCount
-| Set the maximum count of worker threads. Worker threads takes care of potentially blocking tasks like executing SMTP commands.
-Optional integer, defaults to 16.
-
-| useEpoll
-| true or false - If true uses native EPOLL implementation for Netty otherwise uses NIO. Defaults to false.
-
-| gracefulShutdown
-| true or false - If true attempts a graceful shutdown, which is safer but can take time. Defaults to true.
-
-| disabledFeatures
-| Extended SMTP features to hide in EHLO responses.
-|===
-
-=== OIDC setup
-James SMTP support XOAUTH2 authentication mechanism which allow authenticating against a OIDC providers.
-Please configure `auth.oidc` part to use this.
-
-We do supply an link:https://github.com/apache/james-project/tree/master/examples/oidc[example] of such a setup.
-It uses the Keycloak OIDC provider, but usage of similar technologies is definitely doable.
-
-== About open relays
-
-Authenticated SMTP is a method of securing your SMTP server. With SMTP AUTH enabled senders who wish to
-relay mail through the SMTP server (that is, send mail that is eventually to be delivered to another SMTP
-server) must authenticate themselves to Apache James Server before sending their message. Mail that is to be delivered
-locally does not require authentication. This method ensures that spammers cannot use your SMTP server
-to send unauthorized mail, while still enabling users who may not have fixed IP addresses to send their
-messages.
-
-Mail servers that allow spammers to send unauthorized email are known as open relays. So SMTP AUTH
-is a mechanism for ensuring that your server is not an open relay.
-
-It is extremely important that your server not be configured as an open relay. Aside from potential
-costs associated with usage by spammers, connections from servers that are determined to be open relays
-are routinely rejected by SMTP servers. This can severely impede the ability of your mail server to
-send mail.
-
-At this time Apache James Server only supports simple user name / password authentication.
-
-As mentioned above, SMTP AUTH requires that Apache James Server be able to distinguish between mail intended
-for local delivery and mail intended for remote delivery. Apache James Server makes this determination by matching the
-domain to which the mail was sent against the *DomainList* component, configured by
-xref:distributed/configure/domainlist.adoc[*domainlist.xml*].
-
-The Distributed Server is configured out of the box so as to not serve as an open relay for spammers. This is done
-by relayed emails originate from a trusted source. This includes:
-
-* Authenticated SMTP/JMAP users
-* Mails generated by the server (eg: bounces)
-* Mails originating from a trusted network as configured in *smtpserver.xml*
-
-If you wish to ensure that authenticated users can only send email from their own account, you may
-optionally set the verifyIdentity element of the smtpserver configuration block to "true".
-
-=== Verification
-
-Verify that you have not inadvertently configured your server as an open relay. This is most easily
-accomplished by using the service provided at https://mxtoolbox.com/diagnostic.aspx[mxtoolbox.com]. mxtoolbox.com will
-check your mail server and inform you if it is an open relay. This tool further more verifies additional properties like:
-
-* Your DNS configuration, especially that you mail server IP has a valid reverse DNS entry
-* That your SMTP connection is secured
-* That you are not an OpenRelay
-* This website also allow a quick lookup to ensure your mail server is not in public blacklists.
-
-Of course it is also necessary to confirm that users and log in and send
-mail through your server. This can be accomplished using any standard mail client (i.e. Thunderbird, Outlook,
-Eudora, Evolution).
-
-== LMTP Configuration
-
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/lmtpserver.xml[example]
-to get some examples and hints.
-
-The configuration is the same of for SMTP.
-
-By default, it is deactivated. You can activate it alongside SMTP and bind for example on port 24.
-
-The default LMTP server stores directly emails in user mailboxes, without further treatment.
-
-However we do ship an alternative handler chain allowing to execute the mailet container, thus achieving a behaviour similar
-to the default SMTP protocol. Here is how to achieve this:
-
-....
-
-
- lmtpserver
- 0.0.0.0:24
- 200
- 1200
- 0
- 0
- 0
-
-
-
-
-
-....
-
-Note that by default the mailet container is executed with all recipients at once and do not allow per recipient
-error reporting. An option splitExecution allow to execute the mailet container for each recipient separately and mitigate this
-limitation at the cost of performance.
-
-....
-
-
- lmtpserver
- 0.0.0.0:24
- 200
- 1200
- 0
- 0
- 0
-
-
-
- true
-
-
-
-
-....
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+:server-name: Distributed James Server
+include::partial$configure/smtp.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/spam.adoc b/docs/modules/servers/pages/distributed/configure/spam.adoc
index 8a7839d6048..4b7dabd7972 100644
--- a/docs/modules/servers/pages/distributed/configure/spam.adoc
+++ b/docs/modules/servers/pages/distributed/configure/spam.adoc
@@ -1,190 +1,8 @@
= Distributed James Server — Anti-Spam configuration
:navtitle: Anti-Spam configuration
-Anti-Spam system can be configured via two main different mechanisms:
-
-* SMTP Hooks;
-* Mailets;
-
-== AntiSpam SMTP Hooks
-
-"FastFail" SMTP Hooks acts to reject before spooling
-on the SMTP level. The Spam detector hook can be used as a fastfail hook, therefore
-Spam filtering system must run as a server on the same machine as the Apache James Server.
-
-SMTP Hooks for non-existent users, DSN filter, domains with invalid MX record,
-can also be configured.
-
-*SpamAssassinHandler* (experimental) also enables to classify the messages as spam or not
-with a configurable score threshold (`0.0`, non-configurable). Only a global database is supported. Per user spam
-detection is not supported by this hook.
-
-== AntiSpam Mailets
-
-James' repository provide two AntiSpam mailets: SpamAssassin and RspamdScanner.
-We can select one in them for filtering spam mail.
-
-* *SpamAssassin and RspamdScanner* Mailet is designed to classify the messages as spam or not
-with a configurable score threshold. Usually a message will only be
-considered as spam if it matches multiple criteria; matching just a single test
-will not usually be enough to reach the threshold. Note that this mailet is executed on a per-user basis.
-
-=== Rspamd
-
-The Rspamd extension (optional) requires an extra configuration file `rspamd.properties` to configure RSpamd connection
-
-.rspamd.properties content
-|===
-| Property name | explanation
-
-| rSpamdUrl
-| URL defining the Rspamd's server. Eg: http://rspamd:11334
-
-| rSpamdPassword
-| Password for pass authentication when request to Rspamd's server. Eg: admin
-
-| rspamdTimeout
-| Integer. Timeout for http requests to Rspamd. Default to 15 seconds.
-
-| perUserBayes
-| Boolean. Whether to scan/learn mails using per-user Bayes. Default to false.
-|===
-
-`RspamdScanner` supports the following options:
-
-* You can specify the `virusProcessor` if you want to enable virus scanning for mail. Upon configurable `virusProcessor`
-you can specify how James process mail virus. We provide a sample Rspamd mailet and `virusProcessor` configuration:
-
-* You can specify the `rejectSpamProcessor`. Emails marked as `rejected` by Rspamd will be redirected to this
-processor. This corresponds to emails with the highest spam score, thus delivering them to users as marked as spam
-might not even be desirable.
-
-* The `rewriteSubject` option allows to rewritte subjects when asked by Rspamd.
-
-This mailet can scan mails against per-user Bayes by configure `perUserBayes` in `rspamd.properties`. This is achieved
-through the use of Rspamd `Deliver-To` HTTP header. If true, Rspamd will be called for each recipient of the mail, which comes at a performance cost. If true, subjects are not rewritten.
-If true `virusProcessor` and `rejectSpamProcessor` are honnered per user, at the cost of email copies. Default to false.
-
-Here is an example of mailet pipeline conducting out RspamdScanner execution:
-
-....
-
-
- true
- virus
- spam
-
-
- Spam
-
-
-
-
-
-
-
- file://var/mail/virus/
-
-
-
-
-
- all
- .*
-
-
- [VIRUS]
-
-
-
-
-
-
- cassandra://var/mail/spam
-
-
-....
-
-==== Feedback for Rspamd
-If enabled, the `RspamdListener` will base on the Mailbox event to detect the message is a spam or not, then James will send report `spam` or `ham` to Rspamd.
-This listener can report mails to per-user Bayes by configure `perUserBayes` in `rspamd.properties`.
-The Rspamd listener needs to explicitly be registered with xref:distributed/configure/listeners.adoc[listeners.xml].
-
-Example:
-
-....
-
-
- org.apache.james.rspamd.RspamdListener
-
-
-....
-
-For more detail about how to use Rspamd's extension: `third-party/rspamd/index.md`
-
-Alternatively, batch reports can be triggered on user mailbox content via webAdmin. link:https://github.com/apache/james-project/tree/master/third-party/rspamd#additional-webadmin-endpoints[Read more].
-
-
-=== SpamAssassin
-Here is an example of mailet pipeline conducting out SpamAssassin execution:
-
-....
-
- ignore
- spamassassin
- 783
-
-
-
- org.apache.james.spamassassin.status; X-JAMES-SPAMASSASSIN-STATUS
- org.apache.james.spamassassin.flag; X-JAMES-SPAMASSASSIN-FLAG
-
-
- Spam
-
-....
-
-* *BayesianAnalysis* (unsupported) in the Mailet uses Bayesian probability to classify mail as
-spam or not spam. It relies on the training data coming from the users’ judgment.
-Users need to manually judge as spam and send to spam@thisdomain.com, oppositely,
-if not spam they then send to not.spam@thisdomain.com. BayesianAnalysisfeeder learns
-from this training dataset, and build predictive models based on Bayesian probability.
-There will be a certain table for maintaining the frequency of Corpus for keywords
-in the database. Every 10 mins a thread in the BayesianAnalysis will check and update
-the table. Also, the correct approach is to send the original spam or non-spam
-as an attachment to another message sent to the feeder in order to avoid bias from the
-current sender's email header.
-
-==== Feedback for SpamAssassin
-
-If enabled, the `SpamAssassinListener` will asynchronously report users mails moved to the `Spam` mailbox as Spam,
-and other mails as `Ham`, effectively populating the user database for per user spam detection. This enables a per-user
-Spam categorization to be conducted out by the SpamAssassin mailet, the SpamAssassin hook being unaffected.
-
-The SpamAssassin listener requires an extra configuration file `spamassassin.properties` to configure SpamAssassin connection (optional):
-
-.spamassassin.properties content
-|===
-| Property name | explanation
-
-| spamassassin.host
-| Hostname of the SpamAssassin server. Defaults to 127.0.0.1.
-
-| spamassassin.port
-| Port of the SpamAssassin server. Defaults to 783.
-|===
-
-Note that this configuration file only affects the listener, and not the hook or mailet.
-
-The SpamAssassin listener needs to explicitly be registered with xref:distributed/configure/listeners.adoc[listeners.xml].
-
-Example:
-
-....
-
-
- org.apache.james.mailbox.spamassassin.SpamAssassinListener
- true
-
-
-....
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+:server-name: Distributed James Server
+:mailet-repository-path-prefix: cassandra
+include::partial$configure/spam.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/ssl.adoc b/docs/modules/servers/pages/distributed/configure/ssl.adoc
index b21a17fa3a8..f77590ec95a 100644
--- a/docs/modules/servers/pages/distributed/configure/ssl.adoc
+++ b/docs/modules/servers/pages/distributed/configure/ssl.adoc
@@ -1,247 +1,7 @@
= Distributed James Server — SSL & TLS configuration
:navtitle: SSL & TLS configuration
-This document explains how to enable James 3.0 servers to use Transport Layer Security (TLS)
-for encrypted client-server communication.
-
-== Configure a Server to Use SSL/TLS
-
-Each of the servers xref:distributed/configure/smtp.adoc[SMTP - LMTP],
-xref:distributed/configure/pop3.adoc[POP3] and xref:distributed/configure/imap.adoc[IMAP]
-supports use of SSL/TLS.
-
-TLS (Transport Layer Security) and SSL (Secure Sockets Layer) are protocols that provide
-data encryption and authentication between applications in scenarios where that data is
-being sent across an insecure network, such as checking your email
-(How does the Secure Socket Layer work?). The terms SSL and TLS are often used
-interchangeably or in conjunction with each other (TLS/SSL),
-but one is in fact the predecessor of the other — SSL 3.0 served as the basis
-for TLS 1.0 which, as a result, is sometimes referred to as SSL 3.1.
-
-You need to add a block in the corresponding configuration file (smtpserver.xml, pop3server.xml, imapserver.xml,..)
-
-....
-
- file://conf/keystore
- PKCS12
- yoursecret
- org.bouncycastle.jce.provider.BouncyCastleProvider
-
-....
-
-Alternatively TLS keys can be supplied via PEM files:
-
-....
-
- file://conf/private.key
- file://conf/certs.self-signed.csr
-
-....
-
-An optional secret might be specified for the private key:
-
-....
-
- file://conf/private.key
- file://conf/certs.self-signed.csr
- yoursecret
-
-....
-
-Optionally, TLS protocols and/or cipher suites can be specified explicitly (smtpserver.xml, pop3server.xml, imapserver.xml,..).
-Otherwise, the default protocols and cipher suites of the used JDK will be used:
-....
-
-
- TLSv1.2
- TLSv1.1
- TLSv1
- SSLv3
-
-
- TLS_AES_256_GCM_SHA384
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
-
-
-....
-
-Each of these block has an optional boolean configuration element socketTLS and startTLS which is used to toggle
-use of SSL or TLS for the service.
-
-With socketTLS (SSL/TLS in Thunderbird), all the communication is encrypted.
-
-With startTLS (STARTTLS in Thunderbird), the preamble is readable, but the rest is encrypted.
-
-....
-* OK JAMES IMAP4rev1 Server Server 192.168.1.4 is ready.
-* CAPABILITY IMAP4rev1 LITERAL+ CHILDREN WITHIN STARTTLS IDLE NAMESPACE UIDPLUS UNSELECT AUTH=PLAIN
-1 OK CAPABILITY completed.
-2 OK STARTTLS Begin TLS negotiation now.
-... rest is encrypted...
-....
-
-You can only enable one of the both at the same time for a service.
-
-It is also recommended to change the port number on which the service will listen:
-
-* POP3 - port 110, Secure POP3 - port 995
-* IMAP - port 143, Secure IMAP4 - port 993
-* SMTP - port 25, Secure SMTP - port 465
-
-You will now need to create your certificate store and place it in the james/conf/ folder with the name you defined in the keystore tag.
-
-Please note `JKS` keystore format is also supported (default value if no keystore type is specified):
-
-....
-
- file://conf/keystore
- JKS
- yoursecret
- org.bouncycastle.jce.provider.BouncyCastleProvider
-
-....
-
-
-=== Client authentication via certificates
-
-When you enable TLS, you may also configure the server to require a client certificate for authentication:
-
-....
-
- file://conf/keystore
- JKS
- yoursecret
-
-
- file://conf/truststore
- JKS
- yoursecret
- false
-
-
-....
-
-James verifies client certificates against the provided truststore. You can fill it with trusted peer certificates directly, or an issuer certificate (CA) if you trust all certificates created by it. If you omit the truststore configuration, James will use the Java default truststore instead, effectively trusting any known CA.
-
-James can optionally enable OCSP verifications for client certificates against Certificate Revocation List referenced
-in the certificate itself.
-
-== Creating your own PEM keys
-
-The following commands can be used to create self signed PEM keys:
-
-....
-# Generating your private key
-openssl genrsa -des3 -out private.key 2048
-
-# Creating your certificates
-openssl req -new -key private.key -out certs.csr
-
-# Signing the certificate yourself
-openssl x509 -req -days 365 -in certs.csr -signkey private.key -out certs.self-signed.csr
-
-# Removing the password from the private key
-# Not necessary if you supply the secret in the configuration
-openssl rsa -in private.key -out private.nopass.key
-....
-
-You may then supply this TLS configuration:
-
-....
-
- file://conf/private.nopass.key
- file://conf/certs.self-signed.csr
-
-....
-
-== Certificate Keystores
-
-This section gives more indication for users relying on keystores.
-
-=== Creating your own Certificate Keystore
-
-(Adapted from the Tomcat 4.1 documentation)
-
-James currently operates only on JKS or PKCS12 format keystores. This is Java's standard "Java KeyStore" format, and is
-the format created by the keytool command-line utility. This tool is included in the JDK.
-
-To import an existing certificate into a JKS keystore, please read the documentation (in your JDK documentation package)
-about keytool.
-
-To create a new keystore from scratch, containing a single self-signed Certificate, execute the following from a terminal
-command line:
-
-....
-keytool -genkey -alias james -keyalg RSA -storetype PKCS12 -keystore your_keystore_filename
-....
-
-(The RSA algorithm should be preferred as a secure algorithm, and this also ensures general compatibility with other
-servers and components.)
-
-As a suggested standard, create the keystore in the james/conf directory, with a name like james.keystore.
-
-After executing this command, you will first be prompted for the keystore password.
-
-Next, you will be prompted for general information about this Certificate, such as company, contact name, and so on.
-This information may be displayed to users when importing into the certificate store of the client, so make sure that
-the information provided here matches what they will expect.
-
-Important: in the "distinguished name", set the "common name" (CN) to the DNS name of your James server, the one
-you will use to access it from your mail client (like "mail.xyz.com").
-
-Finally, you will be prompted for the key password, which is the password specifically for this Certificate
-(as opposed to any other Certificates stored in the same keystore file).
-
-If everything was successful, you now have a keystore file with a Certificate that can be used by your server.
-
-You MUST have only one certificate in the keystore file used by James.
-
-=== Installing a Certificate provided by a Certificate Authority
-
-(Adapted from the Tomcat 4.1 documentation
-
-To obtain and install a Certificate from a Certificate Authority (like verisign.com, thawte.com or trustcenter.de)
-you should have read the previous section and then follow these instructions:
-
-==== Create a local Certificate Signing Request (CSR)
-
-In order to obtain a Certificate from the Certificate Authority of your choice you have to create a so called
-Certificate Signing Request (CSR). That CSR will be used by the Certificate Authority to create a Certificate
-that will identify your James server as "secure". To create a CSR follow these steps:
-
-* Create a local Certificate as described in the previous section.
-
-The CSR is then created with:
-
-....
- keytool -certreq -keyalg RSA -alias james -file certreq.csr -keystore your_keystore_filename
-....
-
-Now you have a file called certreq.csr. The file is encoded in PEM format. You can submit it to the Certificate Authority
-(look at the documentation of the Certificate Authority website on how to do this). In return you get a Certificate.
-
-Now that you have your Certificate you can import it into you local keystore. First of all you may have to import a so
-called Chain Certificate or Root Certificate into your keystore (the major Certificate Authorities are already in place,
-so it's unlikely that you will need to perform this step). After that you can procede with importing your Certificate.
-
-==== Optionally Importing a so called Chain Certificate or Root Certificate
-
-Download a Chain Certificate from the Certificate Authority you obtained the Certificate from.
-
-* For Verisign.com go to: http://www.verisign.com/support/install/intermediate.html
-* For Trustcenter.de go to: http://www.trustcenter.de/certservices/cacerts/en/en.htm#server
-* For Thawte.com go to: http://www.thawte.com/certs/trustmap.html (seems no longer valid)
-
-==== Import the Chain Certificate into you keystore
-
-....
-keytool -import -alias root -keystore your_keystore_filename -trustcacerts -file filename_of_the_chain_certificate
-....
-
-And finally import your new Certificate (It must be in X509 format):
-
-....
-keytool -import -alias james -keystore your_keystore_filename -trustcacerts -file your_certificate_filename
-....
-
-See also http://www.agentbob.info/agentbob/79.html[this page]
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+:server-name: Distributed James Server
+include::partial$configure/ssl.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/tika.adoc b/docs/modules/servers/pages/distributed/configure/tika.adoc
index fdb2cc9cf7a..604b31e4865 100644
--- a/docs/modules/servers/pages/distributed/configure/tika.adoc
+++ b/docs/modules/servers/pages/distributed/configure/tika.adoc
@@ -1,51 +1,5 @@
= Distributed James Server — tika.properties
:navtitle: tika.properties
-When using OpenSearch, you can configure an external Tika server for extracting and indexing text from attachments.
-Thus you can significantly improve user experience upon text searches.
-
-Note: You can launch a tika server using this command line:
-
-....
-docker run --name tika linagora/docker-tikaserver:1.24
-....
-
-Here are the different properties:
-
-.tika.properties content
-|===
-| Property name | explanation
-
-| tika.enabled
-| Should Tika text extractor be used?
-If true, the TikaTextExtractor will be used behind a cache.
-If false, the DefaultTextExtractor will be used (naive implementation only supporting text).
-Defaults to false.
-
-| tika.host
-| IP or domain name of your Tika server. The default value is 127.0.0.1
-
-| tika.port
-| Port of your tika server. The default value is 9998
-
-| tika.timeoutInMillis
-| Timeout when issuing request to the tika server. The default value is 3 seconds.
-
-| tika.cache.eviction.period
-| A cache is used to avoid, when possible, query Tika multiple time for the same attachments.
-This entry determines how long after the last read an entry vanishes.
-Please note that units are supported (ms - millisecond, s - second, m - minute, h - hour, d - day). Default unit is seconds.
-Default value is *1 day*
-
-| tika.cache.enabled
-| Should the cache be used? False by default
-
-| tika.cache.weight.max
-| Maximum weight of the cache.
-A value of *0* disables the cache
-Please note that units are supported (K for KB, M for MB, G for GB). Defaults is no units, so in bytes.
-Default value is *100 MB*.
-
-| tika.contentType.blacklist
-| Blacklist of content type is known-to-be-failing with Tika. Specify the list with comma separator.
-|===
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+include::partial$configure/tika.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/usersrepository.adoc b/docs/modules/servers/pages/distributed/configure/usersrepository.adoc
index ff07f7929e3..d4cef0a23f7 100644
--- a/docs/modules/servers/pages/distributed/configure/usersrepository.adoc
+++ b/docs/modules/servers/pages/distributed/configure/usersrepository.adoc
@@ -1,136 +1,5 @@
= Distributed James Server — usersrepository.xml
:navtitle: usersrepository.xml
-User repositories are required to store James user information and authentication data.
-
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/usersrepository.xml[example]
-to get some examples and hints.
-
-== The user data model
-
-A user has two attributes: username and password.
-
-A valid user should satisfy these criteria:
-
-* username and password cannot be null or empty
-* username should not be longer than 255 characters
-* username can not contain '/'
-* username can not contain multiple domain delimiter('@')
-* A username can have only a local part when virtualHosting is disabled. E.g.'myUser'
-* When virtualHosting is enabled, a username should have a domain part, and the domain part should be concatenated
-after a domain delimiter('@'). E.g. 'myuser@james.org'
-
-A user is always considered as lower cased, so 'myUser' and 'myuser' are the same user, and can be used as well as
-recipient local part than as login for different protocols.
-
-== Configuration
-
-.usersrepository.xml content
-|===
-| Property name | explanation
-
-| enableVirtualHosting
-| true or false. Add domain support for users (default: false, except for Cassandra Users Repository)
-
-| administratorId
-|user's name. Allow a user to access to the https://tools.ietf.org/html/rfc4616#section-2[impersonation command],
-acting on the behalf of any user.
-
-| verifyFailureDelay
-| Delay after a failed authentication attempt with an invalid user name or password. Duration string defaulting to seconds, e.g. `2`, `2s`, `2000ms`. Default `0s` (disabled).
-
-| algorithm
-| use a specific hash algorithm to compute passwords, with optional mode `plain` (default) or `salted`; e.g. `SHA-512`, `SHA-512/plain`, `SHA-512/salted`, `PBKDF2`, `PBKDF2-SHA512` (default).
-Note: When using `PBKDF2` or `PBKDF2-SHA512` one can specify the iteration count and the key size in bytes. You can specify it as part of the algorithm. EG: `PBKDF2-SHA512-2000-512` will use
-2000 iterations with a key size of 512 bytes.
-
-| hashingMode
-| specify the hashing mode to use if there is none recorded in the database: `plain` (default) for newer installations or `legacy` for older ones
-
-|===
-
-== Configuring a LDAP
-
-Alternatively you can authenticate your users against a LDAP server. You need to configure
-the properties for accessing your LDAP server in this file.
-
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/usersrepository.xml[example]
-to get some examples and hints.
-
-Example:
-
-....
-
- true
-
-....
-
-SSL can be enabled by using `ldaps` scheme. `trustAllCerts` option can be used to trust all LDAP client certificates
-(optional, defaults to false).
-
-Example:
-
-....
-
- true
-
-....
-
-Moreover, per domain base DN can be configured:
-
-....
-true
-
- ou=People,o=other.com,ou=system
-
-
-....
-
-You can connect to multiple LDAP servers for better availability by using `ldapHosts` option (fallback to `ldapHost` is supported) to specify the list of LDAP Server URL with the comma `,` delimiter. We do support different schemas for LDAP servers.
-
-Example:
-
-....
-
- true
-
-....
-
-When VirtualHosting is on, you can enable local part as login username by configure the `resolveLocalPartAttribute`.
-This is the LDAP attribute that allows to retrieve the local part of users. Optional, default to empty, which disables login with local part as username.
-
-Example:
-
-....
-
- true
-
-....
-
-The "userListBase" configuration option is used to differentiate users that can login from those that are listed
- as regular users. This is useful for dis-activating users, for instance.
-
-A different values from "userBase" can be used for setting up virtual logins,
-for instance in conjunction with "resolveLocalPartAttribute". This can also be used to manage
-disactivated users (in "userListBase" but not in "userBase").
-
-Note that "userListBase" can not be specified on a per-domain-basis.
-
-=== LDAP connection pool size tuning
-
-Apache James offers some options for configuring the LDAP connection pool used by unboundid:
-
-* *poolSize*: (optional, default = 4) The maximum number of connection in the pool. Note that if the pool is exhausted,
-extra connections will be created on the fly as needed.
-* *maxWaitTime*: (optional, default = 1000) the number of milli seconds to wait before creating off-pool connections,
-using a pool connection if released in time. This effectively smooth out traffic burst, thus in some case can help
-not overloading the LDAP
-* *connectionTimeout:* (optional) Sets the connection timeout on the underlying to the specified integer value
-* *readTimeout:* (optional) Sets property the read timeout to the specified integer value.
\ No newline at end of file
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+include::partial$configure/usersrepository.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/vault.adoc b/docs/modules/servers/pages/distributed/configure/vault.adoc
index 2f7a4836a8d..97ee4a32476 100644
--- a/docs/modules/servers/pages/distributed/configure/vault.adoc
+++ b/docs/modules/servers/pages/distributed/configure/vault.adoc
@@ -1,38 +1,8 @@
= Distributed James Server — deletedMessageVault.properties
:navtitle: deletedMessageVault.properties
-Deleted Messages Vault is the component in charge of retaining messages before they are going to be deleted.
-Messages stored in the Deleted Messages Vault could be deleted after exceeding their retentionPeriod (explained below).
-It also supports to restore or export messages matching with defined criteria in
-xref:distributed/operate/webadmin.adoc#_deleted_messages_vault[WebAdmin deleted messages vault document] by using
-xref:distributed/operate/webadmin.adoc#_deleted_messages_vault[WebAdmin endpoints].
-
-== Deleted Messages Vault Configuration
-
-Once the vault is active, James will start moving deleted messages to it asynchronously.
-
-The Deleted Messages Vault also stores and manages deleted messages into a BlobStore. The BlobStore can be either
-based on an object storage or on Cassandra. For configuring the BlobStore the vault will use, you can look at
-xref:distributed/configure/blobstore.adoc[*blobstore.properties*] BlobStore Configuration section.
-
-== deletedMessageVault.properties
-
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/deletedMessageVault.properties[example]
-to get some examples and hints.
-
-.deletedMessageVault.properties content
-|===
-| Property name | explanation
-
-| enabled
-| Allows to enable or disable usage of the Deleted Message Vault. Default to false.
-
-| workQueueEnabled
-| Enable work queue to be used with deleted message vault. Default to false.
-
-| retentionPeriod
-| Deleted messages stored in the Deleted Messages Vault are expired after this period (default: 1 year). It can be expressed in *y* years, *d* days, *h* hours, ...
-
-| restoreLocation
-| Messages restored from the Deleted Messages Vault are placed in a mailbox with this name (default: ``Restored-Messages``). The mailbox will be created if it does not exist yet.
-|===
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+:server-name: Distributed James Server
+:backend-name: Cassandra
+include::partial$configure/vault.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/configure/webadmin.adoc b/docs/modules/servers/pages/distributed/configure/webadmin.adoc
index 767f4fca47b..13393213f99 100644
--- a/docs/modules/servers/pages/distributed/configure/webadmin.adoc
+++ b/docs/modules/servers/pages/distributed/configure/webadmin.adoc
@@ -1,100 +1,7 @@
= Distributed James Server — webadmin.properties
:navtitle: webadmin.properties
-The web administration supports for now the CRUD operations on the domains, the users, their mailboxes and their quotas,
-managing mail repositories, performing cassandra migrations, and much more, as described in the following sections.
-
-*WARNING*: This API allows authentication only via the use of JWT. If not
-configured with JWT, an administrator should ensure an attacker can not
-use this API.
-
-By the way, some endpoints are not filtered by authentication. Those endpoints are not related to data stored in James,
-for example: Swagger documentation & James health checks.
-
-== Configuration
-
-Consult this link:https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/webadmin.properties[example]
-to get some examples and hints.
-
-.webadmin.properties content
-|===
-| Property name | explanation
-
-| enabled
-| Define if WebAdmin is launched (default: false)
-
-| port
-| Define WebAdmin's port (default: 8080)
-
-| host
-| Define WebAdmin's host (default: localhost, use 0.0.0.0 to listen on all addresses)
-
-| cors.enable
-| Allow the Cross-origin resource sharing (default: false)
-
-| cors.origin
-| Specify ths CORS origin (default: null)
-
-| jwt.enable
-| Allow JSON Web Token as an authentication mechanism (default: false)
-
-| https.enable
-| Use https (default: false)
-
-| https.keystore
-| Specify a keystore file for https (default: null)
-
-| https.password
-| Specify the keystore password (default: null)
-
-| https.trust.keystore
-| Specify a truststore file for https (default: null)
-
-| https.trust.password
-| Specify the truststore password (default: null)
-
-| jwt.publickeypem.url
-| Optional. JWT tokens allow request to bypass authentication. Path to the JWT public key.
-Defaults to the `jwt.publickeypem.url` value of `jmap.properties` file if unspecified
-(legacy behaviour)
-
-| extensions.routes
-| List of Routes specified as fully qualified class name that should be loaded in addition to your product routes list. Routes
-needs to be on the classpath or in the ./extensions-jars folder. Read mode about
-xref:customization:webadmin-routes.adoc[creating you own webadmin routes].
-
-| maxThreadCount
-| Maximum threads used by the underlying Jetty server. Optional.
-
-| minThreadCount
-| Minimum threads used by the underlying Jetty server. Optional.
-
-|===
-
-== Generating a JWT key pair
-
-The Distributed server enforces the use of RSA-SHA-256.
-
-One can use OpenSSL to generate a JWT key pair :
-
- # private key
- openssl genrsa -out rs256-4096-private.rsa 4096
- # public key
- openssl rsa -in rs256-4096-private.rsa -pubout > rs256-4096-public.pem
-
-The private key can be used to generate JWT tokens, for instance
-using link:https://github.com/vandium-io/jwtgen[jwtgen]:
-
- jwtgen -a RS256 -p rs256-4096-private.rsa 4096 -c "sub=bob@domain.tld" -c "admin=true" -e 3600 -V
-
-This token can then be passed as `Bearer` of the `Authorization` header :
-
- curl -H "Authorization: Bearer $token" -XGET http://127.0.0.1:8000/domains
-
-The public key can be referenced as `jwt.publickeypem.url` of the `jmap.properties` configuration file.
-
-== Reverse-proxy set up
-
-WebAdmin adds the value of `X-Real-IP` header as part of the logging MDC.
-
-This allows for reverse proxies to cary other the IP address of the client down to the JMAP server for diagnostic purpose.
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:pages-path: distributed
+:server-name: Distributed James Server
+include::partial$configure/webadmin.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/operate/cli.adoc b/docs/modules/servers/pages/distributed/operate/cli.adoc
index 5bd2a2dded6..b312310244f 100644
--- a/docs/modules/servers/pages/distributed/operate/cli.adoc
+++ b/docs/modules/servers/pages/distributed/operate/cli.adoc
@@ -1,335 +1,6 @@
= Distributed James Server — Command Line Interface
:navtitle: Command Line Interface
-The distributed server is packed with a command line client.
-
-To run this command line client simply execute:
-
-....
-java -jar /root/james-cli.jar -h 127.0.0.1 -p 9999 COMMAND
-....
-
-The following document will explain you which are the available options
-for *COMMAND*.
-
-Note: the above command line before *COMMAND* will be documented as _\{cli}_.
-
-== Manage Domains
-
-Domains represent the domain names handled by your server.
-
-You can add a domain:
-
-....
-{cli} AddDomain domain.tld
-....
-
-You can remove a domain:
-
-....
-{cli} RemoveDomain domain.tld
-....
-
-(Note: associated users are not removed automatically)
-
-Check if a domain is handled:
-
-....
-{cli} ContainsDomain domain.tld
-....
-
-And list your domains:
-
-....
-{cli} ListDomains
-....
-
-== Managing users
-
-Note: the following commands are explained with virtual hosting turned
-on.
-
-Users are accounts on the mail server. James can maintain mailboxes for
-them.
-
-You can add a user:
-
-....
-{cli} AddUser user@domain.tld password
-....
-
-Note: the domain used should have been previously created.
-
-You can delete a user:
-
-....
-{cli} RemoveUser user@domain.tld
-....
-
-(Note: associated mailboxes are not removed automatically)
-
-And change a user password:
-
-....
-{cli} SetPassword user@domain.tld password
-....
-
-Note: All these write operations can not be performed on LDAP backend,
-as the implementation is read-only.
-
-Finally, you can list users:
-
-....
-{cli} ListUsers
-....
-
-=== Virtual hosting
-
-James supports virtualhosting.
-
-* If set to true in the configuration, then the username is the full
-mail address.
-
-The domains then become a part of the user.
-
-_usera@domaina.com and_ _usera@domainb.com_ on a mail server with
-_domaina.com_ and _domainb.com_ configured are mail addresses that
-belongs to different users.
-
-* If set to false in the configurations, then the username is the mail
-address local part.
-
-It means that a user is automatically created for all the domains
-configured on your server.
-
-_usera@domaina.com and_ _usera@domainb.com_ on a mail server with
-_domaina.com_ and _domainb.com_ configured are mail addresses that
-belongs to the same users.
-
-Here are some sample commands for managing users when virtual hosting is
-turned off:
-
-....
-{cli} AddUser user password
-{cli} RemoveUser user
-{cli} SetPassword user password
-....
-
-== Managing mailboxes
-
-An administrator can perform some basic operation on user mailboxes.
-
-Note on mailbox formatting: mailboxes are composed of three parts.
-
-* The namespace, indicating what kind of mailbox it is. (Shared or
-not?). The value for users mailboxes is #private . Note that for now no
-other values are supported as James do not support shared mailboxes.
-* The username as stated above, depending on the virtual hosting value.
-* And finally mailbox name. Be aware that `.' serves as mailbox
-hierarchy delimiter.
-
-An administrator can delete all of the mailboxes of a user, which is not
-done automatically when removing a user (to avoid data loss):
-
-....
-{cli} DeleteUserMailboxes user@domain.tld
-....
-
-He can delete a specific mailbox:
-
-....
-{cli} DeleteMailbox #private user@domain.tld INBOX.toBeDeleted
-....
-
-He can list the mailboxes of a specific user:
-
-....
-{cli} ListUserMailboxes user@domain.tld
-....
-
-And finally can create a specific mailbox:
-
-....
-{cli} CreateMailbox #private user@domain.tld INBOX.newFolder
-....
-
-== Adding a message in a mailbox
-
-The administrator can use the CLI to add a message in a mailbox. this
-can be done using:
-
-....
-{cli} ImportEml #private user@domain.tld INBOX.newFolder /full/path/to/file.eml
-....
-
-This command will add a message having the content specified in file.eml
-(that needs to be at the EML format). It will get added in the
-INBOX.subFolder mailbox belonging to user user@domain.tld.
-
-== Managing mappings
-
-A mapping is a recipient rewriting rule. There is several kind of
-rewriting rules:
-
-* address mapping: rewrite a given mail address into an other one.
-* regex mapping.
-
-You can manage address mapping like (redirects email from
-fromUser@fromDomain.tld to redirected@domain.new, then deletes the
-mapping):
-
-....
-{cli} AddAddressMapping fromUser fromDomain.tld redirected@domain.new
-{cli} RemoveAddressMapping fromUser fromDomain.tld redirected@domain.new
-....
-
-You can manage regex mapping like this:
-
-....
-{cli} AddRegexMapping redirected domain.new .*@domain.tld
-{cli} RemoveRegexMapping redirected domain.new .*@domain.tld
-....
-
-You can view mapping for a mail address:
-
-....
-{cli} ListUserDomainMappings user domain.tld
-....
-
-And all mappings defined on the server:
-
-....
-{cli} ListMappings
-....
-
-== Manage quotas
-
-Quotas are limitations on a group of mailboxes. They can limit the
-*size* or the *messages count* in a group of mailboxes.
-
-James groups by defaults mailboxes by user (but it can be overridden),
-and labels each group with a quotaroot.
-
-To get the quotaroot a given mailbox belongs to:
-
-....
-{cli} GetQuotaroot #private user@domain.tld INBOX
-....
-
-Then you can get the specific quotaroot limitations.
-
-For the number of messages:
-
-....
-{cli} GetMessageCountQuota quotaroot
-....
-
-And for the storage space available:
-
-....
-{cli} GetStorageQuota quotaroot
-....
-
-You see the maximum allowed for these values:
-
-For the number of messages:
-
-....
-{cli} GetMaxMessageCountQuota quotaroot
-....
-
-And for the storage space available:
-
-....
-{cli} GetMaxStorageQuota quotaroot
-....
-
-You can also specify maximum for these values.
-
-For the number of messages:
-
-....
-{cli} SetMaxMessageCountQuota quotaroot value
-....
-
-And for the storage space available:
-
-....
-{cli} SetMaxStorageQuota quotaroot value
-....
-
-With value being an integer. Please note the use of units for storage
-(K, M, G). For instance:
-
-....
-{cli} SetMaxStorageQuota someone@apache.org 4G
-....
-
-Moreover, James allows to specify global maximum values, at the server
-level. Note: syntax is similar to what was exposed previously.
-
-....
-{cli} SetGlobalMaxMessageCountQuota value
-{cli} GetGlobalMaxMessageCountQuota
-{cli} SetGlobalMaxStorageQuota value
-{cli} GetGlobalMaxStorageQuota
-....
-
-== Re-indexing
-
-James allow you to index your emails in a search engine, for making
-search faster.
-
-For some reasons, you might want to re-index your mails (inconsistencies
-across datastore, migrations).
-
-To re-index all mails of all mailboxes of all users, type:
-
-....
-{cli} ReindexAll
-....
-
-And for a specific mailbox:
-
-....
-{cli} Reindex #private user@domain.tld INBOX
-....
-
-== Sieve scripts quota
-
-James implements Sieve (RFC-5228). Your users can then write scripts
-and upload them to the server. Thus they can define the desired behavior
-upon email reception. James defines a Sieve mailet for this, and stores
-Sieve scripts. You can update them via the ManageSieve protocol, or via
-the ManageSieveMailet.
-
-You can define quota for the total size of Sieve scripts, per user.
-
-Syntax is similar to what was exposed for quotas. For defaults values:
-
-....
-{cli} GetSieveQuota
-{cli} SetSieveQuota value
-{cli} RemoveSieveQuota
-....
-
-And for specific user quotas:
-
-....
-{cli} GetSieveUserQuota user@domain.tld
-{cli} SetSieveQuota user@domain.tld value
-{cli} RemoveSieveUserQuota user@domain.tld
-....
-
-== Switching of mailbox implementation
-
-Migration is experimental for now. You would need to customize *Spring*
-configuration to add a new mailbox manager with a different bean name.
-
-You can then copy data across mailbox managers using:
-
-....
-{cli} CopyMailbox srcBean dstBean
-....
-
-You will then need to reconfigure James to use the new mailbox manager.
\ No newline at end of file
+:xref-base: distributed
+:server-name: Distributed James Server
+include::partial$operate/cli.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/operate/guide.adoc b/docs/modules/servers/pages/distributed/operate/guide.adoc
index d286ca8b453..7ecc8456a25 100644
--- a/docs/modules/servers/pages/distributed/operate/guide.adoc
+++ b/docs/modules/servers/pages/distributed/operate/guide.adoc
@@ -1,201 +1,12 @@
= Distributed James Server — Operator guide
:navtitle: Operator guide
-This guide aims to be an entry-point to the James documentation for user
-managing a distributed Guice James server.
-
-It includes:
-
-* Simple architecture explanations
-* Propose some diagnostics for some common issues
-* Present procedures that can be set up to address these issues
-
-In order to not duplicate information, existing documentation will be
-linked.
-
-Please note that this product is under active development, should be
-considered experimental and thus targets advanced users.
-
-== Basic Monitoring
-
-A toolbox is available to help an administrator diagnose issues:
-
-* xref:distributed/operate/logging.adoc[Structured logging into Kibana]
-* xref:distributed/operate/metrics.adoc[Metrics graphs into Grafana]
-* xref:distributed/operate/webadmin.adoc#_healthcheck[WebAdmin HealthChecks]
-
-== Mail processing
-
-Currently, an administrator can monitor mail processing failure through `ERROR` log
-review. We also recommend watching in Kibana INFO logs using the
-`org.apache.james.transport.mailets.ToProcessor` value as their `logger`. Metrics about
-mail repository size, and the corresponding Grafana boards are yet to be contributed.
-
-Furthermore, given the default mailet container configuration, we recommend monitoring
-`cassandra://var/mail/error/` to be empty.
-
-WebAdmin exposes all utilities for
-xref:distributed/operate/webadmin.adoc#_reprocessing_mails_from_a_mail_repository[reprocessing
-all mails in a mail repository] or
-xref:distributed/operate/webadmin.adoc#_reprocessing_a_specific_mail_from_a_mail_repository[reprocessing
-a single mail in a mail repository].
-
-In order to prevent unbounded processing that could consume unbounded resources. We can provide a CRON with `limit` parameter.
-Ex: 10 reprocessed per minute
-Note that it only support the reprocessing all mails.
-
-Also, one can decide to
-xref:distributed/operate/webadmin.adoc#_removing_all_mails_from_a_mail_repository[delete
-all the mails of a mail repository] or
-xref:distributed/operate/webadmin.adoc#_removing_a_mail_from_a_mail_repository[delete
-a single mail of a mail repository].
-
-Performance of mail processing can be monitored via the
-https://github.com/apache/james-project/blob/d2cf7c8e229d9ed30125871b3de5af3cb1553649/server/grafana-reporting/es-datasource/MAILET-1490071694187-dashboard.json[mailet
-grafana board] and
-https://github.com/apache/james-project/blob/d2cf7c8e229d9ed30125871b3de5af3cb1553649/server/grafana-reporting/es-datasource/MATCHER-1490071813409-dashboard.json[matcher
-grafana board].
-
-=== Recipient rewriting
-
-Given the default configuration, errors (like loops) uopn recipient rewritting will lead
-to emails being stored in `cassandra://var/mail/rrt-error/`.
-
-We recommend monitoring the content of this mail repository to be empty.
-
-If it is not empty, we recommend
-verifying user mappings via xref:distributed/operate/webadmin.adoc#_listing_user_mappings_[User Mappings webadmin API] then once identified break the loop by removing
-some Recipient Rewrite Table entry via the
-xref:distributed/operate/webadmin.adoc#_removing_an_alias_of_an_user[Delete Alias],
-xref:distributed/operate/webadmin.adoc#_removing_a_group_member[Delete Group member],
-xref:distributed/operate/webadmin.adoc#_removing_a_destination_of_a_forward[Delete forward],
-xref:distributed/operate/webadmin.adoc#_remove_an_address_mapping[Delete Address mapping],
-xref:distributed/operate/webadmin.adoc#_removing_a_domain_mapping[Delete Domain mapping]
-or xref:distributed/operate/webadmin.adoc#_removing_a_regex_mapping[Delete Regex mapping]
-APIs (as needed).
-
-The `Mail.error` field can help diagnose the issue as well. Then once
-the root cause has been addressed, the mail can be reprocessed.
-
-== Mailbox Event Bus
-
-It is possible for the administrator of James to define the mailbox
-listeners he wants to use, by adding them in the
-https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/listeners.xml[listeners.xml]
-configuration file. It’s possible also to add your own custom mailbox
-listeners. This enables to enhance capabilities of James as a Mail
-Delivery Agent. You can get more information about those
-link:config-listeners.html[here].
-
-Currently, an administrator can monitor listeners failures through
-`ERROR` log review. Metrics regarding mailbox listeners can be monitored
-via
-https://github.com/apache/james-project/blob/d2cf7c8e229d9ed30125871b3de5af3cb1553649/server/grafana-reporting/es-datasource/MailboxListeners-1528958667486-dashboard.json[mailbox_listeners
-grafana board] and
-https://github.com/apache/james-project/blob/d2cf7c8e229d9ed30125871b3de5af3cb1553649/server/grafana-reporting/es-datasource/MailboxListeners%20rate-1552903378376.json[mailbox_listeners_rate
-grafana board].
-
-Upon exceptions, a bounded number of retries are performed (with
-exponential backoff delays). If after those retries the listener is
-still failing to perform its operation, then the event will be stored in
-the xref:distributed/operate/webadmin.adoc#_event_dead_letter[Event Dead Letter]. This
-API allows diagnosing issues, as well as redelivering the events.
-
-To check that you have undelivered events in your system, you can first
-run the associated with
-xref:distributed/operate/webadmin.adoc#_healthcheck[event dead letter health check] .
-You can explore Event DeadLetter content through WebAdmin. For
-this, xref:distributed/operate/webadmin.adoc#_listing_mailbox_listener_groups[list mailbox listener groups]
-you will get a list of groups back, allowing
-you to check if those contain registered events in each by
-xref:distributed/operate/webadmin.adoc#_listing_failed_events[listing their failed events].
-
-If you get failed events IDs back, you can as well
-xref:distributed/operate/webadmin.adoc#_getting_event_details[check their details].
-
-An easy way to solve this is just to trigger then the
-xref:distributed/operate/webadmin.adoc#_redeliver_all_events[redeliver all events]
-task. It will start reprocessing all the failed events registered in
-event dead letters.
-
-In order to prevent unbounded processing that could consume unbounded resources. We can provide a CRON with `limit` parameter.
-Ex: 10 redelivery per minute
-
-If for some other reason you don’t need to redeliver all events, you
-have more fine-grained operations allowing you to
-xref:distributed/operate/webadmin.adoc#_redeliver_group_events[redeliver group events]
-or even just
-xref:distributed/operate/webadmin.adoc#_redeliver_a_single_event[redeliver a single event].
-
-== OpenSearch Indexing
-
-A projection of messages is maintained in OpenSearch via a listener
-plugged into the mailbox event bus in order to enable search features.
-
-You can find more information about OpenSearch configuration
-link:config-opensearch.html[here].
-
-=== Usual troubleshooting procedures
-
-As explained in the link:#_mailbox_event_bus[Mailbox Event Bus] section,
-processing those events can fail sometimes.
-
-Currently, an administrator can monitor indexation failures through
-`ERROR` log review. You can as well
-xref:distributed/operate/webadmin.adoc#_listing_failed_events[list failed events] by
-looking with the group called
-`org.apache.james.mailbox.opensearch.events.OpenSearchListeningMessageSearchIndex$OpenSearchListeningMessageSearchIndexGroup`.
-A first on-the-fly solution could be to just
-link:#_mailbox_event_bus[redeliver those group events with event dead letter].
-
-If the event storage in dead-letters fails (for instance in the face of
-Cassandra storage exceptions), then you might need to use our WebAdmin
-reIndexing tasks.
-
-From there, you have multiple choices. You can
-xref:distributed/operate/webadmin.adoc#_reindexing_all_mails[reIndex all mails],
-xref:distributed/operate/webadmin.adoc#_reindexing_a_mailbox_mails[reIndex mails from a mailbox] or even just
-xref:distributed/operate/webadmin.adoc#_reindexing_a_single_mail_by_messageid[reIndex a single mail].
-
-When checking the result of a reIndexing task, you might have failed
-reprocessed mails. You can still use the task ID to
-xref:distributed/operate/webadmin.adoc#_fixing_previously_failed_reindexing[reprocess previously failed reIndexing mails].
-
-=== On the fly OpenSearch Index setting update
-
-Sometimes you might need to update index settings. Cases when an
-administrator might want to update index settings include:
-
-* Scaling out: increasing the shard count might be needed.
-* Changing string analysers, for instance to target another language
-* etc.
-
-In order to achieve such a procedure, you need to:
-
-* https://www.elastic.co/guide/en/elasticsearch/reference/7.10/indices-create-index.html[Create
-the new index] with the right settings and mapping
-* James uses two aliases on the mailbox index: one for reading
-(`mailboxReadAlias`) and one for writing (`mailboxWriteAlias`). First
-https://www.elastic.co/guide/en/elasticsearch/reference/7.10/indices-aliases.html[add
-an alias] `mailboxWriteAlias` to that new index, so that now James
-writes on the old and new indexes, while only keeping reading on the
-first one
-* Now trigger a
-https://www.elastic.co/guide/en/elasticsearch/reference/7.10/docs-reindex.html[reindex]
-from the old index to the new one (this actively relies on `_source`
-field being present)
-* When this is done, add the `mailboxReadAlias` alias to the new index
-* Now that the migration to the new index is done, you can
-https://www.elastic.co/guide/en/elasticsearch/reference/7.10/indices-delete-index.html[drop
-the old index]
-* You might want as well modify the James configuration file
-https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/opensearch.properties[elasticsearch.properties]
-by setting the parameter `opensearch.index.mailbox.name` to the name
-of your new index. This is to avoid that James re-creates index upon
-restart
-
-_Note_: keep in mind that reindexing can be a very long operation
-depending on the volume of mails you have stored.
+:xref-base: distributed
+:mailet-repository-path-prefix: cassandra
+:backend-name: cassandra
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration
+:server-name: Distributed James Server
+include::partial$operate/guide.adoc[]
== Solving cassandra inconsistencies
@@ -220,7 +31,7 @@ message reads and will temporarily decrease the performance.
==== How to detect the outdated projections
You can watch the `MessageFastViewProjection` health check at
-xref:distributed/operate/webadmin.adoc#_check_all_components[webadmin documentation].
+xref:{xref-base}/operate/webadmin.adoc#_check_all_components[webadmin documentation].
It provides a check based on the ratio of missed projection reads.
==== How to solve
@@ -249,7 +60,7 @@ diagnostic and fixes.
==== How to solve
An admin can run offline webadmin
-xref:distributed/operate/webadmin.adoc#_fixing_mailboxes_inconsistencies[solve Cassandra mailbox object inconsistencies task]
+xref:{xref-base}/operate/webadmin.adoc#_fixing_mailboxes_inconsistencies[solve Cassandra mailbox object inconsistencies task]
in order to sanitize his
mailbox denormalization.
@@ -273,7 +84,7 @@ message prefix: `Invalid mailbox counters`.
==== How to solve
Execute the
-xref:distributed/operate/webadmin.adoc#_recomputing_mailbox_counters[recompute Mailbox counters task].
+xref:{xref-base}/operate/webadmin.adoc#_recomputing_mailbox_counters[recompute Mailbox counters task].
This task is not concurrent-safe. Concurrent
increments & decrements will be ignored during a single mailbox
processing. Re-running this task may eventually return the correct
@@ -293,7 +104,7 @@ User can see a message in JMAP but not in IMAP, or mark a message as
==== How to solve
Execute the
-xref:distributed/operate/webadmin.adoc#_fixing_message_inconsistencies[solve Cassandra message inconsistencies task]. This task is not
+xref:{xref-base}/operate/webadmin.adoc#_fixing_message_inconsistencies[solve Cassandra message inconsistencies task]. This task is not
concurrent-safe. User actions concurrent to the inconsistency fixing
task could result in new inconsistencies being created. However the
source of truth `imapUidTable` will not be affected and thus re-running
@@ -313,7 +124,7 @@ Incorrect quotas could be seen in the `Mail User Agent` (IMAP or JMAP).
==== How to solve
Execute the
-xref:distributed/operate/webadmin.adoc#_recomputing_current_quotas_for_users[recompute Quotas counters task]. This task is not concurrent-safe. Concurrent
+xref:{xref-base}/operate/webadmin.adoc#_recomputing_current_quotas_for_users[recompute Quotas counters task]. This task is not concurrent-safe. Concurrent
operations will result in an invalid quota to be persisted. Re-running
this task may eventually return the correct result.
@@ -333,7 +144,7 @@ the mean time, the recommendation is to execute the
==== How to solve
Execute the Cassandra mapping `SolveInconsistencies` task described in
-xref:distributed/operate/webadmin.adoc#_operations_on_mappings_sources[webadmin documentation]
+xref:{xref-base}/operate/webadmin.adoc#_operations_on_mappings_sources[webadmin documentation]
== Setting Cassandra user permissions
@@ -487,35 +298,6 @@ the https://cassandra.apache.org/doc/latest/tools/cqlsh.html[cqlsh]
utility. A full compaction might be needed in order for the changes to
be taken into account.
-== Mail Queue
-
-=== Fine tune configuration for RabbitMQ
-
-In order to adapt mail queue settings to the actual traffic load, an
-administrator needs to perform fine configuration tunning as explained
-in
-https://github.com/apache/james-project/blob/master/src/site/xdoc/server/config-rabbitmq.xml[rabbitmq.properties].
-
-Be aware that `MailQueue::getSize` is currently performing a browse and
-thus is expensive. Size recurring metric reporting thus introduces
-performance issues. As such, we advise setting
-`mailqueue.size.metricsEnabled=false`.
-
-=== Managing email queues
-
-Managing an email queue is an easy task if you follow this procedure:
-
-* First, xref:distributed/operate/webadmin.adoc#_listing_mail_queues[List mail queues]
-and xref:distributed/operate/webadmin.adoc#_getting_a_mail_queue_details[get a mail queue details].
-* And then
-xref:distributed/operate/webadmin.adoc#_listing_the_mails_of_a_mail_queue[List the mails of a mail queue].
-
-In case, you need to clear an email queue because there are only spam or
-trash emails in the email queue you have this procedure to follow:
-
-* All mails from the given mail queue will be deleted with
-xref:distributed/operate/webadmin.adoc#_clearing_a_mail_queue[Clearing a mail queue].
-
== Updating Cassandra schema version
A schema version indicates you which schema your James server is relying
@@ -551,64 +333,18 @@ These schema updates can be triggered by webadmin using the Cassandra
backend. Following steps are for updating Cassandra schema version:
* At the very first step, you need to
-xref:distributed/operate/webadmin.adoc#_retrieving_current_cassandra_schema_version[retrieve
+xref:{xref-base}/operate/webadmin.adoc#_retrieving_current_cassandra_schema_version[retrieve
current Cassandra schema version]
* And then, you
-xref:distributed/operate/webadmin.adoc#_retrieving_latest_available_cassandra_schema_version[retrieve
+xref:{xref-base}/operate/webadmin.adoc#_retrieving_latest_available_cassandra_schema_version[retrieve
latest available Cassandra schema version] to make sure there is a
latest available version
* Eventually, you can update the current schema version to the one you
got with
-xref:distributed/operate/webadmin.adoc#_upgrading_to_the_latest_version[upgrading to
+xref:{xref-base}/operate/webadmin.adoc#_upgrading_to_the_latest_version[upgrading to
the latest version]
Otherwise, if you need to run the migrations to a specific version, you
can use
-xref:distributed/operate/webadmin.adoc#_upgrading_to_a_specific_version[Upgrading to a
-specific version]
-
-== Deleted Message Vault
-
-We recommend the administrator to
-xref:#_cleaning_expired_deleted_messages[run it] in cron job to save
-storage volume.
-
-=== How to configure deleted messages vault
-
-To setup James with Deleted Messages Vault, you need to follow those
-steps:
-
-* Enable Deleted Messages Vault by configuring Pre Deletion Hooks.
-* Configuring the retention time for the Deleted Messages Vault.
-
-==== Enable Deleted Messages Vault by configuring Pre Deletion Hooks
-
-You need to configure this hook in
-https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/listeners.xml[listeners.xml]
-configuration file. More details about configuration & example can be
-found at http://james.apache.org/server/config-listeners.html[Pre
-Deletion Hook Configuration]
-
-==== Configuring the retention time for the Deleted Messages Vault
-
-In order to configure the retention time for the Deleted Messages Vault,
-an administrator needs to perform fine configuration tunning as
-explained in
-https://github.com/apache/james-project/blob/master/server/apps/distributed-app/sample-configuration/deletedMessageVault.properties[deletedMessageVault.properties].
-Mails are not retained forever as you have to configure a retention
-period (by `retentionPeriod`) before using it (with one-year retention
-by default if not defined).
-
-=== Restore deleted messages after deletion
-
-After users deleted their mails and emptied the trash, the admin can use
-xref:distributed/operate/webadmin.adoc#_restore_deleted_messages[Restore Deleted Messages]
-to restore all the deleted mails.
-
-=== Cleaning expired deleted messages
-
-You can delete all deleted messages older than the configured
-`retentionPeriod` by using
-xref:distributed/operate/webadmin.adoc#_deleted_messages_vault[Purge Deleted Messages].
-We recommend calling this API in CRON job on 1st day each
-month.
+xref:{xref-base}/operate/webadmin.adoc#_upgrading_to_a_specific_version[Upgrading to a
+specific version]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/operate/index.adoc b/docs/modules/servers/pages/distributed/operate/index.adoc
index bcad596cdea..da76f0558b8 100644
--- a/docs/modules/servers/pages/distributed/operate/index.adoc
+++ b/docs/modules/servers/pages/distributed/operate/index.adoc
@@ -1,28 +1,10 @@
= Distributed James Server — Operate the Distributed server
:navtitle: Operate the Distributed server
-The following pages detail how to operate the Distributed server.
-
-Once you have a Distributed James server up and running you then need to ensure it operates correctly and has a decent performance.
-You may also need to perform some operation maintenance or recover from incidents. This section covers
-these topics.
-
-Read more about xref:distributed/operate/logging.adoc[Logging].
-
-The xref:distributed/operate/webadmin.adoc[WebAdmin Restfull administration API] is the
-recommended way to operate the Distributed James server. It allows managing and interacting with most
-server components.
-
-The xref:distributed/operate/cli.adoc[Command line interface] allows to interact with some
-server components. However it relies on JMX technologies and its use is discouraged.
-
-The xref:distributed/operate/metrics.adoc[metrics] allows to build latency and throughput
-graphs, that can be visualized, for instance in *Grafana*.
-
-We did put together a xref:distributed/operate/guide.adoc[detailed guide] for
-distributed James operators. We also propose a xref:distributed/operate/performanceChecklist.adoc[performance checklist].
-
-We also included a guide for xref:distributed/operate/migrating.adoc[migrating existing data] into the distributed server.
+:xref-base: distributed
+:server-name: Distributed James Server
+:server-tag: distributed
+include::partial$operate/index.adoc[]
Read more about xref:distributed/operate/cassandra-migration.adoc[Cassandra data migration].
diff --git a/docs/modules/servers/pages/distributed/operate/logging.adoc b/docs/modules/servers/pages/distributed/operate/logging.adoc
index 43079e87aa6..5c93f32071f 100644
--- a/docs/modules/servers/pages/distributed/operate/logging.adoc
+++ b/docs/modules/servers/pages/distributed/operate/logging.adoc
@@ -1,251 +1,9 @@
= Distributed James Server — Logging
:navtitle: Logging
-We recommend to closely monitoring *ERROR* and *WARNING* logs. Those
-logs should be considered not normal.
-
-If you encounter some suspicious logs:
-
-* If you have any doubt about the log being caused by a bug in James
-source code, please reach us via the bug tracker, the user mailing list or our Gitter channel (see our
-http://james.apache.org/#second[community page])
-* They can be due to insufficient performance from tier applications (eg
-Cassandra timeouts). In such case we advise you to conduct a close
-review of performances at the tier level.
-
-Leveraging filters in Kibana discover view can help to filter out
-''already known'' frequently occurring logs.
-
-When reporting ERROR or WARNING logs, consider adding the full logs, and
-related data (eg the raw content of a mail triggering an issue) to the
-bug report in order to ease resolution.
-
-== Logging configuration
-
-Distributed James uses link:http://logback.qos.ch/[logback] as a logging library
-and link:https://docs.fluentbit.io/[FluentBit] as centralize logging.
-
-Information about logback configuration can be found
-link:http://logback.qos.ch/manual/configuration.html[here].
-
-== Structured logging
-
-=== Using FluentBit as a log forwarder
-
-==== Using Docker
-
-Distributed Server leverages the use of MDC in order to achieve structured logging, and better add context to the logged information. We furthermore ship json logs to file with RollingFileAppender on the classpath to easily allow FluentBit to directly tail the log file.
-Here is a sample conf/logback.xml configuration file for logback with the following pre-requisites:
-
-Logging in a structured json fashion and write to file for centralizing logging.
-Centralize logging third party like FluentBit can tail from logging’s file then filter/process and put in to OpenSearch
-
-....
-
-
-
-
- true
-
-
-
-
- logs/james.%d{yyyy-MM-dd}.%i.log
- 1
- 200MB
- 100MB
-
-
-
-
- yyyy-MM-dd'T'HH:mm:ss.SSSX
- Etc/UTC
-
-
- true
-
-
- false
-
-
-
-
-
-
-
-
-
-
-....
-
-First you need to create a `logs` folder, then mount it to James container and to FluentBit.
-
-docker-compose:
-....
-version: "3"
-
-services:
- james:
- depends_on:
- - opensearch
- - cassandra
- - rabbitmq
- - s3
- entrypoint: bash -c "java -cp 'james-server.jar:extension-jars/*:james-server-memory-guice.lib/*' -Dworking.directory=/root/ -Dlogback.configurationFile=/root/conf/logback.xml org.apache.james.CassandraRabbitMQJamesServerMain"
- image: linagora/james-rabbitmq-project:branch-master
- container_name: james
- hostname: james.local
- volumes:
- - ./extension-jars:/root/extension-jars
- - ./conf/logback.xml:/root/conf/logback.xml
- - ./logs:/root/logs
- ports:
- - "80:80"
- - "25:25"
- - "110:110"
- - "143:143"
- - "465:465"
- - "587:587"
- - "993:993"
- - "8080:8000"
-
- opensearch:
- image: opensearchproject/opensearch:2.14.0
- ports:
- - "9200:9200"
- environment:
- - discovery.type=single-node
-
- cassandra:
- image: cassandra:4.1.5
- ports:
- - "9042:9042"
-
- rabbitmq:
- image: rabbitmq:3.13.3-management
- ports:
- - "5672:5672"
- - "15672:15672"
-
- s3:
- image: registry.scality.com/cloudserver/cloudserver:8.7.25
- container_name: s3.docker.test
- environment:
- - SCALITY_ACCESS_KEY_ID=accessKey1
- - SCALITY_SECRET_ACCESS_KEY=secretKey1
- - S3BACKEND=mem
- - LOG_LEVEL=trace
- - REMOTE_MANAGEMENT_DISABLE=1
-
- fluent-bit:
- image: fluent/fluent-bit:1.5.7
- volumes:
- - ./fluentbit/fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf
- - ./fluentbit/parsers.conf:/fluent-bit/etc/parsers.conf
- - ./logs:/fluent-bit/log
- ports:
- - "24224:24224"
- - "24224:24224/udp"
- depends_on:
- - opensearch
-
- opensearch-dashboards:
- image: opensearchproject/opensearch-dashboards:2.16.0
- environment:
- OPENSEARCH_HOSTS: http://opensearch:9200
- ports:
- - "5601:5601"
- depends_on:
- - opensearch
-....
-
-FluentBit config as:
-the `Host opensearch` pointing to `opensearch` service in docker-compose file.
-....
-[SERVICE]
- Parsers_File /fluent-bit/etc/parsers.conf
-
-[INPUT]
- name tail
- path /fluent-bit/log/*.log
- Parser docker
- docker_mode on
- buffer_chunk_size 1MB
- buffer_max_size 1MB
- mem_buf_limit 64MB
- Refresh_Interval 30
-
-[OUTPUT]
- Name stdout
- Match *
-
-
-[OUTPUT]
- Name es
- Match *
- Host opensearch
- Port 9200
- Index fluentbit
- Logstash_Format On
- Logstash_Prefix fluentbit-james
- Type docker
-....
-
-FluentBit Parser config:
-....
-[PARSER]
- Name docker
- Format json
- Time_Key timestamp
- Time_Format %Y-%m-%dT%H:%M:%S.%LZ
- Time_Keep On
- Decode_Field_As escaped_utf8 log do_next
- Decode_Field_As escaped log do_next
- Decode_Field_As json log
-....
-
-==== Using Kubernetes
-
-If using James in a Kubernetes environment, you can just append the logs to the console in a JSON formatted way
-using Jackson to easily allow FluentBit to directly tail them.
-
-Here is a sample conf/logback.xml configuration file for achieving this:
-
-....
-
-
-
-
- true
-
-
-
-
-
- yyyy-MM-dd'T'HH:mm:ss.SSSX
- Etc/UTC
-
-
- true
-
-
- false
-
-
-
-
-
-
-
-
-
-
-....
-
-Regarding FluentBit on Kubernetes, you need to install it as a DaemonSet. Some official template exist
-with FluentBit outputting logs to OpenSearch. For more information on how to install it,
-with your cluster, you can look at this https://docs.fluentbit.io/manual/installation/kubernetes[documentation].
-
-As stated by the https://docs.fluentbit.io/manual/installation/kubernetes#details[detail] of the
-official documentation, FluentBit is configured to consume out of the box logs from containers
-on the same running node. So it should scrap your James logs without extra configuration.
+:xref-base: distributed
+:server-name: Distributed James Server
+:server-tag: distributed
+:docker-compose-code-block-sample: servers:distributed/operate/logging/docker-compose-block.adoc
+:backend-name: cassandra
+include::partial$operate/logging.adoc[]
diff --git a/docs/modules/servers/pages/distributed/operate/logging/docker-compose-block.adoc b/docs/modules/servers/pages/distributed/operate/logging/docker-compose-block.adoc
new file mode 100644
index 00000000000..77b46b433f6
--- /dev/null
+++ b/docs/modules/servers/pages/distributed/operate/logging/docker-compose-block.adoc
@@ -0,0 +1,78 @@
+[source,yaml]
+----
+version: "3"
+
+services:
+ james:
+ depends_on:
+ - opensearch
+ - cassandra
+ - rabbitmq
+ - s3
+ entrypoint: bash -c "java -cp 'james-server.jar:extension-jars/*:james-server-memory-guice.lib/*' -Dworking.directory=/root/ -Dlogback.configurationFile=/root/conf/logback.xml org.apache.james.CassandraRabbitMQJamesServerMain"
+ image: linagora/james-rabbitmq-project:branch-master
+ container_name: james
+ hostname: james.local
+ volumes:
+ - ./extension-jars:/root/extension-jars
+ - ./conf/logback.xml:/root/conf/logback.xml
+ - ./logs:/root/logs
+ ports:
+ - "80:80"
+ - "25:25"
+ - "110:110"
+ - "143:143"
+ - "465:465"
+ - "587:587"
+ - "993:993"
+ - "8080:8000"
+
+ opensearch:
+ image: opensearchproject/opensearch:2.14.0
+ ports:
+ - "9200:9200"
+ environment:
+ - discovery.type=single-node
+
+ cassandra:
+ image: cassandra:4.1.5
+ ports:
+ - "9042:9042"
+
+ rabbitmq:
+ image: rabbitmq:3.13.3-management
+ ports:
+ - "5672:5672"
+ - "15672:15672"
+
+ s3:
+ image: registry.scality.com/cloudserver/cloudserver:8.7.25
+ container_name: s3.docker.test
+ environment:
+ - SCALITY_ACCESS_KEY_ID=accessKey1
+ - SCALITY_SECRET_ACCESS_KEY=secretKey1
+ - S3BACKEND=mem
+ - LOG_LEVEL=trace
+ - REMOTE_MANAGEMENT_DISABLE=1
+
+ fluent-bit:
+ image: fluent/fluent-bit:1.5.7
+ volumes:
+ - ./fluentbit/fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf
+ - ./fluentbit/parsers.conf:/fluent-bit/etc/parsers.conf
+ - ./logs:/fluent-bit/log
+ ports:
+ - "24224:24224"
+ - "24224:24224/udp"
+ depends_on:
+ - opensearch
+
+ opensearch-dashboards:
+ image: opensearchproject/opensearch-dashboards:2.16.0
+ environment:
+ OPENSEARCH_HOSTS: http://opensearch:9200
+ ports:
+ - "5601:5601"
+ depends_on:
+ - opensearch
+----
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/operate/metrics.adoc b/docs/modules/servers/pages/distributed/operate/metrics.adoc
index e99f718d5de..d75368916af 100644
--- a/docs/modules/servers/pages/distributed/operate/metrics.adoc
+++ b/docs/modules/servers/pages/distributed/operate/metrics.adoc
@@ -1,181 +1,7 @@
= Distributed James Server — Metrics
:navtitle: Metrics
-James relies on the https://metrics.dropwizard.io/4.1.2/manual/core.html[Dropwizard metric library]
-for keeping track of some core metrics of James.
-
-Such metrics are made available via JMX. You can connect for instance using VisualVM and the associated
-mbean plugins.
-
-We also support displaying them via https://grafana.com/[Grafana]. Two methods can be used to back grafana display:
-
- - Prometheus metric collection - Data are exposed on a HTTP endpoint for Prometheus scrape.
- - ElasticSearch metric collection - This method is depreciated and will be removed in next version.
-
-== Expose metrics for Prometheus collection
-
-To enable James metrics, add ``extensions.routes`` to https://github.com/apache/james-project/blob/master/server/apps/distributed-app/docs/modules/ROOT/pages/configure/webadmin.adoc[webadmin.properties] file:
-```
-extensions.routes=org.apache.james.webadmin.dropwizard.MetricsRoutes
-```
-Connect to james-admin url to test the result:
-....
-http://james-admin-url/metrics
-....
-
-== Configure Prometheus Data source
-You need to set up https://prometheus.io/docs/prometheus/latest/getting_started/[Prometheus] first to scrape James metrics. +
-Add Apache James WebAdmin Url or IP address to ``prometheus.yaml`` configuration file:
-....
-scrape_configs:
- # The job name is added as a label `job=` to any timeseries scraped from this config.
- - job_name: 'WebAdmin url Example'
- scrape_interval: 5s
- metrics_path: /metrics
- static_configs:
- - targets: ['james-webamin-url']
- - job_name: 'WebAdmin IP Example'
- scrape_interval: 5s
- metrics_path: /metrics
- static_configs:
- - targets: ['192.168.100.10:8000']
-....
-
-== Connect Prometheus to Grafana
-
-You can do this either from https://prometheus.io/docs/visualization/grafana/[Grafana UI] or from a https://grafana.com/docs/grafana/latest/datasources/prometheus/[configuration file]. +
-The following `docker-compose.yaml` will help you install a simple Prometheus/ Grafana stack :
-
-```
-version: '3'
-#Metric monitoring
- grafana:
- image: grafana/grafana:latest
- container_name: grafana
- ports:
- - "3000:3000"
-
- prometheus:
- image: prom/prometheus:latest
- restart: unless-stopped
- ports:
- - "9090:9090"
- volumes:
- - ./conf/prometheus.yml:/etc/prometheus/prometheus.yml
-```
-
-== Getting dashboards
-Now that the Promtheus/Grafana servers are up, go to this https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/[link] to get all dashboards JSON file. Import the different JSON files in this directory to Grafana via UI.
-
-
-image::preload-dashboards.png[Pre-loaded dashboards]
-
-*Note: For communication between multiple docker-compose projects, see https://stackoverflow.com/questions/38088279/communication-between-multiple-docker-compose-projects[here] for example. An easier approach is to merge James and Metric docker-compose files together.
-
-== Available metrics
-
-Here are the available metrics :
-
- - James JVM metrics
- - Number of active SMTP connections
- - Number of SMTP commands received
- - Number of active IMAP connections
- - Number of IMAP commands received
- - Number of active LMTP connections
- - Number of LMTP commands received
- - Number of per queue number of enqueued mails
- - Number of sent emails
- - Number of delivered emails
- - Diverse Response time percentiles, counts and rates for JMAP
- - Diverse Response time percentiles, counts and rates for IMAP
- - Diverse Response time percentiles, counts and rates for SMTP
- - Diverse Response time percentiles, counts and rates for WebAdmin
- - Diverse Response time percentiles, counts and rates for each Mail Queue
- - Per mailet and per matcher Response time percentiles
- - Diverse Response time percentiles, counts and rates for DNS
- - Cassandra Java driver metrics
- - Tika HTTP client statistics
- - SpamAssassin TCP client statistics
- - Mailbox listeners statistics time percentiles
- - Mailbox listeners statistics requests rate
- - Pre-deletion hooks execution statistics time percentiles
-
-== Available Grafana boards
-
-Here are the various relevant Grafana boards for the Distributed Server:
-
-- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_BlobStore.json[BlobStore] :
-Rates and percentiles for the BlobStore component
-- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_DNS_Dashboard.json[DNS] :
-Latencies and query counts for DNS resolution.
-- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_IMAP_Board.json[IMAP] :
-Latencies for the IMAP protocol
-- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_IMAP_CountBoard.json[IMAP counts] :
-Request counts for the IMAP protocol
-- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_JMAP_Board.json[JMAP] :
-Latencies for the JMAP protocol
-- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_JMAP_CountBoard.json[JMAP counts] :
-Request counts for the JMAP protocol
-- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_JVM.json[JVM] :
-JVM statistics (heap, gcs, etc...)
-- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_MAILET.json[Mailets] :
-Per-mailet execution timings.
-- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_MATCHER.json[Matchers] :
-Per-matcher execution timings
-- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_MailQueue.json[MailQueue] :
-MailQueue statistics
-- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_SMTP_Board.json[SMTP] :
-SMTP latencies reports
-- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_SMTP_CountBoard.json[SMTP count] :
-Request count for the SMTP protocol
-
-=== Dashboard samples
-Latencies for the JMAP protocol +
-
-image::JMAP_board.png[JMAP]
-
-Latencies for the IMAP protocol +
-
-image::IMAP_board.png[IMAP]
-
-JVM Statistics +
-
-image::JVM_board.png[JVM]
-
-BlobStore Statistics +
-
-image::BlobStore.png[BlobStore]
-
-webAdmin Statistics +
-
-image::webAdmin.png[webAdmin]
-
-== Expose metrics for Elasticsearch collection
-
-The following command allow you to run a fresh grafana server :
-
-....
-docker run -i -p 3000:3000 grafana/grafana
-....
-
-Once running, you need to set up an ElasticSearch data-source : - select
-proxy mode - Select version 2.x of ElasticSearch - make the URL point
-your ES node - Specify the index name. By default, it should be :
-
-....
-[james-metrics-]YYYY-MM
-....
-
-Import the different dashboards you want.
-
-You then need to enable reporting through ElasticSearch. Modify your
-James ElasticSearch configuration file accordingly. To help you doing
-this, you can take a look to
-link:https://github.com/apache/james-project/blob/3.7.x/server/apps/distributed-app/sample-configuration/elasticsearch.properties[elasticsearch.properties].
-
-If some metrics seem abnormally slow despite in depth database
-performance tuning, feedback is appreciated as well on the bug tracker,
-the user mailing list or our Gitter channel (see our
-http://james.apache.org/#second[community page]) . Any additional
-details categorizing the slowness are appreciated as well (details of
-the slow requests for instance).
+:other-metrics: Cassandra Java driver metrics
+:xref-base: distributed
+:server-name: Distributed James Server
+include::partial$operate/metrics.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/operate/migrating.adoc b/docs/modules/servers/pages/distributed/operate/migrating.adoc
index ce3d8da8cbe..c79a77a004c 100644
--- a/docs/modules/servers/pages/distributed/operate/migrating.adoc
+++ b/docs/modules/servers/pages/distributed/operate/migrating.adoc
@@ -1,34 +1,6 @@
= Distributed James Server — Migrating existing data
:navtitle: Migrating existing data
-This page presents how operators can migrate your user mailbox and mails into the Distributed Server in order to adopt it.
-
-We assume you have a xref:distributed/configure/index.adoc[well configured] running Distributed server
-at hand. We also assume existing mails are hosted on a tier mail server which can be accessed via IMAP and supports
-impersonation.
-
-First, you want to create the domains handled by your server, as well as the users you will be hosting. This operation
-can be performed via WebAdmin or the CLI.
-
- * Using webadmin :
- ** Read xref:distributed/operate/webadmin.adoc#_create_a_domain[this section] for creating domains
- ** Read xref:distributed/operate/webadmin.adoc#_create_a_user[this section] for creating users
- * Using the CLI :
- ** Read xref:distributed/operate/cli.adoc#_manage_domains[this section] for creating domains
- ** Read xref:distributed/operate/cli.adoc#_managing_users[this section] for creating users
-
-Second, you want to allow an administrator account of your Distributed Server to have write access on other user mailboxes.
-This can be setted up this the *administratorId* configuration option of the xref:distributed/configure/usersrepository.adoc[usersrepository.xml] configuration file.
-
-Then, it is time to run https://github.com/imapsync/imapsync[imapsync] script to copy the emails from the previous mail server
-into the Distributed Server. Here is an example migrating a single user, relying on impersonation:
-
-....
-imapsync --host1 previous.server.domain.tld \
- --user1 user@domain.tld --authuser1 adminOldServer@domain.tld \
- --proxyauth1 --password1 passwordOfTheOldAdmin \
- --host2 distributed.james.domain.tld \
- --user2 use1@domain.tld \
- --authuser2 adminNewServer@domain.tld --proxyauth2 \
- --password2 passwordOfTheNewAdmin
-....
\ No newline at end of file
+:xref-base: distributed
+:server-name: Distributed James Server
+include::partial$operate/migrating.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/operate/performanceChecklist.adoc b/docs/modules/servers/pages/distributed/operate/performanceChecklist.adoc
index 65b60229136..ec5b35ea74f 100644
--- a/docs/modules/servers/pages/distributed/operate/performanceChecklist.adoc
+++ b/docs/modules/servers/pages/distributed/operate/performanceChecklist.adoc
@@ -1,22 +1,42 @@
= Distributed James Server — Performance checklist
:navtitle: Performance checklist
-This guide aims to help James operators refine their James configuration and set up to achieve better performance.
+:xref-base: distributed
+:backend-name: Cassandra
+:mail-queue-name: CassandraMailQueueView
+include::partial$operate/performanceChecklist.adoc[]
-== Database setup
+=== RabbitMQ
+
+We recommend against the use of the CassandraMailQueueView, as browsing and advanced queue management features
+are unnecessary for a Mail Delivery Agent and are not meaningful in the absence of delays.
+
+Similarly, we recommend turning off queue size metrics, which are expensive to compute.
-Cassandra, OpenSearch, RabbitMQ is a large topic in itself that we do not intend to cover here. Yet, here are some
-very basic recommendation that are always beneficial to keep in mind.
+We also recommend against the use of publish confirms, which comes at a high performance price.
+
+In `rabbitmq.properties`:
-We recommend:
+....
+cassandra.view.enabled=false
-* Running Cassandra, OpenSearch on commodity hardware with attached SSD. SAN disks are known to cause performance
-issues for these technologies. HDD disks are to be banned for these performance related applications.
-* We recommend getting an Object Storage SaaS offering that suites your needs. Most generalist S3 offers will suite
-James needs.
-* We do provide a guide on xref:[Database benchmarks] that can help identify and fix issues.
+mailqueue.size.metricsEnabled=false
-== James configuration
+event.bus.publish.confirm.enabled=false
+mailqueue.publish.confirm.enabled=false
+....
+
+=== Object storage
+
+We recommend the use of the blob store cache, which will be populated with email headers, which should be treated as metadata.
+
+`blob.properties`:
+
+....
+cache.enable=true
+cache.cassandra.ttl=1year
+cache.sizeThresholdInBytes=16 KiB
+....
=== Cassandra
@@ -64,100 +84,4 @@ Cassandra overload.
max-concurrent-requests = 192
}
-....
-
-=== Object storage
-
-We recommend the use of the blob store cache, which will be populated by email headers which shall be treated as metadata.
-
-`blob.properties`:
-
-....
-cache.enable=true
-cache.cassandra.ttl=1year
-cache.sizeThresholdInBytes=16 KiB
-....
-
-=== RabbitMQ
-
-We recommend against the use of the CassandraMailQueueView, as browsing and advanced queue management features
-is unnecessary for Mail Delivery Agent and are not meaningful in the absence of delays.
-
-Similarly, we recommend turning off queue size metrics, which are expensive to compute.
-
-We also recommend against the use of publish confirms, which comes at a high performance price.
-
-In `rabbitmq.properties`:
-
-....
-cassandra.view.enabled=false
-
-mailqueue.size.metricsEnabled=false
-
-event.bus.publish.confirm.enabled=false
-mailqueue.publish.confirm.enabled=false
-....
-
-=== JMAP protocol
-
-If you are not using JMAP, disabling it will avoid you the cost of populating related projections and thus is recommended.
-Within `jmap.properties`:
-
-....
-enabled=false
-....
-
-We recommend turning on EmailQueryView as it enables resolution of mailbox listing against Cassandra, thus unlocking massive
-stability / performance gains. Within `jmap.properties`:
-
-....
-view.email.query.enabled=true
-....
-
-=== IMAP / SMTP
-
-We recommend against resolving client connection DNS names. This behaviour can be disabled via a system property within
-`jvm.properties`:
-
-....
-james.protocols.mdc.hostname=false
-....
-
-Concurrent IMAP request count is the critical setting. In `imapServer.xml`:
-
-....
-200
-4096
-....
-
-Other recommendation includes avoiding unecessary work upon IMAP IDLE, not starting dedicated BOSS threads:
-
-....
-false
-0
-....
-
-=== Other generic recommendations
-
-* Remove unneeded listeners / mailets
-* Reduce duplication of Matchers within mailetcontainer.xml
-* Limit usage of "DEBUG" loglevel. INFO should be more than decent in most cases.
-* While GC tunning is a science in itself, we had good results with G1GC and a low pause time:
-
-....
--Xlog:gc*:file=/root/gc.log -XX:MaxGCPauseMillis=20 -XX:ParallelGCThreads=2
-....
-
-* We recommand tunning bach sizes: `batchsizes.properties`. This allows, limiting parallel S3 reads, while loading many
-messages concurrently on Cassandra, and improves IMAP massive operations support.
-
-....
-fetch.metadata=200
-fetch.headers=30
-fetch.body=30
-fetch.full=30
-
-copy=8192
-
-move=8192
....
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/operate/security.adoc b/docs/modules/servers/pages/distributed/operate/security.adoc
index b70db53553a..6d59fd82b24 100644
--- a/docs/modules/servers/pages/distributed/operate/security.adoc
+++ b/docs/modules/servers/pages/distributed/operate/security.adoc
@@ -1,249 +1,6 @@
= Security checklist
:navtitle: Security checklist
-This document aims as summarizing threats, security best practices as well as recommendations.
-
-== Threats
-
-Operating an email server exposes you to the following threats:
-
- - Spammers might attempt to use your servers to send their spam messages on their behalf. We speak of
-*open relay*. In addition to the resources consumed being an open relay will affect the trust other mail
-installations have in you, and thus will cause legitimate traffic to be rejected.
- - Emails mostly consist of private data, which shall only be accessed by their legitimate user. Failure
-to do so might result in *information disclosure*.
- - *Email forgery*. An attacker might craft an email on the behalf of legitimate users.
- - Email protocols allow user to authenticate and thus can be used as *oracles* to guess user passwords.
- - *Spam*. Non legitimate traffic can be a real burden to your users.
- - *Phishing*: Crafted emails that tricks the user into doing unintended actions.
- - *Viruses*: An attacker sends an attachment that contains an exploit that could run if a user opens it.
- - *Denial of service*: A small request may result in a very large response and require considerable work on the server...
- - *Denial of service*: A malicious JMAP client may use the JMAP push subscription to attempt to flood a third party
-server with requests, creating a denial-of-service attack and masking the attacker’s true identity.
- - *Dictionary Harvest Attacks*: An attacker can rely on SMTP command reply code to know if a user exists or not. This
- can be used to obtain the list of local users and later use those address as targets for other attacks.
-
-== Best practices
-
-The following sections ranks best practices.
-
-=== Best practices: Must
-
- - 1. Configure James in order not to be an xref:distributed/configure/smtp.adoc#_about_open_relays[open relay]. This should be the
-case with the default configuration.
-
-Be sure in xref:distributed/configure/smtp.adoc[smtpserver.xml] to activate the following options: `verifyIdentity`.
-
-We then recommend to manually test your installation in order to ensure that:
-
- - Unauthenticated SMTP users cannot send mails to external email addresses (they are not relayed)
- - Unauthenticated SMTP users can send mails to internal email addresses
- - Unauthenticated SMTP users cannot use local addresses in their mail from, and send emails both locally and to distant targets.
-
- - 2. Avoid *STARTTLS* usage and favor SSL. Upgrade from a non encrypted channel into an encrypted channel is an opportunity
-for additional vulnerabilities. This is easily prevented by requiring SSL connection upfront. link:https://nostarttls.secvuln.info/[Read more...]
-
-Please note that STARTTLS is still beneficial in the context of email relaying, which happens on SMTP port 25 unencrypted,
-and enable opportunistic encryption upgrades that would not overwise be possible. We recommend keeping STARTTLS activated
-for SMTP port 25.
-
- - 3. Use SSL for xref:distributed/configure/mailets.adoc#_remotedelivery[remote delivery] whenever you are using a gateway relaying SMTP server.
-
- - 4. Rely on an external identity service, dedicated to user credential storage. James supports xref:distributed/configure/usersrepository.adoc#_configuring_a_ldap[LDAP]. If you are
-forced to store users in James be sure to choose `PBKDF2` as a hashing algorithm. Also, delays on authentication failures
-are supported via the `verifyFailureDelay` property. Note that IMAP / SMTP connections are closed after 3 authentication
-failures.
-
- - 5. Ensure that xref:distributed/configure/webadmin.adoc[WebAdmin] is not exposed unencrypted to the outer world. Doing so trivially
-exposes yourself. You can either disable it, activate JWT security, or restrict it to listen only on localhost.
-
- - 6. Set up `HTTPS` for http based protocols, namely *JMAP* and *WebAdmin*. We recommend the use of a reverse proxy like Nginx.
-
- - 7. Set up link:https://james.apache.org/howTo/spf.html[SPF] and link:https://james.apache.org/howTo/dkim.html[DKIM]
-for your outgoing emails to be trusted.
-
- - 8. Prevent access to JMX. This can be achieved through a strict firewalling policy
-(link:https://nickbloor.co.uk/2017/10/22/analysis-of-cve-2017-12628/[blocking port 9999 is not enough])
-or xref:distributed/configure/jmx.adoc[disabling JMX]. JMX is needed to use the existing CLI application but webadmin do offer similar
-features. Set the `jmx.remote.x.mlet.allow.getMBeansFromURL` to `false` to disable JMX remote code execution feature.
-
- - 9. If JMAP is enabled, be sure that JMAP PUSH cannot be used for server side request forgery. This can be
-xref:distributed/configure/jmap.adoc[configured] using the `push.prevent.server.side.request.forgery=true` property,
-forbidding push to private addresses.
-
-=== Best practice: Should
-
- - 1. Avoid advertising login/authenticate capabilities in clear channels. This might prevent some clients to attempt login
-on clear channels, and can be configured for both xref:distributed/configure/smtp.adoc[SMTP] and xref:distributed/configure/imap.adoc[IMAP]
-using `auth.plainAuthEnabled=false`.
-
- - 2. Verify link:https://james.apache.org/howTo/spf.html[SPF] and xref:distributed/configure/mailets.adoc#_dkimverify[DKIM] for your incoming emails.
-
- - 3. Set up reasonable xref:distributed/operate/webadmin.adoc#_administrating_quotas[storage quota] for your users.
-
- - 4. We recommend setting up anti-spam and anti-virus solutions. James comes with some xref:distributed/configure/spam.adoc[Rspamd and SpamAssassin]
-integration, and some xref:distributed/configure/mailets.adoc#_clamavscan[ClamAV] tooling exists.
-Rspamd supports anti-phishing modules.
-Filtering with third party systems upstream is also possible.
-
- - 5. In order to limit your attack surface, disable protocols you or your users do not use. This includes the JMAP protocol,
-POP3, ManagedSieve, etc... Be conservative on what you expose.
-
- - 6. If operating behind a load-balancer, set up the link:https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt[PROXY protocol] for
-TCP based protocols (IMAP and SMTP `proxyRequired` option)
-
-=== Best practice: Could
-
- - 1. Set up link:https://openid.net/connect/[OIDC] for IMAP, SMTP and JMAP. Disable login/plain/basic authentication.
-
- - 2. You can configure xref:distributed/configure/ssl.adoc#_client_authentication_via_certificates[Client authentication via certificates].
-
- - 3. You can xref:distributed/configure/mailets.adoc#_smimesign[sign], xref:distributed/configure/mailets.adoc#_smimechecksignature[verify]
-and xref:distributed/configure/mailets.adoc#_smimedecrypt[decrypt] your email traffic using link:https://datatracker.ietf.org/doc/html/rfc5751[SMIME].
-
-== Known vulnerabilities
-
-Several vulnerabilities have had been reported for previous releases of Apache James server.
-
-Be sure not to run those! We highly recommend running the latest release, which we put great effort in not to use
-outdated dependencies.
-
-=== Reporting vulnerabilities
-
-We follow the standard procedures within the ASF regarding link:https://apache.org/security/committers.html#vulnerability-handling[vulnerability handling]
-
-=== CVE-2024-21742: Mime4J DOM header injection
-
-Apache JAMES MIME4J prior to version 0.8.10 allow attackers able to specify the value of a header field to craft other header fields.
-
-*Severity*: Moderate
-
-*Mitigation*: Release 0.8.10 rejects the use of LF inside a header field thus preventing the issue.
-
-Upgrading to Apache James MIME4J 0.8.10 is thus advised.
-
-=== CVE-2023-51747: SMTP smuggling in Apache James
-
-Apache James distribution prior to release 3.7.5 and release 3.8.1 is subject to SMTP smuggling, when used in combination
-of antother vulnerable server and can result in SPF bypass, leading to email forgery.
-
-*Severity*: High
-
-*Mitigation*: Release 3.7.5 and 3.8.1 interpret strictly the CRLF delimiter and thus prevent the issue.
-
-Upgrading to Apache James 3.7.5 or 3.8.1 is thus advised.
-
-=== CVE-2023-51518: Privilege escalation via JMX pre-authentication deserialisation
-
-Apache James distribution prior to release 3.7.5 and 3.8.1 allow privilege escalation via JMX pre-authentication deserialisation.
-An attacker would need to identify a deserialization glitch before triggering an exploit.
-
-*Severity*: Moderate
-
-*Mitigation*:We recommend turning off JMX whenever possible.
-
-Release 3.7.5 and 3.8.1 disable deserialization on unauthencited channels.
-
-Upgrading to Apache James 3.7.5 on 3.8.1 is thus advised.
-
-
-=== CVE-2023-26269: Privilege escalation through unauthenticated JMX
-
-Apache James distribution prior to release 3.7.4 allows privilege escalation through the use of JMX.
-
-*Severity*: Moderate
-
-*Mitigation*: We recommend turning on authentication on. If the CLI is unused we recommend turning JMX off.
-
-Release 3.7.4 set up implicitly JMX authentication for Guice based products and addresses the underlying JMX exploits.
-
-Upgrading to Apache James 3.7.4 is thus advised.
-
-=== CVE-2022-45935: Temporary File Information Disclosure in Apache JAMES
-
-Apache James distribution prior to release 3.7.3 is vulnerable to a temporary File Information Disclosure.
-
-*Severity*: Moderate
-
-*Mitigation*: We recommend to upgrade to Apache James 3.7.3 or higher, which fixes this vulnerability.
-
-
-=== CVE-2021-44228: STARTTLS command injection in Apache JAMES
-
-Apache James distribution prior to release 3.7.1 is vulnerable to a buffering attack relying on the use of the STARTTLS command.
-
-Fix of CVE-2021-38542, which solved similar problem from Apache James 3.6.1, is subject to a parser differential and do not take into account concurrent requests.
-
-*Severity*: Moderate
-
-*Mitigation*: We recommend to upgrade to Apache James 3.7.1 or higher, which fixes this vulnerability.
-
-=== CVE-2021-38542: Apache James vulnerable to STARTTLS command injection (IMAP and POP3)
-
-Apache James prior to release 3.6.1 is vulnerable to a buffering attack relying on the use of the STARTTLS
-command. This can result in Man-in -the-middle command injection attacks, leading potentially to leakage
-of sensible information.
-
-*Severity*: Moderate
-
-This issue is being tracked as link:https://issues.apache.org/jira/browse/JAMES-1862[JAMES-1862]
-
-*Mitigation*: We recommend upgrading to Apache James 3.6.1, which fixes this vulnerability.
-
-Furthermore, we recommend, if possible to dis-activate STARTTLS and rely solely on explicit TLS for mail protocols, including SMTP, IMAP and POP3.
-
-Read more link:https://nostarttls.secvuln.info/[about STARTTLS security here].
-
-=== CVE-2021-40110: Apache James IMAP vulnerable to a ReDoS
-
-Using Jazzer fuzzer, we identified that an IMAP user can craft IMAP LIST commands to orchestrate a Denial
-Of Service using a vulnerable Regular expression. This affected Apache James prior to 3.6.1
-
-*Severity*: Moderate
-
-This issue is being tracked as link:https://issues.apache.org/jira/browse/JAMES-3635[JAMES-3635]
-
-*Mitigation*: We recommend upgrading to Apache James 3.6.1, which enforce the use of RE2J regular
-expression engine to execute regex in linear time without back-tracking.
-
-=== CVE-2021-40111: Apache James IMAP parsing Denial Of Service
-
-While fuzzing with Jazzer the IMAP parsing stack we discover that crafted APPEND and STATUS IMAP command
-could be used to trigger infinite loops resulting in expensive CPU computations and OutOfMemory exceptions.
-This can be used for a Denial Of Service attack. The IMAP user needs to be authenticated to exploit this
-vulnerability. This affected Apache James prior to version 3.6.1.
-
-*Severity*: Moderate
-
-This issue is being tracked as link:https://issues.apache.org/jira/browse/JAMES-3634[JAMES-3634]
-
-*Mitigation*: We recommend upgrading to Apache James 3.6.1, which fixes this vulnerability.
-
-=== CVE-2021-40525: Apache James: Sieve file storage vulnerable to path traversal attacks
-
-Apache James ManagedSieve implementation alongside with the file storage for sieve scripts is vulnerable
-to path traversal, allowing reading and writing any file.
-
-*Severity*: Moderate
-
-This issue is being tracked as link:https://issues.apache.org/jira/browse/JAMES-3646[JAMES-3646]
-
-*Mitigation*:This vulnerability had been patched in Apache James 3.6.1 and higher. We recommend the upgrade.
-
-This could also be mitigated by ensuring manageSieve is disabled, which is the case by default.
-
-Distributed and Cassandra based products are also not impacted.
-
-=== CVE-2017-12628 Privilege escalation using JMX
-
-The Apache James Server prior version 3.0.1 is vulnerable to Java deserialization issues.
-One can use this for privilege escalation.
-This issue can be mitigated by:
-
- - Upgrading to James 3.0.1 onward
- - Using a recent JRE (Exploit could not be reproduced on OpenJdk 8 u141)
- - Exposing JMX socket only to localhost (default behaviour)
- - Possibly running James in a container
- - Disabling JMX all-together (Guice only)
-
-Read more link:http://james.apache.org//james/update/2017/10/20/james-3.0.1.html[here].
\ No newline at end of file
+:xref-base: distributed
+:backend-name: Cassandra
+include::partial$operate/security.adoc[]
diff --git a/docs/modules/servers/pages/distributed/operate/webadmin.adoc b/docs/modules/servers/pages/distributed/operate/webadmin.adoc
index 8a452f37556..eccbabc759b 100644
--- a/docs/modules/servers/pages/distributed/operate/webadmin.adoc
+++ b/docs/modules/servers/pages/distributed/operate/webadmin.adoc
@@ -1,4291 +1,13 @@
= Distributed James Server — WebAdmin REST administration API
:navtitle: WebAdmin REST administration API
-The web administration supports for now the CRUD operations on the domains, the users, their mailboxes and their quotas,
- managing mail repositories, performing cassandra migrations, and much more, as described in the following sections.
-
-*WARNING*: This API allow authentication only via the use of JWT. If not
-configured with JWT, an administrator should ensure an attacker can not
-use this API.
-
-By the way, some endpoints are not filtered by authentication. Those endpoints are not related to data stored in James,
-for example: Swagger documentation & James health checks.
-
-In case of any error, the system will return an error message which is
-json format like this:
-
-....
-{
- statusCode: ,
- type: ,
- message:
- cause:
-}
-....
-
-Also be aware that, in case things go wrong, all endpoints might return
-a 500 internal error (with a JSON body formatted as exposed above). To
-avoid information duplication, this is omitted on endpoint specific
-documentation.
-
-Finally, please note that in case of a malformed URL the 400 bad request
-response will contain an HTML body.
-
-== HealthCheck
-
-=== Check all components
-
-This endpoint is simple for now and is just returning the http status
-code corresponding to the state of checks (see below). The user has to
-check in the logs in order to have more information about failing
-checks.
-
-....
-curl -XGET http://ip:port/healthcheck
-....
-
-Will return a list of healthChecks execution result, with an aggregated
-result:
-
-....
-{
- "status": "healthy",
- "checks": [
- {
- "componentName": "Cassandra backend",
- "escapedComponentName": "Cassandra%20backend",
- "status": "healthy"
- "cause": null
- }
- ]
-}
-....
-
-*status* field can be:
-
-* *healthy*: Component works normally
-* *degraded*: Component works in degraded mode. Some non-critical
-services may not be working, or latencies are high, for example. Cause
-contains explanations.
-* *unhealthy*: The component is currently not working. Cause contains
-explanations.
-
-Supported health checks include:
-
-* *Cassandra backend*: Cassandra storage.
-* *OpenSearch Backend*: OpenSearch storage.
-* *EventDeadLettersHealthCheck*
-* *Guice application lifecycle*
-* *JPA Backend*: JPA storage.
-* *MailReceptionCheck* We rely on a configured user, send an email to him and
-assert that the email is well received, and can be read within the given configured
-period. Unhealthy means that the email could not be received before reaching the timeout.
-* *MessageFastViewProjection* Health check of the component storing JMAP properties
-which are fast to retrieve. Those properties are computed in advance
-from messages and persisted in order to achieve better performance.
-There are some latencies between a source update and its projections
-updates. Incoherency problems arise when reads are performed in this
-time-window. We piggyback the projection update on missed JMAP read in
-order to decrease the outdated time window for a given entry. The health
-is determined by the ratio of missed projection reads. (lower than 10%
-causes `degraded`)
-* *RabbitMQ backend*: RabbitMQ messaging.
-
-Response codes:
-
-* 200: All checks have answered with a Healthy or Degraded status. James
-services can still be used.
-* 503: At least one check has answered with an Unhealthy status
-
-=== Check single component
-
-Performs a health check for the given component. The component is
-referenced by its URL encoded name.
-
-....
-curl -XGET http://ip:port/healthcheck/checks/Cassandra%20backend
-....
-
-Will return the component’s name, the component’s escaped name, the
-health status and a cause.
-
-....
-{
- "componentName": "Cassandra backend",
- "escapedComponentName": "Cassandra%20backend",
- "status": "healthy"
- "cause": null
-}
-....
-
-Response codes:
-
-* 200: The check has answered with a Healthy or Degraded status.
-* 404: A component with the given name was not found.
-* 503: The check has answered with an Unhealthy status.
-
-=== List all health checks
-
-This endpoint lists all the available health checks.
-
-....
-curl -XGET http://ip:port/healthcheck/checks
-....
-
-Will return the list of all available health checks.
-
-....
-[
- {
- "componentName": "Cassandra backend",
- "escapedComponentName": "Cassandra%20backend"
- }
-]
-....
-
-Response codes:
-
-* 200: List of available health checks
-
-== Task management
-
-Some webadmin features schedule tasks. The task management API allow to
-monitor and manage the execution of the following tasks.
-
-Note that the `taskId` used in the following APIs is returned by other
-WebAdmin APIs scheduling tasks.
-
-=== Getting a task details
-
-....
-curl -XGET http://ip:port/tasks/3294a976-ce63-491e-bd52-1b6f465ed7a2
-....
-
-An Execution Report will be returned:
-
-....
-{
- "submitDate": "2017-12-27T15:15:24.805+0700",
- "startedDate": "2017-12-27T15:15:24.809+0700",
- "completedDate": "2017-12-27T15:15:24.815+0700",
- "cancelledDate": null,
- "failedDate": null,
- "taskId": "3294a976-ce63-491e-bd52-1b6f465ed7a2",
- "additionalInformation": {},
- "status": "completed",
- "type": "type-of-the-task"
-}
-....
-
-Note that:
-
-* `status` can have the value:
-** `waiting`: The task is scheduled but its execution did not start yet
-** `inProgress`: The task is currently executed
-** `cancelled`: The task had been cancelled
-** `completed`: The task execution is finished, and this execution is a
-success
-** `failed`: The task execution is finished, and this execution is a
-failure
-* `additionalInformation` is a task specific object giving additional
-information and context about that task. The structure of this
-`additionalInformation` field is provided along the specific task
-submission endpoint.
-
-Response codes:
-
-* 200: The specific task was found and the execution report exposed
-above is returned
-* 400: Invalid task ID
-* 404: Task ID was not found
-
-=== Awaiting a task
-
-One can await the end of a task, then receive its final execution
-report.
-
-That feature is especially useful for testing purpose but still can
-serve real-life scenario.
-
-....
-curl -XGET http://ip:port/tasks/3294a976-ce63-491e-bd52-1b6f465ed7a2/await?timeout=duration
-....
-
-An Execution Report will be returned.
-
-`timeout` is optional. By default it is set to 365 days (the maximum
-value). The expected value is expressed in the following format:
-`Nunit`. `N` should be strictly positive. `unit` could be either in the
-short form (`s`, `m`, `h`, etc.), or in the long form (`day`, `week`,
-`month`, etc.).
-
-Examples:
-
-* `30s`
-* `5m`
-* `7d`
-* `1y`
-
-Response codes:
-
-* 200: The specific task was found and the execution report exposed
-above is returned
-* 400: Invalid task ID or invalid timeout
-* 404: Task ID was not found
-* 408: The timeout has been reached
-
-=== Cancelling a task
-
-You can cancel a task by calling:
-
-....
-curl -XDELETE http://ip:port/tasks/3294a976-ce63-491e-bd52-1b6f465ed7a2
-....
-
-Response codes:
-
-* 204: Task had been cancelled
-* 400: Invalid task ID
-
-=== Listing tasks
-
-A list of all tasks can be retrieved:
-
-....
-curl -XGET http://ip:port/tasks
-....
-
-Will return a list of Execution reports
-
-One can filter the above results by status. For example:
-
-....
-curl -XGET http://ip:port/tasks?status=inProgress
-....
-
-Will return a list of Execution reports that are currently in progress. This list is sorted by
-reverse submitted date (recent tasks goes first).
-
-Response codes:
-
-* 200: A list of corresponding tasks is returned
-* 400: Invalid status value
-
-Additional optional task parameters are supported:
-
-- `status` one of `waiting`, `inProgress`, `canceledRequested`, `completed`, `canceled`, `failed`. Only
-tasks with the given status are returned.
-- `type`: only tasks with the given type are returned.
-- `submittedBefore`: Date. Returns only tasks submitted before this date.
-- `submittedAfter`: Date. Returns only tasks submitted after this date.
-- `startedBefore`: Date. Returns only tasks started before this date.
-- `startedAfter`: Date. Returns only tasks started after this date.
-- `completedBefore`: Date. Returns only tasks completed before this date.
-- `completedAfter`: Date. Returns only tasks completed after this date.
-- `failedBefore`: Date. Returns only tasks failed before this date.
-- `failedAfter`: Date. Returns only tasks failed after this date.
-- `offset`: Integer, number of tasks to skip in the response. Useful for paging.
-- `limit`: Integer, maximum number of tasks to return in one call
-
-Example of date format: `2023-04-15T07:23:27.541254+07:00` and `2023-04-15T07%3A23%3A27.541254%2B07%3A00` once URL encoded.
-
-=== Endpoints returning a task
-
-Many endpoints do generate a task.
-
-Example:
-
-....
-curl -XPOST /endpoint?action={action}
-....
-
-The response to these requests will be the scheduled `taskId` :
-
-....
-{"taskId":"5641376-02ed-47bd-bcc7-76ff6262d92a"}
-....
-
-Positioned headers:
-
-* Location header indicates the location of the resource associated with
-the scheduled task. Example:
-
-....
-Location: /tasks/3294a976-ce63-491e-bd52-1b6f465ed7a2
-....
-
-Response codes:
-
-* 201: Task generation succeeded. Corresponding task id is returned.
-* Other response codes might be returned depending on the endpoint
-
-The additional information returned depends on the scheduled task type
-and is documented in the endpoint documentation.
-
-== Administrating domains
-
-=== Create a domain
-
-....
-curl -XPUT http://ip:port/domains/domainToBeCreated
-....
-
-Resource name domainToBeCreated:
-
-* can not be null or empty
-* can not contain `@'
-* can not be more than 255 characters
-* can not contain `/'
-
-Response codes:
-
-* 204: The domain was successfully added
-* 400: The domain name is invalid
-
-=== Delete a domain
-
-....
-curl -XDELETE http://ip:port/domains/{domainToBeDeleted}
-....
-
-Note: Deletion of an auto-detected domain, default domain or of an
-auto-detected ip is not supported. We encourage you instead to review
-your https://james.apache.org/server/config-domainlist.html[domain list
-configuration].
-
-Response codes:
-
-* 204: The domain was successfully removed
-
-=== Test if a domain exists
-
-....
-curl -XGET http://ip:port/domains/{domainName}
-....
-
-Response codes:
-
-* 204: The domain exists
-* 404: The domain does not exist
-
-=== Get the list of domains
-
-....
-curl -XGET http://ip:port/domains
-....
-
-Possible response:
-
-....
-["domain1", "domain2"]
-....
-
-Response codes:
-
-* 200: The domain list was successfully retrieved
-
-=== Get the list of aliases for a domain
-
-....
-curl -XGET http://ip:port/domains/destination.domain.tld/aliases
-....
-
-Possible response:
-
-....
-[
- {"source": "source1.domain.tld"},
- {"source": "source2.domain.tld"}
-]
-....
-
-When sending an email to an email address having `source1.domain.tld` or
-`source2.domain.tld` as a domain part (example:
-`user@source1.domain.tld`), then the domain part will be rewritten into
-destination.domain.tld (so into `user@destination.domain.tld`).
-
-Response codes:
-
-* 200: The domain aliases were successfully retrieved
-* 400: destination.domain.tld has an invalid syntax
-* 404: destination.domain.tld is not part of handled domains and does
-not have local domains as aliases.
-
-=== Create an alias for a domain
-
-To create a domain alias execute the following query:
-
-....
-curl -XPUT http://ip:port/domains/destination.domain.tld/aliases/source.domain.tld
-....
-
-When sending an email to an email address having `source.domain.tld` as
-a domain part (example: `user@source.domain.tld`), then the domain part
-will be rewritten into `destination.domain.tld` (so into
-`user@destination.domain.tld`).
-
-Response codes:
-
-* 204: The redirection now exists
-* 400: `source.domain.tld` or `destination.domain.tld` have an invalid
-syntax
-* 400: `source, domain` and `destination domain` are the same
-* 404: `source.domain.tld` are not part of handled domains.
-
-Be aware that no checks to find possible loops that would result of this creation will be performed.
-
-=== Delete an alias for a domain
-
-To delete a domain alias execute the following query:
-
-....
-curl -XDELETE http://ip:port/domains/destination.domain.tld/aliases/source.domain.tld
-....
-
-When sending an email to an email address having `source.domain.tld` as
-a domain part (example: `user@source.domain.tld`), then the domain part
-will be rewritten into `destination.domain.tld` (so into
-`user@destination.domain.tld`).
-
-Response codes:
-
-* 204: The redirection now no longer exists
-* 400: `source.domain.tld` or destination.domain.tld have an invalid
-syntax
-* 400: source, domain and destination domain are the same
-* 404: `source.domain.tld` are not part of handled domains.
-
-=== Delete all users data of a domain
-
-....
-curl -XPOST http://ip:port/domains/{domainToBeUsed}?action=deleteData
-....
-
-Would create a task that deletes data of all users of the domain.
-
-[More details about endpoints returning a task](#_endpoints_returning_a_task).
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Error in the request. Details can be found in the reported error.
-
-The scheduled task will have the following type `DeleteUsersDataOfDomainTask` and the following `additionalInformation`:
-
-....
-{
- "type": "DeleteUsersDataOfDomainTask",
- "domain": "domain.tld",
- "successfulUsersCount": 2,
- "failedUsersCount": 1,
- "failedUsers": ["faileduser@domain.tld"],
- "timestamp": "2023-05-22T08:52:47.076261Z"
-}
-....
-
-Notes: `failedUsers` only lists maximum 100 failed users.
-
-== Administrating users
-
-=== Create a user
-
-....
-curl -XPUT http://ip:port/users/usernameToBeUsed \
- -d '{"password":"passwordToBeUsed"}' \
- -H "Content-Type: application/json"
-....
-
-Resource name usernameToBeUsed representing valid users, hence it should
-match the criteria at xref:distributed/configure/usersrepository.adoc[User Repositories documentation]
-
-Response codes:
-
-* 204: The user was successfully created
-* 400: The user name or the payload is invalid
-* 409: The user name already exists
-
-Note: If the user exists already, its password cannot be updated using this.
-If you want to update a user's password, please have a look at *Update a user password* below.
-
-=== Updating a user password
-
-....
-curl -XPUT http://ip:port/users/usernameToBeUsed?force \
- -d '{"password":"passwordToBeUsed"}' \
- -H "Content-Type: application/json"
-....
-
-Response codes:
-
-- 204: The user's password was successfully updated
-- 400: The user name or the payload is invalid
-
-This also can be used to create a new user.
-
-=== Verifying a user password
-
-....
-curl -XPOST http://ip:port/users/usernameToBeUsed/verify \
- -d '{"password":"passwordToBeVerified"}' \
- -H "Content-Type: application/json"
-....
-
-Response codes:
-
-- 204: The user's password was correct
-- 401: Wrong password or user does not exist
-- 400: The user name or the payload is invalid
-
-This intentionally treats non-existing users as unauthenticated, to prevent a username oracle attack.
-
-=== Testing a user existence
-
-....
-curl -XHEAD http://ip:port/users/usernameToBeUsed
-....
-
-Resource name ``usernameToBeUsed'' represents a valid user, hence it
-should match the criteria at xref:distributed/configure/usersrepository.adoc[User Repositories documentation]
-
-Response codes:
-
-* 200: The user exists
-* 400: The user name is invalid
-* 404: The user does not exist
-
-=== Deleting a user
-
-....
-curl -XDELETE http://ip:port/users/{userToBeDeleted}
-....
-
-Response codes:
-
-* 204: The user was successfully deleted
-
-=== Retrieving the user list
-
-....
-curl -XGET http://ip:port/users
-....
-
-The answer looks like:
-
-....
-[{"username":"username@domain-jmapauthentication.tld"},{"username":"username@domain.tld"}]
-....
-
-Response codes:
-
-* 200: The user name list was successfully retrieved
-
-=== Retrieving the list of allowed `From` headers for a given user
-
-This endpoint allows to know which From headers a given user is allowed to use when sending mails.
-
-....
-curl -XGET http://ip:port/users/givenUser/allowedFromHeaders
-....
-
-The answer looks like:
-
-....
-["user@domain.tld","alias@domain.tld"]
-....
-
-Response codes:
-
-* 200: The list was successfully retrieved
-* 400: The user is invalid
-* 404: The user is unknown
-
-=== Add a delegated user of a base user
-
-....
-curl -XPUT http://ip:port/users/baseUser/authorizedUsers/delegatedUser
-....
-
-Response codes:
-
-* 200: Addition of the delegated user succeeded
-* 404: The base user does not exist
-* 400: The delegated user does not exist
-
-Note: Delegation is only available on top of Cassandra products and not implemented yet on top of JPA backends.
-
-=== Remove a delegated user of a base user
-
-....
-curl -XDELETE http://ip:port/users/baseUser/authorizedUsers/delegatedUser
-....
-
-Response codes:
-
-* 200: Removal of the delegated user succeeded
-* 404: The base user does not exist
-* 400: The delegated user does not exist
-
-Note: Delegation is only available on top of Cassandra products and not implemented yet on top of JPA backends.
-
-=== Retrieving the list of delegated users of a base user
-
-....
-curl -XGET http://ip:port/users/baseUser/authorizedUsers
-....
-
-The answer looks like:
-
-....
-["alice@domain.tld","bob@domain.tld"]
-....
-
-Response codes:
-
-* 200: The list was successfully retrieved
-* 404: The base user does not exist
-
-Note: Delegation is only available on top of Cassandra products and not implemented yet on top of JPA backends.
-
-=== Remove all delegated users of a base user
-
-....
-curl -XDELETE http://ip:port/users/baseUser/authorizedUsers
-....
-
-Response codes:
-
-* 200: Removal of the delegated users succeeded
-* 404: The base user does not exist
-
-Note: Delegation is only available on top of Cassandra products and not implemented yet on top of JPA backends.
-
-=== Change a username
-
-....
-curl -XPOST http://ip:port/users/oldUser/rename/newUser?action=rename
-....
-
-Would migrate account data from `oldUser` to `newUser`.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Implemented migration steps are:
-
- - `ForwardUsernameChangeTaskStep`: creates forward from old user to new user and migrates existing forwards
- - `FilterUsernameChangeTaskStep`: migrates users filtering rules
- - `DelegationUsernameChangeTaskStep`: migrates delegations where the impacted user is either delegatee or delegator
- - `MailboxUsernameChangeTaskStep`: migrates mailboxes belonging to the old user to the account of the new user. It also
- migrates user's mailbox subscriptions.
- - `ACLUsernameChangeTaskStep`: migrates ACLs on mailboxes the migrated user has access to and updates subscriptions accordingly.
- - `QuotaUsernameChangeTaskStep`: migrates quotas user from old user to new user.
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Error in the request. Details can be found in the reported error. If you encounter the error "'oldUser' parameter should be an existing user," please note that this validation can be bypassed by specifying the `force` query parameter.
-
-The `fromStep` query parameter allows skipping previous steps, allowing to resume the username change from a failed step.
-
-The scheduled task will have the following type `UsernameChangeTask` and the following `additionalInformation`:
-
-....
-{
- "type": "UsernameChangeTask",
- "oldUser": "jessy.jones@domain.tld",
- "newUser": "jessy.smith@domain.tld",
- "status": {
- "A": "DONE",
- "B": "FAILED",
- "C": "ABORTED"
- },
- "fromStep": null,
- "timestamp": "2023-02-17T02:54:01.246477Z"
-}
-....
-
-Valid status includes:
-
- - `SKIPPED`: bypassed via `fromStep` setting
- - `WAITING`: Awaits execution
- - `IN_PROGRESS`: Currently executed
- - `FAILED`: Error encountered while executing this step. Check the logs.
- - `ABORTED`: Won't be executed because of previous step failures.
-
-=== Delete data of a user
-
-....
-curl -XPOST http://ip:port/users/usernameToBeUsed?action=deleteData
-....
-
-Would create a task that deletes data of the user.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning a task].
-
-Implemented deletion steps are:
-
- - `RecipientRewriteTableUserDeletionTaskStep`: deletes all rewriting rules related to this user.
- - `FilterUserDeletionTaskStep`: deletes all filters belonging to the user.
- - `DelegationUserDeletionTaskStep`: deletes all delegations from / to the user.
- - `MailboxUserDeletionTaskStep`: deletes mailboxes of this user, all ACLs of this user, as well as his subscriptions.
- - `WebPushUserDeletionTaskStep`: deletes push data registered for this user.
- - `IdentityUserDeletionTaskStep`: deletes identities registered for this user.
- - `VacationUserDeletionTaskStep`: deletes vacations registered for this user.
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Error in the request. Details can be found in the reported error.
-
-The `fromStep` query parameter allows skipping previous steps, allowing to resume the user data deletion from a failed step.
-
-The scheduled task will have the following type `DeleteUserDataTask` and the following `additionalInformation`:
-
-....
-{
- "type": "DeleteUserDataTask",
- "username": "jessy.jones@domain.tld",
- "status": {
- "A": "DONE",
- "B": "FAILED",
- "C": "ABORTED"
- },
- "fromStep": null,
- "timestamp": "2023-02-17T02:54:01.246477Z"
-}
-....
-
-Valid status includes:
-
- - `SKIPPED`: bypassed via `fromStep` setting
- - `WAITING`: Awaits execution
- - `IN_PROGRESS`: Currently executed
- - `FAILED`: Error encountered while executing this step. Check the logs.
- - `ABORTED`: Won't be executed because of previous step failures.
-
-=== Retrieving the user identities
-
-....
-curl -XGET http://ip:port/users/{baseUser}/identities?default=true
-....
-
-API to get the list of identities of a user
-
-The response will look like:
-
-```
-[
- {
- "name":"identity name 1",
- "email":"bob@domain.tld",
- "id":"4c039533-75b9-45db-becc-01fb0e747aa8",
- "mayDelete":true,
- "textSignature":"textSignature 1",
- "htmlSignature":"htmlSignature 1",
- "sortOrder":1,
- "bcc":[
- {
- "emailerName":"bcc name 1",
- "mailAddress":"bcc1@domain.org"
- }
- ],
- "replyTo":[
- {
- "emailerName":"reply name 1",
- "mailAddress":"reply1@domain.org"
- }
- ]
- }
-]
-```
-
-Query parameters:
-
-* default: (Optional) allows getting the default identity of a user. In order to do that: `default=true`
-
-Response codes:
-
-* 200: The list was successfully retrieved
-* 400: The user is invalid
-* 404: The user is unknown or the default identity can not be found.
-
-The optional `default` query parameter allows getting the default identity of a user.
-In order to do that: `default=true`
-
-The web-admin server will return `404` response code when the default identity can not be found.
-
-=== Creating a JMAP user identity
-
-API to create a new JMAP user identity
-....
-curl -XPOST http://ip:port/users/{username}/identities \
--d '{
- "name": "Bob",
- "email": "bob@domain.tld",
- "mayDelete": true,
- "htmlSignature": "a html signature",
- "textSignature": "a text signature",
- "bcc": [{
- "email": "boss2@domain.tld",
- "name": "My Boss 2"
- }],
- "replyTo": [{
- "email": "boss@domain.tld",
- "name": "My Boss"
- }],
- "sortOrder": 0
- }' \
--H "Content-Type: application/json"
-....
-
-Response codes:
-
-* 201: The new identity was successfully created
-* 404: The username is unknown
-* 400: The payload is invalid
-
-Resource name ``username'' represents a valid user
-
-=== Updating a JMAP user identity
-
-API to update an existing JMAP user identity
-....
-curl -XPUT http://ip:port/users/{username}/identities/{identityId} \
--d '{
- "name": "Bob",
- "htmlSignature": "a html signature",
- "textSignature": "a text signature",
- "bcc": [{
- "email": "boss2@domain.tld",
- "name": "My Boss 2"
- }],
- "replyTo": [{
- "email": "boss@domain.tld",
- "name": "My Boss"
- }],
- "sortOrder": 1
- }' \
--H "Content-Type: application/json"
-....
-
-Response codes:
-
-* 204: The identity was successfully updated
-* 404: The username is unknown
-* 400: The payload is invalid
-
-Resource name ``username'' represents a valid user
-Resource name ``identityId'' represents an existing user identity
-
-== Administrating vacation settings
-
-=== Get vacation settings
-
-....
-curl -XGET http://ip:port/vacation/usernameToBeUsed
-....
-
-Resource name usernameToBeUsed representing valid users, hence it should
-match the criteria at xref:distributed/configure/usersrepository.adoc[User Repositories documentation]
-
-The response will look like this:
-
-....
-{
- "enabled": true,
- "fromDate": "2021-09-20T10:00:00Z",
- "toDate": "2021-09-27T18:00:00Z",
- "subject": "Out of office",
- "textBody": "I am on vacation, will be back soon.",
-  "htmlBody": "<p>I am on vacation, will be back soon.</p>"
-}
-....
-
-Response codes:
-
-* 200: The vacation settings were successfully retrieved
-* 404: The user name is unknown
-
-=== Update vacation settings
-
-....
-curl -XPOST http://ip:port/vacation/usernameToBeUsed
-....
-
-Request body must be a JSON structure as described above.
-
-If any field is not set in the request, the corresponding field in the existing vacation message is left unchanged.
-
-Response codes:
-
-* 204: The vacation settings were successfully updated
-* 404: The user name is unknown
-* 400: The payload is invalid
-
-=== Delete vacation settings
-
-....
-curl -XDELETE http://ip:port/vacation/usernameToBeUsed
-....
-
-For convenience, this disables and clears the existing vacation settings of the user.
-
-Response codes:
-
-* 204: The vacation settings were successfully disabled
-* 404: The user name is unknown
-
-== Administrating mailboxes
-
-=== All mailboxes
-
-Several actions can be performed on the server mailboxes.
-
-Request pattern is:
-
-....
-curl -XPOST /mailboxes?action={action1},...
-....
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Error in the request. Details can be found in the reported error.
-
-The kind of task scheduled depends on the action parameter. See below
-for details.
-
-==== Fixing mailboxes inconsistencies
-
-....
-curl -XPOST /mailboxes?task=SolveInconsistencies
-....
-
-Will schedule a task for fixing inconsistencies for the mailbox
-deduplicated object stored in Cassandra.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-The `I-KNOW-WHAT-I-M-DOING` header is mandatory (you can read more
-information about it in the warning section below).
-
-The scheduled task will have the following type
-`solve-mailbox-inconsistencies` and the following
-`additionalInformation`:
-
-....
-{
- "type":"solve-mailbox-inconsistencies",
- "processedMailboxEntries": 3,
- "processedMailboxPathEntries": 3,
- "fixedInconsistencies": 2,
- "errors": 1,
- "conflictingEntries":[{
- "mailboxDaoEntry":{
- "mailboxPath":"#private:user:mailboxName",
- "mailboxId":"464765a0-e4e7-11e4-aba4-710c1de3782b"
- }," +
- "mailboxPathDaoEntry":{
- "mailboxPath":"#private:user:mailboxName2",
- "mailboxId":"464765a0-e4e7-11e4-aba4-710c1de3782b"
- }
- }]
-}
-....
-
-Note that conflicting entry inconsistencies will not be fixed and will
-require to explicitly use link:#_correcting_ghost_mailbox[ghost mailbox]
-endpoint in order to merge the conflicting mailboxes and prevent any
-message loss.
-
-*WARNING*: this task can cancel concurrently running legitimate user
-operations upon dirty read. As such this task should be run offline.
-
-A dirty read is when data is read between the two writes of the
-denormalization operations (no isolation).
-
-In order to ensure being offline, stop the traffic on SMTP, JMAP and
-IMAP ports, for example via re-configuration or firewall rules.
-
-Due to all of those risks, a `I-KNOW-WHAT-I-M-DOING` header should be
-positioned to `ALL-SERVICES-ARE-OFFLINE` in order to prevent accidental
-calls.
-
-==== Recomputing mailbox counters
-
-....
-curl -XPOST /mailboxes?task=RecomputeMailboxCounters
-....
-
-Will recompute counters (unseen & total count) for the mailbox object
-stored in Cassandra.
-
-Cassandra maintains a per mailbox projection for message count and
-unseen message count. As with any projection, it can go out of sync,
-leading to inconsistent results being returned to the client.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-The scheduled task will have the following type
-`recompute-mailbox-counters` and the following `additionalInformation`:
-
-....
-{
- "type":"recompute-mailbox-counters",
- "processedMailboxes": 3,
- "failedMailboxes": ["464765a0-e4e7-11e4-aba4-710c1de3782b"]
-}
-....
-
-Note that conflicting inconsistencies entries will not be fixed and will
-require to explicitly use link:#_correcting_ghost_mailbox[ghost mailbox]
-endpoint in order to merge the conflicting mailboxes and prevent any
-message loss.
-
-*WARNING*: this task does not take into account concurrent modifications
-upon a single mailbox counter recomputation. Rerunning the task will
-_eventually_ provide the consistent result. As such we advise to run
-this task offline.
-
-In order to ensure being offline, stop the traffic on SMTP, JMAP and
-IMAP ports, for example via re-configuration or firewall rules.
-
-`trustMessageProjection` query parameter can be set to `true`. Content
-of `messageIdTable` (listing messages by their mailbox context) table
-will be trusted and not compared against content of `imapUidTable` table
-(listing messages by their messageId mailbox independent identifier).
-This will result in a better performance running the task at the cost of
-safety in the face of message denormalization inconsistencies.
-
-Defaults to false, which generates additional checks. You can read
-https://github.com/apache/james-project/blob/master/src/adr/0022-cassandra-message-inconsistency.md[this
-ADR] to better understand the message projection and how it can become
-inconsistent.
-
-==== Recomputing Global JMAP fast message view projection
-
-Message fast view projection stores message properties expected to be
-fast to fetch but are actually expensive to compute, in order for
-GetMessages operation to be fast to execute for these properties.
-
-These projection items are asynchronously computed on mailbox events.
-
-You can force the full projection recomputation by calling the following
-endpoint:
-
-....
-curl -XPOST /mailboxes?task=recomputeFastViewProjectionItems
-....
-
-Will schedule a task for recomputing the fast message view projection
-for all mailboxes.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-An admin can specify the concurrency that should be used when running
-the task:
-
-* `messagesPerSecond` rate at which messages should be processed, per
-second. Defaults to 10.
-
-This optional parameter must have a strictly positive integer as a value
-and be passed as query parameters.
-
-Example:
-
-....
-curl -XPOST /mailboxes?task=recomputeFastViewProjectionItems&messagesPerSecond=20
-....
-
-The scheduled task will have the following type
-`RecomputeAllFastViewProjectionItemsTask` and the following
-`additionalInformation`:
-
-....
-{
- "type":"RecomputeAllPreviewsTask",
- "processedUserCount": 3,
- "processedMessageCount": 3,
- "failedUserCount": 2,
- "failedMessageCount": 1,
- "runningOptions": {
- "messagesPerSecond":20
- }
-}
-....
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Error in the request. Details can be found in the reported error.
-
-==== Populate email query view
-
-Email query view is an optional projection to offload common JMAP `Email/query` requests used for listing mails on Cassandra
-and not on the search index thus improving the overall reliability / performance on this operation.
-
-These projection items are asynchronously computed on mailbox events.
-
-You can populate this projection with the following request:
-
-....
-curl -XPOST /mailboxes?task=populateEmailQueryView
-....
-
-Will schedule a task for populating the email query view
-for all mailboxes.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-An admin can specify the concurrency that should be used when running
-the task:
-
-* `messagesPerSecond` rate at which messages should be processed, per
-second. Defaults to 10.
-
-This optional parameter must have a strictly positive integer as a value
-and be passed as query parameters.
-
-Example:
-
-....
-curl -XPOST /mailboxes?task=populateEmailQueryView&messagesPerSecond=20
-....
-
-The scheduled task will have the following type
-`PopulateEmailQueryViewTask` and the following
-`additionalInformation`:
-
-....
-{
- "type":"PopulateEmailQueryViewTask",
- "processedUserCount": 3,
- "processedMessageCount": 3,
- "failedUserCount": 2,
- "failedMessageCount": 1,
- "runningOptions": {
- "messagesPerSecond":20
- }
-}
-....
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Error in the request. Details can be found in the reported error.
-
-==== Recomputing Cassandra filtering projection
-
-You can force the reset of the Cassandra filtering projection by calling the following
-endpoint:
-
-....
-curl -XPOST /mailboxes?task=populateFilteringProjection
-....
-
-Will schedule a task.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-The scheduled task will have the following type
-`PopulateFilteringProjectionTask` and the following
-`additionalInformation`:
-
-....
-{
- "type":"PopulateFilteringProjectionTask",
- "processedUserCount": 3,
- "failedUserCount": 2
-}
-....
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Error in the request. Details can be found in the reported error.
-
-==== ReIndexing action
-
-Be also aware of the limits of this API:
-
-Warning: During the re-indexing, the result of search operations might
-be altered.
-
-Warning: Canceling this task should be considered unsafe as it will
-leave the currently reIndexed mailbox as partially indexed.
-
-Warning: While we have been trying to reduce the inconsistency window to
-a maximum (by keeping track of ongoing events), concurrent changes done
-during the reIndexing might be ignored.
-
-===== ReIndexing all mails
-
-....
-curl -XPOST http://ip:port/mailboxes?task=reIndex
-....
-
-Will schedule a task for reIndexing all the mails stored on this James
-server.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-An admin can specify the concurrency that should be used when running
-the task:
-
-* `messagesPerSecond` rate at which messages should be processed per
-second. Default is 50.
-
-This optional parameter must have a strictly positive integer as a value
-and be passed as query parameter.
-
-An admin can also specify the reindexing mode it wants to use when
-running the task:
-
-* `mode` the reindexing mode used. There are 2 modes for the moment:
-** `rebuildAll` allows to rebuild all indexes. This is the default mode.
-** `fixOutdated` will check for outdated indexed document and reindex
-only those.
-
-This optional parameter must be passed as query parameter.
-
-It’s good to note as well that there is a limitation with the
-`fixOutdated` mode. As we first collect metadata of stored messages to
-compare them with the ones in the index, a failed `expunged` operation
-might not be well corrected (as the message might not exist anymore but
-still be indexed).
-
-Example:
-
- curl -XPOST http://ip:port/mailboxes?task=reIndex&messagesPerSecond=200&mode=rebuildAll
-
-The scheduled task will have the following type `full-reindexing` and
-the following `additionalInformation`:
-
-....
-{
- "type":"full-reindexing",
- "runningOptions":{
- "messagesPerSecond":200,
- "mode":"REBUILD_ALL"
- },
- "successfullyReprocessedMailCount":18,
- "failedReprocessedMailCount": 3,
- "mailboxFailures": ["12", "23" ],
- "messageFailures": [
- {
- "mailboxId": "1",
- "uids": [1, 36]
- }]
-}
-....
-
-===== Fixing previously failed ReIndexing
-
-Will schedule a task for reIndexing all the mails which had failed to be
-indexed from the ReIndexingAllMails task.
-
-Given `bbdb69c9-082a-44b0-a85a-6e33e74287a5` being a `taskId` generated
-for a reIndexing tasks
-
-....
-curl -XPOST 'http://ip:port/mailboxes?task=reIndex&reIndexFailedMessagesOf=bbdb69c9-082a-44b0-a85a-6e33e74287a5'
-....
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-An admin can specify the concurrency that should be used when running
-the task:
-
-* `messagesPerSecond` rate at which messages should be processed per
-second. Default is 50.
-
-This optional parameter must have a strictly positive integer as a value
-and be passed as query parameter.
-
-An admin can also specify the reindexing mode it wants to use when
-running the task:
-
-* `mode` the reindexing mode used. There are 2 modes for the moment:
-** `rebuildAll` allows to rebuild all indexes. This is the default mode.
-** `fixOutdated` will check for outdated indexed document and reindex
-only those.
-
-This optional parameter must be passed as query parameter.
-
-It’s good to note as well that there is a limitation with the
-`fixOutdated` mode. As we first collect metadata of stored messages to
-compare them with the ones in the index, a failed `expunged` operation
-might not be well corrected (as the message might not exist anymore but
-still be indexed).
-
-Example:
-
-....
-curl -XPOST http://ip:port/mailboxes?task=reIndex&reIndexFailedMessagesOf=bbdb69c9-082a-44b0-a85a-6e33e74287a5&messagesPerSecond=200&mode=rebuildAll
-....
-
-The scheduled task will have the following type
-`error-recovery-indexation` and the following `additionalInformation`:
-
-....
-{
- "type":"error-recovery-indexation",
- "runningOptions":{
- "messagesPerSecond":200,
- "mode":"REBUILD_ALL"
- },
- "successfullyReprocessedMailCount":18,
- "failedReprocessedMailCount": 3,
- "mailboxFailures": ["12", "23" ],
- "messageFailures": [{
- "mailboxId": "1",
- "uids": [1, 36]
- }]
-}
-....
-
-===== Create missing parent mailboxes
-
-Will schedule a task for creating all the missing parent mailboxes in a hierarchical mailbox tree, which is the result
-of a partially failed rename operation of a child mailbox.
-
-....
-curl -XPOST http://ip:port/mailboxes?task=createMissingParents
-....
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Error in the request. Details can be found in the reported error.
-
-The scheduled task will have the following type `createMissingParents` and the following `additionalInformation`:
-
-....
-{
- "type":"createMissingParents",
- "created": ["1", "2" ],
- "totalCreated": 2,
- "failures": [],
- "totalFailure": 0
-}
-....
-
-=== Single mailbox
-
-==== ReIndexing a mailbox mails
-
-....
-curl -XPOST http://ip:port/mailboxes/{mailboxId}?task=reIndex
-....
-
-Will schedule a task for reIndexing all the mails in one mailbox.
-
-Note that the `mailboxId` path parameter needs to be an (implementation
-dependent) valid mailboxId.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-An admin can specify the concurrency that should be used when running
-the task:
-
-* `messagesPerSecond` rate at which messages should be processed per
-second. Default is 50.
-
-This optional parameter must have a strictly positive integer as a value
-and be passed as query parameter.
-
-An admin can also specify the reindexing mode it wants to use when
-running the task:
-
-* `mode` the reindexing mode used. There are 2 modes for the moment:
-** `rebuildAll` allows to rebuild all indexes. This is the default mode.
-** `fixOutdated` will check for outdated indexed document and reindex
-only those.
-
-This optional parameter must be passed as query parameter.
-
-It’s good to note as well that there is a limitation with the
-`fixOutdated` mode. As we first collect metadata of stored messages to
-compare them with the ones in the index, a failed `expunged` operation
-might not be well corrected (as the message might not exist anymore but
-still be indexed).
-
-Example:
-
-....
-curl -XPOST http://ip:port/mailboxes/{mailboxId}?task=reIndex&messagesPerSecond=200&mode=fixOutdated
-....
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Error in the request. Details can be found in the reported error.
-
-The scheduled task will have the following type `mailbox-reindexing` and
-the following `additionalInformation`:
-
-....
-{
- "type":"mailbox-reindexing",
- "runningOptions":{
- "messagesPerSecond":200,
- "mode":"FIX_OUTDATED"
- },
- "mailboxId":"{mailboxId}",
- "successfullyReprocessedMailCount":18,
- "failedReprocessedMailCount": 3,
- "mailboxFailures": ["12"],
- "messageFailures": [
- {
- "mailboxId": "1",
- "uids": [1, 36]
- }]
-}
-....
-
-Warning: During the re-indexing, the result of search operations might
-be altered.
-
-Warning: Canceling this task should be considered unsafe as it will
-leave the currently reIndexed mailbox as partially indexed.
-
-Warning: While we have been trying to reduce the inconsistency window to
-a maximum (by keeping track of ongoing events), concurrent changes done
-during the reIndexing might be ignored.
-
-== Administrating Messages
-
-=== ReIndexing a single mail by messageId
-
-....
-curl -XPOST http://ip:port/messages/{messageId}?task=reIndex
-....
-
-Will schedule a task for reIndexing a single email in all the mailboxes
-containing it.
-
-Note that the `messageId` path parameter needs to be an (implementation
-dependent) valid messageId.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Error in the request. Details can be found in the reported error.
-
-The scheduled task will have the following type `messageId-reindexing`
-and the following `additionalInformation`:
-
-....
-{
- "messageId":"18"
-}
-....
-
-Warning: During the re-indexing, the result of search operations might
-be altered.
-
-=== Fixing message inconsistencies
-
-This task is only available on top of Guice Cassandra products.
-
-....
-curl -XPOST /messages?task=SolveInconsistencies
-....
-
-Will schedule a task for fixing message inconsistencies created by the
-message denormalization process.
-
-Messages are denormalized and stored in separated data tables in
-Cassandra, so they can be accessed by their unique identifier or mailbox
-identifier & local mailbox identifier through different protocols.
-
-Failure in the denormalization process will lead to inconsistencies, for
-example:
-
-....
-BOB receives a message
-The denormalization process fails
-BOB can read the message via JMAP
-BOB cannot read the message via IMAP
-
-BOB marks a message as SEEN
-The denormalization process fails
-The message is SEEN via JMAP
-The message is UNSEEN via IMAP
-....
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-An admin can specify the concurrency that should be used when running
-the task:
-
-* `messagesPerSecond` rate of messages to be processed per second.
-Default is 100.
-
-This optional parameter must have a strictly positive integer as a value
-and be passed as query parameter.
-
-An admin can also specify the reindexing mode it wants to use when
-running the task:
-
-* `mode` the reindexing mode used. There are 2 modes for the moment:
-** `rebuildAll` allows to rebuild all indexes. This is the default mode.
-** `fixOutdated` will check for outdated indexed document and reindex
-only those.
-
-This optional parameter must be passed as query parameter.
-
-It’s good to note as well that there is a limitation with the
-`fixOutdated` mode. As we first collect metadata of stored messages to
-compare them with the ones in the index, a failed `expunged` operation
-might not be well corrected (as the message might not exist anymore but
-still be indexed).
-
-Example:
-
-....
-curl -XPOST /messages?task=SolveInconsistencies&messagesPerSecond=200&mode=rebuildAll
-....
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Error in the request. Details can be found in the reported error.
-
-The scheduled task will have the following type
-`solve-message-inconsistencies` and the following
-`additionalInformation`:
-
-....
-{
- "type":"solve-message-inconsistencies",
- "timestamp":"2007-12-03T10:15:30Z",
- "processedImapUidEntries": 2,
- "processedMessageIdEntries": 1,
- "addedMessageIdEntries": 1,
- "updatedMessageIdEntries": 0,
- "removedMessageIdEntries": 1,
- "runningOptions":{
- "messagesPerSecond": 200,
- "mode":"REBUILD_ALL"
- },
- "fixedInconsistencies": [
- {
- "mailboxId": "551f0580-82fb-11ea-970e-f9c83d4cf8c2",
- "messageId": "d2bee791-7e63-11ea-883c-95b84008f979",
- "uid": 1
- },
- {
- "mailboxId": "551f0580-82fb-11ea-970e-f9c83d4cf8c2",
- "messageId": "d2bee792-7e63-11ea-883c-95b84008f979",
- "uid": 2
- }
- ],
- "errors": [
- {
- "mailboxId": "551f0580-82fb-11ea-970e-f9c83d4cf8c2",
- "messageId": "ffffffff-7e63-11ea-883c-95b84008f979",
- "uid": 3
- }
- ]
-}
-....
-
-User actions concurrent to the inconsistency fixing task could result in
-concurrency issues. New inconsistencies could be created.
-
-However the source of truth will not be impacted, hence rerunning the
-task will eventually fix all issues.
-
-This task could be run safely online and can be scheduled on a recurring
-basis outside of peak traffic by an admin to ensure Cassandra message
-consistency.
-
-=== Deleting old messages of all users
-
-*Note:*
-Consider enabling the xref:distributed/configure/vault.adoc[Deleted Messages Vault]
-if you use this feature.
-
-Old messages tend to pile up in user INBOXes. An admin might want to delete
-these on behalf of the users, e.g. all messages older than 30 days:
-....
-curl -XDELETE http://ip:port/messages?olderThan=30d
-....
-
-link:#_endpoints_returning_a_task[More details about endpoints returning a task].
-
-The `olderThan` parameter should be expressed in the following format: `Nunit`.
-`N` should be strictly positive. `unit` could be either in the short form
-(`d`, `w`, `y` etc.), or in the long form (`days`, `weeks`, `months`, `years`).
-The default unit is `days`.
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Error in the request. Details can be found in the reported error.
-
-The scheduled task will have the type `ExpireMailboxTask` and the following `additionalInformation`:
-
-....
-{
- "type": "ExpireMailboxTask",
- "mailboxesExpired": 5,
- "mailboxesFailed": 2,
- "mailboxesProcessed": 10,
- "messagesDeleted": 23
-}
-....
-
-To delete old mails from a different mailbox than INBOX, e.g. a mailbox
-named "Archived" :
-....
-curl -XDELETE http://ip:port/messages?mailbox=Archived&olderThan=30d
-....
-
-Since this is a somewhat expensive operation, the task is throttled to one user
-per second. You may speed it up via `usersPerSecond=10` for example. But keep
-in mind that a high rate might overwhelm your database or blob store.
-
-*Scanning search only:* (unsupported for Lucene and OpenSearch search implementations) +
-Some mail clients can add an `Expires` header (RFC 4021) to their messages.
-Instead of specifying an absolute age, you may choose to delete only such
-messages where the expiration date from this header lies in the past:
-....
-curl -XDELETE http://ip:port/messages?byExpiresHeader
-....
-In this case you should also add the xref:distributed/configure/mailets.adoc[mailet]
-`Expires` to your mailet container, which can sanitize expiration date headers.
-
-
-== Administrating user mailboxes
-
-=== Creating a mailbox
-
-....
-curl -XPUT http://ip:port/users/{usernameToBeUsed}/mailboxes/{mailboxNameToBeCreated}
-....
-
-Resource name `usernameToBeUsed` should be an existing user. Resource
-name `mailboxNameToBeCreated` should not be empty, nor contain % * characters, nor starting with #.
-
-Response codes:
-
-* 204: The mailbox now exists on the server
-* 400: Invalid mailbox name
-* 404: The user name does not exist. Note that this check can be bypassed by specifying the `force` query parameter.
-
-To create nested mailboxes, for instance a work mailbox inside the INBOX
-mailbox, people should use the . separator. The sample query is:
-
-....
-curl -XPUT http://ip:port/users/{usernameToBeUsed}/mailboxes/INBOX.work
-....
-
-=== Deleting a mailbox and its children
-
-....
-curl -XDELETE http://ip:port/users/{usernameToBeUsed}/mailboxes/{mailboxNameToBeDeleted}
-....
-
-Resource name `usernameToBeUsed` should be an existing user. Resource
-name `mailboxNameToBeDeleted` should not be empty
-
-Response codes:
-
-* 204: The mailbox now does not exist on the server
-* 400: Invalid mailbox name
-* 404: The user name does not exist. Note that this check can be bypassed by specifying the `force` query parameter.
-
-=== Testing existence of a mailbox
-
-....
-curl -XGET http://ip:port/users/{usernameToBeUsed}/mailboxes/{mailboxNameToBeTested}
-....
-
-Resource name `usernameToBeUsed` should be an existing user. Resource
-name `mailboxNameToBeTested` should not be empty
-
-Response codes:
-
-* 204: The mailbox exists
-* 400: Invalid mailbox name
-* 404: The user name does not exist, the mailbox does not exist
-
-=== Listing user mailboxes
-
-....
-curl -XGET http://ip:port/users/{usernameToBeUsed}/mailboxes
-....
-
-The answer looks like:
-
-....
-[{"mailboxName":"INBOX"},{"mailboxName":"outbox"}]
-....
-
-Resource name `usernameToBeUsed` should be an existing user
-
-Response codes:
-
-* 200: The mailboxes list was successfully retrieved
-* 404: The user name does not exist, the mailbox does not exist. Note that this check can be bypassed by specifying the `force` query parameter.
-
-
-=== Deleting user mailboxes
-
-....
-curl -XDELETE http://ip:port/users/{usernameToBeUsed}/mailboxes
-....
-
-Resource name `usernameToBeUsed` should be an existing user
-
-Response codes:
-
-* 204: The user does not have mailboxes anymore
-* 404: The user name does not exist. Note that this check can be bypassed by specifying the `force` query parameter.
-
-=== Exporting user mailboxes
-
-....
-curl -XPOST http://ip:port/users/{usernameToBeUsed}/mailboxes?action=export
-....
-
-Resource name `usernameToBeUsed` should be an existing user
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned
-* 404: The user name does not exist
-
-The scheduled task will have the following type `MailboxesExportTask`
-and the following `additionalInformation`:
-
-....
-{
- "type":"MailboxesExportTask",
- "timestamp":"2007-12-03T10:15:30Z",
- "username": "user",
- "stage": "STARTING"
-}
-....
-
-=== ReIndexing a user mails
-
-....
-curl -XPOST http://ip:port/users/{usernameToBeUsed}/mailboxes?task=reIndex
-....
-
-Will schedule a task for reIndexing all the mails in ``user@domain.com''
-mailboxes (encoded above).
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-An admin can specify the concurrency that should be used when running
-the task:
-
-* `messagesPerSecond` rate at which messages should be processed per
-second. Default is 50.
-
-This optional parameter must have a strictly positive integer as a value
-and be passed as query parameter.
-
-An admin can also specify the reindexing mode it wants to use when
-running the task:
-
-* `mode` the reindexing mode used. There are 2 modes for the moment:
-** `rebuildAll` allows to rebuild all indexes. This is the default mode.
-** `fixOutdated` will check for outdated indexed document and reindex
-only those.
-
-This optional parameter must be passed as query parameter.
-
-It’s good to note as well that there is a limitation with the
-`fixOutdated` mode. As we first collect metadata of stored messages to
-compare them with the ones in the index, a failed `expunged` operation
-might not be well corrected (as the message might not exist anymore but
-still be indexed).
-
-Example:
-
-....
-curl -XPOST http://ip:port/users/{usernameToBeUsed}/mailboxes?task=reIndex&messagesPerSecond=200&mode=fixOutdated
-....
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Error in the request. Details can be found in the reported error.
-
-The scheduled task will have the following type `user-reindexing` and
-the following `additionalInformation`:
-
-....
-{
- "type":"user-reindexing",
- "runningOptions":{
- "messagesPerSecond":200,
- "mode":"FIX_OUTDATED"
- },
- "user":"user@domain.com",
- "successfullyReprocessedMailCount":18,
- "failedReprocessedMailCount": 3,
- "mailboxFailures": ["12", "23" ],
- "messageFailures": [
- {
- "mailboxId": "1",
- "uids": [1, 36]
- }]
-}
-....
-
-Warning: During the re-indexing, the result of search operations might
-be altered.
-
-Warning: Canceling this task should be considered unsafe as it will
-leave the currently reIndexed mailbox as partially indexed.
-
-Warning: While we have been trying to reduce the inconsistency window to
-a maximum (by keeping track of ongoing events), concurrent changes done
-during the reIndexing might be ignored.
-
-=== Counting emails
-
-....
-curl -XGET http://ip:port/users/{usernameToBeUsed}/mailboxes/{mailboxName}/messageCount
-....
-
-Will return the total count of messages within the mailbox of that user.
-
-Resource name `usernameToBeUsed` should be an existing user.
-
-Resource name `mailboxName` should not be empty, nor contain `% *` characters, nor starting with `#`.
-
-Response codes:
-
-* 200: The number of emails in a given mailbox
-* 400: Invalid mailbox name
-* 404: Invalid get on user mailboxes. The `usernameToBeUsed` or `mailboxName` does not exist
-
-=== Counting unseen emails
-
-....
-curl -XGET http://ip:port/users/{usernameToBeUsed}/mailboxes/{mailboxName}/unseenMessageCount
-....
-
-Will return the total count of unseen messages within the mailbox of that user.
-
-Resource name `usernameToBeUsed` should be an existing user.
-
-Resource name `mailboxName` should not be empty, nor contain `% *` characters, nor starting with `#`.
-
-Response codes:
-
-* 200: The number of unseen emails in a given mailbox
-* 400: Invalid mailbox name
-* 404: Invalid get on user mailboxes. The `usernameToBeUsed` or `mailboxName` does not exist
-
-=== Clearing mailbox content
-
-....
-curl -XDELETE http://ip:port/users/{usernameToBeUsed}/mailboxes/{mailboxName}/messages
-....
-
-Will schedule a task for clearing all the mails in ``mailboxName`` mailbox of ``usernameToBeUsed``.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Resource name `usernameToBeUsed` should be an existing user.
-
-Resource name `mailboxName` should not be empty, nor contain `% *` characters, nor starting with `#`.
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Invalid mailbox name
-* 404: Invalid get on user mailboxes. The `username` or `mailboxName` does not exist
-
-The scheduled task will have the following type `ClearMailboxContentTask` and
-the following `additionalInformation`:
-
-....
-{
- "mailboxName": "mbx1",
- "messagesFailCount": 9,
- "messagesSuccessCount": 10,
- "timestamp": "2007-12-03T10:15:30Z",
- "type": "ClearMailboxContentTask",
- "username": "bob@domain.tld"
-}
-....
-
-=== Subscribing a user to all of its mailboxes
-
-....
-curl -XPOST http://ip:port/users/{usernameToBeUsed}/mailboxes?task=subscribeAll
-....
-
-Will schedule a task for subscribing a user to all of its mailboxes.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Most users are unaware of what an IMAP subscription is, nor how they can manage it. If the subscription list gets out
-of sync with the mailbox list, it could result in downgraded user experience (see MAILBOX-405). This task allows
-to reset the subscription list to the mailbox list on a per user basis thus working around the aforementioned issues.
-
-Response codes:
-
-- 201: Success. Corresponding task id is returned.
-- 404: No such user
-
-The scheduled task will have the following type `SubscribeAllTask` and the following `additionalInformation`:
-
-....
-{
- "type":"SubscribeAllTask",
- "username":"user@domain.com",
- "subscribedCount":18,
- "unsubscribedCount": 3
-}
-....
-
-=== Recomputing User JMAP fast message view projection
-
-This action is only available for backends supporting JMAP protocol.
-
-Message fast view projection stores message properties expected to be
-fast to fetch but are actually expensive to compute, in order for
-GetMessages operation to be fast to execute for these properties.
-
-These projection items are asynchronously computed on mailbox events.
-
-You can force the full projection recomputation by calling the following
-endpoint:
-
-....
-curl -XPOST /users/{usernameToBeUsed}/mailboxes?task=recomputeFastViewProjectionItems
-....
-
-Will schedule a task for recomputing the fast message view projection
-for all mailboxes of `usernameToBeUsed`.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-An admin can specify the concurrency that should be used when running
-the task:
-
-* `messagesPerSecond` rate at which messages should be processed, per
-second. Defaults to 10.
-
-This optional parameter must have a strictly positive integer as a value
-and be passed as query parameters.
-
-Example:
-
-....
-curl -XPOST /users/{usernameToBeUsed}/mailboxes?task=recomputeFastViewProjectionItems&messagesPerSecond=20
-....
-
-The scheduled task will have the following type
-`RecomputeUserFastViewProjectionItemsTask` and the following
-`additionalInformation`:
-
-....
-{
- "type":"RecomputeUserFastViewProjectionItemsTask",
- "username": "{usernameToBeUsed}",
- "processedMessageCount": 3,
- "failedMessageCount": 1,
- "runningOptions": {
- "messagesPerSecond":20
- }
-}
-....
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Error in the request. Details can be found in the reported error.
-* 404: User not found.
-
-== Administrating quotas
-
-=== Administrating quotas by users
-
-==== Getting the quota for a user
-
-....
-curl -XGET http://ip:port/quota/users/{usernameToBeUsed}
-....
-
-Resource name `usernameToBeUsed` should be an existing user
-
-The answer is the details of the quota of that user.
-
-....
-{
- "global": {
- "count":252,
- "size":242
- },
- "domain": {
- "count":152,
- "size":142
- },
- "user": {
- "count":52,
- "size":42
- },
- "computed": {
- "count":52,
- "size":42
- },
- "occupation": {
- "size":13,
- "count":21,
- "ratio": {
- "size":0.25,
- "count":0.5,
- "max":0.5
- }
- }
-}
-....
-
-* The `global` entry represents the quota limit allowed on this James
-server.
-* The `domain` entry represents the quota limit allowed for the user of
-that domain.
-* The `user` entry represents the quota limit allowed for this specific
-user.
-* The `computed` entry represents the quota limit applied for this user,
-resolved from the upper values.
-* The `occupation` entry represents the occupation of the quota for this
-user. This includes used count and size as well as occupation ratio
-(used / limit).
-
-Note that `quota` object can contain a fixed value, an empty value
-(null) or an unlimited value (-1):
-
-....
-{"count":52,"size":42}
-
-{"count":null,"size":null}
-
-{"count":52,"size":-1}
-....
-
-Response codes:
-
-* 200: The user’s quota was successfully retrieved
-* 404: The user does not exist
-
-==== Updating the quota for a user
-
-....
-curl -XPUT http://ip:port/quota/users/{usernameToBeUsed}
-....
-
-Resource name `usernameToBeUsed` should be an existing user
-
-The body can contain a fixed value, an empty value (null) or an
-unlimited value (-1):
-
-....
-{"count":52,"size":42}
-
-{"count":null,"size":null}
-
-{"count":52,"size":-1}
-....
-
-Response codes:
-
-* 204: The quota has been updated
-* 400: The body is not a positive integer neither an unlimited value
-(-1).
-* 404: The user does not exist
-
-==== Getting the quota count for a user
-
-....
-curl -XGET http://ip:port/quota/users/{usernameToBeUsed}/count
-....
-
-Resource name `usernameToBeUsed` should be an existing user
-
-The answer looks like:
-
-....
-52
-....
-
-Response codes:
-
-* 200: The user’s quota was successfully retrieved
-* 204: No quota count limit is defined at the user level for this user
-* 404: The user does not exist
-
-==== Updating the quota count for a user
-
-....
-curl -XPUT http://ip:port/quota/users/{usernameToBeUsed}/count
-....
-
-Resource name `usernameToBeUsed` should be an existing user
-
-The body can contain a fixed value or an unlimited value (-1):
-
-....
-52
-....
-
-Response codes:
-
-* 204: The quota has been updated
-* 400: The body is not a positive integer neither an unlimited value
-(-1).
-* 404: The user does not exist
-
-==== Deleting the quota count for a user
-
-....
-curl -XDELETE http://ip:port/quota/users/{usernameToBeUsed}/count
-....
-
-Resource name `usernameToBeUsed` should be an existing user
-
-Response codes:
-
-* 204: The quota has been updated to unlimited value.
-* 404: The user does not exist
-
-==== Getting the quota size for a user
-
-....
-curl -XGET http://ip:port/quota/users/{usernameToBeUsed}/size
-....
-
-Resource name `usernameToBeUsed` should be an existing user
-
-The answer looks like:
-
-....
-52
-....
-
-Response codes:
-
-* 200: The user’s quota was successfully retrieved
-* 204: No quota size limit is defined at the user level for this user
-* 404: The user does not exist
-
-==== Updating the quota size for a user
-
-....
-curl -XPUT http://ip:port/quota/users/{usernameToBeUsed}/size
-....
-
-Resource name `usernameToBeUsed` should be an existing user
-
-The body can contain a fixed value or an unlimited value (-1):
-
-....
-52
-....
-
-Response codes:
-
-* 204: The quota has been updated
-* 400: The body is not a positive integer neither an unlimited value
-(-1).
-* 404: The user does not exist
-
-==== Deleting the quota size for a user
-
-....
-curl -XDELETE http://ip:port/quota/users/{usernameToBeUsed}/size
-....
-
-Resource name `usernameToBeUsed` should be an existing user
-
-Response codes:
-
-* 204: The quota has been updated to unlimited value.
-* 404: The user does not exist
-
-==== Searching user by quota ratio
-
-....
-curl -XGET 'http://ip:port/quota/users?minOccupationRatio=0.8&maxOccupationRatio=0.99&limit=100&offset=200&domain=domain.com'
-....
-
-Will return:
-
-....
-[
- {
- "username":"user@domain.com",
- "detail": {
- "global": {
- "count":252,
- "size":242
- },
- "domain": {
- "count":152,
- "size":142
- },
- "user": {
- "count":52,
- "size":42
- },
- "computed": {
- "count":52,
- "size":42
- },
- "occupation": {
- "size":48,
- "count":21,
- "ratio": {
- "size":0.9230,
- "count":0.5,
- "max":0.9230
- }
- }
- }
- },
- ...
-]
-....
-
-Where:
-
-* *minOccupationRatio* is a query parameter determining the minimum
-occupation ratio of users to be returned.
-* *maxOccupationRatio* is a query parameter determining the maximum
-occupation ratio of users to be returned.
-* *domain* is a query parameter determining the domain of users to be
-returned.
-* *limit* is a query parameter determining the maximum number of users
-to be returned.
-* *offset* is a query parameter determining the number of users to skip.
-
-Please note that users are alphabetically ordered on username.
-
-The response is a list of usernames, with attached quota details as
-defined link:#_getting_the_quota_for_a_user[here].
-
-Response codes:
-
-* 200: List of users had successfully been returned.
-* 400: Validation issues with parameters
-
-==== Recomputing current quotas for users
-
-....
-curl -XPOST /quota/users?task=RecomputeCurrentQuotas
-....
-
-Will recompute current quotas (count and size) for all users stored in
-James.
-
-James maintains per quota a projection for current quota count and size.
-As with any projection, it can go out of sync, leading to inconsistent
-results being returned to the client.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-An admin can specify the concurrency that should be used when running
-the task:
-
-* `usersPerSecond` rate at which users quotas should be reprocessed, per
-second. Defaults to 1.
-
-This optional parameter must have a strictly positive integer as a value
-and be passed as query parameters.
-
-An admin can select which quota component he wants to recompute:
-
-* `quotaComponent` component whose quota needs to be reprocessed. It could be one of the following values: MAILBOX, SIEVE, JMAP_UPLOADS.
-
-The admin can select several quota components. If none are selected, quotas of all components will be recomputed.
-
-Example:
-
-....
-curl -XPOST /quota/users?task=RecomputeCurrentQuotas&usersPerSecond=20&quotaComponent=MAILBOX&quotaComponent=JMAP_UPLOADS
-....
-
-The scheduled task will have the following type
-`recompute-current-quotas` and the following `additionalInformation`:
-
-....
-{
- "type":"recompute-current-quotas",
- "recomputeSingleQuotaComponentResults": [
- {
- "quotaComponent": "MAILBOX",
- "processedIdentifierCount": 3,
- "failedIdentifiers": ["#private&bob@localhost"]
- },
- {
- "quotaComponent": "JMAP_UPLOADS",
- "processedIdentifierCount": 3,
- "failedIdentifiers": ["bob@localhost"]
- }
- ],
- "runningOptions": {
- "usersPerSecond":20
- }
-}
-....
-
-*WARNING*: this task does not take into account concurrent modifications
-upon a single current quota re-computation. Rerunning the task will
-_eventually_ provide the consistent result.
-
-=== Administrating quotas by domains
-
-==== Getting the quota for a domain
-
-....
-curl -XGET http://ip:port/quota/domains/{domainToBeUsed}
-....
-
-Resource name `domainToBeUsed` should be an existing domain. For
-example:
-
-....
-curl -XGET http://ip:port/quota/domains/james.org
-....
-
-The answer will detail the default quota applied to users belonging to
-that domain:
-
-....
-{
- "global": {
- "count":252,
- "size":null
- },
- "domain": {
- "count":null,
- "size":142
- },
- "computed": {
- "count":252,
- "size":142
- }
-}
-....
-
-* The `global` entry represents the quota limit defined on this James
-server by default.
-* The `domain` entry represents the quota limit allowed for the user of
-that domain by default.
-* The `computed` entry represents the quota limit applied for the users
-of that domain, by default, resolved from the upper values.
-
-Note that `quota` object can contain a fixed value, an empty value
-(null) or an unlimited value (-1):
-
-....
-{"count":52,"size":42}
-
-{"count":null,"size":null}
-
-{"count":52,"size":-1}
-....
-
-Response codes:
-
-* 200: The domain’s quota was successfully retrieved
-* 404: The domain does not exist
-* 405: Domain Quota configuration not supported when virtual hosting is
-deactivated.
-
-==== Updating the quota for a domain
-
-....
-curl -XPUT http://ip:port/quota/domains/{domainToBeUsed}
-....
-
-Resource name `domainToBeUsed` should be an existing domain.
-
-The body can contain a fixed value, an empty value (null) or an
-unlimited value (-1):
-
-....
-{"count":52,"size":42}
-
-{"count":null,"size":null}
-
-{"count":52,"size":-1}
-....
-
-Response codes:
-
-* 204: The quota has been updated
-* 400: The body is not a positive integer neither an unlimited value
-(-1).
-* 404: The domain does not exist
-* 405: Domain Quota configuration not supported when virtual hosting is
-deactivated.
-
-==== Getting the quota count for a domain
-
-....
-curl -XGET http://ip:port/quota/domains/{domainToBeUsed}/count
-....
-
-Resource name `domainToBeUsed` should be an existing domain.
-
-The answer looks like:
-
-....
-52
-....
-
-Response codes:
-
-* 200: The domain’s quota was successfully retrieved
-* 204: No quota count limit is defined at the domain level for this
-domain
-* 404: The domain does not exist
-* 405: Domain Quota configuration not supported when virtual hosting is
-deactivated.
-
-==== Updating the quota count for a domain
-
-....
-curl -XPUT http://ip:port/quota/domains/{domainToBeUsed}/count
-....
-
-Resource name `domainToBeUsed` should be an existing domain.
-
-The body can contain a fixed value or an unlimited value (-1):
-
-....
-52
-....
-
-Response codes:
-
-* 204: The quota has been updated
-* 400: The body is not a positive integer neither an unlimited value
-(-1).
-* 404: The domain does not exist
-* 405: Domain Quota configuration not supported when virtual hosting is
-deactivated.
-
-==== Deleting the quota count for a domain
-
-....
-curl -XDELETE http://ip:port/quota/domains/{domainToBeUsed}/count
-....
-
-Resource name `domainToBeUsed` should be an existing domain.
-
-Response codes:
-
-* 204: The quota has been updated to unlimited value.
-* 404: The domain does not exist
-* 405: Domain Quota configuration not supported when virtual hosting is
-deactivated.
-
-==== Getting the quota size for a domain
-
-....
-curl -XGET http://ip:port/quota/domains/{domainToBeUsed}/size
-....
-
-Resource name `domainToBeUsed` should be an existing domain.
-
-The answer looks like:
-
-....
-52
-....
-
-Response codes:
-
-* 200: The domain’s quota was successfully retrieved
-* 204: No quota size limit is defined at the domain level for this
-domain
-* 404: The domain does not exist
-* 405: Domain Quota configuration not supported when virtual hosting is
-deactivated.
-
-==== Updating the quota size for a domain
-
-....
-curl -XPUT http://ip:port/quota/domains/{domainToBeUsed}/size
-....
-
-Resource name `domainToBeUsed` should be an existing domain.
-
-The body can contain a fixed value or an unlimited value (-1):
-
-....
-52
-....
-
-Response codes:
-
-* 204: The quota has been updated
-* 400: The body is not a positive integer neither an unlimited value
-(-1).
-* 404: The domain does not exist
-* 405: Domain Quota configuration not supported when virtual hosting is
-deactivated.
-
-==== Deleting the quota size for a domain
-
-....
-curl -XDELETE http://ip:port/quota/domains/{domainToBeUsed}/size
-....
-
-Resource name `domainToBeUsed` should be an existing domain.
-
-Response codes:
-
-* 204: The quota has been updated to unlimited value.
-* 404: The domain does not exist
-
-=== Administrating global quotas
-
-==== Getting the global quota
-
-....
-curl -XGET http://ip:port/quota
-....
-
-The answer is the details of the global quota.
-
-....
-{
- "count":252,
- "size":242
-}
-....
-
-Note that `quota` object can contain a fixed value, an empty value
-(null) or an unlimited value (-1):
-
-....
-{"count":52,"size":42}
-
-{"count":null,"size":null}
-
-{"count":52,"size":-1}
-....
-
-Response codes:
-
-* 200: The quota was successfully retrieved
-
-==== Updating global quota
-
-....
-curl -XPUT http://ip:port/quota
-....
-
-The body can contain a fixed value, an empty value (null) or an
-unlimited value (-1):
-
-....
-{"count":52,"size":42}
-
-{"count":null,"size":null}
-
-{"count":52,"size":-1}
-....
-
-Response codes:
-
-* 204: The quota has been updated
-* 400: The body is not a positive integer neither an unlimited value
-(-1).
-
-==== Getting the global quota count
-
-....
-curl -XGET http://ip:port/quota/count
-....
-
-
-
-The answer looks like:
-
-....
-52
-....
-
-Response codes:
-
-* 200: The quota was successfully retrieved
-* 204: No quota count limit is defined at the global level
-
-==== Updating the global quota count
-
-....
-curl -XPUT http://ip:port/quota/count
-....
-
-The body can contain a fixed value or an unlimited value (-1):
-
-....
-52
-....
-
-Response codes:
-
-* 204: The quota has been updated
-* 400: The body is not a positive integer neither an unlimited value
-(-1).
-
-==== Deleting the global quota count
-
-....
-curl -XDELETE http://ip:port/quota/count
-....
-
-Response codes:
-
-* 204: The quota has been updated to unlimited value.
-
-==== Getting the global quota size
-
-....
-curl -XGET http://ip:port/quota/size
-....
-
-The answer looks like:
-
-....
-52
-....
-
-Response codes:
-
-* 200: The quota was successfully retrieved
-* 204: No quota size limit is defined at the global level
-
-==== Updating the global quota size
-
-....
-curl -XPUT http://ip:port/quota/size
-....
-
-The body can contain a fixed value or an unlimited value (-1):
-
-....
-52
-....
-
-Response codes:
-
-* 204: The quota has been updated
-* 400: The body is not a positive integer neither an unlimited value
-(-1).
-
-==== Deleting the global quota size
-
-....
-curl -XDELETE http://ip:port/quota/size
-....
-
-Response codes:
-
-* 204: The quota has been updated to unlimited value.
-
-=== Administrating Sieve quotas
-
-Some limitations on space Users Sieve script can occupy can be
-configured by default, and overridden by user.
-
-==== Retrieving global sieve quota
-
-This endpoint allows retrieving the global Sieve quota, which will be
-the default for users:
-
-....
-curl -XGET http://ip:port/sieve/quota/default
-....
-
-Will return the bytes count allowed by user per default on this server.
-
-....
-102400
-....
-
-Response codes:
-
-* 200: Request is a success and the value is returned
-* 204: No default quota is being configured
-
-==== Updating global sieve quota
-
-This endpoint allows updating the global Sieve quota, which will be
-the default for users:
-
-....
-curl -XPUT http://ip:port/sieve/quota/default
-....
-
-With the body being the bytes count allowed by user per default on this
-server.
-
-....
-102400
-....
-
-Response codes:
-
-* 204: Operation succeeded
-* 400: Invalid payload
-
-==== Removing global sieve quota
-
-This endpoint allows removing the global Sieve quota. Users will no
-longer have a default:
-
-....
-curl -XDELETE http://ip:port/sieve/quota/default
-....
-
-Response codes:
-
-* 204: Operation succeeded
-
-==== Retrieving user sieve quota
-
-This endpoint allows retrieving the Sieve quota of a user, which will
-be this user's quota:
-
-....
-curl -XGET http://ip:port/sieve/quota/users/user@domain.com
-....
-
-Will return the bytes count allowed for this user.
-
-....
-102400
-....
-
-Response codes:
-
-* 200: Request is a success and the value is returned
-* 204: No quota is being configured for this user
-
-==== Updating user sieve quota
-
-This endpoint allows updating the Sieve quota of a user, which will be
-this user's quota:
-
-....
-curl -XPUT http://ip:port/sieve/quota/users/user@domain.com
-....
-
-With the body being the bytes count allowed for this user on this
-server.
-
-....
-102400
-....
-
-Response codes:
-
-* 204: Operation succeeded
-* 400: Invalid payload
-
-==== Removing user sieve quota
-
-This endpoint allows removing the Sieve quota of a user. There will be
-no more quota for this user:
-
-....
-curl -XDELETE http://ip:port/sieve/quota/users/user@domain.com
-....
-
-Response codes:
-
-* 204: Operation succeeded
-
-== Administrating Jmap Uploads
-
-=== Cleaning upload repository
-
-....
-curl -XDELETE http://ip:port/jmap/uploads?scope=expired
-....
-
-Will schedule a task for clearing expired upload entries.
-
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-
-Query parameter `scope` is required and must have the value `expired`.
-
-Response codes:
-
-* 201: Success. Corresponding task id is returned.
-* 400: Scope invalid
-
-The scheduled task will have the following type `UploadRepositoryCleanupTask` and
-the following `additionalInformation`:
-
-....
-{
- "scope": "expired",
- "timestamp": "2007-12-03T10:15:30Z",
- "type": "UploadRepositoryCleanupTask"
-}
-....
-
-== Running blob garbage collection
-
-When deduplication is enabled one needs to explicitly run a garbage collection in order to delete no longer referenced
-blobs.
-
-To do so:
-
-....
-curl -XDELETE http://ip:port/blobs?scope=unreferenced
-....
-
-link:#_endpoints_returning_a_task[More details about endpoints returning a task].
-
-Additional parameters include Bloom filter tuning parameters:
-
- - *associatedProbability*: Allow to define the targeted false positive rate. Note that subsequent runs do not have the
-same false-positives. Defaults to `0.01`.
- - *expectedBlobCount*: Expected count of blobs used to size the bloom filters. Defaults to `1.000.000`.
-
-These settings directly impact the memory footprint of the bloom filter. link:https://hur.st/bloomfilter/[Simulators] can
-help understand those parameters.
-
-The created task has the following additional information:
-
-....
-{
- "referenceSourceCount": 3456,
- "blobCount": 5678,
- "gcedBlobCount": 1234,
- "bloomFilterExpectedBlobCount": 10000,
- "bloomFilterAssociatedProbability": 0.01
-}
-....
-
-Where:
-
- - *bloomFilterExpectedBlobCount* corresponds to the supplied *expectedBlobCount* query parameter.
- - *bloomFilterAssociatedProbability* corresponds to the supplied *associatedProbability* query parameter.
- - *referenceSourceCount* is the count of distinct blob references encountered while populating the bloom filter.
- - *blobCount* is the count of blobs tried against the bloom filter. This value can be used to better size the bloom
-filter in later runs.
- - *gcedBlobCount* is the count of blobs that were garbage collected.
-
-== Administrating Recipient rewriting
-
-=== Address group
-
-You can use *webadmin* to define address groups.
-
-When a specific email is sent to the group mail address, every group
-member will receive it.
-
-Note that the group mail address is virtual: it does not correspond to
-an existing user.
-
-This feature uses xref:distributed/architecture/index.adoc#_recipient_rewrite_tables[Recipients rewrite table]
-and requires the
-https://github.com/apache/james-project/blob/master/server/mailet/mailets/src/main/java/org/apache/james/transport/mailets/RecipientRewriteTable.java[RecipientRewriteTable
-mailet] to be configured.
-
-Note that email addresses are restricted to ASCII character set. Mail
-addresses not matching this criteria will be rejected.
-
-==== Listing groups
-
-....
-curl -XGET http://ip:port/address/groups
-....
-
-Will return the groups as a list of JSON Strings representing mail
-addresses. For instance:
-
-....
-["group1@domain.com", "group2@domain.com"]
-....
-
-Response codes:
-
-* 200: Success
-
-==== Listing members of a group
-
-....
-curl -XGET http://ip:port/address/groups/group@domain.com
-....
-
-Will return the group members as a list of JSON Strings representing
-mail addresses. For instance:
-
-....
-["member1@domain.com", "member2@domain.com"]
-....
-
-Response codes:
-
-* 200: Success
-* 400: Group structure is not valid
-* 404: The group does not exist
-
-==== Adding a group member
-
-....
-curl -XPUT http://ip:port/address/groups/group@domain.com/member@domain.com
-....
-
-Will add member@domain.com to group@domain.com, creating the group if
-needed
-
-Response codes:
-
-* 204: Success
-* 400: Group structure or member is not valid
-* 400: Domain in the source is not managed by the DomainList
-* 409: Requested group address is already used for another purpose
-* 409: The addition of the group member would lead to a loop and thus cannot be performed
-
-==== Removing a group member
-
-....
-curl -XDELETE http://ip:port/address/groups/group@domain.com/member@domain.com
-....
-
-Will remove member@domain.com from group@domain.com, removing the group
-if group is empty after deletion
-
-Response codes:
-
-* 204: Success
-* 400: Group structure or member is not valid
-
-=== Address forwards
-
-You can use *webadmin* to define address forwards.
-
-When a specific email is sent to the base mail address, every forward
-destination addresses will receive it.
-
-Please note that the base address can optionally be part of the forward
-destinations. In that case, the base recipient also receives a copy of the
-mail. Otherwise it is omitted.
-
-Forwards can be defined for existing users. This differs from
-``groups''.
-
-This feature uses xref:distributed/architecture/index.adoc#_recipient_rewrite_tables[Recipients rewrite table]
-and requires the
-https://github.com/apache/james-project/blob/master/server/mailet/mailets/src/main/java/org/apache/james/transport/mailets/RecipientRewriteTable.java[RecipientRewriteTable
-mailet] to be configured.
-
-Note that email addresses are restricted to ASCII character set. Mail
-addresses not matching this criteria will be rejected.
-
-==== Listing Forwards
-
-....
-curl -XGET http://ip:port/address/forwards
-....
-
-Will return the users having forwards configured as a list of JSON
-Strings representing mail addresses. For instance:
-
-....
-["user1@domain.com", "user2@domain.com"]
-....
-
-Response codes:
-
-* 200: Success
-
-==== Listing destinations in a forward
-
-....
-curl -XGET http://ip:port/address/forwards/user@domain.com
-....
-
-Will return the destination addresses of this forward as a list of JSON
-Strings representing mail addresses. For instance:
-
-....
-[
- {"mailAddress":"destination1@domain.com"},
- {"mailAddress":"destination2@domain.com"}
-]
-....
-
-Response codes:
-
-* 200: Success
-* 400: Forward structure is not valid
-* 404: The given user doesn’t have forwards or does not exist
-
-==== Adding a new destination to a forward
-
-....
-curl -XPUT http://ip:port/address/forwards/user@domain.com/targets/destination@domain.com
-....
-
-Will add destination@domain.com to user@domain.com, creating the forward
-if needed
-
-Response codes:
-
-* 204: Success
-* 400: Forward structure or member is not valid
-* 400: Domain in the source is not managed by the DomainList
-* 404: Requested forward address does not match an existing user
-* 409: The creation of the forward would lead to a loop and thus cannot be performed
-
-==== Removing a destination of a forward
-
-....
-curl -XDELETE http://ip:port/address/forwards/user@domain.com/targets/destination@domain.com
-....
-
-Will remove destination@domain.com from user@domain.com, removing the
-forward if forward is empty after deletion
-
-Response codes:
-
-* 204: Success
-* 400: Forward structure or member is not valid
-
-=== Address aliases
-
-You can use *webadmin* to define aliases for a user.
-
-When a specific email is sent to the alias address, the destination
-address of the alias will receive it.
-
-Aliases can be defined for existing users.
-
-This feature uses xref:distributed/architecture/index.adoc#_recipient_rewrite_tables[Recipients rewrite table]
-and requires the
-https://github.com/apache/james-project/blob/master/server/mailet/mailets/src/main/java/org/apache/james/transport/mailets/RecipientRewriteTable.java[RecipientRewriteTable
-mailet] to be configured.
-
-Note that email addresses are restricted to ASCII character set. Mail
-addresses not matching this criteria will be rejected.
-
-==== Listing users with aliases
-
-....
-curl -XGET http://ip:port/address/aliases
-....
-
-Will return the users having aliases configured as a list of JSON
-Strings representing mail addresses. For instance:
-
-....
-["user1@domain.com", "user2@domain.com"]
-....
-
-Response codes:
-
-* 200: Success
-
-==== Listing alias sources of a user
-
-....
-curl -XGET http://ip:port/address/aliases/user@domain.com
-....
-
-Will return the aliases of this user as a list of JSON Strings
-representing mail addresses. For instance:
-
-....
-[
- {"source":"alias1@domain.com"},
- {"source":"alias2@domain.com"}
-]
-....
-
-Response codes:
-
-* 200: Success
-* 400: Alias structure is not valid
-
-==== Adding a new alias to a user
-
-....
-curl -XPUT http://ip:port/address/aliases/user@domain.com/sources/alias@domain.com
-....
-
-Will add alias@domain.com to user@domain.com, creating the alias if
-needed
-
-Response codes:
-
-* 204: OK
-* 400: Alias structure or member is not valid
-* 400: Source and destination can’t be the same!
-* 400: Domain in the destination or source is not managed by the
-DomainList
-* 409: The alias source exists as a user already
-* 409: The addition of the alias would lead to a loop and thus cannot be performed
-
-==== Removing an alias of a user
-
-....
-curl -XDELETE http://ip:port/address/aliases/user@domain.com/sources/alias@domain.com
-....
-
-Will remove alias@domain.com from user@domain.com, removing the alias if
-needed
-
-Response codes:
-
-* 204: OK
-* 400: Alias structure or member is not valid
-
-=== Domain mappings
-
-You can use *webadmin* to define domain mappings.
-
-Given a configured source (from) domain and a destination (to) domain,
-when an email is sent to an address belonging to the source domain, then
-the domain part of this address is overwritten, the destination domain
-is then used. A source (from) domain can have many destination (to)
-domains.
-
-For example: with a source domain `james.apache.org` maps to two
-destination domains `james.org` and `apache-james.org`, when a mail is
-sent to `admin@james.apache.org`, then it will be routed to
-`admin@james.org` and `admin@apache-james.org`
-
-This feature uses xref:distributed/architecture/index.adoc#_recipient_rewrite_tables[Recipients rewrite table]
-and requires the
-https://github.com/apache/james-project/blob/master/server/mailet/mailets/src/main/java/org/apache/james/transport/mailets/RecipientRewriteTable.java[RecipientRewriteTable
-mailet] to be configured.
-
-Note that email addresses are restricted to ASCII character set. Mail
-addresses not matching this criteria will be rejected.
-
-==== Listing all domain mappings
-
-....
-curl -XGET http://ip:port/domainMappings
-....
-
-Will return all configured domain mappings
-
-....
-{
- "firstSource.org" : ["firstDestination.com", "secondDestination.net"],
- "secondSource.com" : ["thirdDestination.com", "fourthDestination.net"]
-}
-....
-
-Response codes:
-
-* 200: OK
-
-==== Listing all destination domains for a source domain
-
-....
-curl -XGET http://ip:port/domainMappings/sourceDomain.tld
-....
-
-With `sourceDomain.tld` as the value passed to `fromDomain` resource
-name, the API will return all destination domains configured to that
-domain
-
-....
-["firstDestination.com", "secondDestination.com"]
-....
-
-Response codes:
-
-* 200: OK
-* 400: The `fromDomain` resource name is invalid
-* 404: The `fromDomain` resource name is not found
-
-==== Adding a domain mapping
-
-....
-curl -XPUT http://ip:port/domainMappings/sourceDomain.tld
-....
-
-Body:
-
-....
-destination.tld
-....
-
-With `sourceDomain.tld` as the value passed to `fromDomain` resource
-name, the API will add a destination domain specified in the body to
-that domain
-
-Response codes:
-
-* 204: OK
-* 400: The `fromDomain` resource name is invalid
-* 400: The destination domain specified in the body is invalid
-
-Be aware that no checks to find possible loops that would result of this creation will be performed.
-
-==== Removing a domain mapping
-
-....
-curl -XDELETE http://ip:port/domainMappings/sourceDomain.tld
-....
-
-Body:
-
-....
-destination.tld
-....
-
-With `sourceDomain.tld` as the value passed to `fromDomain` resource
-name, the API will remove a destination domain specified in the body
-mapped to that domain
-
-Response codes:
-
-* 204: OK
-* 400: The `fromDomain` resource name is invalid
-* 400: The destination domain specified in the body is invalid
-
-=== Regex mapping
-
-You can use *webadmin* to create regex mappings.
-
-A regex mapping contains a mapping source and a Java Regular Expression
-(regex) in String as the mapping value. Every time a mail contains
-a recipient matching the mapping source, that mail will be
-re-routed to a new recipient address which is rewritten by the regex.
-
-This feature uses xref:distributed/architecture/index.adoc#_recipient_rewrite_tables[Recipients rewrite table]
-and requires the
-https://github.com/apache/james-project/blob/master/server/mailet/mailets/src/main/java/org/apache/james/transport/mailets/RecipientRewriteTable.java[RecipientRewriteTable
-API] to be configured.
-
-==== Adding a regex mapping
-
-....
-POST /mappings/regex/mappingSource/targets/regex
-....
-
-Where:
-
-* the `mappingSource` is the path parameter representing the Regex
-Mapping mapping source
-* the `regex` is the path parameter representing the Regex Mapping
-regex
-
-The route will add a regex mapping made from `mappingSource` and `regex`
-to RecipientRewriteTable.
-
-Example:
-
-....
-curl -XPOST http://ip:port/mappings/regex/james@domain.tld/targets/james@.*:james-intern@james.org
-....
-
-Response codes:
-
-* 204: Mapping added successfully.
-* 400: Invalid `mappingSource` path parameter.
-* 400: Invalid `regex` path parameter.
-
-Be aware that no checks to find possible loops that would result of this creation will be performed.
-
-==== Removing a regex mapping
-
-....
-DELETE /mappings/regex/{mappingSource}/targets/{regex}
-....
-
-Where:
-
-* the `mappingSource` is the path parameter representing the Regex
-Mapping mapping source
-* the `regex` is the path parameter representing the Regex Mapping regex
-
-The route will remove the regex mapping made from `regex` from the
-mapping source `mappingSource` to RecipientRewriteTable.
-
-Example:
-
-....
-curl -XDELETE http://ip:port/mappings/regex/james@domain.tld/targets/[O_O]:james-intern@james.org
-....
-
-Response codes:
-
-* 204: Mapping deleted successfully.
-* 400: Invalid `mappingSource` path parameter.
-* 400: Invalid `regex` path parameter.
-
-=== Address Mappings
-
-You can use *webadmin* to define address mappings.
-
-When a specific email is sent to the base mail address, every
-destination addresses will receive it.
-
-This feature uses xref:distributed/architecture/index.adoc#_recipient_rewrite_tables[Recipients rewrite table]
-and requires the
-https://github.com/apache/james-project/blob/master/server/mailet/mailets/src/main/java/org/apache/james/transport/mailets/RecipientRewriteTable.java[RecipientRewriteTable
-mailet] to be configured.
-
-Note that email addresses are restricted to ASCII character set. Mail
-addresses not matching this criteria will be rejected.
-
-Please use address mappings with caution, as it’s not a typed address.
-If you know the type of your address (forward, alias, domain, group,
-etc), prefer using the corresponding routes to those types.
-
-Here are the following actions available on address mappings:
-
-==== Add an address mapping
-
-....
-curl -XPOST http://ip:port/mappings/address/{mappingSource}/targets/{destinationAddress}
-....
-
-Add an address mapping to the Recipients rewrite table
-Mapping source is the value of \{mappingSource}. Mapping destination is
-the value of \{destinationAddress}. Type of mapping destination is
-Address.
-
-Response codes:
-
-* 204: Action successfully performed
-* 400: Invalid parameters
-* 409: The addition of the address mapping would lead to a loop and thus cannot be performed
-
-==== Remove an address mapping
-
-....
-curl -XDELETE http://ip:port/mappings/address/{mappingSource}/targets/{destinationAddress}
-....
-
-* Remove an address mapping from the Recipients rewrite table
-* Mapping source is the value of `mappingSource`
-* Mapping destination is the value of `destinationAddress`
-* Type of mapping destination is Address
-
-Response codes:
-
-* 204: Action successfully performed
-* 400: Invalid parameters
-
-=== List all mappings
-
-....
-curl -XGET http://ip:port/mappings
-....
-
-Get all mappings from the
-xref:distributed/architecture/index.adoc#_recipient_rewrite_tables[Recipients rewrite table].
-
-Response body:
-
-....
-{
- "alias@domain.tld": [
- {
- "type": "Alias",
- "mapping": "user@domain.tld"
- },
- {
- "type": "Group",
- "mapping": "group-user@domain.tld"
- }
- ],
- "aliasdomain.tld": [
- {
- "type": "Domain",
- "mapping": "realdomain.tld"
- }
- ],
- "group@domain.tld": [
- {
- "type": "Address",
- "mapping": "user@domain.tld"
- }
- ]
-}
-....
-
-Response code:
-
-* 200: OK
-
-=== Listing User Mappings
-
-This endpoint allows receiving all mappings of a corresponding user.
-
-....
-curl -XGET http://ip:port/mappings/user/{userAddress}
-....
-
-Return all mappings of a user where:
-
-* `userAddress`: is the selected user
-
-Response body:
-
-....
-[
- {
- "type": "Address",
- "mapping": "user123@domain.tld"
- },
- {
- "type": "Alias",
- "mapping": "aliasuser123@domain.tld"
- },
- {
- "type": "Group",
- "mapping": "group123@domain.tld"
- }
-]
-....
-
-Response codes:
-
-* 200: OK
-* 400: Invalid parameter value
-
-== Administrating mail repositories
-
-=== Create a mail repository
-
-....
-curl -XPUT http://ip:port/mailRepositories/{encodedPathOfTheRepository}?protocol={someProtocol}
-....
-
-Resource name `encodedPathOfTheRepository` should be the resource path
-of the created mail repository. Example:
-
-....
-curl -XPUT http://ip:port/mailRepositories/mailRepo?protocol=file
-....
-
-Response codes:
-
-* 204: The repository is created
-
-=== Listing mail repositories
-
-....
-curl -XGET http://ip:port/mailRepositories
-....
-
-The answer looks like:
-
-....
-[
- {
- "repository": "var/mail/error/",
- "path": "var%2Fmail%2Ferror%2F"
- },
- {
- "repository": "var/mail/relay-denied/",
- "path": "var%2Fmail%2Frelay-denied%2F"
- },
- {
- "repository": "var/mail/spam/",
- "path": "var%2Fmail%2Fspam%2F"
- },
- {
- "repository": "var/mail/address-error/",
- "path": "var%2Fmail%2Faddress-error%2F"
- }
-]
-....
-
-You can use `path`, the encoded URL of the repository, to access it in
-later requests.
-
-Response codes:
-
-* 200: The list of mail repositories
-
-=== Getting additional information for a mail repository
-
-....
-curl -XGET http://ip:port/mailRepositories/{encodedPathOfTheRepository}
-....
-
-Resource name `encodedPathOfTheRepository` should be the resource path
-of an existing mail repository. Example:
-
-....
-curl -XGET http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F
-....
-
-The answer looks like:
-
-....
-{
- "repository": "var/mail/error/",
- "path": "mail%2Ferror%2F",
- "size": 243
-}
-....
-
-Response codes:
-
-* 200: Additional information for that repository
-* 404: This repository can not be found
-
-=== Listing mails contained in a mail repository
-
-....
-curl -XGET http://ip:port/mailRepositories/{encodedPathOfTheRepository}/mails
-....
-
-Resource name `encodedPathOfTheRepository` should be the resource path
-of an existing mail repository. Example:
-
-....
-curl -XGET http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails
-....
-
-The answer will contain all mailKey values contained in that repository.
-
-....
-[
- "mail-key-1",
- "mail-key-2",
- "mail-key-3"
-]
-....
-
-Note that this can be used to read mail details.
-
-You can pass additional URL parameters to this call in order to limit
-the output:
-
-* A limit: no more elements than the specified limit will be
-returned. This needs to be strictly positive. If no value is specified,
-no limit will be applied.
-* An offset: allows skipping elements. This
-needs to be positive. Default value is zero.
-
-Example:
-
-....
-curl -XGET 'http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails?limit=100&offset=500'
-....
-
-Response codes:
-
-* 200: The list of mail keys contained in that mail repository
-* 400: Invalid parameters
-* 404: This repository can not be found
-
-=== Reading/downloading a mail details
-
-....
-curl -XGET http://ip:port/mailRepositories/{encodedPathOfTheRepository}/mails/mailKey
-....
-
-Resource name `encodedPathOfTheRepository` should be the resource path
-of an existing mail repository. Resource name `mailKey` should be the
-key of a mail stored in that repository. Example:
-
-....
-curl -XGET http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails/mail-key-1
-....
-
-If the Accept header in the request is ``application/json'', then the
-response looks like:
-
-....
-{
- "name": "mail-key-1",
- "sender": "sender@domain.com",
- "recipients": ["recipient1@domain.com", "recipient2@domain.com"],
- "state": "address-error",
- "error": "A small message explaining what happened to that mail...",
- "remoteHost": "111.222.333.444",
- "remoteAddr": "127.0.0.1",
- "lastUpdated": null
-}
-....
-
-If the Accept header in the request is ``message/rfc822'', then the
-response will be the _eml_ file itself.
-
-Additional query parameter `additionalFields` add the existing
-information to the response for the supported values (only work with
-``application/json'' Accept header):
-
-* attributes
-* headers
-* textBody
-* htmlBody
-* messageSize
-* perRecipientsHeaders
-
-....
-curl -XGET http://ip:port/mailRepositories/file%3A%2F%2Fvar%2Fmail%2Ferror%2F/mails/mail-key-1?additionalFields=attributes,headers,textBody,htmlBody,messageSize,perRecipientsHeaders
-....
-
-Give the following kind of response:
-
-....
-{
- "name": "mail-key-1",
- "sender": "sender@domain.com",
- "recipients": ["recipient1@domain.com", "recipient2@domain.com"],
- "state": "address-error",
- "error": "A small message explaining what happened to that mail...",
- "remoteHost": "111.222.333.444",
- "remoteAddr": "127.0.0.1",
- "lastUpdated": null,
- "attributes": {
- "name2": "value2",
- "name1": "value1"
- },
- "perRecipientsHeaders": {
- "third@party": {
- "headerName1": [
- "value1",
- "value2"
- ],
- "headerName2": [
- "value3",
- "value4"
- ]
- }
- },
- "headers": {
- "headerName4": [
- "value6",
- "value7"
- ],
- "headerName3": [
- "value5",
- "value8"
- ]
- },
- "textBody": "My body!!",
- "htmlBody": "My body!!",
- "messageSize": 42424242
-}
-....
-
-Response codes:
-
-* 200: Details of the mail
-* 404: This repository or mail can not be found
-
-=== Removing a mail from a mail repository
-
-....
-curl -XDELETE http://ip:port/mailRepositories/{encodedPathOfTheRepository}/mails/mailKey
-....
-
-Resource name `encodedPathOfTheRepository` should be the resource path
-of an existing mail repository. Resource name `mailKey` should be the
-key of a mail stored in that repository. Example:
-
-....
-curl -XDELETE http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails/mail-key-1
-....
-
-Response codes:
-
-* 204: This mail no longer exists in this repository
-* 404: This repository can not be found
-
-=== Removing all mails from a mail repository
-
-....
-curl -XDELETE http://ip:port/mailRepositories/{encodedPathOfTheRepository}/mails
-....
-
-Resource name `encodedPathOfTheRepository` should be the resource path
-of an existing mail repository. Example:
-
-....
-curl -XDELETE http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails
-....
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Response codes:
-
-* 201: Task generation succeeded. Corresponding task id is returned.
-* 404: Could not find that mail repository
-
-The scheduled task will have the following type `clear-mail-repository`
-and the following `additionalInformation`:
-
-....
-{
- "mailRepositoryPath":"var/mail/error/",
- "initialCount": 243,
- "remainingCount": 17
-}
-....
-
-=== Reprocessing mails from a mail repository
-
-Sometime, you want to re-process emails stored in a mail repository. For
-instance, you can make a configuration error, or there can be a James
-bug that makes processing of some mails fail. Those mail will be stored
-in a mail repository. Once you solved the problem, you can reprocess
-them.
-
-To reprocess mails from a repository:
-
-....
-curl -XPATCH http://ip:port/mailRepositories/{encodedPathOfTheRepository}/mails?action=reprocess
-....
-
-Resource name `encodedPathOfTheRepository` should be the resource path
-of an existing mail repository. Example:
-
-For instance:
-
-....
-curl -XPATCH http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails?action=reprocess
-....
-
-Additional query parameters are supported:
-
-- `queue` allows you to
-target the mail queue you want to enqueue the mails in. Defaults to
-`spool`.
-- `processor` allows you to overwrite the state of the
-reprocessing mails, and thus select the processors they will start their
-processing in. Defaults to the `state` field of each processed email.
-- `consume` (boolean defaulting to `true`) whether the reprocessing should consume the mail in its originating mail repository. Passing
-this value to `false` allows non destructive reprocessing as you keep a copy of the email in the mail repository and can be valuable
-when debugging.
-- `limit` (integer value. Optional, default is empty). It enables to limit the count of elements reprocessed.
-If unspecified the count of the processed elements is unbounded.
-- `maxRetries` Optional integer, defaults to no max retries limit. Only processed emails that had been retried less
-than this value. Ignored by default.
-
-redeliver_group_events
-
-....
-curl -XPATCH 'http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails?action=reprocess&processor=transport&queue=spool'
-....
-
-Note that the `action` query parameter is compulsary and can only take
-value `reprocess`.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Response codes:
-
-* 201: Task generation succeeded. Corresponding task id is returned.
-* 404: Could not find that mail repository
-
-The scheduled task will have the following type `reprocessing-all` and
-the following `additionalInformation`:
-
-....
-{
- "mailRepositoryPath":"var/mail/error/",
- "targetQueue":"spool",
- "targetProcessor":"transport",
- "initialCount": 243,
- "remainingCount": 17
-}
-....
-
-=== Reprocessing a specific mail from a mail repository
-
-To reprocess a specific mail from a mail repository:
-
-....
-curl -XPATCH http://ip:port/mailRepositories/{encodedPathOfTheRepository}/mails/mailKey?action=reprocess
-....
-
-Resource name `encodedPathOfTheRepository` should be the resource id of
-an existing mail repository. Resource name `mailKey` should be the key
-of a mail stored in that repository. Example:
-
-For instance:
-
-....
-curl -XPATCH http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails/name1?action=reprocess
-....
-
-Additional query parameters are supported:
-
-- `queue` allows you to
-target the mail queue you want to enqueue the mails in. Defaults to
-`spool`.
-- `processor` allows you to overwrite the state of the
-reprocessing mails, and thus select the processors they will start their
-processing in. Defaults to the `state` field of each processed email.
-- `consume` (boolean defaulting to `true`) whether the reprocessing should consume the mail in its originating mail repository. Passing
-this value to `false` allows non destructive reprocessing as you keep a copy of the email in the mail repository and can be valuable
-when debugging.
-
-While `processor` being an optional parameter, not specifying it will
-result reprocessing the mails in their current state
-(https://james.apache.org/server/feature-mailetcontainer.html#Processors[see
-documentation about processors and state]). Consequently, only few cases
-will give a different result, definitively storing them out of the mail
-repository.
-
-For instance:
-
-....
-curl -XPATCH 'http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails/name1?action=reprocess&processor=transport&queue=spool'
-....
-
-Note that the `action` query parameter is compulsary and can only take
-value `reprocess`.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Response codes:
-
-* 201: Task generation succeeded. Corresponding task id is returned.
-* 404: Could not find that mail repository
-
-The scheduled task will have the following type `reprocessing-one` and
-the following `additionalInformation`:
-
-....
-{
- "mailRepositoryPath":"var/mail/error/",
- "targetQueue":"spool",
- "targetProcessor":"transport",
- "mailKey":"name1"
-}
-....
-
-== Administrating mail queues
-
-=== Listing mail queues
-
-....
-curl -XGET http://ip:port/mailQueues
-....
-
-The answer looks like:
-
-....
-["outgoing","spool"]
-....
-
-Response codes:
-
-* 200: The list of mail queues
-
-=== Getting a mail queue details
-
-....
-curl -XGET http://ip:port/mailQueues/{mailQueueName}
-....
-
-Resource name `mailQueueName` is the name of a mail queue, this command
-will return the details of the given mail queue. For instance:
-
-....
-{"name":"outgoing","size":0}
-....
-
-Response codes:
-
-* 200: Success
-* 400: Mail queue is not valid
-* 404: The mail queue does not exist
-
-=== Listing the mails of a mail queue
-
-....
-curl -XGET http://ip:port/mailQueues/{mailQueueName}/mails
-....
-
-Additional URL query parameters:
-
-* `limit`: Maximum number of mails returned in a single call. Only
-strictly positive integer values are accepted. Example:
-
-....
-curl -XGET http://ip:port/mailQueues/{mailQueueName}/mails?limit=100
-....
-
-The answer looks like:
-
-....
-[{
- "name": "Mail1516976156284-8b3093b9-eebf-4c40-9c26-1450f4fcdc3c-to-test.com",
- "sender": "user@james.linagora.com",
- "recipients": ["someone@test.com"],
- "nextDelivery": "1969-12-31T23:59:59.999Z"
-}]
-....
-
-Response codes:
-
-* 200: Success
-* 400: Mail queue is not valid or limit is invalid
-* 404: The mail queue does not exist
-
-=== Deleting mails from a mail queue
-
-....
-curl -XDELETE http://ip:port/mailQueues/{mailQueueName}/mails?sender=senderMailAddress
-....
-
-This request should have exactly one query parameter from the following
-list:
-
-* sender: which is a mail address (i.e. sender@james.org)
-* name: which is a string
-* recipient: which is a mail address (i.e. recipient@james.org)
-
-The mails from the given mail queue matching the query parameter will be
-deleted.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Response codes:
-
-* 201: Task generation succeeded. Corresponding task id is returned.
-* 400: Invalid request
-* 404: The mail queue does not exist
-
-The scheduled task will have the following type
-`delete-mails-from-mail-queue` and the following
-`additionalInformation`:
-
-....
-{
- "queue":"outgoing",
- "initialCount":10,
- "remainingCount": 5,
- "sender": "sender@james.org",
- "name": "Java Developer",
- "recipient: "recipient@james.org"
-}
-....
-
-=== Clearing a mail queue
-
-....
-curl -XDELETE http://ip:port/mailQueues/{mailQueueName}/mails
-....
-
-All mails from the given mail queue will be deleted.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Response codes:
-
-* 201: Task generation succeeded. Corresponding task id is returned.
-* 400: Invalid request
-* 404: The mail queue does not exist
-
-The scheduled task will have the following type `clear-mail-queue` and
-the following `additionalInformation`:
-
-....
-{
- "queue":"outgoing",
- "initialCount":10,
- "remainingCount": 0
-}
-....
-
-=== Flushing mails from a mail queue
-
-....
-curl -XPATCH http://ip:port/mailQueues/{mailQueueName}?delayed=true \
- -d '{"delayed": false}' \
- -H "Content-Type: application/json"
-....
-
-This request should have the query parameter _delayed_ set to _true_, in
-order to indicate only delayed mails are affected. The payload should
-set the `delayed` field to false inorder to remove the delay. This is
-the only supported combination, and it performs a flush.
-
-The mails delayed in the given mail queue will be flushed.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Response codes:
-
-* 204: Success (No content)
-* 400: Invalid request
-* 404: The mail queue does not exist
-
-=== RabbitMQ republishing a mail queue from cassandra
-
-....
-curl -XPOST 'http://ip:port/mailQueues/{mailQueueName}?action=RepublishNotProcessedMails&olderThan=1d'
-....
-
-This method is specific to the distributed flavor of James, which relies
-on Cassandra and RabbitMQ for implementing a mail queue. In case of a
-RabbitMQ crash resulting in a loss of messages, this task can be
-launched to repopulate the `mailQueueName` queue in RabbitMQ using the
-information stored in Cassandra.
-
-The `olderThan` parameter is mandatory. It filters the mails to be
-restored, by taking into account only the mails older than the given
-value. The expected value should be expressed in the following format:
-`Nunit`. `N` should be strictly positive. `unit` could be either in the
-short form (`h`, `d`, `w`, etc.), or in the long form (`day`, `week`,
-`month`, etc.).
-
-Examples:
-
-* `5h`
-* `7d`
-* `1y`
-
-Response codes:
-
-* 201: Task created
-* 400: Invalid request
-
-The response body contains the id of the republishing task.
-`{ "taskId": "a650a66a-5984-431e-bdad-f1baad885856" }`
-
-=== Cassandra view of the RabbitMQ mailQueue: browse start update
-
-....
-curl -XPOST 'http://ip:port/mailQueues/{mailQueueName}?action=updateBrowseStart
-....
-
-Will return a task that updates the browse start of the aforementioned mailQueue, regardless of the configuration.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-This is an advanced, potentially expensive operation which requires a good understanding of the RabbitMQMailQueue design
-(https://github.com/apache/james-project/blob/master/src/adr/0031-distributed-mail-queue.md). Especially, care needs to
-be taken to call this at most once per slice (not doing so might be expensive).
-
-== Sending email over webAdmin
-
-....
-curl -XPOST /mail-transfer-service
-
-{MIME message}
-....
-
-Will send the following email to the recipients specified in the MIME message.
-
-The `{MIME message}` payload must match `message/rfc822` format.
-
-== Event Dead Letter
-
-The EventBus allows to register `group listeners' that are called in a
-distributed fashion. These group listeners enable the implementation of
-some advanced mailbox manager feature like indexing, spam reporting,
-quota management and the like.
-
-Upon exceptions, a bounded number of retries are performed (with
-exponential backoff delays). If after those retries the listener is
-still failing, then the event will be stored in the ``Event Dead
-Letter''. This API allows diagnosing issues, as well as performing event
-replay.
-
-=== Listing mailbox listener groups
-
-This endpoint allows discovering the list of mailbox listener groups.
-
-....
-curl -XGET http://ip:port/events/deadLetter/groups
-....
-
-Will return a list of group names that can be further used to interact
-with the dead letter API:
-
-....
-["org.apache.james.mailbox.events.EventBusTestFixture$GroupA", "org.apache.james.mailbox.events.GenericGroup-abc"]
-....
-
-Response codes:
-
-* 200: Success. A list of group names is returned.
-
-=== Listing failed events
-
-This endpoint allows listing failed events for a given group:
-
-....
-curl -XGET http://ip:port/events/deadLetter/groups/org.apache.james.mailbox.events.EventBusTestFixture$GroupA
-....
-
-Will return a list of insertionIds:
-
-....
-["6e0dd59d-660e-4d9b-b22f-0354479f47b4", "58a8f59d-660e-4d9b-b22f-0354486322a2"]
-....
-
-Response codes:
-
-* 200: Success. A list of insertion ids is returned.
-* 400: Invalid group name
-
-=== Getting event details
-
-....
-curl -XGET http://ip:port/events/deadLetter/groups/org.apache.james.mailbox.events.EventBusTestFixture$GroupA/6e0dd59d-660e-4d9b-b22f-0354479f47b4
-....
-
-Will return the full JSON associated with this event.
-
-Response codes:
-
-* 200: Success. A JSON representing this event is returned.
-* 400: Invalid group name or `insertionId`
-* 404: No event with this `insertionId`
-
-=== Deleting an event
-
-....
-curl -XDELETE http://ip:port/events/deadLetter/groups/org.apache.james.mailbox.events.EventBusTestFixture$GroupA/6e0dd59d-660e-4d9b-b22f-0354479f47b4
-....
-
-Will delete this event.
-
-Response codes:
-
-* 204: Success
-* 400: Invalid group name or `insertionId`
-
-=== Deleting all events of a group
-
-....
-curl -XDELETE http://ip:port/events/deadLetter/groups/org.apache.james.mailbox.events.EventBusTestFixture$GroupA
-....
-
-Will delete all events of this group.
-
-Response codes:
-
-* 204: Success
-* 400: Invalid group name
-
-=== Redeliver all events
-
-....
-curl -XPOST http://ip:port/events/deadLetter?action=reDeliver
-....
-
-Additional query parameters are supported:
-
-- `limit` (integer value. Optional, default is empty). It enables to limit the count of elements redelivered.
-If unspecified the count of the processed elements is unbounded
-
-For instance:
-
-....
-curl -XPOST http://ip:port/events/deadLetter?action=reDeliver&limit=10
-....
-
-Will create a task that will attempt to redeliver all events stored in
-``Event Dead Letter''. If successful, redelivered events will then be
-removed from ``Dead Letter''.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Response codes:
-
-* 201: the taskId of the created task
-* 400: Invalid action argument
-
-=== Redeliver group events
-
-....
-curl -XPOST http://ip:port/events/deadLetter/groups/org.apache.james.mailbox.events.EventBusTestFixture$GroupA?action=reDeliver
-....
-
-Will create a task that will attempt to redeliver all events of a
-particular group stored in ``Event Dead Letter''. If successful,
-redelivered events will then be removed from ``Dead Letter''.
-
-Additional query parameters are supported:
-
-- `limit` (integer value. Optional, default is empty). It enables to limit the count of elements redelivered.
-If unspecified the count of the processed elements is unbounded
-
-For instance:
-
-....
-curl -XPOST http://ip:port/events/deadLetter/groups/org.apache.james.mailbox.events.EventBusTestFixture$GroupA?action=reDeliver&limit=10
-....
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Response codes:
-
-* 201: the taskId of the created task
-* 400: Invalid group name or action argument
-
-=== Redeliver a single event
-
-....
-curl -XPOST http://ip:port/events/deadLetter/groups/org.apache.james.mailbox.events.EventBusTestFixture$GroupA/6e0dd59d-660e-4d9b-b22f-0354479f47b4?action=reDeliver
-....
-
-Will create a task that will attempt to redeliver a single event of a
-particular group stored in ``Event Dead Letter''. If successful,
-redelivered event will then be removed from ``Dead Letter''.
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Response codes:
-
-* 201: the taskId of the created task
-* 400: Invalid group name, insertion id or action argument
-* 404: No event with this insertionId
+:server-name: Distributed James Server
+:xref-base: distributed
+:backend-name: Cassandra
+:admin-mail-queues-extend: servers:distributed/operate/webadmin/admin-mail-queues-extend.adoc
+:admin-messages-extend: servers:distributed/operate/webadmin/admin-messages-extend.adoc
+:admin-mailboxes-extend: servers:distributed/operate/webadmin/admin-mailboxes-extend.adoc
+include::partial$operate/webadmin.adoc[]
== Cassandra extra operations
@@ -4497,471 +219,3 @@ the following `additionalInformation`:
"messageFailedCount": 0
}
....
-
-== Deleted Messages Vault
-
-The `Deleted Message Vault plugin' allows you to keep users deleted
-messages during a given retention time. This set of routes allow you to
-_restore_ users deleted messages or export them in an archive.
-
-To move deleted messages in the vault, you need to specifically
-configure the DeletedMessageVault PreDeletionHook.
-
-=== Restore Deleted Messages
-
-Deleted messages of a specific user can be restored by calling the
-following endpoint:
-
-....
-curl -XPOST http://ip:port/deletedMessages/users/userToRestore@domain.ext?action=restore
-
-{
- "combinator": "and",
- "criteria": [
- {
- "fieldName": "subject",
- "operator": "containsIgnoreCase",
- "value": "Apache James"
- },
- {
- "fieldName": "deliveryDate",
- "operator": "beforeOrEquals",
- "value": "2014-10-30T14:12:00Z"
- },
- {
- "fieldName": "deletionDate",
- "operator": "afterOrEquals",
- "value": "2015-10-20T09:08:00Z"
- },
- {
- "fieldName": "recipients","
- "operator": "contains","
- "value": "recipient@james.org"
- },
- {
- "fieldName": "hasAttachment",
- "operator": "equals",
- "value": "false"
- },
- {
- "fieldName": "sender",
- "operator": "equals",
- "value": "sender@apache.org"
- },
- {
- "fieldName": "originMailboxes",
- "operator": "contains",
- "value": "02874f7c-d10e-102f-acda-0015176f7922"
- }
- ]
-};
-....
-
-The requested Json body is made from a list of criterion objects which
-have the following structure:
-
-....
-{
- "fieldName": "supportedFieldName",
- "operator": "supportedOperator",
- "value": "A plain string representing the matching value of the corresponding field"
-}
-....
-
-Deleted Messages which are matched with the *all* criterion in the query
-body will be restored. Here are a list of supported fieldName for the
-restoring:
-
-* subject: represents for deleted message `subject` field matching.
-Supports below string operators:
-** contains
-** containsIgnoreCase
-** equals
-** equalsIgnoreCase
-* deliveryDate: represents for deleted message `deliveryDate` field
-matching. Tested value should follow the right date time with zone
-offset format (ISO-8601) like `2008-09-15T15:53:00+05:00` or
-`2008-09-15T15:53:00Z` Supports below date time operators:
-** beforeOrEquals: is the deleted message’s `deliveryDate` before or
-equals the time of tested value.
-** afterOrEquals: is the deleted message’s `deliveryDate` after or
-equals the time of tested value
-* deletionDate: represents for deleted message `deletionDate` field
-matching. Tested value & Supports operators: similar to `deliveryDate`
-* sender: represents for deleted message `sender` field matching. Tested
-value should be a valid mail address. Supports mail address operator:
-** equals: does the tested sender equal to the sender of the tested
-deleted message ? +
-* recipients: represents for deleted message `recipients` field
-matching. Tested value should be a valid mail address. Supports list
-mail address operator:
-** contains: does the tested deleted message’s recipients contain tested
-recipient ?
-* hasAttachment: represents for deleted message `hasAttachment` field
-matching. Tested value could be `false` or `true`. Supports boolean
-operator:
-** equals: does the tested deleted message’s hasAttachment property
-equal to the tested hasAttachment value?
-* originMailboxes: represents for deleted message `originMailboxes`
-field matching. Tested value is a string serialized of mailbox id.
-Supports list mailbox id operators:
-** contains: does the tested deleted message’s originMailbox ids contain
-tested mailbox id ?
-
-Messages in the Deleted Messages Vault of a specified user that are
-matched with Query Json Object in the body will be appended to his
-`Restored-Messages' mailbox, which will be created if needed.
-
-*Note*:
-
-* Query parameter `action` is required and should have the value
-`restore` to represent the restoring feature. Otherwise, a bad request
-response will be returned
-* Query parameter `action` is case sensitive
-* fieldName & operator passed to the routes are case sensitive
-* Currently, we only support query combinator `and` value, otherwise,
-requests will be rejected
-* If you only want to restore by only one criterion, the json body could
-be simplified to a single criterion:
-
-....
-{
- "fieldName": "subject",
- "operator": "containsIgnoreCase",
- "value": "Apache James"
-}
-....
-
-* For restoring all deleted messages, passing a query json with an empty
-criterion list to represent `matching all deleted messages`:
-
-....
-{
- "combinator": "and",
- "criteria": []
-}
-....
-
-* For limiting the number of restored messages, you can use the `limit` query property:
-
-....
-{
- "combinator": "and",
- "limit": 99
- "criteria": []
-}
-....
-
-*Warning*: Current web-admin uses `US` locale as the default. Therefore,
-there might be some conflicts when using String `containsIgnoreCase`
-comparators to apply on the String data of other special locales stored
-in the Vault. More details at
-https://issues.apache.org/jira/browse/MAILBOX-384[JIRA]
-
-Response code:
-
-* 201: Task for restoring deleted has been created
-* 400: Bad request:
-** action query param is not present
-** action query param is not a valid action
-** user parameter is invalid
-** can not parse the JSON body
-** Json query object contains unsupported operator, fieldName
-** Json query object values violate parsing rules
-* 404: User not found
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-The scheduled task will have the following type
-`deleted-messages-restore` and the following `additionalInformation`:
-
-....
-{
- "successfulRestoreCount": 47,
- "errorRestoreCount": 0,
- "user": "userToRestore@domain.ext"
-}
-....
-
-while:
-
-* successfulRestoreCount: number of restored messages
-* errorRestoreCount: number of messages that failed to restore
-* user: owner of deleted messages need to restore
-
-=== Export Deleted Messages
-
-Retrieve deleted messages matched with requested query from an user then
-share the content to a targeted mail address (exportTo)
-
-....
-curl -XPOST 'http://ip:port/deletedMessages/users/userExportFrom@domain.ext?action=export&exportTo=userReceiving@domain.ext'
-
-BODY: is the json query has the same structure with Restore Deleted Messages section
-....
-
-*Note*: Json query passing into the body follows the same rules &
-restrictions like in link:#_restore_deleted_messages[Restore Deleted
-Messages]
-
-Response code:
-
-* 201: Task for exporting has been created
-* 400: Bad request:
-** exportTo query param is not present
-** exportTo query param is not a valid mail address
-** action query param is not present
-** action query param is not a valid action
-** user parameter is invalid
-** can not parse the JSON body
-** Json query object contains unsupported operator, fieldName
-** Json query object values violate parsing rules
-* 404: User not found
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-The scheduled task will have the following type
-`deleted-messages-export` and the following `additionalInformation`:
-
-....
-{
- "userExportFrom": "userToRestore@domain.ext",
- "exportTo": "userReceiving@domain.ext",
- "totalExportedMessages": 1432
-}
-....
-
-while:
-
-* userExportFrom: export deleted messages from this user
-* exportTo: content of deleted messages have been shared to this mail
-address
-* totalExportedMessages: number of deleted messages match with
-json query, then being shared to sharee.
-
-=== Purge Deleted Messages
-
-You can overwrite `retentionPeriod' configuration in
-`deletedMessageVault' configuration file or use the default value of 1
-year.
-
-Purge all deleted messages older than the configured `retentionPeriod'
-
-....
-curl -XDELETE http://ip:port/deletedMessages?scope=expired
-....
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Response code:
-
-* 201: Task for purging has been created
-* 400: Bad request:
-** action query param is not present
-** action query param is not a valid action
-
-You may want to call this endpoint on a regular basis.
-
-=== Permanently Remove Deleted Message
-
-Delete a Deleted Message with `MessageId`
-
-....
-curl -XDELETE http://ip:port/deletedMessages/users/user@domain.ext/messages/3294a976-ce63-491e-bd52-1b6f465ed7a2
-....
-
-link:#_endpoints_returning_a_task[More details about endpoints returning
-a task].
-
-Response code:
-
-* 201: Task for deleting message has been created
-* 400: Bad request:
-** user parameter is invalid
-** messageId parameter is invalid
-* 404: User not found
-
-The scheduled task will have the following type
-`deleted-messages-delete` and the following `additionalInformation`:
-
-....
- {
- "userName": "user@domain.ext",
- "messageId": "3294a976-ce63-491e-bd52-1b6f465ed7a2"
- }
-....
-
-while: - user: delete deleted messages from this user - deleteMessageId:
-messageId of deleted messages will be delete
-
-== Administrating DLP Configuration
-
-DLP (stands for Data Leak Prevention) is supported by James. A DLP
-matcher will, on incoming emails, execute regular expressions on email
-sender, recipients or content, in order to report suspicious emails to
-an administrator. WebAdmin can be used to manage these DLP rules on a
-per `senderDomain` basis.
-
-`senderDomain` is domain of the sender of incoming emails, for example:
-`apache.org`, `james.org`,… Each `senderDomain` correspond to a distinct
-DLP configuration.
-
-=== List DLP configuration by sender domain
-
-Retrieve a DLP configuration for corresponding `senderDomain`, a
-configuration contains list of configuration items
-
-....
-curl -XGET http://ip:port/dlp/rules/{senderDomain}
-....
-
-Response codes:
-
-* 200: A list of dlp configuration items is returned
-* 400: Invalid `senderDomain` or payload in request
-* 404: The domain does not exist.
-
-This is an example of returned body. The rules field is a list of rules
-as described below.
-
-....
-{"rules : [
- {
- "id": "1",
- "expression": "james.org",
- "explanation": "Find senders or recipients containing james[any char]org",
- "targetsSender": true,
- "targetsRecipients": true,
- "targetsContent": false
- },
- {
- "id": "2",
- "expression": "Find senders containing apache[any char]org",
- "explanation": "apache.org",
- "targetsSender": true,
- "targetsRecipients": false,
- "targetsContent": false
- }
-]}
-....
-
-=== Store DLP configuration by sender domain
-
-Store a DLP configuration for corresponding `senderDomain`, if any item
-of DLP configuration in the request is stored before, it will not be
-stored anymore
-
-....
-curl -XPUT http://ip:port/dlp/rules/{senderDomain}
-....
-
-The body can contain a list of DLP configuration items formed by those
-fields: - `id`(String) is mandatory, unique identifier of the
-configuration item - `expression`(String) is mandatory, regular
-expression to match contents of targets - `explanation`(String) is
-optional, description of the configuration item -
-`targetsSender`(boolean) is optional and defaults to false. If true,
-`expression` will be applied to Sender and to From headers of the mail -
-`targetsContent`(boolean) is optional and defaults to false. If true,
-`expression` will be applied to Subject headers and textual bodies
-(text/plain and text/html) of the mail - `targetsRecipients`(boolean) is
-optional and defaults to false. If true, `expression` will be applied to
-recipients of the mail
-
-This is an example of returned body. The rules field is a list of rules
-as described below.
-
-....
-{"rules": [
- {
- "id": "1",
- "expression": "james.org",
- "explanation": "Find senders or recipients containing james[any char]org",
- "targetsSender": true,
- "targetsRecipients": true,
- "targetsContent": false
- },
- {
- "id": "2",
- "expression": "Find senders containing apache[any char]org",
- "explanation": "apache.org",
- "targetsSender": true,
- "targetsRecipients": false,
- "targetsContent": false
- }
-]}
-....
-
-Response codes:
-
-* 204: List of dlp configuration items is stored
-* 400: Invalid `senderDomain` or payload in request
-* 404: The domain does not exist.
-
-=== Remove DLP configuration by sender domain
-
-Remove a DLP configuration for corresponding `senderDomain`
-
-....
-curl -XDELETE http://ip:port/dlp/rules/{senderDomain}
-....
-
-Response codes:
-
-* 204: DLP configuration is removed
-* 400: Invalid `senderDomain` or payload in request
-* 404: The domain does not exist.
-
-=== Fetch a DLP configuration item by sender domain and rule id
-
-Retrieve a DLP configuration rule for corresponding `senderDomain` and a
-`ruleId`
-
-....
-curl -XGET http://ip:port/dlp/rules/{senderDomain}/rules/{ruleId}
-....
-
-Response codes:
-
-* 200: A dlp configuration item is returned
-* 400: Invalid `senderDomain` or payload in request
-* 404: The domain and/or the rule does not exist.
-
-This is an example of returned body.
-
-....
-{
- "id": "1",
- "expression": "james.org",
- "explanation": "Find senders or recipients containing james[any char]org",
- "targetsSender": true,
- "targetsRecipients": true,
- "targetsContent": false
-}
-....
-
-== Reloading server certificates
-
-Certificates for TCP based protocols (IMAP, SMTP, POP3, LMTP and ManageSieve) can be updated at
-runtime, without service interuption and without closing existing connections.
-
-In order to do so:
-
- - Generate / retrieve your cryptographic materials and replace the ones specified in James configuration.
- - Then call the following endpoint:
-
-....
-curl -XPOST http://ip:port/servers?reload-certificate
-....
-
-Optional query parameters:
-
- - `port`: positive integer (valid port number). Only reload certificates for the specific port.
-
-Return code:
-
- - 204: the certificate is reloaded
- - 400: Invalid request.
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/operate/webadmin/admin-mail-queues-extend.adoc b/docs/modules/servers/pages/distributed/operate/webadmin/admin-mail-queues-extend.adoc
new file mode 100644
index 00000000000..377be5637bf
--- /dev/null
+++ b/docs/modules/servers/pages/distributed/operate/webadmin/admin-mail-queues-extend.adoc
@@ -0,0 +1,14 @@
+=== Cassandra view of the RabbitMQ mailQueue: browse start update
+
+....
+curl -XPOST 'http://ip:port/mailQueues/{mailQueueName}?action=updateBrowseStart'
+....
+
+Will return a task that updates the browse start of the aforementioned mailQueue, regardless of the configuration.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+This is an advanced, potentially expensive operation which requires a good understanding of the RabbitMQMailQueue design
+(https://github.com/apache/james-project/blob/master/src/adr/0031-distributed-mail-queue.md). Especially, care needs to
+be taken to call this at most once per slice (not doing so might be expensive).
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/operate/webadmin/admin-mailboxes-extend.adoc b/docs/modules/servers/pages/distributed/operate/webadmin/admin-mailboxes-extend.adoc
new file mode 100644
index 00000000000..9342dc4341e
--- /dev/null
+++ b/docs/modules/servers/pages/distributed/operate/webadmin/admin-mailboxes-extend.adoc
@@ -0,0 +1,226 @@
+==== Fixing mailboxes inconsistencies
+
+....
+curl -XPOST /mailboxes?task=SolveInconsistencies
+....
+
+Will schedule a task for fixing inconsistencies for the mailbox
+deduplicated object stored in Cassandra.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+The `I-KNOW-WHAT-I-M-DOING` header is mandatory (you can read more
+information about it in the warning section below).
+
+The scheduled task will have the following type
+`solve-mailbox-inconsistencies` and the following
+`additionalInformation`:
+
+....
+{
+ "type":"solve-mailbox-inconsistencies",
+ "processedMailboxEntries": 3,
+ "processedMailboxPathEntries": 3,
+ "fixedInconsistencies": 2,
+ "errors": 1,
+ "conflictingEntries":[{
+ "mailboxDaoEntry":{
+ "mailboxPath":"#private:user:mailboxName",
+ "mailboxId":"464765a0-e4e7-11e4-aba4-710c1de3782b"
+ }," +
+ "mailboxPathDaoEntry":{
+ "mailboxPath":"#private:user:mailboxName2",
+ "mailboxId":"464765a0-e4e7-11e4-aba4-710c1de3782b"
+ }
+ }]
+}
+....
+
+Note that conflicting entry inconsistencies will not be fixed and will
+require to explicitly use link:#_correcting_ghost_mailbox[ghost mailbox]
+endpoint in order to merge the conflicting mailboxes and prevent any
+message loss.
+
+*WARNING*: this task can cancel concurrently running legitimate user
+operations upon dirty read. As such this task should be run offline.
+
+A dirty read is when data is read between the two writes of the
+denormalization operations (no isolation).
+
+In order to ensure being offline, stop the traffic on SMTP, JMAP and
+IMAP ports, for example via re-configuration or firewall rules.
+
+Due to all of those risks, a `I-KNOW-WHAT-I-M-DOING` header should be
+positioned to `ALL-SERVICES-ARE-OFFLINE` in order to prevent accidental
+calls.
+
+==== Recomputing mailbox counters
+
+....
+curl -XPOST /mailboxes?task=RecomputeMailboxCounters
+....
+
+Will recompute counters (unseen & total count) for the mailbox object
+stored in Cassandra.
+
+Cassandra maintains a per mailbox projection for message count and
+unseen message count. As with any projection, it can go out of sync,
+leading to inconsistent results being returned to the client.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+The scheduled task will have the following type
+`recompute-mailbox-counters` and the following `additionalInformation`:
+
+....
+{
+ "type":"recompute-mailbox-counters",
+ "processedMailboxes": 3,
+ "failedMailboxes": ["464765a0-e4e7-11e4-aba4-710c1de3782b"]
+}
+....
+
+Note that conflicting entry inconsistencies will not be fixed and will
+require to explicitly use link:#_correcting_ghost_mailbox[ghost mailbox]
+endpoint in order to merge the conflicting mailboxes and prevent any
+message loss.
+
+*WARNING*: this task does not take into account concurrent modifications
+upon a single mailbox counter recomputation. Rerunning the task will
+_eventually_ provide the consistent result. As such we advise to run
+this task offline.
+
+In order to ensure being offline, stop the traffic on SMTP, JMAP and
+IMAP ports, for example via re-configuration or firewall rules.
+
+`trustMessageProjection` query parameter can be set to `true`. Content
+of `messageIdTable` (listing messages by their mailbox context) table
+will be trusted and not compared against content of `imapUidTable` table
+(listing messages by their messageId mailbox independent identifier).
+This will result in a better performance running the task at the cost of
+safety in the face of message denormalization inconsistencies.
+
+Defaults to false, which generates additional checks. You can read
+https://github.com/apache/james-project/blob/master/src/adr/0022-cassandra-message-inconsistency.md[this
+ADR] to better understand the message projection and how it can become
+inconsistent.
+
+=== Fixing message inconsistencies
+
+This task is only available on top of Guice Cassandra products.
+
+....
+curl -XPOST /messages?task=SolveInconsistencies
+....
+
+Will schedule a task for fixing message inconsistencies created by the
+message denormalization process.
+
+Messages are denormalized and stored in separated data tables in
+Cassandra, so they can be accessed by their unique identifier or mailbox
+identifier & local mailbox identifier through different protocols.
+
+Failure in the denormalization process will lead to inconsistencies, for
+example:
+
+....
+BOB receives a message
+The denormalization process fails
+BOB can read the message via JMAP
+BOB cannot read the message via IMAP
+
+BOB marks a message as SEEN
+The denormalization process fails
+The message is SEEN via JMAP
+The message is UNSEEN via IMAP
+....
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+An admin can specify the concurrency that should be used when running
+the task:
+
+* `messagesPerSecond` rate of messages to be processed per second.
+Default is 100.
+
+This optional parameter must have a strictly positive integer as a value
+and be passed as query parameter.
+
+An admin can also specify the reindexing mode it wants to use when
+running the task:
+
+* `mode` the reindexing mode used. There are 2 modes for the moment:
+** `rebuildAll` allows to rebuild all indexes. This is the default mode.
+** `fixOutdated` will check for outdated indexed document and reindex
+only those.
+
+This optional parameter must be passed as query parameter.
+
+It’s good to note as well that there is a limitation with the
+`fixOutdated` mode. As we first collect metadata of stored messages to
+compare them with the ones in the index, a failed `expunged` operation
+might not be well corrected (as the message might not exist anymore but
+still be indexed).
+
+Example:
+
+....
+curl -XPOST /messages?task=SolveInconsistencies&messagesPerSecond=200&mode=rebuildAll
+....
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Error in the request. Details can be found in the reported error.
+
+The scheduled task will have the following type
+`solve-message-inconsistencies` and the following
+`additionalInformation`:
+
+....
+{
+ "type":"solve-message-inconsistencies",
+ "timestamp":"2007-12-03T10:15:30Z",
+ "processedImapUidEntries": 2,
+ "processedMessageIdEntries": 1,
+ "addedMessageIdEntries": 1,
+ "updatedMessageIdEntries": 0,
+ "removedMessageIdEntries": 1,
+ "runningOptions":{
+ "messagesPerSecond": 200,
+ "mode":"REBUILD_ALL"
+ },
+ "fixedInconsistencies": [
+ {
+ "mailboxId": "551f0580-82fb-11ea-970e-f9c83d4cf8c2",
+ "messageId": "d2bee791-7e63-11ea-883c-95b84008f979",
+ "uid": 1
+ },
+ {
+ "mailboxId": "551f0580-82fb-11ea-970e-f9c83d4cf8c2",
+ "messageId": "d2bee792-7e63-11ea-883c-95b84008f979",
+ "uid": 2
+ }
+ ],
+ "errors": [
+ {
+ "mailboxId": "551f0580-82fb-11ea-970e-f9c83d4cf8c2",
+ "messageId": "ffffffff-7e63-11ea-883c-95b84008f979",
+ "uid": 3
+ }
+ ]
+}
+....
+
+User actions concurrent to the inconsistency fixing task could result in
+concurrency issues. New inconsistencies could be created.
+
+However the source of truth will not be impacted, hence rerunning the
+task will eventually fix all issues.
+
+This task could be run safely online and can be scheduled on a recurring
+basis outside of peak traffic by an admin to ensure Cassandra message
+consistency.
diff --git a/docs/modules/servers/pages/distributed/operate/webadmin/admin-messages-extend.adoc b/docs/modules/servers/pages/distributed/operate/webadmin/admin-messages-extend.adoc
new file mode 100644
index 00000000000..1f77c276581
--- /dev/null
+++ b/docs/modules/servers/pages/distributed/operate/webadmin/admin-messages-extend.adoc
@@ -0,0 +1,117 @@
+=== Fixing message inconsistencies
+
+This task is only available on top of Guice Cassandra products.
+
+....
+curl -XPOST /messages?task=SolveInconsistencies
+....
+
+Will schedule a task for fixing message inconsistencies created by the
+message denormalization process.
+
+Messages are denormalized and stored in separated data tables in
+Cassandra, so they can be accessed by their unique identifier or mailbox
+identifier & local mailbox identifier through different protocols.
+
+Failure in the denormalization process will lead to inconsistencies, for
+example:
+
+....
+BOB receives a message
+The denormalization process fails
+BOB can read the message via JMAP
+BOB cannot read the message via IMAP
+
+BOB marks a message as SEEN
+The denormalization process fails
+The message is SEEN via JMAP
+The message is UNSEEN via IMAP
+....
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+An admin can specify the concurrency that should be used when running
+the task:
+
+* `messagesPerSecond` rate of messages to be processed per second.
+Default is 100.
+
+This optional parameter must have a strictly positive integer as a value
+and be passed as query parameter.
+
+An admin can also specify the reindexing mode it wants to use when
+running the task:
+
+* `mode` the reindexing mode used. There are 2 modes for the moment:
+** `rebuildAll` allows to rebuild all indexes. This is the default mode.
+** `fixOutdated` will check for outdated indexed document and reindex
+only those.
+
+This optional parameter must be passed as query parameter.
+
+It’s good to note as well that there is a limitation with the
+`fixOutdated` mode. As we first collect metadata of stored messages to
+compare them with the ones in the index, a failed `expunged` operation
+might not be well corrected (as the message might not exist anymore but
+still be indexed).
+
+Example:
+
+....
+curl -XPOST /messages?task=SolveInconsistencies&messagesPerSecond=200&mode=rebuildAll
+....
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Error in the request. Details can be found in the reported error.
+
+The scheduled task will have the following type
+`solve-message-inconsistencies` and the following
+`additionalInformation`:
+
+....
+{
+ "type":"solve-message-inconsistencies",
+ "timestamp":"2007-12-03T10:15:30Z",
+ "processedImapUidEntries": 2,
+ "processedMessageIdEntries": 1,
+ "addedMessageIdEntries": 1,
+ "updatedMessageIdEntries": 0,
+ "removedMessageIdEntries": 1,
+ "runningOptions":{
+ "messagesPerSecond": 200,
+ "mode":"REBUILD_ALL"
+ },
+ "fixedInconsistencies": [
+ {
+ "mailboxId": "551f0580-82fb-11ea-970e-f9c83d4cf8c2",
+ "messageId": "d2bee791-7e63-11ea-883c-95b84008f979",
+ "uid": 1
+ },
+ {
+ "mailboxId": "551f0580-82fb-11ea-970e-f9c83d4cf8c2",
+ "messageId": "d2bee792-7e63-11ea-883c-95b84008f979",
+ "uid": 2
+ }
+ ],
+ "errors": [
+ {
+ "mailboxId": "551f0580-82fb-11ea-970e-f9c83d4cf8c2",
+ "messageId": "ffffffff-7e63-11ea-883c-95b84008f979",
+ "uid": 3
+ }
+ ]
+}
+....
+
+User actions concurrent to the inconsistency fixing task could result in
+concurrency issues. New inconsistencies could be created.
+
+However the source of truth will not be impacted, hence rerunning the
+task will eventually fix all issues.
+
+This task could be run safely online and can be scheduled on a recurring
+basis outside of peak traffic by an admin to ensure Cassandra message
+consistency.
\ No newline at end of file
diff --git a/docs/modules/servers/pages/distributed/run/run-docker.adoc b/docs/modules/servers/pages/distributed/run/run-docker.adoc
index 986821b29c0..c5953537377 100644
--- a/docs/modules/servers/pages/distributed/run/run-docker.adoc
+++ b/docs/modules/servers/pages/distributed/run/run-docker.adoc
@@ -28,7 +28,7 @@ A default domain, james.local, has been created. You can see this by running:
James will respond to IMAP port 143 and SMTP port 25.
You have to create users before playing with james. You may also want to create other domains.
-Follow the 'Useful commands' section for more information about James CLI.
+Follow the xref:distributed/operate/cli.adoc['Useful commands'] section for more information about James CLI.
== Run with docker
diff --git a/docs/modules/servers/pages/index.adoc b/docs/modules/servers/pages/index.adoc
index 3fd055e4367..4c6faf58354 100644
--- a/docs/modules/servers/pages/index.adoc
+++ b/docs/modules/servers/pages/index.adoc
@@ -16,6 +16,7 @@ The available James Servers are:
* <>
* <>
* <>
+ * <>
* <>
If you are just checking out James for the first time, then we highly recommend
@@ -79,6 +80,14 @@ and is intended for experts only.
+[#postgres]
+== James Postgres Mail Server
+
+The xref:postgres/index.adoc[*Distributed with Postgres Server*] is a
+variant of the distributed server with Postgres as the database.
+
+
+
[#test]
== James Test Server
diff --git a/docs/modules/servers/pages/postgres/architecture/consistency-model.adoc b/docs/modules/servers/pages/postgres/architecture/consistency-model.adoc
new file mode 100644
index 00000000000..dfd1687255c
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/architecture/consistency-model.adoc
@@ -0,0 +1,11 @@
+= Postgresql James server — Consistency Model
+:navtitle: Consistency Model
+
+:backend-name: postgres
+:backend-name-cap: Postgres
+:server-name: Postgresql James server
+:mailet-repository-path-prefix: postgres
+:xref-base: postgres
+:data_replication_extend: servers:postgres/architecture/consistency_model_data_replication_extend.adoc
+
+include::partial$architecture/consistency-model.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/architecture/consistency_model_data_replication_extend.adoc b/docs/modules/servers/pages/postgres/architecture/consistency_model_data_replication_extend.adoc
new file mode 100644
index 00000000000..ab0c01417a7
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/architecture/consistency_model_data_replication_extend.adoc
@@ -0,0 +1 @@
+//
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/architecture/implemented-standards.adoc b/docs/modules/servers/pages/postgres/architecture/implemented-standards.adoc
new file mode 100644
index 00000000000..e33b3d8a8d4
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/architecture/implemented-standards.adoc
@@ -0,0 +1,6 @@
+= Postgresql James server — Implemented standards
+:navtitle: Implemented standards
+
+:server-name: Postgresql James server
+
+include::partial$architecture/implemented-standards.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/architecture/index.adoc b/docs/modules/servers/pages/postgres/architecture/index.adoc
new file mode 100644
index 00000000000..8be525750ae
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/architecture/index.adoc
@@ -0,0 +1,13 @@
+= Postgresql James server — Architecture
+:navtitle: Architecture
+
+:backend-name: postgres
+:server-name: Postgresql James server
+:backend-storage-introduce: Postgresql is used for metadata storage. Postgresql is efficient for a very high workload.
+:storage-picture-file-name: storage_james_postgres.png
+:mailet-repository-path-prefix: postgres
+:xref-base: postgres
+:mailqueue-combined-extend: servers:postgres/architecture/mailqueue_combined_extend.adoc
+:mailqueue-combined-extend-backend:
+
+include::partial$architecture/index.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/architecture/mailqueue_combined_extend.adoc b/docs/modules/servers/pages/postgres/architecture/mailqueue_combined_extend.adoc
new file mode 100644
index 00000000000..dba010cbb09
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/architecture/mailqueue_combined_extend.adoc
@@ -0,0 +1 @@
+// 123
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/architecture/specialized-instances.adoc b/docs/modules/servers/pages/postgres/architecture/specialized-instances.adoc
new file mode 100644
index 00000000000..7f8e3493772
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/architecture/specialized-instances.adoc
@@ -0,0 +1,7 @@
+= Postgresql James server — Specialized instances
+:navtitle: Specialized instances
+
+:server-name: Postgresql James server
+:specialized-instances-file-name: specialized-instances-postgres.png
+
+include::partial$architecture/specialized-instances.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/benchmark/benchmark_prepare.adoc b/docs/modules/servers/pages/postgres/benchmark/benchmark_prepare.adoc
new file mode 100644
index 00000000000..5257ef4840f
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/benchmark/benchmark_prepare.adoc
@@ -0,0 +1,40 @@
+=== Postgresql prepare benchmark
+
+==== Install extension pg_stat_statements
+
+The `pg_stat_statements` extension provides a means for tracking execution statistics of all SQL statements executed by a server.
+The extension is useful for identifying high-traffic queries and for monitoring the performance of the server.
+For more information, see the https://www.postgresql.org/docs/current/pgstatstatements.html[PostgreSQL documentation].
+
+To install the extension, connect to the database and run the following query:
+
+[source,sql]
+----
+create extension if not exists pg_stat_statements;
+alter system set shared_preload_libraries='pg_stat_statements';
+
+-- restart postgres
+-- optional
+alter system set pg_stat_statements.max = 100000;
+alter system set pg_stat_statements.track = 'all';
+----
+
+To reset statistics, use: `select pg_stat_statements_reset();`
+
+The response fields that we are interested in are:
+
+- `query`: Text of a representative statement
+
+- `calls`: Number of times the statement was executed
+
+- `total_exec_time`, `mean_exec_time`, `min_exec_time`, `max_exec_time`
+
+To view the statistics, run the following query:
+
+----
+select query, mean_exec_time, total_exec_time, calls from pg_stat_statements order by total_exec_time desc;
+----
+
+The result sample:
+
+image::postgres_pg_stat_statements.png[Sample pg_stat_statements query statistics]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/benchmark/db-benchmark.adoc b/docs/modules/servers/pages/postgres/benchmark/db-benchmark.adoc
new file mode 100644
index 00000000000..5e9bf216f77
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/benchmark/db-benchmark.adoc
@@ -0,0 +1,8 @@
+= Postgresql James server -- Database benchmarks
+:navtitle: Database benchmarks
+
+:backend-name: postgres
+:server-name: Postgresql James server
+:backend-database-extend-sample: PostgreSQL 16 as main database: 1 node (OVH instance, 2 CPU / 7 GB RAM, 160 GB SSD)
+
+include::partial$benchmark/db-benchmark.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/benchmark/index.adoc b/docs/modules/servers/pages/postgres/benchmark/index.adoc
new file mode 100644
index 00000000000..0532346caa2
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/benchmark/index.adoc
@@ -0,0 +1,7 @@
+= Postgresql James server — Performance testing
+:navtitle: Performance testing the Postgresql James server
+
+:xref-base: postgres
+:server-name: Postgresql James server
+
+include::partial$benchmark/index.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/benchmark/james-benchmark.adoc b/docs/modules/servers/pages/postgres/benchmark/james-benchmark.adoc
new file mode 100644
index 00000000000..52bdec9769a
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/benchmark/james-benchmark.adoc
@@ -0,0 +1,10 @@
+= Postgresql James server benchmark
+:navtitle: James benchmarks
+
+:server-name: Postgresql James server
+:backend-database-extend-sample: PostgreSQL 16 as main database: 1 node (OVH instance, 2 CPU / 7 GB RAM, 160 GB SSD)
+:provision_file_url: https://github.com/apache/james-project/blob/d8225ed7c5ca8d79cde3b1c8755ee9ffcf462e29/server/apps/postgres-app/provision.sh
+:benchmark_prepare_extend: servers:postgres/benchmark/benchmark_prepare.adoc
+:james-imap-base-performance-picture: james-imap-base-performance-postgres.png
+
+include::partial$benchmark/james-benchmark.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/batchsizes.adoc b/docs/modules/servers/pages/postgres/configure/batchsizes.adoc
new file mode 100644
index 00000000000..8c7264ce05a
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/batchsizes.adoc
@@ -0,0 +1,5 @@
+= Postgresql James Server — batchsizes.properties
+:navtitle: batchsizes.properties
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+include::partial$configure/batchsizes.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/blobstore.adoc b/docs/modules/servers/pages/postgres/configure/blobstore.adoc
new file mode 100644
index 00000000000..e7c1d341aa1
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/blobstore.adoc
@@ -0,0 +1,51 @@
+= Postgresql James Server — blobstore.properties
+:navtitle: blobstore.properties
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+
+== BlobStore
+
+This file is optional. If omitted, the *postgres* blob store will be used.
+
+BlobStore is the dedicated component to store blobs, non-indexable content.
+James uses the BlobStore for storing blobs which are usually mail contents, attachments, deleted mails...
+
+You can choose the underlying implementation of BlobStore to fit with your James setup.
+
+It could be the implementation on top of Postgres or file storage service S3 compatible like Openstack Swift and AWS S3.
+
+Consult link:{sample-configuration-prefix-url}/blob.properties[blob.properties]
+in GIT to get some examples and hints.
+
+=== Implementation choice
+
+*implementation* :
+
+* postgres: use Postgres based BlobStore
+* objectstorage: use Swift/AWS S3 based BlobStore
+* file: (experimental) directly use the file system. Useful for legacy architecture based on shared iSCSI SANs and/or
+distributed file system with no object store available.
+
+*deduplication.enable*: Mandatory. Supported value: true and false.
+
+If you choose to enable deduplication, the mails with the same content will be stored only once.
+
+WARNING: Once this feature is enabled, there is no turning back as turning it off will lead to the deletion of all
+the mails sharing the same content once one is deleted.
+
+Deduplication requires a garbage collector mechanism to effectively drop blobs. A first implementation
+based on bloom filters can be used and triggered using the WebAdmin REST API. See
+xref:{pages-path}/operate/webadmin.adoc#_running_blob_garbage_collection[Running blob garbage collection].
+
+In order to avoid concurrency issues upon garbage collection, we slice the blobs in generation, the two more recent
+generations are not garbage collected.
+
+*deduplication.gc.generation.duration*: Allow controlling the duration of one generation. Longer implies better deduplication
+but deleted blobs will live longer. Duration, defaults on 30 days, the default unit is in days.
+
+*deduplication.gc.generation.family*: Every time the duration is changed, this integer counter must be incremented to avoid
+conflicts. Defaults to 1.
+
+
+include::partial$configure/blobstore.adoc[]
diff --git a/docs/modules/servers/pages/postgres/configure/collecting-contacts.adoc b/docs/modules/servers/pages/postgres/configure/collecting-contacts.adoc
new file mode 100644
index 00000000000..b077a2c45ce
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/collecting-contacts.adoc
@@ -0,0 +1,4 @@
+= Contact collection
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+include::partial$configure/collecting-contacts.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/collecting-events.adoc b/docs/modules/servers/pages/postgres/configure/collecting-events.adoc
new file mode 100644
index 00000000000..431f06aa8be
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/collecting-events.adoc
@@ -0,0 +1,4 @@
+= Event collection
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+include::partial$configure/collecting-events.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/dns.adoc b/docs/modules/servers/pages/postgres/configure/dns.adoc
new file mode 100644
index 00000000000..ffff105f3e8
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/dns.adoc
@@ -0,0 +1,5 @@
+= Postgresql James Server — dnsservice.xml
+:navtitle: dnsservice.xml
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+include::partial$configure/dns.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/domainlist.adoc b/docs/modules/servers/pages/postgres/configure/domainlist.adoc
new file mode 100644
index 00000000000..9654c2c6b74
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/domainlist.adoc
@@ -0,0 +1,5 @@
+= Postgresql James Server — domainlist.xml
+:navtitle: domainlist.xml
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+include::partial$configure/domainlist.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/droplists.adoc b/docs/modules/servers/pages/postgres/configure/droplists.adoc
new file mode 100644
index 00000000000..fb1c242047d
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/droplists.adoc
@@ -0,0 +1,6 @@
+= Postgresql James Server — DropLists
+:navtitle: DropLists
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+include::partial$configure/droplists.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/dsn.adoc b/docs/modules/servers/pages/postgres/configure/dsn.adoc
new file mode 100644
index 00000000000..46cdc91803e
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/dsn.adoc
@@ -0,0 +1,7 @@
+= Postgresql James Server — Delivery Submission Notifications
+:navtitle: ESMTP DSN setup
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+:mailet-repository-path-prefix: postgres
+include::partial$configure/dsn.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/extensions.adoc b/docs/modules/servers/pages/postgres/configure/extensions.adoc
new file mode 100644
index 00000000000..c99cb4a6289
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/extensions.adoc
@@ -0,0 +1,6 @@
+= Postgresql James Server — extensions.properties
+:navtitle: extensions.properties
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+include::partial$configure/extensions.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/healthcheck.adoc b/docs/modules/servers/pages/postgres/configure/healthcheck.adoc
new file mode 100644
index 00000000000..dd0a5e4bcb2
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/healthcheck.adoc
@@ -0,0 +1,5 @@
+= Postgresql James Server — healthcheck.properties
+:navtitle: healthcheck.properties
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+include::partial$configure/healthcheck.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/imap.adoc b/docs/modules/servers/pages/postgres/configure/imap.adoc
new file mode 100644
index 00000000000..47b538272fb
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/imap.adoc
@@ -0,0 +1,6 @@
+= Postgresql James Server — imapserver.xml
+:navtitle: imapserver.xml
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+include::partial$configure/imap.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/index.adoc b/docs/modules/servers/pages/postgres/configure/index.adoc
new file mode 100644
index 00000000000..5ef404256d1
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/index.adoc
@@ -0,0 +1,24 @@
+= Postgresql James Server — Configuration
+:navtitle: Configuration
+
+This section presents how to configure the Postgresql James server.
+
+The Postgresql James Server relies on separated files for configuring various components. Some files follow a *xml* format
+and some others follow a *property* format. Some files can be omitted, in which case the functionality can be disabled,
+or rely on reasonable defaults.
+
+The following configuration files are exposed:
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:xref-base: postgres/configure
+:server-name: Postgresql James server
+
+include::partial$configure/forProtocolsPartial.adoc[]
+
+include::partial$configure/forStorageDependenciesPartial.adoc[]
+
+include::partial$configure/forCoreComponentsPartial.adoc[]
+
+include::partial$configure/forExtensionsPartial.adoc[]
+
+include::partial$configure/systemPropertiesPartial.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/jmap.adoc b/docs/modules/servers/pages/postgres/configure/jmap.adoc
new file mode 100644
index 00000000000..912ba217436
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/jmap.adoc
@@ -0,0 +1,7 @@
+= Postgresql James Server — jmap.properties
+:navtitle: jmap.properties
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:server-name: Postgresql James server
+:backend-name: Postgresql
+include::partial$configure/jmap.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/jmx.adoc b/docs/modules/servers/pages/postgres/configure/jmx.adoc
new file mode 100644
index 00000000000..0b294bbfa6a
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/jmx.adoc
@@ -0,0 +1,5 @@
+= Postgresql James Server — jmx.properties
+:navtitle: jmx.properties
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+include::partial$configure/jmx.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/jvm.adoc b/docs/modules/servers/pages/postgres/configure/jvm.adoc
new file mode 100644
index 00000000000..28611f12800
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/jvm.adoc
@@ -0,0 +1,5 @@
+= Postgresql James Server — jvm.properties
+:navtitle: jvm.properties
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+include::partial$configure/jvm.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/listeners.adoc b/docs/modules/servers/pages/postgres/configure/listeners.adoc
new file mode 100644
index 00000000000..011dd6c3963
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/listeners.adoc
@@ -0,0 +1,6 @@
+= Postgresql James Server — listeners.xml
+:navtitle: listeners.xml
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:server-name: Postgresql James server
+include::partial$configure/listeners.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/mailetcontainer.adoc b/docs/modules/servers/pages/postgres/configure/mailetcontainer.adoc
new file mode 100644
index 00000000000..8b8184fbd95
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/mailetcontainer.adoc
@@ -0,0 +1,6 @@
+= Postgresql James Server — mailetcontainer.xml
+:navtitle: mailetcontainer.xml
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+include::partial$configure/mailetcontainer.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/mailets.adoc b/docs/modules/servers/pages/postgres/configure/mailets.adoc
new file mode 100644
index 00000000000..07c8f532e56
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/mailets.adoc
@@ -0,0 +1,6 @@
+= Postgresql James Server — Mailets
+:navtitle: Mailets
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:server-name: Postgresql James server
+include::partial$configure/mailets.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/mailrepositorystore.adoc b/docs/modules/servers/pages/postgres/configure/mailrepositorystore.adoc
new file mode 100644
index 00000000000..bba70563b2c
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/mailrepositorystore.adoc
@@ -0,0 +1,9 @@
+= Postgresql James Server — mailrepositorystore.xml
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+:server-name: Postgresql James server
+:mailet-repository-path-prefix: postgres
+:mail-repository-protocol: postgres
+:mail-repository-class: org.apache.james.mailrepository.postgres.PostgresMailRepository
+include::partial$configure/mailrepositorystore.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/matchers.adoc b/docs/modules/servers/pages/postgres/configure/matchers.adoc
new file mode 100644
index 00000000000..d97cc58fd6a
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/matchers.adoc
@@ -0,0 +1,7 @@
+= Postgresql James Server — Matchers
+:navtitle: Matchers
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+:server-name: Postgresql James server
+include::partial$configure/matchers.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/opensearch.adoc b/docs/modules/servers/pages/postgres/configure/opensearch.adoc
new file mode 100644
index 00000000000..16314afb10c
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/opensearch.adoc
@@ -0,0 +1,8 @@
+= Postgresql James Server — opensearch.properties
+:navtitle: opensearch.properties
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+:server-name: Postgresql James server
+:package-tag: postgres
+include::partial$configure/opensearch.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/pop3.adoc b/docs/modules/servers/pages/postgres/configure/pop3.adoc
new file mode 100644
index 00000000000..95da0cfbc9a
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/pop3.adoc
@@ -0,0 +1,7 @@
+= Postgresql James Server — pop3server.xml
+:navtitle: pop3server.xml
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+:server-name: Postgresql James server
+include::partial$configure/pop3.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/queue.adoc b/docs/modules/servers/pages/postgres/configure/queue.adoc
new file mode 100644
index 00000000000..09f666e498a
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/queue.adoc
@@ -0,0 +1,5 @@
+= Postgresql James Server — queue.properties
+:navtitle: queue.properties
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+include::partial$configure/queue.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/rabbitmq.adoc b/docs/modules/servers/pages/postgres/configure/rabbitmq.adoc
new file mode 100644
index 00000000000..ddee170f82d
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/rabbitmq.adoc
@@ -0,0 +1,5 @@
+= Postgresql James Server — rabbitmq.properties
+:navtitle: rabbitmq.properties
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+include::partial$configure/rabbitmq.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/recipientrewritetable.adoc b/docs/modules/servers/pages/postgres/configure/recipientrewritetable.adoc
new file mode 100644
index 00000000000..6cc602f7866
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/recipientrewritetable.adoc
@@ -0,0 +1,7 @@
+= Postgresql James Server — recipientrewritetable.xml
+:navtitle: recipientrewritetable.xml
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+:server-name: Postgresql James server
+include::partial$configure/recipientrewritetable.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/redis.adoc b/docs/modules/servers/pages/postgres/configure/redis.adoc
new file mode 100644
index 00000000000..c3b2558d4b0
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/redis.adoc
@@ -0,0 +1,5 @@
+= Postgresql James Server — redis.properties
+:navtitle: redis.properties
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+include::partial$configure/redis.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/remote-delivery-error-handling.adoc b/docs/modules/servers/pages/postgres/configure/remote-delivery-error-handling.adoc
new file mode 100644
index 00000000000..7500221ac3e
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/remote-delivery-error-handling.adoc
@@ -0,0 +1,8 @@
+= Postgresql James Server — About RemoteDelivery error handling
+:navtitle: About RemoteDelivery error handling
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+:server-name: Postgresql James server
+:mailet-repository-path-prefix: postgres
+include::partial$configure/remote-delivery-error-handling.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/search.adoc b/docs/modules/servers/pages/postgres/configure/search.adoc
new file mode 100644
index 00000000000..0c329853048
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/search.adoc
@@ -0,0 +1,5 @@
+= Postgresql James Server — Search configuration
+:navtitle: Search configuration
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+include::partial$configure/search.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/sieve.adoc b/docs/modules/servers/pages/postgres/configure/sieve.adoc
new file mode 100644
index 00000000000..8326b2752e4
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/sieve.adoc
@@ -0,0 +1,7 @@
+= Sieve
+:navtitle: Sieve
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+:server-name: Postgresql James server
+include::partial$configure/sieve.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/smtp-hooks.adoc b/docs/modules/servers/pages/postgres/configure/smtp-hooks.adoc
new file mode 100644
index 00000000000..cac323ebc8d
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/smtp-hooks.adoc
@@ -0,0 +1,7 @@
+= Postgresql James Server — SMTP Hooks
+:navtitle: SMTP Hooks
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+:server-name: Postgresql James server
+include::partial$configure/smtp-hooks.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/smtp.adoc b/docs/modules/servers/pages/postgres/configure/smtp.adoc
new file mode 100644
index 00000000000..e78cd94302f
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/smtp.adoc
@@ -0,0 +1,7 @@
+= Postgresql James Server — smtpserver.xml
+:navtitle: smtpserver.xml
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+:server-name: Postgresql James server
+include::partial$configure/smtp.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/spam.adoc b/docs/modules/servers/pages/postgres/configure/spam.adoc
new file mode 100644
index 00000000000..bce4eb9ae1a
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/spam.adoc
@@ -0,0 +1,8 @@
+= Postgresql James Server — Anti-Spam configuration
+:navtitle: Anti-Spam configuration
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+:server-name: Postgresql James server
+:mailet-repository-path-prefix: postgres
+include::partial$configure/spam.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/ssl.adoc b/docs/modules/servers/pages/postgres/configure/ssl.adoc
new file mode 100644
index 00000000000..16924ae6b2c
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/ssl.adoc
@@ -0,0 +1,7 @@
+= Postgresql James Server — SSL & TLS configuration
+:navtitle: SSL & TLS configuration
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+:server-name: Postgresql James server
+include::partial$configure/ssl.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/tika.adoc b/docs/modules/servers/pages/postgres/configure/tika.adoc
new file mode 100644
index 00000000000..90a68e6eb8f
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/tika.adoc
@@ -0,0 +1,5 @@
+= Postgresql James Server — tika.properties
+:navtitle: tika.properties
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+include::partial$configure/tika.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/usersrepository.adoc b/docs/modules/servers/pages/postgres/configure/usersrepository.adoc
new file mode 100644
index 00000000000..8f6d3cba524
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/usersrepository.adoc
@@ -0,0 +1,5 @@
+= Postgresql James Server — usersrepository.xml
+:navtitle: usersrepository.xml
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+include::partial$configure/usersrepository.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/vault.adoc b/docs/modules/servers/pages/postgres/configure/vault.adoc
new file mode 100644
index 00000000000..dcdfc7dd207
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/vault.adoc
@@ -0,0 +1,8 @@
+= Postgresql James Server — deletedMessageVault.properties
+:navtitle: deletedMessageVault.properties
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+:server-name: Postgresql James server
+:backend-name: Postgresql
+include::partial$configure/vault.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/configure/webadmin.adoc b/docs/modules/servers/pages/postgres/configure/webadmin.adoc
new file mode 100644
index 00000000000..161652dde4d
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/configure/webadmin.adoc
@@ -0,0 +1,7 @@
+= Postgresql James Server — webadmin.properties
+:navtitle: webadmin.properties
+
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:pages-path: postgres
+:server-name: Postgresql James server
+include::partial$configure/webadmin.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/extending.adoc b/docs/modules/servers/pages/postgres/extending.adoc
new file mode 100644
index 00000000000..96f10693b79
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/extending.adoc
@@ -0,0 +1,4 @@
+= Postgres James Mail Server — Extending behaviour
+:navtitle: Extending behaviour
+
+This section is covered by xref:customization:index.adoc[this page].
diff --git a/docs/modules/servers/pages/postgres/extending/index.adoc b/docs/modules/servers/pages/postgres/extending/index.adoc
new file mode 100644
index 00000000000..c95b2919ad5
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/extending/index.adoc
@@ -0,0 +1,2 @@
+= Distributed James Postgres Server — Extending server behavior
+:navtitle: Extending server behavior
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/index.adoc b/docs/modules/servers/pages/postgres/index.adoc
new file mode 100644
index 00000000000..b3a8b8416ff
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/index.adoc
@@ -0,0 +1,15 @@
+= Postgres James Mail Server
+:navtitle: Distributed Postgres James Application
+
+The Postgres James server offers an easy way to scale an email server. It is based on
+the SQL database solution https://www.postgresql.org/[Postgres].
+
+Postgres is a powerful and versatile database server. Known for its advanced features, scalability,
+and robust performance, Postgres is the ideal choice for handling high-throughput and large data sets efficiently.
+Its row-level security ensures top-notch data protection, while the flexible architecture allows seamless integration
+with various storage and search solutions.
+
+In this section of the documentation, we will introduce you to:
+
+* xref:postgres/objectives.adoc[Objectives and motivation of the Distributed Postgres Server]
+* xref:postgres/run/index.adoc[Run the Postgresql Server]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/objectives.adoc b/docs/modules/servers/pages/postgres/objectives.adoc
new file mode 100644
index 00000000000..d1dcb91090b
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/objectives.adoc
@@ -0,0 +1,22 @@
+= Distributed Postgres James Server — Objectives and motivation
+:navtitle: Objectives and motivation
+
+Building on the outstanding advantages of a distributed mail system, such as scalability and ease of enhancement,
+this project aims to implement a backend database version using Postgres.
+
+Primary Objectives:
+
+* Provide more options: The current James Distributed server uses Cassandra as the backend database.
+ This project aims to provide an alternative to Cassandra, using Postgres as the backend database.
+ This choice aims to offer a highly scalable and reactive James mail server, suitable for small to medium deployments,
+ while the distributed setup remains more fitting for larger ones.
+* Propose an alternative to the jpa-app variant: The jpa-app variant is a simple version of James that uses JPA
+ to store data and is compatible with various SQL databases.
+ With the postgres-app, we use the `r2dbc` library to connect to the Postgres database, implementing non-blocking,
+ reactive APIs for higher performance.
+* Leverage advanced Postgres features: Postgres is a powerful database that supports many advanced features.
+ This project aims to leverage these features to improve the efficiency of the James server.
+ For example, it implements https://www.postgresql.org/docs/current/ddl-rowsecurity.html[row-level security]
+ to improve the security of the James server.
+* Flexible deployment: The new architecture allows flexible module choices. You can use Postgres directly for
+ blob storage or use Object Storage (e.g Minio, S3...).
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/operate/cli.adoc b/docs/modules/servers/pages/postgres/operate/cli.adoc
new file mode 100644
index 00000000000..2f008e438b3
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/operate/cli.adoc
@@ -0,0 +1,6 @@
+= Postgresql James Server — Command Line Interface
+:navtitle: Command Line Interface
+
+:xref-base: postgres
+:server-name: Postgresql James Server
+include::partial$operate/cli.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/operate/guide.adoc b/docs/modules/servers/pages/postgres/operate/guide.adoc
new file mode 100644
index 00000000000..5b829212d66
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/operate/guide.adoc
@@ -0,0 +1,9 @@
+= Postgresql James Server — Operator guide
+:navtitle: Operator guide
+
+:xref-base: postgres
+:mailet-repository-path-prefix: postgres
+:backend-name: postgres
+:sample-configuration-prefix-url: https://github.com/apache/james-project/blob/postgresql/server/apps/postgres-app/sample-configuration
+:server-name: Postgresql James Server
+include::partial$operate/guide.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/operate/index.adoc b/docs/modules/servers/pages/postgres/operate/index.adoc
new file mode 100644
index 00000000000..484f330aea3
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/operate/index.adoc
@@ -0,0 +1,7 @@
+= Postgresql James Server — Operate the Distributed server
+:navtitle: Operate the Distributed server
+
+:xref-base: postgres
+:server-name: Postgresql James Server
+:server-tag: postgres
+include::partial$operate/index.adoc[]
diff --git a/docs/modules/servers/pages/postgres/operate/logging.adoc b/docs/modules/servers/pages/postgres/operate/logging.adoc
new file mode 100644
index 00000000000..4b5d3de2453
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/operate/logging.adoc
@@ -0,0 +1,9 @@
+= Postgresql James Server — Logging
+:navtitle: Logging
+
+:xref-base: postgres
+:server-name: Postgresql James Server
+:server-tag: postgres
+:docker-compose-code-block-sample: servers:postgres/operate/logging/docker-compose-block.adoc
+:backend-name: postgres
+include::partial$operate/logging.adoc[]
diff --git a/docs/modules/servers/pages/postgres/operate/logging/docker-compose-block.adoc b/docs/modules/servers/pages/postgres/operate/logging/docker-compose-block.adoc
new file mode 100644
index 00000000000..3ff42faf399
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/operate/logging/docker-compose-block.adoc
@@ -0,0 +1,81 @@
+[source,docker-compose]
+----
+version: "3"
+
+services:
+ james:
+ depends_on:
+ - elasticsearch
+ - postgres
+ - rabbitmq
+ - s3
+ image: apache/james:postgres-latest
+ container_name: james
+ hostname: james.local
+ volumes:
+ - ./extension-jars:/root/extension-jars
+ - ./conf/logback.xml:/root/conf/logback.xml
+ - ./logs:/root/logs
+ ports:
+ - "80:80"
+ - "25:25"
+ - "110:110"
+ - "143:143"
+ - "465:465"
+ - "587:587"
+ - "993:993"
+ - "8080:8000"
+
+ elasticsearch:
+ image: docker.elastic.co/elasticsearch/elasticsearch:7.10.2
+ ports:
+ - "9200:9200"
+ environment:
+ - discovery.type=single-node
+
+ postgres:
+ image: postgres:16.3
+ ports:
+ - "5432:5432"
+ environment:
+ - POSTGRES_DB=james
+ - POSTGRES_USER=james
+ - POSTGRES_PASSWORD=secret1
+
+ rabbitmq:
+ image: rabbitmq:3.13.3-management
+ ports:
+ - "5672:5672"
+ - "15672:15672"
+
+ s3:
+ image: registry.scality.com/cloudserver/cloudserver:8.7.25
+ container_name: s3.docker.test
+ environment:
+ - SCALITY_ACCESS_KEY_ID=accessKey1
+ - SCALITY_SECRET_ACCESS_KEY=secretKey1
+ - S3BACKEND=mem
+ - LOG_LEVEL=trace
+ - REMOTE_MANAGEMENT_DISABLE=1
+
+ fluent-bit:
+ image: fluent/fluent-bit:1.5.7
+ volumes:
+ - ./fluentbit/fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf
+ - ./fluentbit/parsers.conf:/fluent-bit/etc/parsers.conf
+ - ./logs:/fluent-bit/log
+ ports:
+ - "24224:24224"
+ - "24224:24224/udp"
+ depends_on:
+ - elasticsearch
+
+ kibana:
+ image: docker.elastic.co/kibana/kibana:7.10.2
+ environment:
+ ELASTICSEARCH_HOSTS: http://elasticsearch:9200
+ ports:
+ - "5601:5601"
+ depends_on:
+ - elasticsearch
+----
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/operate/metrics.adoc b/docs/modules/servers/pages/postgres/operate/metrics.adoc
new file mode 100644
index 00000000000..0bccbb7cc1e
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/operate/metrics.adoc
@@ -0,0 +1,7 @@
+= Postgresql James Server — Metrics
+:navtitle: Metrics
+
+:other-metrics: Postgresql Java driver metrics
+:xref-base: postgres
+:server-name: Postgresql James Server
+include::partial$operate/metrics.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/operate/migrating.adoc b/docs/modules/servers/pages/postgres/operate/migrating.adoc
new file mode 100644
index 00000000000..b00a838135c
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/operate/migrating.adoc
@@ -0,0 +1,6 @@
+= Postgresql James Server — Migrating existing data
+:navtitle: Migrating existing data
+
+:xref-base: postgres
+:server-name: Postgresql James Server
+include::partial$operate/migrating.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/operate/performanceChecklist.adoc b/docs/modules/servers/pages/postgres/operate/performanceChecklist.adoc
new file mode 100644
index 00000000000..42f6e9afc9b
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/operate/performanceChecklist.adoc
@@ -0,0 +1,6 @@
+= Postgresql James Server — Performance checklist
+:navtitle: Performance checklist
+
+:xref-base: postgres
+:backend-name: postgres
+include::partial$operate/performanceChecklist.adoc[]
diff --git a/docs/modules/servers/pages/postgres/operate/security.adoc b/docs/modules/servers/pages/postgres/operate/security.adoc
new file mode 100644
index 00000000000..80c578c9e5c
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/operate/security.adoc
@@ -0,0 +1,6 @@
+= Security checklist
+:navtitle: Security checklist
+
+:xref-base: postgres
+:backend-name: postgres
+include::partial$operate/security.adoc[]
diff --git a/docs/modules/servers/pages/postgres/operate/webadmin.adoc b/docs/modules/servers/pages/postgres/operate/webadmin.adoc
new file mode 100644
index 00000000000..b8a275de609
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/operate/webadmin.adoc
@@ -0,0 +1,10 @@
+= Postgresql James Server — WebAdmin REST administration API
+:navtitle: WebAdmin REST administration API
+
+:server-name: Postgresql James Server
+:xref-base: postgres
+:backend-name: postgres
+:admin-mail-queues-extend: servers:postgres/operate/webadmin/admin-mail-queues-extend.adoc
+:admin-messages-extend: servers:postgres/operate/webadmin/admin-messages-extend.adoc
+:admin-mailboxes-extend: servers:postgres/operate/webadmin/admin-mailboxes-extend.adoc
+include::partial$operate/webadmin.adoc[]
diff --git a/docs/modules/servers/pages/postgres/operate/webadmin/admin-mail-queues-extend.adoc b/docs/modules/servers/pages/postgres/operate/webadmin/admin-mail-queues-extend.adoc
new file mode 100644
index 00000000000..ba054a0f3b8
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/operate/webadmin/admin-mail-queues-extend.adoc
@@ -0,0 +1 @@
+// The document only covers Postgres
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/operate/webadmin/admin-mailboxes-extend.adoc b/docs/modules/servers/pages/postgres/operate/webadmin/admin-mailboxes-extend.adoc
new file mode 100644
index 00000000000..ba054a0f3b8
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/operate/webadmin/admin-mailboxes-extend.adoc
@@ -0,0 +1 @@
+// The document only covers Postgres
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/operate/webadmin/admin-messages-extend.adoc b/docs/modules/servers/pages/postgres/operate/webadmin/admin-messages-extend.adoc
new file mode 100644
index 00000000000..ba054a0f3b8
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/operate/webadmin/admin-messages-extend.adoc
@@ -0,0 +1 @@
+// The document only covers Postgres
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/run/index.adoc b/docs/modules/servers/pages/postgres/run/index.adoc
new file mode 100644
index 00000000000..5b2eb998018
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/run/index.adoc
@@ -0,0 +1,14 @@
+= Postgresql James Server — Run
+:navtitle: Run
+
+This section presents guidance for all current deployment types of the Postgresql James Server.
+
+== Run with Java
+
+Build your own Apache James Postgresql artifacts and start xref:postgres/run/run-java.adoc[Running it directly on a Java Virtual Machine].
+
+== Run with Docker
+
+We have prepared a docker-compose for Apache James to run with Postgresql & OpenSearch.
+
+You can start xref:postgres/run/run-docker.adoc[Running James with few simple Docker commands].
\ No newline at end of file
diff --git a/docs/modules/servers/pages/postgres/run/run-docker.adoc b/docs/modules/servers/pages/postgres/run/run-docker.adoc
new file mode 100644
index 00000000000..1299c241b1f
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/run/run-docker.adoc
@@ -0,0 +1,145 @@
+= Postgresql James Server — Run with docker
+:navtitle: Run with docker
+
+== Running via docker-compose
+
+Requirements: docker & docker-compose installed.
+
+When you try James this way, you will use the most current state of James.
+
+=== Running with Postgresql only
+
+It will be configured to run with Postgresql.
+All those components will be started with a single command.
+
+You can retrieve the docker-compose file (note: the docker-compose file and the James image name may need to be adapted):
+
+ $ wget https://raw.githubusercontent.com/apache/james-project/master/server/apps/postgres-app/docker-compose.yml
+
+
+Then, you just have to start the services:
+
+ $ docker-compose up -d
+
+Wait a few seconds in order to have all those services start up. You will see the following log when James is available:
+`james | Started : true`
+
+A default domain, james.local, has been created. You can see this by running:
+
+ $ docker exec james james-cli -h 127.0.0.1 -p 9999 listdomains
+
+James will respond to IMAP port 143 and SMTP port 25.
+You have to create users before playing with james. You may also want to create other domains.
+Follow the xref:postgres/operate/cli.adoc['Useful commands'] section for more information about James CLI.
+
+=== Running distributed James
+
+We also have a distributed version of the James postgresql app with:
+
+* OpenSearch as a search indexer
+* S3 as the object storage
+* RabbitMQ as the event bus
+
+To run it, simply type:
+
+ $ docker compose -f docker-compose-distributed.yml up -d
+
+== Run with docker
+
+=== Requirements
+
+Compile the whole project:
+
+ mvn clean install -DskipTests -T 4
+
+Then load the James Postgresql server docker image:
+
+ docker load -i server/apps/postgres-app/target/jib-image.tar
+
+Alternatively we provide convenience distribution for the latest release:
+
+ docker pull apache/james:postgres-3.9.0
+
+=== Running with Postgresql only
+
+Firstly, create your own user network on Docker for the James environment:
+
+ $ docker network create --driver bridge james
+
+You need a running *Postgresql* in docker which connects to *james* network. To achieve this run:
+
+ $ docker run -d --network james --name=postgres --env 'POSTGRES_DB=james' --env 'POSTGRES_USER=james' --env 'POSTGRES_PASSWORD=secret1' postgres:16.0
+
+To run this container :
+
+ $ docker run --network james --hostname HOSTNAME -p "25:25" -p 80:80 -p "110:110" -p "143:143" -p "465:465" -p "587:587" -p "993:993" -p "127.0.0.1:8000:8000" --name james_run
+ -v $PWD/keystore:/root/conf/keystore -t apache/james:postgres-3.9.0 --generate-keystore
+
+Where :
+
+- HOSTNAME: is the hostname you want to give to your James container. This DNS entry will be used to send mail to your James server.
+
+Webadmin port binding is restricted to loopback as users are not authenticated by default on webadmin server. Thus you should avoid exposing it in production.
+Note that the above example assumes `127.0.0.1` is your loopback interface for convenience but you should change it if this is not the case on your machine.
+
+If you want to pass additional options to the underlying java command, you can configure a _JAVA_TOOL_OPTIONS_ env variable, for example add:
+
+ --env "JAVA_TOOL_OPTIONS=-Xms256m -Xmx2048m"
+
+To have log file accessible on a volume, add *-v $PWD/logs:/logs* option to the above command line, where *$PWD/logs* is your local directory to put files in.
+
+=== Running distributed
+
+Same as above, except that you need to run instances of RabbitMQ, S3 object storage and OpenSearch before James.
+
+You need a running *rabbitmq* in docker which connects to *james* network. To achieve this run:
+
+ $ docker run -d --network james --name=rabbitmq rabbitmq:3.13.3-management
+
+You need a running *Zenko Cloudserver* objectstorage in docker which connects to *james* network. To achieve this run:
+
+ $ docker run -d --network james --env 'REMOTE_MANAGEMENT_DISABLE=1' --env 'SCALITY_ACCESS_KEY_ID=accessKey1' --env 'SCALITY_SECRET_ACCESS_KEY=secretKey1' --name=s3 registry.scality.com/cloudserver/cloudserver:8.7.25
+
+You need a running *OpenSearch* in docker which connects to *james* network. To achieve this run:
+
+$ docker run -d --network james -p 9200:9200 --name=opensearch --env 'discovery.type=single-node' opensearchproject/opensearch:2.14.0
+
+Then run James like in the section above.
+
+=== Specific keystore
+
+Alternatively, you can also generate a keystore in your conf folder with the
+following command, and drop `--generate-keystore` option:
+
+[source,bash]
+----
+$ keytool -genkey -alias james -keyalg RSA -keystore conf/keystore
+----
+
+=== Instrumentation
+You can use link:https://glowroot.org/[Glowroot] to instrument James. It is packaged as part of the docker distribution to easily enable valuable performance insights.
+Disabled by default, its java agent can easily be enabled:
+
+ --env "JAVA_TOOL_OPTIONS=-javaagent:/root/glowroot.jar" -p "4000:4000"
+
+By default, the Glowroot UI is accessible from every machine in the network, as defined in the _destination/admin.json_,
+which you could configure before building the image if you want to restrict its accessibility, to localhost for example.
+See the https://github.com/glowroot/glowroot/wiki/Agent-Installation-(with-Embedded-Collector)#user-content-optional-post-installation-steps[Glowroot post installation steps] for more details.
+
+Or by mapping the 4000 port to the IP of the desired network interface, for example `-p 127.0.0.1:4000:4000`.
+
+
+=== Handling attachment indexing
+
+You can handle attachment text extraction before indexing in OpenSearch. This makes attachments searchable. To enable this:
+
+Run Tika connected to the *james* network:
+
+ $ docker run -d --network james --name tika apache/tika:2.9.2.1
+
+Run James:
+
+ $ docker run --network james --hostname HOSTNAME -p "25:25" -p 80:80 -p "110:110" -p "143:143" -p "465:465" -p "587:587" -p "993:993" -p "127.0.0.1:8000:8000"
+ --name james_run -v $PWD/keystore:/root/conf/keystore -t apache/james:postgres-latest
+
+You can find more explanation on the need of Tika in this xref:postgres/configure/tika.adoc[page].
diff --git a/docs/modules/servers/pages/postgres/run/run-java.adoc b/docs/modules/servers/pages/postgres/run/run-java.adoc
new file mode 100644
index 00000000000..5cd113b8746
--- /dev/null
+++ b/docs/modules/servers/pages/postgres/run/run-java.adoc
@@ -0,0 +1,108 @@
+= Postgresql James Server — Run
+:navtitle: Run
+
+== Building
+
+=== Requirements
+
+* Java 21 SDK
+* Maven 3
+
+=== Building the artifacts
+
+A usual compilation using Maven will produce two artifacts in the
+server/apps/postgres-app/target directory:
+
+* james-server-postgres-app.jar
+* james-server-postgres-app.lib
+
+You can for example run in the base of
+https://github.com/apache/james-project[this git repository]:
+
+....
+mvn clean install
+....
+
+== Running
+
+=== Running James with Postgresql only
+
+==== Requirements
+
+* Postgresql 16.0+
+
+==== James launch
+
+To run james, you have to create a directory containing required
+configuration files.
+
+James requires the configuration to be in a subfolder of the working directory that is called conf.
+A https://github.com/apache/james-project/tree/master/server/apps/postgres-app/sample-configuration[sample directory]
+is provided with some default values you may need to replace. You will need to update its content to match your needs.
+
+Also you might need to add the files like in the
+https://github.com/apache/james-project/tree/master/server/apps/postgres-app/sample-configuration-single[sample directory]
+to not have OpenSearch indexing enabled by default for the search.
+
+You need to have a Postgresql instance running. You can either install the server or launch it via docker:
+
+[source,bash]
+----
+$ docker run -d --network james -p 5432:5432 --name=postgres --env 'POSTGRES_DB=james' --env 'POSTGRES_USER=james' --env 'POSTGRES_PASSWORD=secret1' postgres:16.0
+----
+
+Once everything is set up, you just have to run the jar with:
+
+[source,bash]
+----
+$ java -Dworking.directory=. -jar target/james-server-postgres-app.jar --generate-keystore
+----
+
+Alternatively, you can also generate a keystore in your conf folder with the
+following command, and drop `--generate-keystore` option:
+
+[source,bash]
+----
+$ keytool -genkey -alias james -keyalg RSA -keystore conf/keystore
+----
+
+=== Running distributed James
+
+==== Requirements
+
+* Postgresql 16.0+
+* OpenSearch 2.1.0+
+* RabbitMQ-Management 3.8.17+
+* Swift ObjectStorage 2.15.1+ or Zenko Cloudserver or AWS S3
+
+==== James Launch
+
+If you want to use the distributed version of James Postgres app, you will need to add configuration in the conf folder
+like in the https://github.com/apache/james-project/tree/master/server/apps/postgres-app/sample-configuration-distributed[sample directory].
+
+You need to have a Postgresql, OpenSearch, S3 and RabbitMQ instance
+running. You can either install the servers or launch them via docker:
+
+[source,bash]
+----
+$ docker run -d --network james -p 5432:5432 --name=postgres --env 'POSTGRES_DB=james' --env 'POSTGRES_USER=james' --env 'POSTGRES_PASSWORD=secret1' postgres:16.0
+$ docker run -d --network james -p 9200:9200 --name=opensearch --env 'discovery.type=single-node' opensearchproject/opensearch:2.14.0
+$ docker run -d -p 5672:5672 -p 15672:15672 --name=rabbitmq rabbitmq:3.13.3-management
+$ docker run -d --env 'REMOTE_MANAGEMENT_DISABLE=1' --env 'SCALITY_ACCESS_KEY_ID=accessKey1' --env 'SCALITY_SECRET_ACCESS_KEY=secretKey1' --name=s3 registry.scality.com/cloudserver/cloudserver:8.7.25
+----
+
+Once everything is set up, you just have to run the jar like in the 'Running James with Postgresql only' section.
+
+==== Using AWS S3 or Zenko Cloudserver
+
+By default, James is configured with link:https://hub.docker.com/r/zenko/cloudserver[Zenko Cloudserver], which is compatible with AWS S3, in `blobstore.properties` as such:
+
+[source,bash]
+----
+implementation=s3
+objectstorage.namespace=james
+objectstorage.s3.endPoint=http://s3.docker.test:8000/
+objectstorage.s3.region=eu-west-1
+objectstorage.s3.accessKeyId=accessKey1
+objectstorage.s3.secretKey=secretKey1
+----
\ No newline at end of file
diff --git a/docs/modules/servers/partials/architecture/consistency-model.adoc b/docs/modules/servers/partials/architecture/consistency-model.adoc
new file mode 100644
index 00000000000..2c6ce8f1680
--- /dev/null
+++ b/docs/modules/servers/partials/architecture/consistency-model.adoc
@@ -0,0 +1,70 @@
+This page presents the consistency model used by the {server-name} and
+points to the tools built around it.
+
+== Data Replication
+
+The {server-name} relies on different storage technologies, all having their own
+consistency models.
+
+These data stores replicate data in order to enforce some level of availability.
+
+By consistency, we mean the ability for all replicas to hold the same data.
+
+By availability, we mean the ability for a replica to answer a request.
+
+In distributed systems, link:https://en.wikipedia.org/wiki/CAP_theorem[according to the CAP theorem],
+as we will necessarily encounter network partitions, then trade-offs need to be made between
+consistency and availability.
+
+This section details this trade-off for data stores used by the {server-name}.
+
+=== OpenSearch consistency model
+
+OpenSearch relies on link:https://opensearch.org/docs/latest/tuning-your-cluster/[strong consistency]
+with home-grown algorithm.
+
+The 2.x release line, that the distributed server is using, is known to provide faster recovery.
+
+Be aware that data is asynchronously indexed in OpenSearch, changes will be eventually visible.
+
+=== RabbitMQ consistency model
+
+The {server-name} can be set up to rely on a RabbitMQ cluster. All queues can be set up in an high availability
+fashion using link:https://www.rabbitmq.com/docs/quorum-queues[quorum queues] - those are replicated queues using the link:https://raft.github.io/[RAFT] consensus protocol and thus are
+strongly consistent.
+
+include::{data_replication_extend}[]
+
+== Consistency across data stores
+
+The {server-name} leverages several data stores:
+
+ - {backend-name} is used for metadata storage
+ - OpenSearch for search
+ - Object Storage for large object storage
+
+Thus the {server-name} also offers mechanisms to enforce consistency across data stores.
+
+=== Write path organisation
+
+The primary data stores are composed of {backend-name} for metadata and Object storage for binary data.
+
+To ensure the data referenced in {backend-name} is pointing to a valid object in the object store, we write
+the object store payload first, then write the corresponding metadata in {backend-name}.
+
+Similarly, metadata is destroyed first before the corresponding object is deleted.
+
+Such a procedure avoids metadata pointing to non-existing blobs; however, it might lead to some unreferenced
+blobs.
+
+=== {backend-name-cap} ↔ OpenSearch
+
+After being written to the primary stores (namely {backend-name} & Object Storage), email content is
+asynchronously indexed into OpenSearch.
+
+This process, called the EventBus, retries temporary errors and stores transient errors for
+later admin-triggered retries; it is described further xref:{xref-base}/operate/guide.adoc#_mailbox_event_bus[here].
+Its role is to spread load and limit inconsistencies.
+
+Furthermore, some xref:{xref-base}/operate/guide.adoc#_usual_troubleshooting_procedures[re-indexing tasks]
+enable re-synchronising OpenSearch content with the primary data stores.
diff --git a/docs/modules/servers/partials/architecture/implemented-standards.adoc b/docs/modules/servers/partials/architecture/implemented-standards.adoc
new file mode 100644
index 00000000000..5a338bc06e1
--- /dev/null
+++ b/docs/modules/servers/partials/architecture/implemented-standards.adoc
@@ -0,0 +1,117 @@
+This page details standards implemented by the {server-name}.
+
+== Message formats
+
+ - link:https://datatracker.ietf.org/doc/html/rfc5322[RFC-5322] Internet Message Format (MIME)
+ - link:https://datatracker.ietf.org/doc/html/rfc2045[RFC-2045] Format of Internet Message Bodies
+ - link:https://datatracker.ietf.org/doc/html/rfc3464[RFC-3464] An Extensible Message Format for Delivery Status Notifications
+ - James allows emitting DSNs from the mailet container.
+ - link:https://datatracker.ietf.org/doc/html/rfc8098[RFC-8098] Message Disposition Notification
+
+== TLS & authentication
+
+- link:https://datatracker.ietf.org/doc/html/rfc2595.html[RFC-2595] TLS for IMAP, POP3, SMTP (StartTLS)
+- link:https://datatracker.ietf.org/doc/html/rfc8314.html[RFC-8314] Implicit TLS
+- link:https://www.rfc-editor.org/rfc/rfc4959.html[RFC-4959] SASL IR: Initial client response
+- link:https://datatracker.ietf.org/doc/html/rfc4616[RFC-4616] SASL plain authentication
+- link:https://datatracker.ietf.org/doc/html/rfc7628[RFC-7628] SASL for OAUTH
+- Implemented for IMAP and SMTP
+- Support for OIDC standard only.
+
+== SMTP
+
+- link:https://datatracker.ietf.org/doc/html/rfc5321[RFC-5321] SMTP Protocol
+- link:https://datatracker.ietf.org/doc/html/rfc974[RFC-974] MAIL ROUTING AND THE DOMAIN SYSTEM
+- link:https://www.rfc-editor.org/rfc/rfc3461[RFC-3461] Simple Mail Transfer Protocol (SMTP) Service Extension for Delivery Status Notifications (DSNs)
+ - Requires extra configuration.
+- link:https://datatracker.ietf.org/doc/html/rfc1652[RFC-1652] SMTP Service Extension for 8bit-MIME transport
+- link:https://datatracker.ietf.org/doc/html/rfc1830[RFC-1830] SMTP Service Extensions for Transmission of Large and Binary MIME Messages
+- link:https://datatracker.ietf.org/doc/html/rfc1869[RFC-1869] SMTP Service Extensions
+- link:https://datatracker.ietf.org/doc/html/rfc1870[RFC-1870] SMTP Service Extension for Message Size Declaration
+- link:https://datatracker.ietf.org/doc/html/rfc1891[RFC-1891] SMTP Service Extension for Delivery Status Notifications
+- link:https://datatracker.ietf.org/doc/html/rfc1893[RFC-1893] Enhanced Mail System Status Codes
+- link:https://datatracker.ietf.org/doc/html/rfc2034[RFC-2034] SMTP Service Extension for Returning Enhanced Error Codes
+- link:https://datatracker.ietf.org/doc/html/rfc2142[RFC-2142] Mailbox Names For Common Services, Roles And Functions
+- link:https://datatracker.ietf.org/doc/html/rfc2197[RFC-2197] SMTP Service Extension for Command Pipelining
+- link:https://datatracker.ietf.org/doc/html/rfc2554[RFC-2554] ESMTP Service Extension for Authentication
+- link:https://datatracker.ietf.org/doc/rfc6710/[RFC-6710] SMTP Extension for Message Transfer Priorities
+
+== LMTP
+
+ - link:https://james.apache.org/server/rfclist/lmtp/rfc2033.txt[RFC-2033] LMTP Local Mail Transfer Protocol
+
+== IMAP
+
+The following IMAP specifications are implemented:
+
+ - link:https://datatracker.ietf.org/doc/html/rfc3501.html[RFC-3501] INTERNET MESSAGE ACCESS PROTOCOL - VERSION 4rev1
+ - link:https://datatracker.ietf.org/doc/html/rfc2177.html[RFC-2177] IMAP IDLE (mailbox scoped push notifications)
+ - link:https://www.rfc-editor.org/rfc/rfc9208.html[RFC-9208] IMAP QUOTA Extension
+ - link:https://datatracker.ietf.org/doc/html/rfc2342.html[RFC-2342] IMAP namespace
+ - link:https://datatracker.ietf.org/doc/html/rfc2088.html[RFC-2088] IMAP non synchronized literals
+ - link:https://datatracker.ietf.org/doc/html/rfc4315.html[RFC-4315] IMAP UIDPLUS
+ - link:https://datatracker.ietf.org/doc/html/rfc5464.html[RFC-5464] IMAP Metadata (annotations on mailboxes)
+ - link:https://datatracker.ietf.org/doc/html/rfc4551.html[RFC-4551] IMAP Condstore
+ - link:https://datatracker.ietf.org/doc/html/rfc5162.html[RFC-5162] IMAP QRESYNC (synchronisation semantic for deleted messages)
+ - We don't store a log of deleted modseq thus clients should rely on known sequences mechanism to optimize exchanges.
+ - link:https://datatracker.ietf.org/doc/html/rfc4978.html[RFC-4978] IMAP Compress (optional)
+ - link:https://datatracker.ietf.org/doc/html/rfc5161.html[RFC-5161] IMAP ENABLE
+ - link:https://datatracker.ietf.org/doc/html/rfc6851.html[RFC-6851] IMAP MOVE command
+ - link:https://datatracker.ietf.org/doc/html/rfc5182.html[RFC-5182] IMAP Extension for Referencing the Last SEARCH Result
+ - link:https://datatracker.ietf.org/doc/html/rfc5032.html[RFC-5032] IMAP WITHIN (for relative date search semantic)
+ - link:https://datatracker.ietf.org/doc/html/rfc4731.html[RFC-4731] IMAP ESEARCH: extensions for IMAP search: new options like min, max, count.
+ - link:https://datatracker.ietf.org/doc/html/rfc3348.html[RFC-3348] IMAP Child Mailbox Extension
+ - link:https://www.rfc-editor.org/rfc/rfc8508.html[RFC-8508] IMAP Replace Extension
+ - link:https://www.rfc-editor.org/rfc/rfc7889.html[RFC-7889] IMAP Extension for APPENDLIMIT
+ - link:https://www.rfc-editor.org/rfc/rfc8474.html[RFC-8474] IMAP Extension for Object Identifiers
+ - link:https://datatracker.ietf.org/doc/html/rfc2971.html[RFC-2971] IMAP ID Extension
+ - link:https://datatracker.ietf.org/doc/html/rfc8438.html[RFC-8438] IMAP Extension for STATUS=SIZE
+ - link:https://www.rfc-editor.org/rfc/rfc5258.html[RFC-5258] IMAP LIST Command Extensions
+ - link:https://www.rfc-editor.org/rfc/rfc5819.html[RFC-5819] IMAP4 Extension for Returning STATUS Information in Extended LIST
+ - link:https://www.rfc-editor.org/rfc/rfc8440.html[RFC-8440] IMAP4 Extension for Returning MYRIGHTS Information in Extended LIST
 - link:https://www.rfc-editor.org/rfc/rfc6154.html[RFC-6154] IMAP LIST Extension for Special-Use Mailboxes
+ - link:https://www.rfc-editor.org/rfc/rfc8514.html[RFC-8514] IMAP SAVEDATE Extension
 - link:https://www.rfc-editor.org/rfc/rfc9394.html[RFC-9394] IMAP PARTIAL Extension for Paged SEARCH and FETCH
+
+Partially implemented specifications:
+
+ - link:https://datatracker.ietf.org/doc/html/rfc4314.html[RFC-4314] IMAP ACL
+ - ACLs can be created and managed but mailbox not belonging to one account cannot, as of today, be accessed in IMAP.
+
+== JMAP
+
 - link:https://datatracker.ietf.org/doc/html/rfc8620[RFC-8620] JSON Meta Application Protocol (JMAP)
+ - link:https://datatracker.ietf.org/doc/html/rfc8621[RFC-8621] JMAP for emails
+ - link:https://datatracker.ietf.org/doc/html/rfc8887[RFC-8887] JMAP over websockets
+ - link:https://datatracker.ietf.org/doc/html/rfc9007.html[RFC-9007] Message Delivery Notifications with JMAP.
+ - link:https://datatracker.ietf.org/doc/html/rfc8030.html[RFC-8030] Web PUSH: JMAP enable sending push notifications through a push gateway.
+
+https://jmap.io/[JMAP] is intended to be a new standard for email clients to connect to mail
+stores. It therefore intends to primarily replace IMAP + SMTP submission. It is also designed to be more
+generic. It does not replace MTA-to-MTA SMTP transmission.
+
+The link:https://github.com/apache/james-project/tree/master/server/protocols/jmap-rfc-8621/doc/specs/spec[annotated documentation]
+presents the limits of the JMAP RFC-8621 implementation part of the Apache James project.
+
+Some methods / types are not yet implemented, some implementations are naive, and the PUSH is not supported yet.
+
+Users are invited to read these limitations before using actively the JMAP RFC-8621 implementation, and should ensure their
+client applications only uses supported operations.
+
+== POP3
+
+ - link:https://www.ietf.org/rfc/rfc1939.txt[RFC-1939] Post Office Protocol - Version 3
+
+== ManageSieve
+
+Support for manageSieve is experimental.
+
+ - link:https://datatracker.ietf.org/doc/html/rfc5804[RFC-5804] A Protocol for Remotely Managing Sieve Scripts
+
+== Sieve
+
+ - link:https://datatracker.ietf.org/doc/html/rfc5228[RFC-5228] Sieve: An Email Filtering Language
+ - link:https://datatracker.ietf.org/doc/html/rfc5173[RFC-5173] Sieve Email Filtering: Body Extension
+ - link:https://datatracker.ietf.org/doc/html/rfc5230[RFC-5230] Sieve Email Filtering: Vacation Extension
+
+
diff --git a/docs/modules/servers/partials/architecture/index.adoc b/docs/modules/servers/partials/architecture/index.adoc
new file mode 100644
index 00000000000..449a31c99e3
--- /dev/null
+++ b/docs/modules/servers/partials/architecture/index.adoc
@@ -0,0 +1,302 @@
+This sections presents the {server-name} architecture.
+
+== Storage
+
+In order to deliver its promises, the {server-name} leverages the following storage strategies:
+
+image::{storage-picture-file-name}[Storage responsibilities for the {server-name}]
+
+ * {backend-storage-introduce}
+ * The *blob store* storage interface is responsible for storing potentially large binary data. For instance
+ email bodies, headers or attachments. Different technologies can be used: *{backend-name}*, or S3 compatible *Object Storage*
+(S3 or Swift).
+ * *OpenSearch* component empowers full text search on emails. It also enables querying data with unplanned access
+patterns. OpenSearch throughput does not, however, match that of {backend-name}; thus its use is avoided for regular workloads.
+ * *RabbitMQ* enables James nodes of a same cluster to collaborate together. It is used to implement connected protocols,
+notification patterns as well as distributed resilient work queues and mail queue.
+ * *Tika* (optional) enables text extraction from attachments, thus improving full text search results.
+ * *link:https://spamassassin.apache.org/[SpamAssassin] or link:https://rspamd.com/[Rspamd]* (optional) can be used for Spam detection and user feedback is supported.
+
+xref:{xref-base}/architecture/consistency-model.adoc[This page] further details {server-name} consistency model.
+
+== Protocols
+
+The following protocols are supported and can be used to interact with the {server-name}:
+
+* *SMTP*
+* *IMAP*
+* xref:{xref-base}/operate/webadmin.adoc[WebAdmin] REST Administration API
+* *LMTP*
+* *POP3*
+
+The following protocols should be considered experimental:
+
+* *JMAP* (RFC-8620 & RFC-8621 specifications and known limitations of the James implementation are defined link:https://github.com/apache/james-project/tree/master/server/protocols/jmap-rfc-8621/doc[here])
+* *ManagedSieve*
+
+Read more on xref:{xref-base}/architecture/implemented-standards.adoc[implemented standards].
+
+== Topology
+
+While it is perfectly possible to deploy homogeneous James instances, with the same configuration and thus the same
+protocols and the same responsibilities, one might want to investigate
+xref:{xref-base}/architecture/specialized-instances.adoc['Specialized instances'].
+
+== Components
+
+This section presents the various components of the {server-name}, providing context about
+their interactions, and about their implementations.
+
+=== High level view
+
+Here is a high level view of the various server components and their interactions:
+
+image::server-components.png[Server components mobilized for SMTP & IMAP]
+
+ - The SMTP protocol receives a mail, and enqueue it on the MailQueue
 - The MailetContainer will start processing the mail asynchronously and will take business decisions like storing the
 email locally in a user mailbox. The behaviour of the MailetContainer is highly customizable thanks to the Mailets and
 the Matcher composability.
 - The Mailbox component is responsible for storing a user's mails.
 - The user can use the IMAP or the JMAP protocol to retrieve and read their mails.
+
+These components will be presented more in depth below.
+
+=== Mail processing
+
+Mail processing allows to take asynchronously business decisions on
+received emails.
+
+Here are its components:
+
+* The `spooler` takes mail out of the mailQueue and executes mail
+processing within the `mailet container`.
+* The `mailet container` synchronously executes the user defined logic.
+This `logic' is written through the use of `mailet`, `matcher` and
+`processor`.
+* A `mailet` represents an action: mail modification, envelop
+modification, a side effect, or stop processing.
+* A `matcher` represents a condition to execute a mailet.
+* A `processor` is a flow of pair of `matcher` and `mailet` executed
+sequentially. The `ToProcessor` mailet is a `goto` instruction to start
+executing another `processor`
+* A `mail repository` allows storage of a mail as part of its
+processing. Standard configuration relies on the following mail
+repository:
+** `{mailet-repository-path-prefix}://var/mail/error/` : unexpected errors that occurred
+during mail processing. Emails impacted by performance related
+exceptions, or logical bug within James code are typically stored here.
+These mails could be reprocessed once the cause of the error is fixed.
+The `Mail.error` field can help diagnose the issue. Correlation with
+logs can be achieved via the use of the `Mail.name` field.
+** `{mailet-repository-path-prefix}://var/mail/address-error/` : mail addressed to a
+non-existing recipient of a handled local domain. These mails could be
+reprocessed once the user is created, for instance.
+** `{mailet-repository-path-prefix}://var/mail/relay-denied/` : mail for whom relay was
+denied: missing authentication can, for instance, be a cause. In
+addition to prevent disasters upon miss configuration, an email review
+of this mail repository can help refine a host spammer blacklist.
+** `{mailet-repository-path-prefix}://var/mail/rrt-error/` : runtime error upon Recipient
+Rewriting occurred. This is typically due to a loop.
+
+=== Mail Queue
+
+An email queue is a mandatory component of SMTP servers. It is a system
+that creates a queue of emails that are waiting to be processed for
+delivery. Email queuing is a form of Message Queuing – an asynchronous
+service-to-service communication. A message queue is meant to decouple a
+producing process from a consuming one. An email queue decouples email
+reception from email processing. It allows them to communicate without
+being connected. As such, the queued emails wait for processing until
+the recipient is available to receive them. As James is an Email Server,
+it also supports mail queue as well.
+
+==== Why Mail Queue is necessary
+
+You might often need to check mail queue to make sure all emails are
+delivered properly. At first, you need to know why email queues get
+clogged. Here are the two core reasons for that:
+
+* Exceeded volume of emails
+
+Some mailbox providers enforce email rate limits on IP addresses. The
+limits are based on the sender reputation. If you exceeded this rate and
+queued too many emails, the delivery speed will decrease.
+
+* Spam-related issues
+
+Another common reason is that your email has been busted by spam
+filters. The filters will let the emails gradually pass to analyze how
+the rest of the recipients react to the message. If there is slow
+progress, it’s okay. Your email campaign is being observed and assessed.
+If it’s stuck, there could be different reasons including the blockage
+of your IP address.
+
+==== Why combining RabbitMQ, Object storage {mailqueue-combined-extend-backend} for MailQueue
+
+* RabbitMQ ensures the messaging function, and avoids polling.
+* Object Storage stores potentially large binary payload.
+
+include::{mailqueue-combined-extend}[]
+
+However, the current design does not implement delays. Delays define the time a mail has to stay in the
+mail queue before being dequeued, and are used, for example, for exponential wait delays upon remote
+delivery retries.
+
+=== Mailbox
+
+Storage for emails belonging to users.
+
+Metadata are stored in {backend-name} while headers, bodies and attachments are stored
+within the xref:#_blobstore[BlobStore].
+
+==== Search index
+
+Emails are indexed asynchronously in OpenSearch via the xref:#_event_bus[EventBus]
+in order to empower advanced and fast email full text search.
+
+Text extraction can be set up using link:https://tika.apache.org/[Tika], allowing
+to extract the text from attachment, allowing to search your emails based on the attachment
+textual content. In such case, the OpenSearch indexer will call a Tika server prior
+indexing.
+
+==== Quotas
+
+Current Quotas of users are held in a {backend-name} projection. Limitations can be defined per
+user, per domain, or globally.
+
+==== Event Bus
+
+{server-name} relies on an event bus system to enrich mailbox capabilities. Each
+operation performed on the mailbox will trigger related events, that can
+be processed asynchronously by potentially any James node on a
+distributed system.
+
+Many different kind of events can be triggered during a mailbox
+operation, such as:
+
+* `MailboxEvent`: event related to an operation regarding a mailbox:
+** `MailboxDeletion`: a mailbox has been deleted
+** `MailboxAdded`: a mailbox has been added
+** `MailboxRenamed`: a mailbox has been renamed
+** `MailboxACLUpdated`: a mailbox got its rights and permissions updated
+* `MessageEvent`: event related to an operation regarding a message:
+** `Added`: messages have been added to a mailbox
+** `Expunged`: messages have been expunged from a mailbox
+** `FlagsUpdated`: messages had their flags updated
+** `MessageMoveEvent`: messages have been moved from a mailbox to another
+* `QuotaUsageUpdatedEvent`: event related to quota update
+
+Mailbox listeners can register themselves on this event bus system to be
+called when an event is fired, allowing to do different kind of extra
+operations on the system, like:
+
+* Current quota calculation
+* Message indexation with OpenSearch
+* Mailbox annotations cleanup
+* Ham/spam reporting to Spam filtering system
+* …
+
+==== Deleted Messages Vault
+
+Deleted Messages Vault is an interesting feature that will help James
+users have a chance to:
+
+* retain users deleted messages for some time.
+* restore & export deleted messages by various criteria.
+* permanently delete some retained messages.
+
+If the Deleted Messages Vault is enabled when users delete their mails,
+and by that we mean when they try to definitely delete them by emptying
+the trash, James will retain these mails into the Deleted Messages
+Vault, before an email or a mailbox is going to be deleted. And only
+administrators can interact with this component via
+xref:{xref-base}/operate/webadmin.adoc#_deleted_messages_vault[WebAdmin] REST APIs.
+
+However, mails are not retained forever as you have to configure a
+retention period before using it (with one-year retention by default if
+not defined). It’s also possible to permanently delete a mail if needed.
+
+=== Data
+
+Storage for domains and users.
+
+Domains are persisted in {backend-name}.
+
+Users can be managed in {backend-name}, or via a LDAP (read only).
+
+=== Recipient rewrite tables
+
+Storage of Recipients Rewriting rules, in {backend-name}.
+
+==== Mapping types
+
+James allows using various mapping types for better expressing the intent of your address rewriting logic:
+
+* *Domain mapping*: Rewrites the domain of mail addresses. Use it for technical purposes, user will not
+be allowed to use the source in their FROM address headers. Domain mappings can be managed via the CLI and
+added via xref:{xref-base}/operate/webadmin.adoc#_domain_mappings[WebAdmin]
+* *Domain aliases*: Rewrites the domain of mail addresses. Express the idea that both domains can be used
+inter-changeably. User will be allowed to use the source in their FROM address headers. Domain aliases can
+be managed via xref:{xref-base}/operate/webadmin.adoc#_get_the_list_of_aliases_for_a_domain[WebAdmin]
+* *Forwards*: Replaces the source address by another one. Vehicles the intent of forwarding incoming mails
+to other users. Listing the forward source in the forward destinations keeps a local copy. User will not be
+allowed to use the source in their FROM address headers. Forward can
+be managed via xref:{xref-base}/operate/webadmin.adoc#_address_forwards[WebAdmin]
+* *Groups*: Replaces the source address by another one. Vehicles the intent of a group registration: group
+address will be swapped by group member addresses (Feature poor mailing list). User will not be
+allowed to use the source in their FROM address headers. Groups can
+be managed via xref:{xref-base}/operate/webadmin.adoc#_address_group[WebAdmin]
+* *Aliases*: Replaces the source address by another one. Represents user owned mail address, with which
+he can interact as if it was his main mail address. User will be allowed to use the source in their FROM
+address headers. Aliases can be managed via xref:{xref-base}/operate/webadmin.adoc#_address_aliases[WebAdmin]
+* *Address mappings*: Replaces the source address by another one. Used for technical purposes, this mapping type does
+not hold specific intent. Prefer using one of the above mapping types... User will not be allowed to use the source
+in their FROM address headers. Address mappings can be managed via the CLI or via
+xref:{xref-base}/operate/webadmin.adoc#_address_mappings[WebAdmin]
+* *Regex mappings*: Applies the regex on the supplied address. User will not be allowed to use the source
+in their FROM address headers. Regex mappings can be managed via the CLI or via
+xref:{xref-base}/operate/webadmin.adoc#_regex_mapping[WebAdmin]
+* *Error*: Throws an error upon processing. User will not be allowed to use the source
+in their FROM address headers. Errors can be managed via the CLI
+
+[#_blobstore]
+=== BlobStore
+
+Stores potentially large binary data.
+
+Mailbox component, Mail Queue component, Deleted Message Vault
+component relies on it.
+
+Supported backends include S3 compatible ObjectStorage (link:https://wiki.openstack.org/wiki/Swift[Swift], S3 API).
+
+Encryption can be configured on top of ObjectStorage.
+
+Blobs can currently be deduplicated in order to reduce storage space. This means that two blobs with
+the same content will be stored only once.
+
+The downside is that deletion is more complicated, and a garbage collection needs to be run. A first implementation
+based on bloom filters can be used and triggered using the WebAdmin REST API.
+
+=== Task Manager
+
+Allows to control and schedule long running tasks run by other
+components. Among other it enables scheduling, progress monitoring,
+cancellation of long running tasks.
+
+{server-name} leverages a task manager using Event Sourcing and RabbitMQ for messaging.
+
+=== Event sourcing
+
+link:https://martinfowler.com/eaaDev/EventSourcing.html[Event sourcing] implementation
+for the {server-name} stores events in {backend-name}. It enables components
+to rely on event sourcing techniques for taking decisions.
+
+A short list of usages:
+
+* Data leak prevention storage
+* JMAP filtering rules storage
+* Validation of the MailQueue configuration
+* Sending email warnings to user close to their quota
+* Implementation of the TaskManager
diff --git a/docs/modules/servers/partials/architecture/specialized-instances.adoc b/docs/modules/servers/partials/architecture/specialized-instances.adoc
new file mode 100644
index 00000000000..d8e02b1dc75
--- /dev/null
+++ b/docs/modules/servers/partials/architecture/specialized-instances.adoc
@@ -0,0 +1,36 @@
+While it is perfectly possible to deploy homogeneous James instances, with the same configuration and thus the same
+protocols and the same responsibilities, one might want to investigate 'Specialized instances'.
+
+This deployment topology consists of {server-name} with heterogeneous configurations on top of shared
+databases. Groups of James servers will thus handle various protocols and have different responsibilities.
+
+This approach limits cascading failures across protocols and services. Think of *OutOfMemoryErrors*, CPUs starvation,
+{backend-name} driver issue, etc.
+
+However, we can't speak of microservices here: all James instances runs the same code, James is still a monolith, and
+databases need to be shared across instances.
+
+image::{specialized-instances-file-name}[Example of Specialized instances topology]
+
+We speak of:
+
+ - **Front-line servers** serves protocols. James enables to easily turn protocols on and off. Typically, each protocol would
+ be isolated in its own group of James instances: james-imap, james-jmap, james-smtp, james-webadmin, etc... Refer to
+ protocols configuration files to learn more.
+
+ - **Back-office servers** handles other services like:
+
+ - Mail processing.
+ - Remote delivery.
+ - Event processing.
+ - Task execution.
+
+Front-line servers will likely not handle back office responsibilities (but be sure to have back-office servers that do!).
+
+ - xref:{xref-base}/configure/mailetcontainer.adoc[Mail processing can be switched off].
+ - xref:{xref-base}/configure/listeners.adoc[Mailbox event processing can be switched off].
+ - xref:{xref-base}/configure/rabbitmq.adoc[Task execution can be switched off].
+ - Remote Delivery service is not started if the RemoteDelivery mailet is not positioned in mailetcontainer.xml.
+
+Of course, the above instances can be collocated at will, to reach some intermediate deployments with fewer
+instances to mitigate costs.
\ No newline at end of file
diff --git a/docs/modules/servers/partials/benchmark/db-benchmark.adoc b/docs/modules/servers/partials/benchmark/db-benchmark.adoc
new file mode 100644
index 00000000000..ab7a7abd5c6
--- /dev/null
+++ b/docs/modules/servers/partials/benchmark/db-benchmark.adoc
@@ -0,0 +1,373 @@
+This document provides basic performance of {server-name} databases, benchmark methodologies as a basis for a James administrator who
+can test and evaluate if his {server-name} databases are performing well.
+
+It includes:
+
+* A sample deployment topology
+* Propose benchmark methodology and base performance for each database. This aims to help operators to quickly identify
+performance issues and compliance of their databases.
+
+== Sample deployment topology
+
+We deploy a sample topology of {server-name} with these following databases:
+
+- OpenDistro 1.13.1 as search engine: 3 nodes, each node has 8 OVH vCores CPU and 30 GB memory limit (OVH b2-30 instance).
+- RabbitMQ 3.8.17 as message queue: 3 Kubernetes pods, each pod has 0.6 OVH vCore CPU and 2 GB memory limit.
+- OVH Swift S3 as an object storage
+- {backend-database-extend-sample}
+
+With the above system, our email service operates stably with valuable performance.
+For more details, it can handle a load throughput of up to about 1000 JMAP requests per second with a 99th percentile latency of 400ms.
+
+== Benchmark methodologies and base performances
+We are willing to share the benchmark methodologies and results with you as a reference to evaluate your {server-name}'s performance.
+Other evaluation methods are welcome, as long as your databases exhibit similar or even better performance than ours.
+It is up to your business needs. If your databases show results that fall far from our baseline performance, there's a good chance that
+there are problems with your system, and you need to check it out thoroughly.
+
+=== Benchmark OpenSearch
+
+==== Benchmark methodology
+
+===== Benchmark tool
+We use https://github.com/opensearch-project/opensearch-benchmark[opensearch-benchmark] - an official OpenSearch benchmarking tool.
+It provides the following features:
+
+- Automatically create OpenSearch clusters, stress tests them, and delete them.
+- Manage stress testing data and solutions by OpenSearch version.
+- Present stress testing data in a comprehensive way, allowing you to compare and analyze the data of different stress tests and store the data on a particular OpenSearch instance for secondary analysis.
+- Collect Java Virtual Machine (JVM) details, such as memory and garbage collection (GC) data, to locate performance problems.
+
+===== How to benchmark
+To install the `opensearch-benchmark` tool, you need Python 3.8+ including pip3 first, then run:
+```
+python3 -m pip install opensearch-benchmark
+```
+
+If you have any trouble or need more detailed instructions, please look in the https://github.com/opensearch-project/OpenSearch-Benchmark/blob/main/DEVELOPER_GUIDE.md[detailed installation guide].
+
+Let's see which workloads (simulation profiles) `opensearch-benchmark` provides: ```opensearch-benchmark list workloads```.
+For our James use case, we are interested in ```pmc``` workload: ```Full-text benchmark with academic papers from PMC```.
+
+Run the below script to benchmark against your OpenSearch cluster:
+
+[source,bash]
+----
+opensearch-benchmark execute_test --pipeline=benchmark-only --workload=[workload-name] --target-host=[ip_node1:port_node1],[ip_node2:port_node2],[ip_node3:port_node3] --client-options="use_ssl:false,verify_certs:false,basic_auth_user:'[user]',basic_auth_password:'[password]'"
+----
+
+In there:
+
+* --pipeline=benchmark-only: benchmark against a running cluster
+* workload-name: the workload you want to benchmark
+* ip:port: OpenSearch node's socket
+* user/password: OpenSearch authentication credentials
+
+==== Sample benchmark result
+===== PMC workload
+
+[source]
+----
+| Metric | Task | Value | Unit |
+|---------------------------------------------------------------:|------------------------------:|------------:|--------:|
+| Min Throughput | index-append | 734.63 | docs/s |
+| Mean Throughput | index-append | 763.16 | docs/s |
+| Median Throughput | index-append | 746.5 | docs/s |
+| Max Throughput | index-append | 833.51 | docs/s |
+| 50th percentile latency | index-append | 4738.57 | ms |
+| 90th percentile latency | index-append | 8129.1 | ms |
+| 99th percentile latency | index-append | 11734.5 | ms |
+| 100th percentile latency | index-append | 14662.9 | ms |
+| 50th percentile service time | index-append | 4738.57 | ms |
+| 90th percentile service time | index-append | 8129.1 | ms |
+| 99th percentile service time | index-append | 11734.5 | ms |
+| 100th percentile service time | index-append | 14662.9 | ms |
+| error rate | index-append | 0 | % |
+| Min Throughput | default | 19.94 | ops/s |
+| Mean Throughput | default | 19.95 | ops/s |
+| Median Throughput | default | 19.95 | ops/s |
+| Max Throughput | default | 19.96 | ops/s |
+| 50th percentile latency | default | 23.1322 | ms |
+| 90th percentile latency | default | 25.4129 | ms |
+| 99th percentile latency | default | 29.1382 | ms |
+| 100th percentile latency | default | 29.4762 | ms |
+| 50th percentile service time | default | 21.4895 | ms |
+| 90th percentile service time | default | 23.589 | ms |
+| 99th percentile service time | default | 26.6134 | ms |
+| 100th percentile service time | default | 27.9068 | ms |
+| error rate | default | 0 | % |
+| Min Throughput | term | 19.93 | ops/s |
+| Mean Throughput | term | 19.94 | ops/s |
+| Median Throughput | term | 19.94 | ops/s |
+| Max Throughput | term | 19.95 | ops/s |
+| 50th percentile latency | term | 31.0684 | ms |
+| 90th percentile latency | term | 34.1419 | ms |
+| 99th percentile latency | term | 74.7904 | ms |
+| 100th percentile latency | term | 103.663 | ms |
+| 50th percentile service time | term | 29.6775 | ms |
+| 90th percentile service time | term | 32.4288 | ms |
+| 99th percentile service time | term | 36.013 | ms |
+| 100th percentile service time | term | 102.193 | ms |
+| error rate | term | 0 | % |
+| Min Throughput | phrase | 19.94 | ops/s |
+| Mean Throughput | phrase | 19.95 | ops/s |
+| Median Throughput | phrase | 19.95 | ops/s |
+| Max Throughput | phrase | 19.95 | ops/s |
+| 50th percentile latency | phrase | 23.0255 | ms |
+| 90th percentile latency | phrase | 26.1607 | ms |
+| 99th percentile latency | phrase | 31.2094 | ms |
+| 100th percentile latency | phrase | 45.5012 | ms |
+| 50th percentile service time | phrase | 21.5109 | ms |
+| 90th percentile service time | phrase | 24.4144 | ms |
+| 99th percentile service time | phrase | 26.1865 | ms |
+| 100th percentile service time | phrase | 43.5122 | ms |
+| error rate | phrase | 0 | % |
+
+----------------------------------
+[INFO] SUCCESS (took 1772 seconds)
+----------------------------------
+----
+
+===== PMC custom workload
+We customized the PMC workload by increasing search throughput target to figure out our OpenSearch cluster limit.
+
+The result is that with 25-30 request/s we have a 99th percentile latency of 1s.
+
+==== References
+The `opensearch-benchmark` tool seems to be a fork of the official benchmark tool https://github.com/elastic/rally[EsRally] of Elasticsearch.
+The `opensearch-benchmark` tool is not adopted widely yet, so we believe some EsRally references could help as well:
+
+- https://www.alibabacloud.com/blog/esrally-official-stress-testing-tool-for-elasticsearch_597102[esrally: Official Stress Testing Tool for Elasticsearch]
+
+- https://esrally.readthedocs.io/en/latest/adding_tracks.html[Create a custom EsRally track]
+
+- https://discuss.elastic.co/t/why-the-percentile-latency-is-several-times-more-than-service-time/69630[Why the percentile latency is several times more than service time]
+
+=== Benchmark RabbitMQ
+
+==== Benchmark methodology
+
+===== Benchmark tool
+We use https://github.com/rabbitmq/rabbitmq-perf-test[rabbitmq-perf-test] tool.
+
+===== How to benchmark
+Using PerfTestMulti is friendlier:
+
+- Provide the input scenario from a single file
+- Provide the output result as a single file. The result file can be visualized as a chart (graph WebUI)
+
+Run a command like below:
+
+[source,bash]
+----
+bin/runjava com.rabbitmq.perf.PerfTestMulti [scenario-file] [result-file]
+----
+
+In order to visualize the result, copy [result-file] to ```/html/examples/[result-file]```.
+Start the web server to view the graph with the command:
+
+[source,bash]
+----
+bin/runjava com.rabbitmq.perf.WebServer
+----
+Then browse: http://localhost:8080/examples/sample.html
+
+==== Sample benchmark result
+- Scenario file:
+
+[source]
+----
+[{'name': 'consume', 'type': 'simple',
+'uri': 'amqp://james:eeN7Auquaeng@localhost:5677',
+'params':
+ [{'time-limit': 30, 'producer-count': 2, 'consumer-count': 4}]}]
+----
+
+- Result file:
+
+[source,json]
+----
+{
+ "consume": {
+ "send-bytes-rate": 0,
+ "recv-msg-rate": 4330.225080385852,
+ "avg-latency": 18975254,
+ "send-msg-rate": 455161.3183279743,
+ "recv-bytes-rate": 0,
+ "samples": [{
+ "elapsed": 15086,
+ "send-bytes-rate": 0,
+ "recv-msg-rate": 0,
+ "send-msg-rate": 0.06628662335940608,
+ "recv-bytes-rate": 0
+ },
+ {
+ "elapsed": 16086,
+ "send-bytes-rate": 0,
+ "recv-msg-rate": 1579,
+ "max-latency": 928296,
+ "min-latency": 278765,
+ "avg-latency": 725508,
+ "send-msg-rate": 388994,
+ "recv-bytes-rate": 0
+ },
+ {
+ "elapsed": 48184,
+ "send-bytes-rate": 0,
+ "recv-msg-rate": 3768.4918347742555,
+ "max-latency": 32969370,
+ "min-latency": 31852685,
+ "avg-latency": 32385432,
+ "send-msg-rate": 0,
+ "recv-bytes-rate": 0
+ },
+ {
+ "elapsed": 49186,
+ "send-bytes-rate": 0,
+ "recv-msg-rate": 4416.167664670658,
+ "max-latency": 33953465,
+ "min-latency": 32854771,
+ "avg-latency": 33373113,
+ "send-msg-rate": 0,
+ "recv-bytes-rate": 0
+ }]
+ }
+}
+----
+
+- Key result points:
+
+|===
+|Metrics |Unit |Result
+
+|Publisher throughput (the sending rate)
+|messages / second
+|3111
+
+|Consumer throughput (the receiving rate)
+|messages / second
+|4404
+|===
+
+=== Benchmark S3 storage
+
+==== Benchmark methodology
+
+===== Benchmark tool
+We use https://github.com/dvassallo/s3-benchmark[s3-benchmark] tool.
+
+===== How to benchmark
+1. Make sure you set up appropriate S3 credentials with `awscli`.
+2. If you are using a compatible S3 storage of cloud providers like OVH, you would need to configure
+`awscli-plugin-endpoint`. E.g: https://docs.ovh.com/au/en/storage/getting_started_with_the_swift_S3_API/[Getting started with the OVH Swift S3 API]
+3. Install `s3-benchmark` tool and run the command:
+
+[source,bash]
+----
+./s3-benchmark -endpoint=[endpoint] -region=[region] -bucket-name=[bucket-name] -payloads-min=[payload-min] -payloads-max=[payload-max] threads-max=[threads-max]
+----
+
+==== Sample benchmark result
+We did S3 performance testing with suitable email objects sizes: 4 KB, 128 KB, 1 MB, 8 MB.
+
+Result:
+
+[source,bash]
+----
+--- SETUP --------------------------------------------------------------------------------------------------------------------
+
+Uploading 4 KB objects
+ 100% |████████████████████████████████████████| [4s:0s]
+Uploading 128 KB objects
+ 100% |████████████████████████████████████████| [9s:0s]
+Uploading 1 MB objects
+ 100% |████████████████████████████████████████| [8s:0s]
+Uploading 8 MB objects
+ 100% |████████████████████████████████████████| [10s:0s]
+
+--- BENCHMARK ----------------------------------------------------------------------------------------------------------------
+
+Download performance with 4 KB objects (b2-30)
+ +-------------------------------------------------------------------------------------------------+
+ | Time to First Byte (ms) | Time to Last Byte (ms) |
++---------+----------------+------------------------------------------------+------------------------------------------------+
+| Threads | Throughput | avg min p25 p50 p75 p90 p99 max | avg min p25 p50 p75 p90 p99 max |
++---------+----------------+------------------------------------------------+------------------------------------------------+
+| 8 | 0.6 MB/s | 36 10 17 22 36 57 233 249 | 37 10 17 22 36 57 233 249 |
+| 9 | 0.6 MB/s | 30 10 15 21 33 45 82 234 | 30 10 15 21 33 45 83 235 |
+| 10 | 0.2 MB/s | 55 11 18 22 28 52 248 1075 | 55 11 18 22 28 52 249 1075 |
+| 11 | 0.3 MB/s | 66 11 18 23 45 233 293 683 | 67 11 19 23 45 233 293 683 |
+| 12 | 0.6 MB/s | 35 12 19 22 43 55 67 235 | 35 12 19 22 43 56 67 235 |
+| 13 | 0.2 MB/s | 68 11 19 26 58 79 279 1037 | 68 11 19 26 58 80 279 1037 |
+| 14 | 0.6 MB/s | 43 17 20 24 52 56 230 236 | 43 17 20 25 52 56 230 236 |
+| 15 | 0.2 MB/s | 69 11 16 23 50 66 274 1299 | 69 11 16 24 50 66 274 1299 |
+| 16 | 0.5 MB/s | 52 9 19 31 81 95 228 237 | 53 9 19 31 81 95 229 237 |
++---------+----------------+------------------------------------------------+------------------------------------------------+
+
+Download performance with 128 KB objects (b2-30)
+ +-------------------------------------------------------------------------------------------------+
+ | Time to First Byte (ms) | Time to Last Byte (ms) |
++---------+----------------+------------------------------------------------+------------------------------------------------+
+| Threads | Throughput | avg min p25 p50 p75 p90 p99 max | avg min p25 p50 p75 p90 p99 max |
++---------+----------------+------------------------------------------------+------------------------------------------------+
+| 8 | 3.3 MB/s | 71 16 22 28 39 66 232 1768 | 73 16 23 29 43 67 233 1769 |
+| 9 | 3.6 MB/s | 74 9 19 23 34 58 239 1646 | 75 10 20 24 37 59 240 1647 |
+| 10 | 2.9 MB/s | 97 16 21 24 48 89 656 2034 | 99 17 21 26 49 92 657 2035 |
+| 11 | 3.0 MB/s | 100 10 21 26 39 64 1049 2029 | 101 11 21 27 40 65 1050 2030 |
+| 12 | 3.0 MB/s | 76 12 19 24 44 56 256 2012 | 77 13 20 25 48 69 258 2013 |
+| 13 | 6.1 MB/s | 73 10 13 20 43 223 505 1026 | 74 10 15 21 43 224 506 1027 |
+| 14 | 5.5 MB/s | 81 11 15 23 51 240 666 1060 | 82 12 16 23 54 241 667 1060 |
+| 15 | 2.7 MB/s | 80 10 19 28 43 59 234 2222 | 84 11 25 34 47 60 236 2224 |
+| 16 | 18.6 MB/s | 58 10 19 26 61 224 248 266 | 61 10 22 29 65 224 249 267 |
++---------+----------------+------------------------------------------------+------------------------------------------------+
+
+Download performance with 1 MB objects (b2-30)
+ +-------------------------------------------------------------------------------------------------+
+ | Time to First Byte (ms) | Time to Last Byte (ms) |
++---------+----------------+------------------------------------------------+------------------------------------------------+
+| Threads | Throughput | avg min p25 p50 p75 p90 p99 max | avg min p25 p50 p75 p90 p99 max |
++---------+----------------+------------------------------------------------+------------------------------------------------+
+| 8 | 56.4 MB/s | 41 12 26 34 43 57 94 235 | 136 30 69 100 161 284 345 396 |
+| 9 | 55.2 MB/s | 53 19 32 39 50 69 238 247 | 149 26 84 117 164 245 324 655 |
+| 10 | 33.9 MB/s | 74 17 27 37 50 77 456 1060 | 177 29 97 134 205 273 484 1076 |
+| 11 | 57.3 MB/s | 56 26 35 44 57 71 251 298 | 185 40 93 129 216 329 546 871 |
+| 12 | 37.7 MB/s | 66 21 33 43 58 73 102 1024 | 202 24 81 125 205 427 839 1222 |
+| 13 | 57.6 MB/s | 59 24 35 40 58 71 275 289 | 215 40 94 181 288 393 500 674 |
+| 14 | 47.1 MB/s | 73 18 46 56 66 75 475 519 | 229 30 116 221 272 441 603 686 |
+| 15 | 58.2 MB/s | 65 11 40 51 63 75 260 294 | 243 29 132 174 265 485 831 849 |
+| 16 | 23.1 MB/s | 96 14 46 55 62 80 124 2022 | 278 31 124 187 249 634 827 2028 |
++---------+----------------+------------------------------------------------+------------------------------------------------+
+
+Download performance with 8 MB objects (b2-30)
+ +-------------------------------------------------------------------------------------------------+
+ | Time to First Byte (ms) | Time to Last Byte (ms) |
++---------+----------------+------------------------------------------------+------------------------------------------------+
+| Threads | Throughput | avg min p25 p50 p75 p90 p99 max | avg min p25 p50 p75 p90 p99 max |
++---------+----------------+------------------------------------------------+------------------------------------------------+
+| 8 | 58.4 MB/s | 88 35 65 79 88 96 288 307 | 1063 458 564 759 928 1151 4967 6841 |
+| 9 | 50.4 MB/s | 137 32 52 69 145 286 509 1404 | 1212 160 471 581 1720 2873 3744 4871 |
+| 10 | 58.2 MB/s | 77 46 54 66 77 98 275 285 | 1319 377 432 962 1264 3232 4266 6151 |
+| 11 | 58.4 MB/s | 97 32 63 72 80 91 323 707 | 1429 325 593 722 1648 3020 6172 6370 |
+| 12 | 58.5 MB/s | 108 26 65 81 91 261 301 519 | 1569 472 696 1101 1915 3175 4066 5110 |
+| 13 | 56.1 MB/s | 115 35 69 83 93 125 329 1092 | 1712 458 801 1165 2354 3559 3865 5945 |
+| 14 | 58.6 MB/s | 103 26 70 78 88 112 309 656 | 1807 789 999 1269 1998 3258 5201 6651 |
+| 15 | 58.3 MB/s | 113 31 55 67 79 134 276 1490 | 1947 497 1081 1756 2730 3557 3799 3974 |
+| 16 | 58.0 MB/s | 99 35 67 79 96 146 282 513 | 2091 531 882 1136 2161 6034 6686 6702 |
++---------+----------------+------------------------------------------------+------------------------------------------------+
+----
+
+We believe that the actual OVH Swift S3 throughput should be at least about 100 MB/s. This was not fully achieved due to
+network limitations of the client machine performing the benchmark.
+
+=== Benchmark Redis
+
+==== Benchmark methodology
+
+We can use the built-in https://redis.io/docs/latest/operate/oss_and_stack/management/optimization/benchmarks/[redis-benchmark utility].
+
+The tool is easy to use with good documentation. Just to be sure that you specify the redis-benchmark to use multi-thread if it runs against a multi-thread Redis instance.
+
+Example:
+```
+redis-benchmark -n 1000000 --threads 4
+```
diff --git a/docs/modules/servers/partials/benchmark/index.adoc b/docs/modules/servers/partials/benchmark/index.adoc
new file mode 100644
index 00000000000..6077f67481f
--- /dev/null
+++ b/docs/modules/servers/partials/benchmark/index.adoc
@@ -0,0 +1,7 @@
+The following pages detail how to do performance testing for {server-name} as well as its databases.
+
+Once you have a {server-name} up and running you then need to ensure it operates correctly and has decent performance.
+You may need to do performance testing periodically to make sure your James performs well.
+
+We introduced xref:{xref-base}/benchmark/james-benchmark.adoc[tools and base benchmark results for {server-name}] as well as xref:{xref-base}/benchmark/db-benchmark.adoc[James databases' base performance and how to benchmark them]
+to cover this topic.
\ No newline at end of file
diff --git a/docs/modules/servers/partials/benchmark/james-benchmark.adoc b/docs/modules/servers/partials/benchmark/james-benchmark.adoc
new file mode 100644
index 00000000000..308281428cc
--- /dev/null
+++ b/docs/modules/servers/partials/benchmark/james-benchmark.adoc
@@ -0,0 +1,101 @@
+This document provides benchmark methodology and basic performance of {server-name} as a basis for a James administrator who
+can test and evaluate if his {server-name} is performing well.
+
+It includes:
+
+* A sample {server-name} deployment topology
+* Propose benchmark methodology
+* Sample performance results
+
+This aims to help operators quickly identify performance issues.
+
+== Sample deployment topology
+
+We deploy a sample topology of {server-name} with these following components:
+
+- {server-name}: 3 Kubernetes pods, each pod has 2 OVH vCore CPU and 4 GB memory limit.
+- OpenDistro 1.13.1 as search engine: 3 nodes, each node has 8 OVH vCores CPU and 30 GB memory limit (OVH b2-30 instance).
+- RabbitMQ 3.8.17 as message queue: 3 Kubernetes pods, each pod has 0.6 OVH vCore CPU and 2 GB memory limit.
+- OVH Swift S3 as an object storage
+- {backend-database-extend-sample}
+
+== Benchmark methodology and base performance
+
+include::{benchmark_prepare_extend}[]
+
+=== Provision testing data
+
+Before doing the performance test, you should make sure you have a {server-name} up and running with some provisioned testing
+data so that it is representative of reality.
+
+Please follow these steps to provision testing data:
+
+* Prepare James with a custom `mailetcontainer.xml` having the Random storing mailet. This helps us easily set a good amount of
+provisioned emails.
+
+Add this under transport processor
+
+[source,xml]
+----
+
+----
+
+* Modify {provision_file_url}[provision.sh]
+upon your need (number of users, mailboxes, emails to be provisioned).
+
+Currently, this script provisions 10 users, 15 mailboxes and hundreds of emails for example. Normally to make the performance test representative, you
+should provision thousands of users, thousands of mailboxes and millions of emails.
+
+* Add the permission to execute the script:
+----
+chmod +x provision.sh
+----
+
+* Install postfix (to get the smtp-source command):
+----
+sudo apt-get install postfix
+----
+
+* Run the provision script:
+----
+./provision.sh
+----
+
+After provisioning once, you should remove the Random storing mailet and move on to performance testing phase.
+
+=== Provide performance testing method
+
+We introduce the tailored https://github.com/linagora/james-gatling[James Gatling] which is based on https://gatling.io/[Gatling - Load testing framework]
+for performance testing against IMAP/JMAP servers. Other testing methods are welcome as long as you feel they are appropriate.
+
+Here are steps to do performance testing with James Gatling:
+
+* Setup James Gatling with `sbt` build tool
+
+* Configure the `Configuration.scala` to point to your {server-name} IMAP/JMAP server(s). For more configuration details, please read
+https://github.com/linagora/james-gatling#readme[James Gatling Readme].
+
+* Run the performance testing simulation:
+----
+$ sbt
+> gatling:testOnly SIMULATION_FQDN
+----
+
+In there: `SIMULATION_FQDN` is fully qualified class name of a performance test simulation.
+
+We did provide a lot of simulations in `org.apache.james.gatling.simulation` path. You can have a look and choose the suitable simulation.
+`sbt gatling:testOnly org.apache.james.gatling.simulation.imap.PlatformValidationSimulation` is a good starting point. Or you can even customize your simulation also!
+
+Some symbolic simulations we often use:
+
+* IMAP: `org.apache.james.gatling.simulation.imap.PlatformValidationSimulation`
+* JMAP rfc8621: `org.apache.james.gatling.simulation.jmap.rfc8621.PushPlatformValidationSimulation`
+
+=== Base performance result
+
+A sample IMAP performance testing result (PlatformValidationSimulation):
+
+image::{james-imap-base-performance-picture}[]
+
+If you get an IMAP performance far below this base performance, you should consider investigating for performance issues.
+
diff --git a/docs/modules/servers/partials/configure/batchsizes.adoc b/docs/modules/servers/partials/configure/batchsizes.adoc
new file mode 100644
index 00000000000..6e123c9f90b
--- /dev/null
+++ b/docs/modules/servers/partials/configure/batchsizes.adoc
@@ -0,0 +1,31 @@
+This file allows defining the amount of data that should be fetched 'at once' when interacting with the mailbox. This is
+needed as IMAP can generate some potentially large requests.
+
+Increasing these values tends to speed up individual requests, at the cost of enabling potentially higher load.
+
+Consult this link:{sample-configuration-prefix-url}/batchsizes.properties[example]
+to get some examples and hints.
+
+.batchsizes.properties content
+|===
+| Property name | explanation
+
+| fetch.metadata
+| Optional, defaults to 200. How many messages should be read in a batch when using FetchType.MetaData
+
+| fetch.headers
+| Optional, defaults to 200. How many messages should be read in a batch when using FetchType.Header
+
+| fetch.body
+| Optional, defaults to 100. How many messages should be read in a batch when using FetchType.Body
+
+| fetch.full
+| Optional, defaults to 50. How many messages should be read in a batch when using FetchType.Full
+
+| copy
+| Optional, defaults to 200. How many messages should be copied in a batch.
+
+| move
+| Optional, defaults to 200. How many messages should be moved in a batch.
+
+|===
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/blobstore.adoc b/docs/modules/servers/partials/configure/blobstore.adoc
new file mode 100644
index 00000000000..e928386bbbe
--- /dev/null
+++ b/docs/modules/servers/partials/configure/blobstore.adoc
@@ -0,0 +1,173 @@
+
+=== Encryption choice
+
+Data can be optionally encrypted with a symmetric key using AES before being stored in the blobStore. As many users rely
+on third parties for object storage, a compromised third party will not escalate to a data disclosure. Of course, a
+performance price has to be paid, as encryption takes resources.
+
+*encryption.aes.enable* : Optional boolean, defaults to false.
+
+If AES encryption is enabled, then the following properties MUST be present:
+
+ - *encryption.aes.password* : String
+ - *encryption.aes.salt* : Hexadecimal string
+
+The following properties CAN be supplied:
+
+ - *encryption.aes.private.key.algorithm* : String, defaulting to PBKDF2WithHmacSHA512. Previously was
+PBKDF2WithHmacSHA1.
+
+WARNING: Once chosen this choice can not be reverted, all the data is either clear or encrypted. Mixed encryption
+is not supported.
+
+Here is an example of how you can generate the above values (be mindful to customize the byte lengths in order to add
+enough entropy.
+
+....
+# Password generation
+openssl rand -base64 64
+
+# Salt generation
+generate salt with : openssl rand -hex 16
+....
+
+AES blob store supports the following system properties that could be configured in `jvm.properties`:
+
+....
+# Threshold from which we should buffer the blob to a file upon encrypting
+# Unit supported: K, M, G, default to no unit
+james.blob.aes.file.threshold.encrypt=100K
+
+# Threshold from which we should buffer the blob to a file upon decrypting
+# Unit supported: K, M, G, default to no unit
+james.blob.aes.file.threshold.decrypt=256K
+
+# Maximum size of a blob. Larger blobs will be rejected.
+# Unit supported: K, M, G, default to no unit
+james.blob.aes.blob.max.size=100M
+....
+
+=== Object storage configuration
+
+==== AWS S3 Configuration
+
+.blobstore.properties S3 related properties
+|===
+| Property name | explanation
+
+| objectstorage.s3.endPoint
+| S3 service endpoint
+
+| objectstorage.s3.region
+| S3 region
+
+| objectstorage.s3.accessKeyId
+| https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys[S3 access key id]
+
+| objectstorage.s3.secretKey
+| https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys[S3 access key secret]
+
+| objectstorage.s3.http.concurrency
+| Allow setting the number of concurrent HTTP requests allowed by the Netty driver.
+
+| objectstorage.s3.truststore.path
+| optional: Verify the S3 server certificate against this trust store file.
+
+| objectstorage.s3.truststore.type
+| optional: Specify the type of the trust store, e.g. JKS, PKCS12
+
+| objectstorage.s3.truststore.secret
+| optional: Use this secret/password to access the trust store; default none
+
+| objectstorage.s3.truststore.algorithm
+| optional: Use this specific trust store algorithm; default SunX509
+
+| objectstorage.s3.trustall
+| optional: boolean. Defaults to false. Cannot be set to true with other truststore options. Whether James should validate
+S3 endpoint SSL certificates.
+
+| objectstorage.s3.read.timeout
+| optional: HTTP read timeout. duration, default value being second. Leaving it empty relies on S3 driver defaults.
+
+| objectstorage.s3.write.timeout
+| optional: HTTP write timeout. duration, default value being second. Leaving it empty relies on S3 driver defaults.
+
+| objectstorage.s3.connection.timeout
+| optional: HTTP connection timeout. duration, default value being second. Leaving it empty relies on S3 driver defaults.
+
+| objectstorage.s3.in.read.limit
+| optional: Objects read in memory will be rejected if they exceed the size limit exposed here. Size, example `100M`.
+Supported units: K, M, G, defaults to B if no unit is specified. If unspecified, big objects won't be prevented
+from being loaded in memory. This setting complements protocol limits.
+
+| objectstorage.s3.upload.retry.maxAttempts
+| optional: Integer. Default is zero. This property specifies the maximum number of retry attempts allowed for failed upload operations.
+
+| objectstorage.s3.upload.retry.backoffDurationMillis
+optional: Long (Milliseconds). Default is 10 (milliseconds).
+Only takes effect when the "objectstorage.s3.upload.retry.maxAttempts" property is declared.
+This property determines the duration (in milliseconds) to wait between retry attempts for failed upload operations.
+This delay is known as backoff. The jitter factor is 0.5
+
+|===
+
+==== Buckets Configuration
+
+.Bucket configuration
+|===
+| Property name | explanation
+
+| objectstorage.bucketPrefix
+| Bucket is a concept in James and similar to Containers in Swift or Buckets in AWS S3.
+BucketPrefix is the prefix of bucket names in James BlobStore
+
+| objectstorage.namespace
+| BlobStore default bucket name. Most blobs stored in the BlobStore are inside the default bucket,
+except in special cases like storing blobs of deleted messages.
+|===
+
+== Blob Export
+
+Blob exporting is the mechanism that helps James export a blob from one user to another user.
+It is commonly used to export deleted messages (consult configuring deleted messages vault).
+The deleted messages are transformed into a blob and James will export that blob to the target user.
+
+This configuration helps you choose the blob exporting mechanism fit with your James setup and it is only applicable with Guice products.
+
+Consult {sample-configuration-prefix-url}/blob.properties[blob.properties]
+in GIT to get some examples and hints.
+
+Configuration for exporting blob content:
+
+.blobstore.properties content
+|===
+| blob.export.implementation
+
+| localFile: Local File Exporting Mechanism (explained below). Default: localFile
+
+| linshare: LinShare Exporting Mechanism (explained below)
+|===
+
+=== Local File Blob Export Configuration
+
+For each request, this mechanism retrieves the content of a blob and save it to a distinct local file, then send an email containing the absolute path of that file to the target mail address.
+
+Note: that absolute file path is the file location on James server. Therefore, if there are two or more James servers connected, it should not be considered an option.
+
+*blob.export.localFile.directory*: The directory URL to store exported blob data in files, and the URL following
+http://james.apache.org/server/3/apidocs/org/apache/james/filesystem/api/FileSystem.html[James File System scheme].
+Default: file://var/blobExporting
+
+=== LinShare Blob Export Configuration
+
+Instead of exporting blobs in local file system, using https://www.linshare.org[LinShare]
+helps you upload your blobs and people you have been shared to can access those blobs by accessing to
+LinShare server and download them.
+
+This way helps you to share via whole network as long as they can access to LinShare server.
+
+To get an example or details explained, visit {sample-configuration-prefix-url}/blob.properties[blob.properties]
+
+*blob.export.linshare.url*: The URL to connect to LinShare
+
+*blob.export.linshare.token*: The authentication token to connect to LinShare
diff --git a/docs/modules/servers/partials/configure/collecting-contacts.adoc b/docs/modules/servers/partials/configure/collecting-contacts.adoc
new file mode 100644
index 00000000000..ed103124559
--- /dev/null
+++ b/docs/modules/servers/partials/configure/collecting-contacts.adoc
@@ -0,0 +1,38 @@
+== Motivation
+
+Many modern applications combine email and contacts.
+
+We want recipients of emails sent by a user to automatically be added to this user contacts, for convenience. This
+should even be performed when a user sends emails via SMTP for example using thunderbird.
+
+== Design
+
+The idea is to send AMQP messages holding information about the mail envelope, for processing by a third-party application.
+
+== Configuration
+
+We can achieve this goal by combining simple mailets building blocks.
+
+Here is a sample pipeline achieving aforementioned objectives :
+
+[source,xml]
+....
+
+ extractedContacts
+
+
+ amqp://${env:JAMES_AMQP_USERNAME}:${env:JAMES_AMQP_PASSWORD}@${env:JAMES_AMQP_HOST}:${env:JAMES_AMQP_PORT}
+ collector:email
+ extractedContacts
+
+
+....
+
+A sample message looks like:
+
+....
+{
+ "userEmail": "sender@james.org",
+ "emails": ["to@james.org"]
+}
+....
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/collecting-events.adoc b/docs/modules/servers/partials/configure/collecting-events.adoc
new file mode 100644
index 00000000000..4a3ee1f87d0
--- /dev/null
+++ b/docs/modules/servers/partials/configure/collecting-events.adoc
@@ -0,0 +1,68 @@
+== Motivation
+
+Many calendar applications automatically add event invitations received by email directly to one's calendar.
+
+Such behaviour requires the calendar application to be aware of the ICalendar related emails a user received.
+
+== Design
+
+The idea is to write a portion of mailet pipeline extracting Icalendar attachments and to hold them as attachments that
+can later be sent to other applications over AMQP to be treated in an asynchronous, decoupled fashion.
+
+== Configuration
+
+We can achieve this goal by combining simple mailets building blocks.
+
+Here is a sample pipeline achieving aforementioned objectives :
+
+[source,xml]
+....
+
+
+ text/calendar
+ rawIcalendar
+
+
+ rawIcalendar
+
+
+ rawIcalendar
+ icalendar
+
+
+ icalendar
+
+
+
+ icalendarAsJson
+ rawIcalendar
+
+
+ amqp://${env:JAMES_AMQP_USERNAME}:${env:JAMES_AMQP_PASSWORD}@${env:JAMES_AMQP_HOST}:${env:JAMES_AMQP_PORT}
+ james:events
+ icalendarAsJson
+
+
+....
+
+A sample message looks like:
+
+....
+{
+ "ical": "RAW_DATA_AS_TEXT_FOLLOWING_ICS_FORMAT",
+ "sender": "other@james.apache.org",
+ "recipient": "any@james2.apache.org",
+ "replyTo": "other@james.apache.org",
+ "uid": "f1514f44bf39311568d640727cff54e819573448d09d2e5677987ff29caa01a9e047feb2aab16e43439a608f28671ab7c10e754ce92be513f8e04ae9ff15e65a9819cf285a6962bc",
+ "dtstamp": "20170106T115036Z",
+ "method": "REQUEST",
+ "sequence": "0",
+ "recurrence-id": null
+}
+....
+
+The following pipeline positions the X-MEETING-UID in the Header in order for mail user agent to correlate events with this mail.
+The sample look like:
+```
+X-MEETING-UID: f1514f44bf39311568d640727cff54e819573448d09d2e5677987ff29caa01a9e047feb2aab16e43439a608f28671ab7c10e754ce92be513f8e04ae9ff15e65a9819cf285a6962bc
+```
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/dns.adoc b/docs/modules/servers/partials/configure/dns.adoc
new file mode 100644
index 00000000000..e61491f20e5
--- /dev/null
+++ b/docs/modules/servers/partials/configure/dns.adoc
@@ -0,0 +1,52 @@
+Consult this link:{sample-configuration-prefix-url}/dnsservice.xml[example]
+to get some examples and hints.
+
+Specifies DNS Server information for use by various components inside Apache James Server.
+
+DNS Transport services are controlled by a configuration block in
+the dnsservice.xml. This block affects SMTP remote delivery.
+
+The dnsservice tag defines the boundaries of the configuration
+block. It encloses all the relevant configuration for the DNS server.
+The behavior of the DNS service is controlled by the attributes and
+children of this tag.
+
+.dnsservice.xml content
+|===
+| Property name | explanation
+
+| servers
+| Information includes a list of DNS Servers to be used by James. These are
+specified by the server elements, each of which is a child element of the
+servers element. Each server element is the IP address of a single DNS server.
+The server elements can have multiple server children. Enter ip address of your DNS server, one IP address per server
+element. If no DNS servers are found and you have not specified any below, 127.0.0.1 will be used
+
+| autodiscover
+| true or false - If you use autodiscover and add DNS servers manually a combination of all the DNS servers will be used.
+If autodiscover is true, James will attempt to autodiscover the DNS servers configured on your underlying system.
+Currently, this works if the OS has a unix-like /etc/resolv.conf,
+or the system is Windows based with ipconfig or winipcfg. Change autodiscover to false if you would like to turn off autodiscovery
+and set the DNS servers manually in the servers section
+
+| authoritative
+| *true/false* - This tag specifies whether or not
+to require authoritative (non-cached) DNS records; to only accept DNS responses that are
+authoritative for the domain. It is primarily useful in an intranet/extranet environment.
+This should always be *false* unless you understand the implications.
+
+| maxcachesize
+| Maximum number of entries to maintain in the DNS cache (typically 50000)
+
+| negativeCacheTTL
+| Sets the maximum length of time that negative records will be stored in the DNS negative cache in
+seconds (a negative record means the name has not been found in the DNS). Values for this cache
+can be positive meaning the time in seconds before retrying to resolve the name, zero meaning no
+cache or a negative value meaning infinite caching.
+
+| singleIPperMX
+| true or false (default) - Specifies if Apache James Server must try a single server for each multihomed mx host
+
+| verbose
+| Turn on general debugging statements
+|===
diff --git a/docs/modules/servers/partials/configure/domainlist.adoc b/docs/modules/servers/partials/configure/domainlist.adoc
new file mode 100644
index 00000000000..bd693f7094b
--- /dev/null
+++ b/docs/modules/servers/partials/configure/domainlist.adoc
@@ -0,0 +1,42 @@
+Consult this link:{sample-configuration-prefix-url}/domainlist.xml[example]
+to get some examples and hints.
+
+This configuration block is defined by the *domainlist* tag.
+
+.domainlist.xml content
+|===
+| Property name | explanation
+
+| domainnames
+| Domainnames identifies the DNS namespace served by this instance of James.
+These domainnames are used for both matcher/mailet processing and SMTP auth
+to determine when a mail is intended for local delivery - Only applicable for XMLDomainList. The entries mentioned here will be created upon start.
+
+|autodetect
+true or false - If autodetect is true, James will attempt to discover its own host name AND
+use any explicitly specified servernames.
+If autodetect is false, James will use only the specified domainnames. Defaults to false.
+
+|autodetectIP
+true or false - If autodetectIP is not false, James will also add the IP address for each servername.
+The automatic IP detection is to support RFC 2821, Sec 4.1.3, address literals. Defaults to false.
+
+|defaultDomain
+Set the default domain which will be used if an email is sent to a recipient without a domain part.
+If no defaultdomain is set the first domain of the DomainList gets used. If the default is not yet contained by the Domain List, the domain will be created upon start.
+
+|read.cache.enable
+|Experimental. Boolean, defaults to false.
+Whether or not to cache domainlist.contains calls. Enables faster execution; however, writes will take time
+to propagate.
+
+|read.cache.expiracy
+|Experimental. String (duration), defaults to 10 seconds (10s). Supported units are ms, s, m, h, d, w, month, y.
+Expiracy of the cache. Longer means less reads are performed to the backend but writes will take longer to propagate.
+Low values (a few seconds) are advised.
+
+
+|===
+
+To override autodetected domainnames simply add explicit domainname elements.
+In most cases this will be necessary. By default, the domainname 'localhost' is specified. This can be removed, if required.
diff --git a/docs/modules/servers/partials/configure/droplists.adoc b/docs/modules/servers/partials/configure/droplists.adoc
new file mode 100644
index 00000000000..f08ae18a9b7
--- /dev/null
+++ b/docs/modules/servers/partials/configure/droplists.adoc
@@ -0,0 +1,30 @@
+The DropList, also known as the mail blacklist, is a collection of
+domains and email addresses that are denied from sending emails within the system.
+It is disabled by default.
+To enable it, modify the `droplists.properties` file and include the `IsInDropList` matcher in the `mailetcontainer.xml`.
+To disable it, adjust the `droplists.properties` file and remove the `IsInDropList` matcher from the `mailetcontainer.xml`.
+
+.droplists.properties content
+|===
+| Property name | explanation
+
+| enabled
+| Boolean. Governs whether DropLists should be enabled. Defaults to `false`.
+|===
+
+== Enabling Matcher
+
+Plug the `IsInDropList` matcher within `mailetcontainer.xml` :
+
+[source,xml]
+....
+
+ transport
+
+....
+
+== DropList management
+
+DropList management, including adding and deleting entries, is performed through the WebAdmin REST API.
+
+See xref:{pages-path}/operate/webadmin.adoc#_administrating_droplists[WebAdmin DropLists].
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/dsn.adoc b/docs/modules/servers/partials/configure/dsn.adoc
new file mode 100644
index 00000000000..9ff0cfb3f72
--- /dev/null
+++ b/docs/modules/servers/partials/configure/dsn.adoc
@@ -0,0 +1,217 @@
+DSN introduced in link:https://tools.ietf.org/html/rfc3461[RFC-3461] allows a SMTP sender to demand status messages,
+defined in link:https://tools.ietf.org/html/rfc3464[RFC-3464] to be sent back to the `Return-Path` upon delivery
+progress.
+
+DSN support is not enabled by default, as it needs specific configuration of the
+xref:{pages-path}/configure/mailetcontainer.adoc[mailetcontainer.xml] to be specification compliant.
+
+To enable it you need to:
+
+- Add DSN SMTP hooks as part of the SMTP server stack
+- Configure xref:{pages-path}/configure/mailetcontainer.adoc[mailetcontainer.xml] to generate DSN bounces when needed
+
+== Enabling DSN in SMTP server stack
+
+For this simply add the `DSN hooks` in the handler chain in `smtpserver.xml` :
+
+[source,xml]
+....
+
+ <...>
+
+
+
+
+
+ <...>
+
+
+
+....
+
+== Enabling DSN generation as part of mail processing
+
+For the below conditions to be matched we assume you follow
+xref:{pages-path}/configure/remote-delivery-error-handling.adoc[RemoteDelivery error handling for MXs], which is a
+requirement for detailed RemoteDelivery error and delay handling on top of the {server-name}.
+
+Here is a sample xref:{pages-path}/configure/mailetcontainer.adoc[mailetcontainer.xml] achieving the following DSN generation:
+
+- Generate a generic `delivered` notification if LocalDelivery succeeded, if requested
+- Generate a generic `failed` notification in case of local errors, if requested
+- Generate a specific `failed` notification in case of a non existing local user, if requested
+- Generate a specific `failed` notification in case of an address rewriting loop, if requested
+- Generate a `failed` notification in case of remote permanent errors, if requested. We blame the remote server...
+- Generate a `delayed` notification in case of temporary remote errors we are about to retry, if requested. We blame the remote server...
+- Generate a `failed` notification in case of temporary remote errors we are not going to retry (failed too many time), if requested. We blame the remote server...
+
+[subs=attributes+,xml]
+----
+
+
+
+
+ \
+
+
+
+
+
+
+
+
+
+ [FAILED]
+ true
+ Hi. This is the James mail server at [machine].
+I'm afraid I wasn't able to deliver your message to the following addresses.
+This is a permanent error; I've given up. Sorry it didn't work out. Below
+I include the list of recipients, and the reason why I was unable to deliver
+your message.
+ failed
+ 5.0.0
+
+
+ {mailet-repository-path-prefix}://var/mail/error/
+
+
+
+
+
+
+
+ false
+
+
+
+ [SUCCESS]
+ true
+ Hi. This is the James mail server at [machine].
+I successfully delivered your message to the following addresses.
+Note that it indicates your recipients received the message but do
+not imply they read it.
+ delivered
+ 2.0.0
+
+
+
+
+
+
+
+ outgoing
+ 0
+ 0
+ 10
+ true
+
+ remote-delivery-error
+
+
+
+ [FAILED]
+ true
+ Hi. This is the James mail server at [machine].
+I'm afraid I wasn't able to deliver your message to the following addresses.
+This is a permanent error; I've given up. Sorry it didn't work out.
+The remote server we should relay this mail to keep on failing.
+Below I include the list of recipients, and the reason why I was unable to deliver
+your message.
+ failed
+ 5.0.0
+
+
+ {mailet-repository-path-prefix}://var/mail/error/remote-delivery/permanent/
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ [FAILED]
+ true
+ Hi. This is the James mail server at [machine].
+I'm afraid I wasn't able to deliver your message to the following addresses.
+This is a permanent error; I've given up. Sorry it didn't work out.
+The remote server we should relay this mail to returns a permanent error.
+Below I include the list of recipients, and the reason why I was unable to deliver
+your message.
+ failed
+ 5.0.0
+
+
+
+ [DELAYED]
+ true
+ Hi. This is the James mail server at [machine].
+I'm afraid I wasn't able to deliver your message to the following addresses yet.
+This is a temporary error: I will keep on trying.
+Below I include the list of recipients, and the reason why I was unable to deliver
+your message.
+ delayed
+ 4.0.0
+
+
+
+
+
+
+
+ [FAILED]
+ true
+ Hi. This is the James mail server at [machine].
+I'm afraid I wasn't able to deliver your message to the following addresses.
+This is a permanent error; I've given up. Sorry it didn't work out.
+The following addresses do not exist here. Sorry.
+ failed
+ 5.0.0
+
+
+ {mailet-repository-path-prefix}://var/mail/address-error/
+
+
+
+
+
+
+ {mailet-repository-path-prefix}://var/mail/relay-denied/
+ Warning: You are sending an e-mail to a remote server. You must be authenticated to perform such an operation
+
+
+
+
+
+ {mailet-repository-path-prefix}://var/mail/rrt-error/
+ true
+
+
+
+ [FAILED]
+ true
+ Hi. This is the James mail server at [machine].
+I'm afraid I wasn't able to deliver your message to the following addresses.
+This is a permanent error; I've given up. Sorry it didn't work out.
+The following addresses is caught in a rewriting loop. An admin should come and fix it (you likely want to report it).
+Once resolved the admin should be able to resume the processing of your email.
+Below I include the list of recipients, and the reason why I was unable to deliver
+your message.
+ failed
+ 5.1.6
+
+
+
+
+----
+
+== Limitations
+
+The out of the box tooling do not allow generating `relayed` DSN notification as RemoteDelivery misses a success
+callback.
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/extensions.adoc b/docs/modules/servers/partials/configure/extensions.adoc
new file mode 100644
index 00000000000..e26adc69ee8
--- /dev/null
+++ b/docs/modules/servers/partials/configure/extensions.adoc
@@ -0,0 +1,60 @@
+This files enables an operator to define additional bindings used to instantiate others extensions
+
+*guice.extension.module*: comma separated list of fully qualified class names. These classes need to implement Guice modules.
+
+Here is an example of such a class :
+
+[source,java]
+....
+public class MyServiceModule extends AbstractModule {
+ @Override
+ protected void configure() {
+ bind(MyServiceImpl.class).in(Scopes.SINGLETON);
+ bind(MyService.class).to(MyServiceImpl.class);
+ }
+}
+....
+
+Recording it in extensions.properties :
+
+....
+guice.extension.module=com.project.MyServiceModule
+....
+
+Enables to inject MyService into your extensions.
+
+
+*guice.extension.tasks*: comma separated list of fully qualified class names.
+
+The extension can rely on the Task manager to supervise long-running task execution (progress, await, cancellation, scheduling...).
+These extensions need to implement Task extension modules.
+
+Here is an example of such a class :
+
+[source,java]
+....
+public class RspamdTaskExtensionModule implements TaskExtensionModule {
+
+ @Inject
+ public RspamdTaskExtensionModule() {
+ }
+
+ @Override
+ public Set> taskDTOModules() {
+ return Set.of(...);
+ }
+
+ @Override
+ public Set> taskAdditionalInformationDTOModules() {
+ return Set.of(...);
+ }
+}
+....
+
+Recording it in extensions.properties :
+
+....
+guice.extension.tasks=com.project.RspamdTaskExtensionModule
+....
+
+Read xref:customization:index.adoc#_defining_custom_injections_for_your_extensions[this page] for more details.
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/forCoreComponentsPartial.adoc b/docs/modules/servers/partials/configure/forCoreComponentsPartial.adoc
new file mode 100644
index 00000000000..2ea8a961022
--- /dev/null
+++ b/docs/modules/servers/partials/configure/forCoreComponentsPartial.adoc
@@ -0,0 +1,15 @@
+== For core components
+
+By omitting these files, sane default values are used.
+
+** xref:{xref-base}/batchsizes.adoc[*batchsizes.properties*] allows to configure mailbox read batch sizes link:{sample-configuration-prefix-url}/sample-configuration/batchsizes.properties[example]
+** xref:{xref-base}/dns.adoc[*dnsservice.xml*] allows to configure DNS resolution link:{sample-configuration-prefix-url}/sample-configuration/dnsservice.xml[example]
+** xref:{xref-base}/domainlist.adoc[*domainlist.xml*] allows to configure Domain storage link:{sample-configuration-prefix-url}/sample-configuration/domainlist.xml[example]
+** xref:{xref-base}/healthcheck.adoc[*healthcheck.properties*] allows to configure periodical healthchecks link:{sample-configuration-prefix-url}/sample-configuration/healthcheck.properties[example]
+** xref:{xref-base}/mailetcontainer.adoc[*mailetcontainer.xml*] allows configuring mail processing link:{sample-configuration-prefix-url}/sample-configuration/mailetcontainer.xml[example]
+*** xref:{xref-base}/mailets.adoc[This page] lists mailets that can be used out of the box with the {server-name}.
+*** xref:{xref-base}/matchers.adoc[This page] lists matchers that can be used out of the box with the {server-name}.
+** xref:{xref-base}/mailrepositorystore.adoc[*mailrepositorystore.xml*] enables registration of allowed MailRepository protocols and links them to MailRepository implementations link:{sample-configuration-prefix-url}/sample-configuration/mailrepositorystore.xml[example]
+** xref:{xref-base}/recipientrewritetable.adoc[*recipientrewritetable.xml*] enables advanced configuration for the Recipient Rewrite Table component link:{sample-configuration-prefix-url}/sample-configuration/recipientrewritetable.xml[example]
+*** xref:{xref-base}/matchers.adoc[This page] allows choosing the indexing technology.
+** xref:{xref-base}/usersrepository.adoc[*usersrepository.xml*] allows configuration of user storage link:{sample-configuration-prefix-url}/sample-configuration/usersrepository.xml[example]
diff --git a/docs/modules/servers/partials/configure/forExtensionsPartial.adoc b/docs/modules/servers/partials/configure/forExtensionsPartial.adoc
new file mode 100644
index 00000000000..49720b50432
--- /dev/null
+++ b/docs/modules/servers/partials/configure/forExtensionsPartial.adoc
@@ -0,0 +1,14 @@
+== For extensions
+
+By omitting these files, no extra behaviour is added.
+
+** xref:{xref-base}/vault.adoc[*deletedMessageVault.properties*] allows to configure the DeletedMessageVault link:{sample-configuration-prefix-url}/sample-configuration/deletedMessageVault.properties[example]
+** xref:{xref-base}/listeners.adoc[*listeners.xml*] enables configuration of Mailbox Listeners link:{sample-configuration-prefix-url}/sample-configuration/listeners.xml[example]
+** xref:{xref-base}/extensions.adoc[*extensions.properties*] allows to extend James behaviour by loading your extensions in it link:{sample-configuration-prefix-url}/sample-configuration/extensions.properties[example]
+** xref:{xref-base}/jvm.adoc[*jvm.properties*] lets you specify additional system properties without cluttering your command line
+** xref:{xref-base}/spam.adoc[This page] documents Anti-Spam setup with SpamAssassin, Rspamd.
+** xref:{xref-base}/remote-delivery-error-handling.adoc[This page] proposes a simple strategy for RemoteDelivery error handling.
+** xref:{xref-base}/collecting-contacts.adoc[This page] documents contact collection
+** xref:{xref-base}/collecting-events.adoc[This page] documents event collection
+** xref:{xref-base}/dsn.adoc[This page] specified how to support SMTP Delivery Submission Notification (link:https://tools.ietf.org/html/rfc3461[RFC-3461])
+** xref:{xref-base}/droplists.adoc[This page] allows configuring drop lists.
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/forProtocolsPartial.adoc b/docs/modules/servers/partials/configure/forProtocolsPartial.adoc
new file mode 100644
index 00000000000..0999218482c
--- /dev/null
+++ b/docs/modules/servers/partials/configure/forProtocolsPartial.adoc
@@ -0,0 +1,15 @@
+== For protocols
+
+By omitting these files, the underlying protocols will be disabled.
+
+** xref:{xref-base}/imap.adoc[*imapserver.xml*] allows configuration for the IMAP protocol link:{sample-configuration-prefix-url}imapserver.xml[example]
+** xref:{xref-base}/jmap.adoc[*jmap.properties*] allows to configure the JMAP protocol link:{sample-configuration-prefix-url}jmap.properties[example]
+** xref:{xref-base}/jmx.adoc[*jmx.properties*] allows configuration of JMX being used by the Command Line Interface link:{sample-configuration-prefix-url}jmx.properties[example]
+** xref:{xref-base}/smtp.adoc#_lmtp_configuration[*lmtpserver.xml*] allows configuring the LMTP protocol link:{sample-configuration-prefix-url}lmtpserver.xml[example]
+** *managesieveserver.xml* allows configuration for ManagedSieve (unsupported) link:{sample-configuration-prefix-url}managesieveserver.xml[example]
+** xref:{xref-base}/pop3.adoc[*pop3server.xml*] allows configuration for the POP3 protocol (experimental) link:{sample-configuration-prefix-url}pop3server.xml[example]
+** xref:{xref-base}/smtp.adoc[*smtpserver.xml*] allows configuration for the SMTP protocol link:{sample-configuration-prefix-url}smtpserver.xml[example]
+*** xref:{xref-base}/smtp-hooks.adoc[This page] list SMTP hooks that can be used out of the box with the {server-name}.
+** xref:{xref-base}/webadmin.adoc[*webadmin.properties*] enables configuration for the WebAdmin protocol link:{sample-configuration-prefix-url}webadmin.properties[example]
+** xref:{xref-base}/ssl.adoc[This page] details SSL & TLS configuration.
+** xref:{xref-base}/sieve.adoc[This page] details Sieve setup and how to enable ManageSieve.
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/forStorageDependenciesPartial.adoc b/docs/modules/servers/partials/configure/forStorageDependenciesPartial.adoc
new file mode 100644
index 00000000000..2d498aeda1c
--- /dev/null
+++ b/docs/modules/servers/partials/configure/forStorageDependenciesPartial.adoc
@@ -0,0 +1,11 @@
+== For storage dependencies
+
+Except specific documented cases, these files are required, at least to establish a connection with the storage components.
+
+** xref:{xref-base}/blobstore.adoc[*blobstore.properties*] allows to configure the BlobStore link:{sample-configuration-prefix-url}/sample-configuration/blob.properties[example]
+
+** xref:{xref-base}/opensearch.adoc[*opensearch.properties*] allows to configure OpenSearch driver link:{sample-configuration-prefix-url}/sample-configuration/opensearch.properties[example]
+** xref:{xref-base}/rabbitmq.adoc[*rabbitmq.properties*] allows configuration for the RabbitMQ driver link:{sample-configuration-prefix-url}/sample-configuration/rabbitmq.properties[example]
+** xref:{xref-base}/redis.adoc[*redis.properties*] allows configuration for the Redis driver link:https://github.com/apache/james-project/blob/fabfdf4874da3aebb04e6fe4a7277322a395536a/server/mailet/rate-limiter-redis/redis.properties[example], that is used by optional
+distributed rate limiting component.
+** xref:{xref-base}/tika.adoc[*tika.properties*] allows configuring Tika as a backend for text extraction link:{sample-configuration-prefix-url}/sample-configuration/tika.properties[example]
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/healthcheck.adoc b/docs/modules/servers/partials/configure/healthcheck.adoc
new file mode 100644
index 00000000000..afcb1098a85
--- /dev/null
+++ b/docs/modules/servers/partials/configure/healthcheck.adoc
@@ -0,0 +1,22 @@
+Consult this link:{sample-configuration-prefix-url}/healthcheck.properties[example]
+to get some examples and hints.
+
+Use this configuration to define the initial delay and period for the PeriodicalHealthChecks. It is only applicable with Guice products.
+
+.healthcheck.properties content
+|===
+| Property name | explanation
+
+| healthcheck.period
+| Define the period between two periodical health checks (default: 60s). Units supported are (ms - millisecond, s - second, m - minute, h - hour, d - day). Default unit is millisecond.
+
+| reception.check.user
+| User to be using for running the "mail reception" health check. The user must exist.
+If not specified, the mail reception check is a noop.
+
+| reception.check.timeout
+| Period after which mail reception is considered faulty. Defaults to one minute.
+
+| additional.healthchecks
+| List of fully qualified HealthCheck class names in addition to James' default healthchecks. Default to empty list.
+|===
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/imap.adoc b/docs/modules/servers/partials/configure/imap.adoc
new file mode 100644
index 00000000000..ad910019124
--- /dev/null
+++ b/docs/modules/servers/partials/configure/imap.adoc
@@ -0,0 +1,179 @@
+Consult this link:{sample-configuration-prefix-url}/imapserver.xml[example]
+to get some examples and hints.
+
+The IMAP4 service is controlled by a configuration block in the imapserver.xml.
+The imapserver tag defines the boundaries of the configuration block. It encloses
+all the relevant configuration for the IMAP4 server. The behavior of the IMAP4 service is
+controlled by the attributes and children of this tag.
+
+This tag has an optional boolean attribute - *enabled* - that defines whether the service is active or not.
+The value defaults to "true" if not present.
+
+The standard children of the imapserver tag are:
+
+.imapserver.xml content
+|===
+| Property name | explanation
+
+| bind
+| Configure this to bind to a specific inetaddress. This is an optional integer value. This value is the port on which this IMAP4 server is configured
+to listen. If the tag or value is absent then the service
+will bind to all network interfaces for the machine. If the tag or value is omitted, the value will default to the standard IMAP4 port
+port 143 is the well-known/IANA registered port for IMAP
+port 993 is the well-known/IANA registered port for IMAPS ie over SSL/TLS
+
+| connectionBacklog
+| Number of connection backlog of the server (maximum number of queued connection requests)
+
+| compress
+| true or false - Use or don't use COMPRESS extension. Defaults to false.
+
+| maxLineLength
+| Maximal allowed line-length before a BAD response will get returned to the client
+This should be set with caution as a too high value can make the server a target for DOS (Denial of Service)!
+
+| inMemorySizeLimit
+| Optional. Size limit before we will start to stream to a temporary file.
+Defaults to 10MB. Must be a positive integer, optionally with a unit: B, K, M, G.
+
+| literalSizeLimit
+| Optional. Maximum size of a literal (IMAP APPEND).
+Defaults to 0 (unlimited). Must be a positive integer, optionally with a unit: B, K, M, G.
+
+| plainAuthDisallowed
+| Deprecated. Should use `auth.plainAuthEnabled`, `auth.requireSSL` instead.
+Whether to enable Authentication PLAIN if the connection is not encrypted via SSL or STARTTLS. Defaults to `true`.
+
+| auth.plainAuthEnabled
+| Whether to enable Authentication PLAIN/ LOGIN command. Defaults to `true`.
+
+| auth.requireSSL
+| true or false. Defaults to `true`. Whether to require SSL to authenticate. If this is required, the IMAP server will disable authentication on unencrypted channels.
+
+| auth.oidc.oidcConfigurationURL
+| Provide the OIDC URL address for user information. Only configure this when you want to authenticate the IMAP server using an OIDC provider.
+
+| auth.oidc.jwksURL
+| Provide the URL to get the OIDC's JSON Web Key Set to validate user tokens. Only configure this when you want to authenticate the IMAP server using an OIDC provider.
+
+| auth.oidc.claim
+| Claim string used to identify the user. E.g: "email_address". Only configure this when you want to authenticate the IMAP server using an OIDC provider.
+
+| auth.oidc.scope
+| An OAuth scope that is valid to access the service (cf. RFC7628). Only configure this when you want to authenticate the IMAP server using an OIDC provider.
+
+| timeout
+| Default to 30 minutes. After this time, inactive channels that have not performed read, write, or both operation for a while
+will be closed. Negative value disable this behaviour.
+
+| enableIdle
+| Default to true. If enabled IDLE commands will generate a server heartbeat on a regular period.
+
+| idleTimeInterval
+| Defaults to 120. Needs to be a strictly positive integer.
+
+| idleTimeIntervalUnit
+| Default to SECONDS. Needs to be a parseable TimeUnit.
+
+| disabledCaps
+| Implemented server capabilities NOT to advertise to the client. Comma separated list. Defaults to no disabled capabilities.
+
+| jmxName
+| The name given to the configuration
+
+| tls
+| Set to true to support STARTTLS or SSL for the Socket.
+To use this you need to copy sunjce_provider.jar to /path/james/lib directory. To create a new keystore execute:
+`keytool -genkey -alias james -keyalg RSA -storetype PKCS12 -keystore /path/to/james/conf/keystore`.
+Please note that each IMAP server exposed on different port can specify its own keystore, independently from any other
+TLS based protocols.
+
+| handler.helloName
+| This is the name used by the server to identify itself in the IMAP4
+protocol. If autodetect is TRUE, the server will discover its
+own host name and use that in the protocol. If discovery fails,
+the value of 'localhost' is used. If autodetect is FALSE, James
+will use the specified value.
+
+| connectiontimeout
+| Connection timeout in seconds
+
+| connectionLimit
+| Set the maximum simultaneous incoming connections for this service
+
+| connectionLimitPerIP
+| Set the maximum simultaneous incoming connections per IP for this service
+
+| concurrentRequests
+| Maximum number of IMAP requests executed simultaneously. Past that limit requests are queued. Defaults to 20.
+Negative values deactivate this feature, leading to unbounded concurrency.
+
+| maxQueueSize
+| Upper bound to the IMAP throttler queue. Upon burst, requests that cannot be queued are rejected and not executed.
+Integer, defaults to 4096, must be positive, 0 means no queue.
+
+| proxyRequired
+| Enables proxy support for this service for incoming connections. HAProxy's protocol
+(https://www.haproxy.org/download/2.7/doc/proxy-protocol.txt) is used and might be compatible
+with other proxies (e.g. traefik). If enabled, it is *required* to initiate the connection
+using HAProxy's proxy protocol.
+
+| bossWorkerCount
+| Set the maximum count of boss threads. Boss threads are responsible for accepting incoming IMAP connections
+and initializing associated resources. Optional integer, by default, boss threads are not used and this responsibility is being dealt with
+by IO threads.
+
+| ioWorkerCount
+| Set the maximum count of IO threads. IO threads are responsible for receiving incoming IMAP messages and framing them
+(split line by line). IO threads also take care of compression and SSL encryption. Their tasks are short-lived and non-blocking.
+Optional integer, defaults to 2 times the count of CPUs.
+
+| ignoreIDLEUponProcessing
+| true or false - Allow disabling the heartbeat handler. Defaults to true.
+
+| useEpoll
+| true or false - If true uses native EPOLL implementation for Netty otherwise uses NIO. Defaults to false.
+
+| gracefulShutdown
+| true or false - If true attempts a graceful shutdown, which is safer but can take time. Defaults to true.
+
+| highWriteBufferWaterMark
+| Netty's write buffer high watermark configuration. Unit supported: none, K, M. Netty defaults applied.
+
+| lowWriteBufferWaterMark
+| Netty's write buffer low watermark configuration. Unit supported: none, K, M. Netty defaults applied.
+|===
+
+== OIDC setup
+James IMAP supports the XOAUTH2 authentication mechanism, which allows authenticating against OIDC providers.
+Please configure the `auth.oidc` part to use this.
+
+We do supply an link:https://github.com/apache/james-project/tree/master/examples/oidc[example] of such a setup.
+It uses the Keycloak OIDC provider, but usage of similar technologies is definitely doable.
+
+== Extending IMAP
+
+IMAP decoders, processors and encoder can be customized. xref:customization:imap.adoc[Read more].
+
+Check this link:https://github.com/apache/james-project/tree/master/examples/custom-imap[example].
+
+The following configuration properties are available for extensions:
+
+.imapserver.xml content
+|===
+| Property name | explanation
+
+| imapPackages
+| Configure (union) of IMAP packages. IMAP packages bundles decoders (parsing IMAP commands) processors and encoders,
+thus enable implementing new IMAP commands or replace existing IMAP processors. List of FQDNs, which can be located in
+James extensions.
+
+| additionalConnectionChecks
+| Configure (union) of additional connection checks. ConnectionCheck will check if the connection IP is secure or not.
+
+| customProperties
+| Properties for custom extension. Each tag is a property entry, and holds a string under the form key=value.
+|===
+
+== Mail user agents auto-configuration
+
+Check this example on link:https://github.com/apache/james-project/tree/master/examples/imap-autoconf[Mail user agents auto-configuration].
diff --git a/docs/modules/servers/partials/configure/jmap.adoc b/docs/modules/servers/partials/configure/jmap.adoc
new file mode 100644
index 00000000000..5dbfd835078
--- /dev/null
+++ b/docs/modules/servers/partials/configure/jmap.adoc
@@ -0,0 +1,181 @@
+https://jmap.io/[JMAP] is intended to be a new standard for email clients to connect to mail
+stores. It therefore intends to primarily replace IMAP + SMTP submission. It is also designed to be more
+generic. It does not replace MTA-to-MTA SMTP transmission.
+
+Consult this link:{sample-configuration-prefix-url}/jmap.properties[example]
+to get some examples and hints.
+
+.jmap.properties content
+|===
+| Property name | explanation
+
+| enabled
+| true/false. Governs whether JMAP should be enabled
+
+| jmap.port
+| Optional. Defaults to 80. The port this server will be listening on. This value must be a valid
+port, ranging between 1 and 65535 (inclusive)
+
+| tls.keystoreURL
+| Keystore to be used for generating authentication tokens for password authentication mechanism.
+This should not be the same keystore as the ones used by TLS based protocols.
+
+| tls.secret
+| Password used to read the keystore
+
+| jwt.publickeypem.url
+| Optional. Comma separated list of RSA public keys URLs to validate JWT tokens allowing requests to bypass authentication.
+Defaults to an empty list.
+
+| url.prefix
+| Optional. Configuration urlPrefix for JMAP routes. Default value: http://localhost.
+
+| websocket.url.prefix
+| Optional. URL for JMAP WebSocket route. Default value: ws://localhost
+
+| email.send.max.size
+| Optional. Configuration max size for message created in RFC-8621.
+Default value: None. Supported units are B (bytes) K (KB) M (MB) G (GB).
+
+| max.size.attachments.per.mail
+| Optional. Defaults to 20MB. RFC-8621 `maxSizeAttachmentsPerEmail` advertised to JMAP client as part of the
+`urn:ietf:params:jmap:mail` capability. This needs to be at least 33% lower than `email.send.max.size` property
+(in order to account for text body, headers, base64 encoding and MIME structures).
+JMAP clients would use this property in order not to create too big emails.
+Default value: None. Supported units are B (bytes) K (KB) M (MB) G (GB).
+
+| upload.max.size
+| Optional. Configuration max size for each upload file in new JMAP-RFC-8621.
+Default value: 30M. Supported units are B (bytes) K (KB) M (MB) G (GB).
+
+| upload.quota.limit
+| Optional. Configure JMAP upload quota for total existing uploads' size per user. User exceeding the upload quota would result in old uploads being cleaned up.
+Default value: 200M. Supported units are B (bytes) K (KB) M (MB) G (GB).
+
+| view.email.query.enabled
+| Optional boolean. Defaults to false. Should simple Email/query be resolved against a {backend-name} projection, or should we resolve them against OpenSearch?
+This enables a higher resilience, but the projection needs to be correctly populated.
+
+| user.provisioning.enabled
+| Optional boolean. Defaults to true. Governs whether authenticated users that do not exist locally should be created in the users repository.
+
+| authentication.strategy.rfc8621
+| Optional List[String] with delimiter `,` . Specify which authentication strategies system admin want to use for JMAP RFC-8621 server.
+The implicit package name is `org.apache.james.jmap.http`. If you have a custom authentication strategy outside this package, you have to specify its FQDN.
+If no authentication strategy is specified, JMAP RFC-8621 server will fallback to default strategies:
+`JWTAuthenticationStrategy`, `BasicAuthenticationStrategy`.
+
+| jmap.version.default
+| Optional string. Defaults to `rfc-8621`. Allowed values: rfc-8621
+Which version of the JMAP protocol should be served when none supplied in the Accept header.
+
+| dynamic.jmap.prefix.resolution.enabled
+| Optional boolean. Defaults to false. When enabled, the JMAP session endpoint returns a dynamic prefix in its response.
+When this configuration is true, and the HTTP request to the JMAP session endpoint has a `X-JMAP-PREFIX` header with the value `http://new-domain/prefix`,
+then `apiUrl, downloadUrl, uploadUrl, eventSourceUrl, webSocketUrl` in response will be changed with a new prefix. Example: The `apiUrl` will be "http://new-domain/prefix/jmap".
+If the HTTP request to Jmap session endpoint has the `X-JMAP-WEBSOCKET-PREFIX` header with the value `ws://new-domain/prefix`,
+then `capabilities."urn:ietf:params:jmap:websocket".url` in response will be "ws://new-domain/prefix/jmap/ws".
+
+| webpush.prevent.server.side.request.forgery
+| Optional boolean. Prevent server side request forgery by preventing calls to the private network ranges. Defaults to true, can be disabled for testing.
+
+| cassandra.filter.projection.activated
+|Optional boolean. Defaults to false. Cassandra backends only. Whether to use or not the Cassandra projection
+for JMAP filters. This projection optimizes reads, but needs to be correctly populated. Turning it on on
+systems with filters already defined would result in those filters not being read.
+
+| delay.sends.enabled
+| Optional boolean. Defaults to false. Whether to support or not the delay send with JMAP protocol.
+
+| disabled.capabilities
+| Optional, defaults to empty. Comma separated list of JMAP capabilities to reject.
+This allows to prevent users from using some specific JMAP extensions.
+
+| email.get.full.max.size
+| Optional, default value is 5. The max number of items for EmailGet full reads.
+
+| get.max.size
+| Optional, default value is 500. The max number of items for /get methods.
+
+| set.max.size
+| Optional, default value is 500. The max number of items for /set methods.
+|===
+
+== Wire tapping
+
+Enabling *TRACE* on `org.apache.james.jmap.wire` enables reactor-netty wiretap, logging
+all incoming and outgoing requests. This will also log potentially sensitive information
+like authentication credentials.
+
+== OIDC set up
+
+The use of `XUserAuthenticationStrategy` allow delegating the authentication responsibility to a third party system,
+which could be used to set up authentication against an OIDC provider.
+
+We do supply an link:https://github.com[example] of such a setup. It combines the link:https://www.keycloak.org/[Keycloak]
+OIDC provider with the link:https://www.krakend.io/[KrakenD] API gateway, but usage of similar technologies is definitely doable.
+
+== Generating a JWT key pair
+
+Apache James can alternatively be configured to check the validity of JWT tokens itself. No revocation mechanism is
+supported in such a setup, and the `sub` claim is used to identify the user. The key configuration is static.
+
+This requires the `JWTAuthenticationStrategy` authentication strategy to be used.
+
+The {server-name} enforces the use of RSA-SHA-256.
+
+One can use OpenSSL to generate a JWT key pair :
+
+ # private key
+ openssl genrsa -out rs256-4096-private.rsa 4096
+ # public key
+ openssl rsa -in rs256-4096-private.rsa -pubout > rs256-4096-public.pem
+
+The private key can be used to generate JWT tokens, for instance
+using link:https://github.com/vandium-io/jwtgen[jwtgen]:
+
+ jwtgen -a RS256 -p rs256-4096-private.rsa -c "sub=bob@domain.tld" -e 3600 -V
+
+This token can then be passed as `Bearer` of the `Authorization` header :
+
+ curl -H "Authorization: Bearer $token" -XPOST http://127.0.0.1:80/jmap -d '...'
+
+The public key can be referenced as `jwt.publickeypem.url` of the `jmap.properties` configuration file.
+
+== Annotated specification
+
+The link:https://github.com/apache/james-project/tree/master/server/protocols/jmap-rfc-8621/doc/specs/spec[annotated documentation]
+presents the limits of the JMAP RFC-8621 implementation part of the Apache James project. We furthermore implement
+link:https://tools.ietf.org/html/rfc8887[JSON Meta Application Protocol (JMAP) Subprotocol for WebSocket].
+
+Some methods / types are not yet implemented, some implementations are naive, and the PUSH is not supported yet.
+
+Users are invited to read these limitations before using actively the JMAP RFC-8621 implementation, and should ensure their
+client applications only uses supported operations.
+
+Contributions enhancing support are furthermore welcomed.
+
+The list of tested JMAP clients are:
+
+ - Experiments had been run on top of link:https://github.com/iNPUTmice/lttrs-android[LTT.RS]. Version in the Accept
+ headers needs to be explicitly set to `rfc-8621`. link:https://github.com/linagora/james-project/pull/4089[Read more].
+
+== JMAP auto-configuration
+
+link:https://datatracker.ietf.org/doc/html/rfc8620[RFC-8620] defining JMAP core RFC defines precisely service location.
+
+James already redirects `http://jmap.domain.tld/.well-known/jmap` to the JMAP session.
+
+You can further help your clients by publishing extra SRV records.
+
+Eg:
+
+----
+_jmap._tcp.domain.tld. 3600 IN SRV 0 1 443 jmap.domain.tld.
+----
+
+== JMAP reverse-proxy set up
+
+James implementation adds the value of `X-Real-IP` header as part of the logging MDC.
+
+This allows for reverse proxies to carry over the IP address of the client down to the JMAP server for diagnostic purpose.
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/jmx.adoc b/docs/modules/servers/partials/configure/jmx.adoc
new file mode 100644
index 00000000000..706bd52298e
--- /dev/null
+++ b/docs/modules/servers/partials/configure/jmx.adoc
@@ -0,0 +1,64 @@
+== Disclaimer
+
+JMX poses several security concerns and had been leveraged to conduct arbitrary code execution.
+This threat is mitigated by not allowing remote connections to JMX, setting up authentication and pre-authentication filters.
+However, we recommend to either run James in isolation (docker / own virtual machine) or disable JMX altogether.
+
+James JMX endpoint provides command line utilities and exposes a few metrics, also available on the metric endpoint.
+
+== Configuration
+
+This is used to configure the JMX MBean server via which all management is achieved.
+
+Consult this link:{sample-configuration-prefix-url}/jmx.properties[example]
+in GIT to get some examples and hints.
+
+.jmx.properties content
+|===
+| Property name | explanation
+
+| jmx.enabled
+| Boolean. Should the JMX server be enabled? Defaults to `true`.
+
+| jmx.address
+|The IP address (host name) the MBean Server will bind/listen to.
+
+| jmx.port
+| The port number the MBean Server will bind/listen to.
+|===
+
+To access from a remote location, it has been reported that `-Dcom.sun.management.jmxremote.ssl=false` is needed as
+a JVM argument.
+
+== JMX Security
+
+In order to set up JMX authentication, we need to put the `jmxremote.password` and `jmxremote.access` files
+into the `/conf` directory.
+
+- `jmxremote.password`: define the username and password, that will be used by the client (here is james-cli)
+
+File's content example:
+```
+james-admin pass1
+```
+
+- `jmxremote.access`: define the pair of username and access permission
+
+File's content example:
+```
+james-admin readwrite
+```
+
+When James runs with option `-Djames.jmx.credential.generation=true`, James will automatically generate `jmxremote.password` if the file does not exist.
+Then the default username is `james-admin` and a random password. This option defaults to true.
+
+=== James-cli
+
+When the JMX server starts with authentication configuration, it will require the client to provide a username/password.
+To do that, we need to set the arguments `-username` and `-password` for the command request.
+
+Command example:
+```
+james-cli -h 127.0.0.1 -p 9999 -username james-admin -password pass1 listdomains
+```
+
diff --git a/docs/modules/servers/partials/configure/jvm.adoc b/docs/modules/servers/partials/configure/jvm.adoc
new file mode 100644
index 00000000000..08e59812644
--- /dev/null
+++ b/docs/modules/servers/partials/configure/jvm.adoc
@@ -0,0 +1,102 @@
+This file may contain any additional system properties for tweaking JVM execution. When you normally would add a command line option `-Dmy.property=whatever`, you can put it in this file as `my.property=whatever` instead. These properties will be added as system properties on server start.
+
+Note that in some rare cases this might not work,
+when a property affects very early JVM start behaviour.
+
+For testing purposes, you may specify a different file path via the command line option `-Dextra.props=/some/other/jvm.properties`.
+
+== Control the threshold memory
+This governs the threshold MimeMessageInputStreamSource relies on for storing MimeMessage content on disk.
+
+In `jvm.properties`
+----
+james.message.memory.threshold=12K
+----
+
+(Optional). String (size, integer + size units, example: `12 KIB`, supported units are bytes KIB MIB GIB TIB). Defaults to 100KIB.
+
+== Enable the copy of message in memory
+Should MimeMessageWrapper use a copy of the message in memory? Or should bigger message exceeding james.message.memory.threshold
+be copied to temporary files?
+
+----
+james.message.usememorycopy=true
+----
+
+Optional. Boolean. Defaults to false. Recommended value is false.
+
+== Running resource leak detection
+It is used to detect a resource not being disposed of before it is garbage-collected.
+
+In `jvm.properties`
+----
+james.lifecycle.leak.detection.mode=advanced
+----
+
+Allowed mode values are: none, simple, advanced, testing
+
+The purpose of each mode is introduced in `config-system.xml`
+
+== Disabling host information in protocol MDC logging context
+
+Should we add the host in the MDC logging context for incoming IMAP, SMTP, POP3? Doing so, a DNS resolution
+is attempted for each incoming connection, which can be costly. Remote IP is always added to the logging context.
+
+
+In `jvm.properties`
+----
+james.protocols.mdc.hostname=false
+----
+
+Optional. Boolean. Defaults to true.
+
+== Change the encoding type used for the blobId
+
+By default, the blobId is encoded in base64 url. The property `james.blob.id.hash.encoding` allows to change the encoding type.
+The supported values are: base16, hex, base32, base32Hex, base64, base64Url.
+
+Ex in `jvm.properties`
+----
+james.blob.id.hash.encoding=base16
+----
+
+Optional. String. Defaults to base64Url.
+
+== JMAP Quota draft compatibility
+
+Some JMAP clients depend on the JMAP Quota draft specifications. The property `james.jmap.quota.draft.compatibility` allows
+to enable JMAP Quota draft compatibility for those clients and allow them a time window to adapt to the RFC-9245 JMAP Quota.
+
+Optional. Boolean. Defaults to false.
+
+Ex in `jvm.properties`
+----
+james.jmap.quota.draft.compatibility=true
+----
+To enable the compatibility.
+
+== Enable S3 metrics
+
+James supports extracting some S3 client-level metrics e.g. number of connections being used, time to acquire an S3 connection, total time to finish a S3 request...
+
+The property `james.s3.metrics.enabled` allows to enable S3 metrics collection. Please note that enabling this
+could slightly impact S3 performance.
+
+Optional. Boolean. Defaults to true.
+
+Ex in `jvm.properties`
+----
+james.s3.metrics.enabled=false
+----
+To disable the S3 metrics.
+
+== Reactor Stream Prefetch
+
+Prefetch to use in Reactor to stream conversions (S3 => InputStream). Defaults to 1.
+Higher values will tend to block less often at the price of higher memory consumption.
+
+Ex in `jvm.properties`
+----
+# james.reactor.inputstream.prefetch=4
+----
+
diff --git a/docs/modules/servers/partials/configure/listeners.adoc b/docs/modules/servers/partials/configure/listeners.adoc
new file mode 100644
index 00000000000..4b8acb66709
--- /dev/null
+++ b/docs/modules/servers/partials/configure/listeners.adoc
@@ -0,0 +1,156 @@
+{server-name} relies on an event bus system to enrich mailbox capabilities. Each
+operation performed on the mailbox will trigger related events, that can
+be processed asynchronously by potentially any James node on a
+distributed system.
+
+Mailbox listeners can register themselves on this event bus system to be
+called when an event is fired, allowing to do different kind of extra
+operations on the system.
+
+{server-name} allows the user to register potentially user defined additional mailbox listeners.
+
+Consult this link:{sample-configuration-prefix-url}/listener.xml[example]
+to get some examples and hints.
+
+== Configuration
+
+The `executeGroupListeners` configuration property controls whether to launch group mailbox listener consumption. Defaults to true. Use with caution:
+never disable on standalone james servers, and ensure at least some instances do consume group mailbox listeners within a
+clustered topology.
+
+Mailbox listener configuration is under the XML element `<listeners>`.
+
+Some MailboxListener allows you to specify if you want to run them synchronously or asynchronously. To do so,
+for MailboxListener that supports this, you can use the *async* attribute (optional, per mailet default) to govern the execution mode.
+If *true* the execution will be scheduled in a reactor elastic scheduler. If *false*, the execution is synchronous.
+
+Already provided additional listeners are documented below.
+
+=== SpamAssassinListener
+
+Provides per user real-time HAM/SPAM feedback to a SpamAssassin server depending on user actions.
+
+This mailet is asynchronous by default, but this behaviour can be overridden by the *async*
+configuration property.
+
+This MailboxListener is supported.
+
+Example:
+
+[source,xml]
+....
+<listeners>
+  <listener>
+    <class>org.apache.james.mailbox.spamassassin.SpamAssassinListener</class>
+    <async>true</async>
+  </listener>
+</listeners>
+....
+
+Please note that a `spamassassin.properties` file is needed. Read also
+xref:{pages-path}/configure/spam.adoc[this page] for extra configuration required to support this feature.
+
+=== RspamdListener
+
+Provides HAM/SPAM feedback to a Rspamd server depending on user actions.
+
+This MailboxListener is supported.
+
+Example:
+
+[source,xml]
+....
+<listeners>
+  <listener>
+    <class>org.apache.james.rspamd.RspamdListener</class>
+  </listener>
+</listeners>
+....
+
+Please note that a `rspamd.properties` file is needed. Read also
+xref:{pages-path}/configure/spam.adoc[this page] for extra configuration required to support this feature.
+
+
+=== QuotaThresholdCrossingListener
+
+Sends emails to users exceeding 80% and 99% of their quota to warn them (for instance).
+
+Here are the following properties you can configure:
+
+.QuotaThresholdCrossingListener configuration properties
+|===
+| Property name | explanation
+
+| name
+| Useful when configuring several time this listener. You might want to do so to use different rendering templates for
+different occupation thresholds.
+
+| gracePeriod
+| Period during which no more email for a given threshold should be sent.
+
+| subjectTemplate
+| Mustache template for rendering the subject of the warning email.
+
+| bodyTemplate
+| Mustache template for rendering the body of the warning email.
+
+| thresholds
+| Floating number between 0 and 1 representing the threshold of quota occupation from which a mail should be sent.
+Configuring several thresholds is supported.
+
+|===
+
+Example:
+
+[source,xml]
+....
+<listeners>
+  <listener>
+    <class>org.apache.james.mailbox.quota.mailing.listeners.QuotaThresholdCrossingListener</class>
+    <configuration>
+      <name>QuotaThresholdCrossingListener-upper-threshold</name>
+      <thresholds>
+        <threshold>
+          <value>0.8</value>
+        </threshold>
+      </thresholds>
+      <subjectTemplate>conf://templates/QuotaThresholdMailSubject.mustache</subjectTemplate>
+      <bodyTemplate>conf://templates/QuotaThresholdMailBody.mustache</bodyTemplate>
+      <gracePeriod>1week</gracePeriod>
+    </configuration>
+  </listener>
+</listeners>
+....
+
+Here are examples of templates you can use:
+
+* For subject template: `conf://templates/QuotaThresholdMailSubject.mustache`
+
+....
+Warning: Your email usage just exceeded a configured threshold
+....
+
+* For body template: `conf://templates/QuotaThresholdMailBody.mustache`
+
+....
+You receive this email because you recently exceeded a threshold related to the quotas of your email account.
+
+{{#hasExceededSizeThreshold}}
+You currently occupy more than {{sizeThreshold}} % of the total size allocated to you.
+You currently occupy {{usedSize}}{{#hasSizeLimit}} on a total of {{limitSize}} allocated to you{{/hasSizeLimit}}.
+
+{{/hasExceededSizeThreshold}}
+{{#hasExceededCountThreshold}}
+You currently occupy more than {{countThreshold}} % of the total message count allocated to you.
+You currently have {{usedCount}} messages{{#hasCountLimit}} on a total of {{limitCount}} allowed for you{{/hasCountLimit}}.
+
+{{/hasExceededCountThreshold}}
+You need to be aware that actions leading to exceeded quotas will be denied. This will result in a degraded service.
+To mitigate this issue you might reach your administrator in order to increase your configured quota. You might also delete some non important emails.
+....
+
+This MailboxListener is supported.
+
diff --git a/docs/modules/servers/partials/configure/mailetcontainer.adoc b/docs/modules/servers/partials/configure/mailetcontainer.adoc
new file mode 100644
index 00000000000..18ef8a5aee2
--- /dev/null
+++ b/docs/modules/servers/partials/configure/mailetcontainer.adoc
@@ -0,0 +1,95 @@
+This document explains how to configure Mail processing. Mails pass through the MailetContainer. The
+MailetContainer is a Matchers (condition for executing a mailet) and Mailets (execution units that perform
+actions based on incoming mail) pipeline arranged into processors (List of mailet/matcher pairs allowing
+better logical organisation). You can read more about these concepts on
+xref:{pages-path}/architecture/index.adoc#_mail_processing[the mailet container feature description].
+
+Apache James Server includes a number of xref:{pages-path}/configure/mailets.adoc[Packaged Mailets] and
+xref:{pages-path}/configure/matchers.adoc[Packaged Matchers].
+
+Furthermore, you can write and use with James xref:customization:mail-processing.adoc[your own mailet and matchers].
+
+Consult this link:{sample-configuration-prefix-url}/mailetcontainer.xml[example]
+to get some examples and hints.
+
+.mailetcontainer.xml content
+|===
+| Property name | explanation
+
+| context.postmaster
+| The body of this element is the address that the server
+will consider its postmaster address. This address will be listed as the sender address
+of all error messages that originate from James. Also, all messages addressed to
+postmaster@, where is one of the domain names whose
+mail is being handled by James, will be redirected to this email address.
+Set this to the appropriate email address for error reports
+If this is set to a non-local email address, the mail server
+will still function, but will generate a warning on startup.
+
+| spooler.threads
+| Number of simultaneous threads used to spool the mails. Set to zero, it disables mail processing - use with
+caution.
+
+| spooler.errorRepository
+| Mail repository to store email in after several unrecoverable errors. Mails failing processing, for which
+the Mailet Container could not handle Error, will be stored there after their processing had been attempted
+5 times. Note that if standard java Exception occurs, *Error handling* section below will be applied
+instead.
+|===
+
+== The Mailet Tag
+
+Consider the following simple *mailet* tag:
+
+[source,xml]
+....
+<mailet match="RemoteAddrNotInNetwork=127.0.0.1" class="ToProcessor">
+  <processor>spam</processor>
+</mailet>
+....
+
+The mailet tag has two required attributes, *match* and *class*.
+
+The *match* attribute is set to the value of the specific Matcher class to be instantiated with an
+optional argument. If present, the argument is separated from the Matcher class name by an '='. Semantic
+interpretation of the argument is left to the particular matcher.
+
+The *class* attribute is set to the value of the Mailet class that is to be instantiated.
+
+Finally, the children of the *mailet* tag define the configuration that is passed to the Mailet. The
+tags used in this section should have no attributes or children. The names and bodies of the elements will be passed to
+the mailet as (name, value) pairs.
+
+So in the example above, a Matcher instance of RemoteAddrNotInNetwork would be instantiated, and the value "127.0.0.1"
+would be passed to the matcher. The Mailet of the pair will be an instance of ToProcessor, and it will be passed the (name, value)
+pair of ("processor", "spam").
+
+== Error handling
+
+If an exception is encountered during the execution of a mailet or a matcher, the default behaviour is to
+process the mail using the *error* processor.
+
+The *onMailetException* property allows you to override this behaviour. You can specify another
+processor than the *error* one for handling the errors of this mailet.
+
+The *ignore* special value also allows to continue processing and ignore the error.
+
+The *propagate* special value causes the mailet container to rethrow the
+exception, propagating it to the execution context. In an SMTP execution context, the spooler will then requeue
+the item and automatic retries will be set up - note that attempts will be done for each recipient. In LMTP
+(if LMTP is configured to execute the mailetContainer), the entire mail transaction is reported as failed to the caller.
+
+Moreover, the *onMatcherException* allows you to override matcher error handling. You can
+specify another processor than the *error* one for handling the errors of this mailet. The *matchall*
+special value also allows you to match all recipients when there is an error. The *nomatch*
+special value also allows you to match no recipients when there is an error.
+
+Here is a short example to illustrate this:
+
+[source,xml]
+....
+<mailet match="RecipientIsLocal" class="LocalDelivery">
+  <onMailetException>deliveryError</onMailetException>
+  <onMatcherException>nomatch</onMatcherException>
+</mailet>
+....
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/mailets.adoc b/docs/modules/servers/partials/configure/mailets.adoc
new file mode 100644
index 00000000000..9c534c12748
--- /dev/null
+++ b/docs/modules/servers/partials/configure/mailets.adoc
@@ -0,0 +1,144 @@
+This documentation page lists and documents Mailet that can be used within the
+{server-name} MailetContainer in order to write your own mail processing logic with out-of-the-box components.
+
+== Supported mailets
+
+include::partial$AddDeliveredToHeader.adoc[]
+
+include::partial$AddFooter.adoc[]
+
+include::partial$AddSubjectPrefix.adoc[]
+
+include::partial$AmqpForwardAttribute.adoc[]
+
+include::partial$Bounce.adoc[]
+
+include::partial$ContactExtractor.adoc[]
+
+include::partial$ConvertTo7Bit.adoc[]
+
+include::partial$DKIMSign.adoc[]
+
+include::partial$DKIMVerify.adoc[]
+
+include::partial$DSNBounce.adoc[]
+
+include::partial$Expires.adoc[]
+
+include::partial$ExtractMDNOriginalJMAPMessageId.adoc[]
+
+include::partial$Forward.adoc[]
+
+include::partial$ICalendarParser.adoc[]
+
+include::partial$ICALToHeader.adoc[]
+
+include::partial$ICALToJsonAttribute.adoc[]
+
+include::partial$ICSSanitizer.adoc[]
+
+include::partial$LocalDelivery.adoc[]
+
+include::partial$LogMessage.adoc[]
+
+include::partial$MailAttributesListToMimeHeaders.adoc[]
+
+include::partial$MailAttributesToMimeHeaders.adoc[]
+
+include::partial$MetricsMailet.adoc[]
+
+include::partial$MimeDecodingMailet.adoc[]
+
+include::partial$NotifyPostmaster.adoc[]
+
+include::partial$NotifySender.adoc[]
+
+include::partial$Null.adoc[]
+
+include::partial$PostmasterAlias.adoc[]
+
+include::partial$RandomStoring.adoc[]
+
+include::partial$RecipientRewriteTable.adoc[]
+
+include::partial$RecipientToLowerCase.adoc[]
+
+include::partial$Redirect.adoc[]
+
+include::partial$RemoteDelivery.adoc[]
+
+include::partial$RemoveAllMailAttributes.adoc[]
+
+include::partial$RemoveMailAttribute.adoc[]
+
+include::partial$RemoveMimeHeader.adoc[]
+
+include::partial$RemoveMimeHeaderByPrefix.adoc[]
+
+include::partial$ReplaceContent.adoc[]
+
+include::partial$Resend.adoc[]
+
+include::partial$SetMailAttribute.adoc[]
+
+include::partial$SetMimeHeader.adoc[]
+
+include::partial$Sieve.adoc[]
+
+include::partial$Sign.adoc[]
+
+include::partial$SMIMECheckSignature.adoc[]
+
+include::partial$SMIMEDecrypt.adoc[]
+
+include::partial$SMIMESign.adoc[]
+
+include::partial$SpamAssassin.adoc[]
+
+include::partial$StripAttachment.adoc[]
+
+include::partial$TextCalendarBodyToAttachment.adoc[]
+
+include::partial$ToProcessor.adoc[]
+
+include::partial$ToRepository.adoc[]
+
+include::partial$ToSenderDomainRepository.adoc[]
+
+include::partial$VacationMailet.adoc[]
+
+include::partial$WithPriority.adoc[]
+
+include::partial$WithStorageDirective.adoc[]
+
+== Experimental mailets
+
+include::partial$ClamAVScan.adoc[]
+
+include::partial$ClassifyBounce.adoc[]
+
+include::partial$FromRepository.adoc[]
+
+include::partial$HeadersToHTTP.adoc[]
+
+include::partial$OnlyText.adoc[]
+
+include::partial$ManageSieveMailet.adoc[]
+
+include::partial$RecoverAttachment.adoc[]
+
+include::partial$SerialiseToHTTP.adoc[]
+
+include::partial$ServerTime.adoc[]
+
+include::partial$SPF.adoc[]
+
+include::partial$ToPlainText.adoc[]
+
+include::partial$ToSenderFolder.adoc[]
+
+include::partial$UnwrapText.adoc[]
+
+include::partial$UseHeaderRecipients.adoc[]
+
+include::partial$WrapText.adoc[]
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/mailrepositorystore.adoc b/docs/modules/servers/partials/configure/mailrepositorystore.adoc
new file mode 100644
index 00000000000..2f3589df670
--- /dev/null
+++ b/docs/modules/servers/partials/configure/mailrepositorystore.adoc
@@ -0,0 +1,34 @@
+A `mail repository` allows storage of a mail as part of its
+processing. Standard configuration relies on the following mail
+repository.
+
+A mail repository is identified by its *url*, constituted of a *protocol* and a *path*.
+
+For instance in the url `{mailet-repository-path-prefix}://var/mail/error/` `{mail-repository-protocol}` is the protocol and `var/mail/error` the path.
+
+The *mailrepositorystore.xml* file allows registration of available protocols, and their binding to actual MailRepository
+implementation. Note that extension developers can write their own MailRepository implementations, load them via the
+`extensions-jars` mechanism as documented in xref:customization:index.adoc['writing your own extensions'], and finally
+associated to a protocol in *mailrepositorystore.xml* for a usage in *mailetcontainer.xml*.
+
+== Configuration
+
+Consult this link:{sample-configuration-prefix-url}/mailrepositorystore.xml[example]
+to get some examples and hints.
+
+[subs=attributes+,xml]
+----
+<mailrepositorystore>
+    <defaultProtocol>{mail-repository-protocol}</defaultProtocol>
+    <mailrepositories>
+        <mailrepository class="{mail-repository-class}">
+            <protocols>
+                <protocol>{mail-repository-protocol}</protocol>
+            </protocols>
+        </mailrepository>
+    </mailrepositories>
+</mailrepositorystore>
+----
+
+Only the *{mail-repository-class}* is available by default for the {server-name}. Mail metadata are stored in
+{mail-repository-protocol} while the headers and bodies are stored within the xref:{pages-path}/architecture/index.adoc#_blobstore[BlobStore].
diff --git a/docs/modules/servers/partials/configure/matchers.adoc b/docs/modules/servers/partials/configure/matchers.adoc
new file mode 100644
index 00000000000..8d7915949cd
--- /dev/null
+++ b/docs/modules/servers/partials/configure/matchers.adoc
@@ -0,0 +1,164 @@
+This documentation page lists and documents Matchers that can be used within the
+{server-name} MailetContainer in order to write your own mail processing logic with out-of-the-box components.
+
+== Supported matchers
+
+include::partial$All.adoc[]
+
+include::partial$AtLeastPriority.adoc[]
+
+include::partial$AtMost.adoc[]
+
+include::partial$AtMostPriority.adoc[]
+
+include::partial$DLP.adoc[]
+
+include::partial$FetchedFrom.adoc[]
+
+include::partial$HasAttachment.adoc[]
+
+include::partial$HasException.adoc[]
+
+include::partial$HasHeader.adoc[]
+
+include::partial$HasHeaderWithPrefix.adoc[]
+
+include::partial$HasMailAttribute.adoc[]
+
+include::partial$HasMailAttributeWithValue.adoc[]
+
+include::partial$HasMailAttributeWithValueRegex.adoc[]
+
+include::partial$HasMimeType.adoc[]
+
+include::partial$HasMimeTypeParameter.adoc[]
+
+include::partial$HasPriority.adoc[]
+
+include::partial$HostIs.adoc[]
+
+include::partial$HostIsLocal.adoc[]
+
+include::partial$IsMarkedAsSpam.adoc[]
+
+include::partial$IsOverQuota.adoc[]
+
+include::partial$IsRemoteDeliveryPermanentError.adoc[]
+
+include::partial$IsRemoteDeliveryTemporaryError.adoc[]
+
+include::partial$IsSenderInRRTLoop.adoc[]
+
+include::partial$IsSingleRecipient.adoc[]
+
+include::partial$IsSMIMEEncrypted.adoc[]
+
+include::partial$IsSMIMESigned.adoc[]
+
+include::partial$IsX509CertificateSubject.adoc[]
+
+include::partial$RecipientDomainIs.adoc[]
+
+include::partial$RecipientIs.adoc[]
+
+include::partial$RecipientIsLocal.adoc[]
+
+include::partial$RecipientIsRegex.adoc[]
+
+include::partial$RelayLimit.adoc[]
+
+include::partial$RemoteAddrInNetwork.adoc[]
+
+include::partial$RemoteAddrNotInNetwork.adoc[]
+
+include::partial$RemoteDeliveryFailedWithSMTPCode.adoc[]
+
+include::partial$SenderDomainIs.adoc[]
+
+include::partial$SenderHostIs.adoc[]
+
+include::partial$SenderIs.adoc[]
+
+include::partial$SenderIsLocal.adoc[]
+
+include::partial$SenderIsNull.adoc[]
+
+include::partial$SenderIsRegex.adoc[]
+
+include::partial$SentByJmap.adoc[]
+
+include::partial$SentByMailet.adoc[]
+
+include::partial$SizeGreaterThan.adoc[]
+
+include::partial$SMTPAuthSuccessful.adoc[]
+
+include::partial$SMTPAuthUserIs.adoc[]
+
+include::partial$SMTPIsAuthNetwork.adoc[]
+
+include::partial$SubjectIs.adoc[]
+
+include::partial$SubjectStartsWith.adoc[]
+
+include::partial$TooManyRecipients.adoc[]
+
+include::partial$UserIs.adoc[]
+
+include::partial$XOriginatingIpInNetwork.adoc[]
+
+== Experimental matchers
+
+include::partial$AttachmentFileNameIs.adoc[]
+
+include::partial$CommandForListserv.adoc[]
+
+include::partial$CommandListservMatcher.adoc[]
+
+include::partial$CompareNumericHeaderValue.adoc[]
+
+include::partial$FileRegexMatcher.adoc[]
+
+include::partial$InSpammerBlacklist.adoc[]
+
+include::partial$NESSpamCheck.adoc[]
+
+include::partial$SenderInFakeDomain.adoc[]
+
+== Composite matchers
+
+It is possible to combine together matchers in order to create a composite matcher, thus simplifying your
+Mailet Container logic.
+
+Here are the available logical operations:
+
+* *And* : This matcher performs And conjunction between the two matchers: recipients needs to match both matcher in order to
+match the composite matcher.
+* *Or* : This matcher performs Or conjunction between the two matchers: consider it to be a union of the results.
+It returns recipients from the Or composition results of the child matchers.
+* *Not* : It returns recipients from the negated composition of the child Matcher(s). Consider what wasn't
+in the result set of each child matcher. Of course it is easier to understand if it only
+includes one matcher in the composition, the normal recommended use.
+* *Xor* : It returns Recipients from the Xor composition of the child matchers. Consider it to be the inequality
+operator for recipients. If any recipients match other matcher results
+then the result does not include that recipient.
+
+Here is the syntax to adopt in *mailetcontainer.xml*:
+
+[source,xml]
+....
+
+
+
+
+
+
+
+
+
+
+
+ relay
+
+
+....
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/opensearch.adoc b/docs/modules/servers/partials/configure/opensearch.adoc
new file mode 100644
index 00000000000..970c33550f6
--- /dev/null
+++ b/docs/modules/servers/partials/configure/opensearch.adoc
@@ -0,0 +1,319 @@
+== Search overrides
+
+*Search overrides* allow resolution of predefined search queries against alternative sources of data
+and allow bypassing OpenSearch. This is useful to handle most resynchronisation queries that
+are simple enough to be resolved against {package-tag}.
+
+Possible values are:
+
+- `org.apache.james.mailbox.{package-tag}.search.AllSearchOverride` Some IMAP clients uses SEARCH ALL to fully list messages in
+a mailbox and detect deletions. This is typically done by clients not supporting QRESYNC and from an IMAP perspective
+is considered an optimisation as less data is transmitted compared to a FETCH command. Resolving such requests against
+Cassandra is enabled by this search override and likely desirable.
+- `org.apache.james.mailbox.{package-tag}.search.UidSearchOverride`. Same as above but restricted by ranges.
+- `org.apache.james.mailbox.{package-tag}.search.DeletedSearchOverride`. Find deleted messages by looking up in the relevant Cassandra
+table.
+- `org.apache.james.mailbox.{package-tag}.search.DeletedWithRangeSearchOverride`. Same as above but limited by ranges.
+- `org.apache.james.mailbox.{package-tag}.search.NotDeletedWithRangeSearchOverride`. List non deleted messages in a given range.
+Lists all messages and filters out deleted message thus this is based on the following heuristic: most messages are not marked as deleted.
+- `org.apache.james.mailbox.{package-tag}.search.UnseenSearchOverride`. List unseen messages in the corresponding Cassandra projection.
+
+Please note that custom overrides can be defined here. `opensearch.search.overrides` allow specifying search overrides and is a
+comma-separated list of search override FQDNs. Defaults to none.
+
+EG:
+
+[subs=attributes+]
+----
+opensearch.search.overrides=org.apache.james.mailbox.{package-tag}.search.AllSearchOverride,org.apache.james.mailbox.{package-tag}.search.DeletedSearchOverride, org.apache.james.mailbox.{package-tag}.search.DeletedWithRangeSearchOverride,org.apache.james.mailbox.{package-tag}.search.NotDeletedWithRangeSearchOverride,org.apache.james.mailbox.{package-tag}.search.UidSearchOverride,org.apache.james.mailbox.{package-tag}.search.UnseenSearchOverride
+----
+
+Consult this link:{sample-configuration-prefix-url}/opensearch.properties[example]
+to get some examples and hints.
+
+If you want more explanation about OpenSearch configuration, you should visit the dedicated https://opensearch.org/[documentation].
+
+== OpenSearch Configuration
+
+This file section is used to configure the connection to an OpenSearch cluster.
+
+Here are the properties allowing to do so :
+
+.opensearch.properties content
+|===
+| Property name | explanation
+
+| opensearch.clusterName
+| Is the name of the cluster used by James.
+
+| opensearch.nb.shards
+| Number of shards for indexes provisioned by James
+
+| opensearch.nb.replica
+| Number of replicas for indexes provisioned by James (default: 0)
+
+| opensearch.index.waitForActiveShards
+| Wait for a certain number of active shard copies before proceeding with the operation. Defaults to 1.
+You may consult the https://www.elastic.co/guide/en/elasticsearch/reference/7.10/docs-index_.html#active-shards[documentation] for more information.
+
+| opensearch.retryConnection.maxRetries
+| Number of retries when connecting the cluster
+
+| opensearch.retryConnection.minDelay
+| Minimum delay between connection attempts
+
+| opensearch.max.connections
+| Maximum count of HTTP connections allowed for the OpenSearch driver. Optional integer; if unspecified, driver defaults
+apply (30 connections).
+
+| opensearch.max.connections.per.hosts
+| Maximum count of HTTP connections per host allowed for the OpenSearch driver. Optional integer; if unspecified, driver defaults
+apply (10 connections).
+
+|===
+
+=== Mailbox search
+
+The main use of OpenSearch within the {server-name} is indexing the mailbox content of users in order to enable
+powerful and efficient full-text search of the mailbox content.
+
+Data indexing is performed asynchronously in a reliable fashion via a MailboxListener.
+
+Here are the properties related to the use of OpenSearch for Mailbox Search:
+
+.opensearch.properties content
+|===
+| Property name | explanation
+
+| opensearch.index.mailbox.name
+| Name of the mailbox index backed by the alias. It will be created if missing.
+
+| opensearch.index.name
+| *Deprecated* Use *opensearch.index.mailbox.name* instead.
+Name of the mailbox index backed by the alias. It will be created if missing.
+
+| opensearch.alias.read.mailbox.name
+| Name of the alias to use by Apache James for mailbox reads. It will be created if missing.
+The target of the alias is the index name configured above.
+
+| opensearch.alias.read.name
+| *Deprecated* Use *opensearch.alias.read.mailbox.name* instead.
+Name of the alias to use by Apache James for mailbox reads. It will be created if missing.
+The target of the alias is the index name configured above.
+
+| opensearch.alias.write.mailbox.name
+| Name of the alias to use by Apache James for mailbox writes. It will be created if missing.
+The target of the alias is the index name configured above.
+
+| opensearch.alias.write.name
+| *Deprecated* Use *opensearch.alias.write.mailbox.name* instead.
+Name of the alias to use by Apache James for mailbox writes. It will be created if missing.
+The target of the alias is the index name configured above.
+
+| opensearch.indexAttachments
+| Indicates if you wish to index attachments or not (default: true).
+
+| opensearch.indexHeaders
+| Indicates if you wish to index headers or not (default: true). Note that specific headers
+(From, To, Cc, Bcc, Subject, Message-Id, Date, Content-Type) are still indexed in their dedicated type.
+Header indexing is expensive as each header currently need to be stored as a nested document but
+turning off headers indexing result in non-strict compliance with the IMAP / JMAP standards.
+
+| opensearch.message.index.optimize.move
+| When set to true, James will attempt to reindex from the indexed message when moved.
+If the message is not found, it will fall back to the old behavior (The message will be indexed from the blobStore source)
+Defaults to false.
+
+| opensearch.text.fuzziness.search
+| Use fuzziness on text searches. This option helps to correct user typing mistakes and makes the result a bit more flexible.
+
+Defaults to false.
+
+| opensearch.indexBody
+| Indicates if you wish to index body or not (default: true). This can be used to decrease the performance cost associated with indexing.
+
+| opensearch.indexUser
+| Indicates if you wish to index user or not (default: false). This can be used to have per user reports in OpenSearch Dashboards.
+
+|===
+
+=== Quota search
+
+Users are indexed by quota usage, allowing operators a quick audit of users quota occupation.
+
+Users quota are asynchronously indexed upon quota changes via a dedicated MailboxListener.
+
+The following properties affect quota search :
+
+.opensearch.properties content
+|===
+| Property name | explanation
+
+| opensearch.index.quota.ratio.name
+| Specify the OpenSearch alias name used for quotas
+
+| opensearch.alias.read.quota.ratio.name
+| Specify the OpenSearch alias name used for reading quotas
+
+| opensearch.alias.write.quota.ratio.name
+| Specify the OpenSearch alias name used for writing quotas
+|===
+
+=== Disabling OpenSearch
+
+The OpenSearch component can be disabled, but consider that doing so breaks the search feature. In particular it will break the JMAP protocol and the IMAP SEARCH command in a nondeterministic way.
+This is controlled in the `search.properties` file via the `implementation` property (defaults
+to `OpenSearch`). Setting this configuration parameter to `scanning` will effectively disable OpenSearch, no
+further indexation will be done however searches will rely on the scrolling search, leading to expensive and longer
+searches. Disabling OpenSearch requires no extra action, however
+xref:{pages-path}/operate/webadmin.adoc#_reindexing_all_mails[a full re-indexing] needs to be carried out when enabling OpenSearch.
+
+== SSL Trusting Configuration
+
+By default, James will use the system TrustStore to validate https server certificates, if the certificate on
+ES side is already in the system TrustStore, you can leave the sslValidationStrategy property empty or set it to default.
+
+.opensearch.properties content
+|===
+| Property name | explanation
+
+| opensearch.hostScheme.https.sslValidationStrategy
+| Optional. Accept only *default*, *ignore*, *override*. Default is *default*. default: Use the default SSL TrustStore of the system.
+ignore: Ignore SSL Validation check (not recommended).
+override: Override the SSL Context to use a custom TrustStore containing ES server's certificate.
+
+|===
+
+In some cases, you want to secure the connection from clients to ES by setting up a *https* protocol
+with a self-signed certificate, and you prefer to leave the system ca-certificates untouched.
+There are possible solutions to let the ES RestHighLevelClient trust your self-signed certificate.
+
+One solution is importing a TrustStore containing the certificate into the SSL context.
+A certificate normally contains two parts: a public part in .crt file, another private part in .key file.
+To trust the server, the client needs to be acknowledged that the server's certificate is in the list of
+client's TrustStore. Basically, you can create a local TrustStore file containing the public part of a remote server
+by executing this command:
+
+....
+keytool -import -v -trustcacerts -file certificatePublicFile.crt -keystore trustStoreFileName.jks -keypass fillThePassword -storepass fillThePassword
+....
+
+When there is a TrustStore file and the password to read, fill two options *trustStorePath*
+and *trustStorePassword* with the TrustStore location and the password. ES client will accept
+the certificate of ES service.
+
+.opensearch.properties content
+|===
+| Property name | explanation
+
+| opensearch.hostScheme.https.trustStorePath
+| Optional. Use it when https is configured in opensearch.hostScheme, and sslValidationStrategy is *override*
+Configure OpenSearch rest client to use this trustStore file to recognize nginx's ssl certificate.
+Once you chose *override*, you need to specify both trustStorePath and trustStorePassword.
+
+| opensearch.hostScheme.https.trustStorePassword
+| Optional. Use it when https is configured in opensearch.hostScheme, and sslValidationStrategy is *override*
+Configure OpenSearch rest client to use this trustStore file with the specified password.
+Once you chose *override*, you need to specify both trustStorePath and trustStorePassword.
+
+|===
+
+During SSL handshaking, the client can determine whether accept or reject connecting to a remote server by its hostname.
+You can configure to use which HostNameVerifier in the client.
+
+.opensearch.properties content
+|===
+| Property name | explanation
+
+| opensearch.hostScheme.https.hostNameVerifier
+| Optional. Default is *default*. default: using the default hostname verifier provided by apache http client.
+accept_any_hostname: accept any host (not recommended).
+
+|===
+
+== Configure dedicated language analyzers for mailbox index
+
+OpenSearch supports various language analyzers out of the box: https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-lang-analyzer.html.
+
+James can utilize this to improve the user's search experience based on their language.
+
+While one could modify the mailbox index mapping programmatically to customize this behavior, here we document a manual way to achieve this without breaking our common index mapping code.
+
+The idea is modifying mailbox index mappings with the target language analyzer as a JSON file, then submit it directly
+to OpenSearch via a cURL command to create the mailbox index before James starts. Let's adapt dedicated language analyzers
+where appropriate for the following fields:
+
+.Language analyzers propose change
+|===
+| Field | Analyzer change
+
+| from.name
+| `keep_mail_and_url` analyzer -> `keep_mail_and_url_language_a` analyzer
+
+| subject
+| `keep_mail_and_url` analyzer -> `keep_mail_and_url_language_a` analyzer
+
+| to.name
+| `keep_mail_and_url` analyzer -> `keep_mail_and_url_language_a` analyzer
+
+| cc.name
+| `keep_mail_and_url` analyzer -> `keep_mail_and_url_language_a` analyzer
+
+| bcc.name
+| `keep_mail_and_url` analyzer -> `keep_mail_and_url_language_a` analyzer
+
+| textBody
+| `standard` analyzer -> `language_a` analyzer
+
+| htmlBody
+| `standard` analyzer -> `language_a` analyzer
+
+| attachments.fileName
+| `standard` analyzer -> `language_a` analyzer
+
+| attachments.textContent
+| `standard` analyzer -> `language_a` analyzer
+
+|===
+
+In there:
+
+ - `keep_mail_and_url` and `standard` are our current analyzers for mailbox index.
+ - `language_a` analyzer: the built-in analyzer of OpenSearch. EG: `french`
+ - `keep_mail_and_url_language_a` analyzer: a customization of the `keep_mail_and_url` analyzer with some language filters. Every language has
+its own filters, so please have a look at the filters your language needs to add. EG the filters which need to be added for French:
+----
+"filter": {
+ "french_elision": {
+ "type": "elision",
+ "articles_case": true,
+ "articles": [
+ "l", "m", "t", "qu", "n", "s",
+ "j", "d", "c", "jusqu", "quoiqu",
+ "lorsqu", "puisqu"
+ ]
+ },
+ "french_stop": {
+ "type": "stop",
+ "stopwords": "_french_"
+ },
+ "french_stemmer": {
+ "type": "stemmer",
+ "language": "light_french"
+ }
+}
+----
+
+After modifying above proposed change, you should have a JSON file that contains new setting and mapping of mailbox index. Here
+we provide https://github.com/apache/james-project/blob/master/mailbox/opensearch/example_french_index.json[a sample JSON for French language].
+If you want to customize that JSON file for your own language need, please make these modifications:
+
+ - Replace the `french` analyzer with your built-in language (have a look at https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-lang-analyzer.html[built-in language analyzers])
+ - Modify the `keep_mail_and_url_french` analyzer's filters with your language filters, and customize the analyzer's name.
+
+Please change also `number_of_shards`, `number_of_replicas` and `index.write.wait_for_active_shards` values in the sample file according to your need.
+
+Run this cURL command with the above JSON file to create the `mailbox_v1` index (the mailbox index's default name) before James starts:
+----
+curl -X PUT ES_IP:ES_PORT/mailbox_v1 -H "Content-Type: application/json" -d @example_french_index.json
+----
diff --git a/docs/modules/servers/partials/configure/pop3.adoc b/docs/modules/servers/partials/configure/pop3.adoc
new file mode 100644
index 00000000000..dc01589791f
--- /dev/null
+++ b/docs/modules/servers/partials/configure/pop3.adoc
@@ -0,0 +1,74 @@
+Consult this link:{sample-configuration-prefix-url}/pop3server.xml[example]
+to get some examples and hints.
+
+The POP3 service is controlled by a configuration block in the pop3server.xml.
+The pop3server tag defines the boundaries of the configuration block. It encloses
+all the relevant configuration for the POP3 server. The behavior of the POP service is
+controlled by the attributes and children of this tag.
+
+This tag has an optional boolean attribute - *enabled* - that defines whether the service is active or not.
+The value defaults to "true" if not present.
+
+The standard children of the pop3server tag are:
+
+.jmx.properties content
+|===
+| Property name | explanation
+
+| bind
+| Configure this to bind to a specific inetaddress. This is an optional integer value.
+This value is the port on which this POP3 server is configured
+to listen. If the tag or value is absent then the service
+will bind to all network interfaces for the machine. If the tag or value is omitted,
+the value will default to the standard POP3 port, 110.
+port 995 is the well-known/IANA registered port for POP3S ie over SSL/TLS
+port 110 is the well-known/IANA registered port for Standard POP3
+
+| connectionBacklog
+|
+
+| tls
+| Set to true to support STARTTLS or SSL for the Socket.
+To create a new keystore execute:
+`keytool -genkey -alias james -keyalg RSA -storetype PKCS12 -keystore /path/to/james/conf/keystore`
+Please note that each POP3 server exposed on different port can specify its own keystore, independently from any other
+TLS based protocols. Read xref:{pages-path}/configure/ssl.adoc[SSL configuration page] for more information.
+
+| handler.helloName
+| This is the name used by the server to identify itself in the POP3
+protocol. If autodetect is TRUE, the server will discover its
+own host name and use that in the protocol. If discovery fails,
+the value of 'localhost' is used. If autodetect is FALSE, James
+will use the specified value.
+
+| handler.connectiontimeout
+| Connection timeout in seconds
+
+| handler.connectionLimit
+| Set the maximum simultaneous incoming connections for this service
+
+| handler.connectionLimitPerIP
+| Set the maximum simultaneous incoming connections per IP for this service
+
+| handler.handlerchain
+| This loads the core CommandHandlers. Only remove this if you really know what you are doing.
+
+| bossWorkerCount
+| Set the maximum count of boss threads. Boss threads are responsible for accepting incoming POP3 connections
+and initializing associated resources. Optional integer, by default, boss threads are not used and this responsibility is being dealt with
+by IO threads.
+
+| ioWorkerCount
+| Set the maximum count of IO threads. IO threads are responsible for receiving incoming POP3 messages and framing them
+(split line by line). IO threads also take care of compression and SSL encryption. Their tasks are short-lived and non-blocking.
+Optional integer, defaults to 2 times the count of CPUs.
+
+| maxExecutorCount
+| Set the maximum count of worker threads. Worker threads takes care of potentially blocking tasks like executing POP3 requests. Optional integer, defaults to 16.
+
+| useEpoll
+| true or false - If true uses native EPOLL implementation for Netty otherwise uses NIO. Defaults to false.
+
+| gracefulShutdown
+| true or false - If true attempts a graceful shutdown, which is safer but can take time. Defaults to true.
+|===
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/queue.adoc b/docs/modules/servers/partials/configure/queue.adoc
new file mode 100644
index 00000000000..cbec12d7252
--- /dev/null
+++ b/docs/modules/servers/partials/configure/queue.adoc
@@ -0,0 +1,16 @@
+This configuration helps you configure mail queue you want to select.
+
+== Queue Configuration
+
+.queue.properties content
+|===
+| Property name | explanation
+
+| mail.queue.choice
+| Mail queue can be implemented by many types of message brokers: Pulsar, RabbitMQ,... This property will choose which mail queue you want, defaulting to RABBITMQ
+|===
+
+`mail.queue.choice` supports the following options:
+
+* You can specify the `RABBITMQ` if you want to choose RabbitMQ mail queue
+* You can specify the `PULSAR` if you want to choose Pulsar mail queue
diff --git a/docs/modules/servers/partials/configure/rabbitmq.adoc b/docs/modules/servers/partials/configure/rabbitmq.adoc
new file mode 100644
index 00000000000..689bb17a57c
--- /dev/null
+++ b/docs/modules/servers/partials/configure/rabbitmq.adoc
@@ -0,0 +1,162 @@
+This configuration helps you configure components using RabbitMQ.
+
+Consult this link:{sample-configuration-prefix-url}/rabbitmq.properties[example]
+to get some examples and hints.
+
+== RabbitMQ Configuration
+
+.rabbitmq.properties content
+|===
+| Property name | explanation
+
+| uri
+| the amqp URI pointing to RabbitMQ server. If you use a vhost, specify it as well at the end of the URI.
+Details about amqp URI format is in https://www.rabbitmq.com/uri-spec.html[RabbitMQ URI Specification]
+
+| management.uri
+| the URI pointing to RabbitMQ Management Service. James needs to retrieve some information about listing queues
+from this service at runtime.
+Details about URI format is in https://www.rabbitmq.com/management.html#usage-ui[RabbitMQ Management URI]
+
+| management.user
+| username used to access management service
+
+| management.password
+| password used to access management service
+
+| connection.pool.retries
+| Configure the retry count to retrieve a connection. Exponential backoff is performed between retries.
+Optional integer, defaults to 10
+
+| connection.pool.min.delay.ms
+| Configure the initial duration (in ms) between two connection retries. Exponential backoff is performed between retries.
+Optional integer, defaults to 100
+
+| channel.pool.retries
+| Configure the retry count to retrieve a channel. Exponential backoff is performed between retries.
+Optional integer, defaults to 3
+
+| channel.pool.max.delay.ms
+| Configure timeout duration (in ms) to obtain a rabbitmq channel. Defaults to 30 seconds.
+Optional integer, defaults to 30 seconds.
+
+| channel.pool.size
+| Configure the size of the channel pool.
+Optional integer, defaults to 3
+
+| driver.network.recovery.interval
+| Optional, non-negative integer, default to 100ms. The interval (in ms) that RabbitMQ driver will automatic recovery wait before attempting to reconnect. See https://www.rabbitmq.com/client-libraries/java-api-guide#connection-recovery
+
+| ssl.enabled
+| Is using ssl enabled
+Optional boolean, defaults to false
+
+| ssl.management.enabled
+| Is using ssl on management api enabled
+Optional boolean, defaults to false
+
+| ssl.validation.strategy
+| Configure the validation strategy used for rabbitmq connections. Possible values are default, ignore and override.
+Optional string, defaults to using systemwide ssl configuration
+
+| ssl.truststore
+| Points to the truststore (PKCS12) used for verifying rabbitmq connection. If configured then "ssl.truststore.password" must also be configured,
+Optional string, defaults to systemwide truststore. "ssl.validation.strategy: override" must be configured if you want to use this
+
+| ssl.truststore.password
+| Configure the truststore password. If configured then "ssl.truststore" must also be configured,
+Optional string, defaults to empty string. "ssl.validation.strategy: override" must be configured if you want to use this
+
+| ssl.hostname.verifier
+| Configure host name verification. Possible options are default and accept_any_hostname
+Optional string, defaults to subject alternative name host verifier
+
+| ssl.keystore
+| Points to the keystore(PKCS12) used for client certificate authentication. If configured then "ssl.keystore.password" must also be configured,
+Optional string, defaults to empty string
+
+| ssl.keystore.password
+| Configure the keystore password. If configured then "ssl.keystore" must also be configured,
+Optional string, defaults to empty string
+
+| quorum.queues.enable
+| Boolean. Whether to activate Quorum queue usage for all queues.
+Quorum queues enables high availability.
+False (default value) results in the usage of classic queues.
+
+| quorum.queues.replication.factor
+| Strictly positive integer. The replication factor to use when creating quorum queues.
+
+| quorum.queues.delivery.limit
+| Strictly positive integer. Value for x-delivery-limit queue parameter, default to none. Setting a delivery limit can
+prevent RabbitMQ outage if message processing fails. Read https://www.rabbitmq.com/docs/quorum-queues#poison-message-handling
+
+| hosts
+| Optional, default to the host specified as part of the URI.
+Allow creating cluster aware connections.
+A comma-separated list of hosts, example: hosts=ip1:5672,ip2:5672
+
+| mailqueue.publish.confirm.enabled
+| Whether or not to enable publish confirms for the mail queue. Optional boolean, defaults to true.
+
+| event.bus.publish.confirm.enabled
+| Whether or not to enable publish confirms for the event bus. Optional boolean, defaults to true.
+
+| event.bus.notification.durability.enabled
+| Whether or not the queue backing notifications should be durable. Optional boolean, defaults to true.
+
+| event.bus.propagate.dispatch.error
+| Whether to propagate errors back to the callers when eventbus fails to dispatch group events to RabbitMQ (then store the failed events in the event dead letters).
+Optional boolean, defaults to true.
+
+| vhost
+| Optional string. This parameter is only a workaround to support invalid URIs containing character like '_'.
+You still need to specify the vhost in the uri parameter.
+
+|===
+
+== Tuning RabbitMQ for quorum queue use
+
+While quorum queues are great at preserving your data and enabling High Availability, they demand more resources and
+a greater care than regular RabbitMQ queues.
+
+See link:https://www.rabbitmq.com/docs/quorum-queues#performance-tuning[this section of RabbitMQ documentation regarding RabbitMQ quorum queue performance tuning].
+
+ - Provide decent amount of RAM memory to RabbitMQ. 4GB is a good start.
+ - Setting a delivery limit is advised as looping messages can cause extreme memory consumptions onto quorum queues.
+ - Set up Raft for small messages:
+
+....
+raft.segment_max_entries = 32768
+....
+
+== RabbitMQ Tasks Configuration
+
+Tasks are WebAdmin triggered long running jobs. RabbitMQ is used to organise their execution in a work queue,
+with an exclusive consumer.
+
+.rabbitmq.properties content
+|===
+| Property name | explanation
+
+| task.consumption.enabled
+| Whether to enable task consumption on this node.
+Disable with caution (this only makes sense in a distributed setup where other nodes consume tasks).
+Defaults to true.
+
+Limitation: Sometimes, some tasks running on James can be very heavy and take a couple of hours to complete.
+If other tasks are being triggered meanwhile on WebAdmin, they go on the TaskManagerWorkQueue and James unack them,
+telling RabbitMQ it will consume them later. If they don't get consumed before the consumer timeout setup in
+RabbitMQ (default being 30 minutes), RabbitMQ closes the channel on an exception. It is thus advised to declare a
+longer timeout in rabbitmq.conf. More https://www.rabbitmq.com/consumers.html#acknowledgement-timeout[here].
+
+| task.queue.consumer.timeout
+| Task queue consumer timeout.
+
+Optional. Duration (support multiple time units cf `DurationParser`), defaults to 1 day.
+
+Required at least RabbitMQ version 3.12 to have effect.
+This is used to avoid the task queue consumer (which could run very long tasks) being disconnected by RabbitMQ after the default acknowledgement timeout 30 minutes.
+References: https://www.rabbitmq.com/consumers.html#acknowledgement-timeout.
+
+|===
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/recipientrewritetable.adoc b/docs/modules/servers/partials/configure/recipientrewritetable.adoc
new file mode 100644
index 00000000000..67edfe32ad5
--- /dev/null
+++ b/docs/modules/servers/partials/configure/recipientrewritetable.adoc
@@ -0,0 +1,15 @@
+Here are explanations on the different kinds about xref:{pages-path}/architecture/index.adoc#_recipient_rewrite_tables[recipient rewriting].
+
+Consult this link:{sample-configuration-prefix-url}/recipientrewritetable.xml[example]
+to get some examples and hints.
+
+.recipientrewritetable.xml
+|===
+| Property name | explanation
+
+| recursiveMapping
+| If set to false only the first mapping will get processed - Default true.
+
+| mappingLimit
+|By setting the mappingLimit you can specify how much mapping will get processed before a bounce will send. This avoids infinity loops. Default 10.
+|===
diff --git a/docs/modules/servers/partials/configure/redis.adoc b/docs/modules/servers/partials/configure/redis.adoc
new file mode 100644
index 00000000000..6b1fcfd2457
--- /dev/null
+++ b/docs/modules/servers/partials/configure/redis.adoc
@@ -0,0 +1,44 @@
+This configuration helps you configure components using Redis. This so far only includes optional rate limiting component.
+
+Consult this link:https://github.com/apache/james-project/blob/fabfdf4874da3aebb04e6fe4a7277322a395536a/server/mailet/rate-limiter-redis/redis.properties[example]
+to get some examples and hints.
+
+== Redis Configuration
+
+.redis.properties content
+|===
+| Property name | explanation
+
+| redisURL
+| the Redis URI pointing to Redis server. Compulsory.
+
+| redis.topology
+| Redis server topology. Defaults to standalone. Possible values: standalone, cluster, master-replica
+
+| redis.readFrom
+| The property to determine how Lettuce routes read operations to Redis server with topologies other than standalone. Defaults to master. Possible values: master, masterPreferred, replica, replicaPreferred, any
+
+Reference: https://github.com/redis/lettuce/wiki/ReadFrom-Settings
+
+| redis.ioThreads
+| IO threads to be used for the underlying Netty networking resources. If unspecified, the driver defaults apply.
+
+| redis.workerThreads
+| Worker threads to be used for the underlying driver. If unspecified, the driver defaults apply.
+|===
+
+== Enabling Multithreading in Redis
+
+Redis 6 and later versions support multithreading, but by default, Redis operates as a single-threaded process.
+
+On a virtual machine with multiple CPU cores, you can enhance Redis performance by enabling multithreading. This can significantly improve I/O operations, particularly for workloads with high concurrency or large data volumes.
+
+See link:https://redis.io/docs/latest/operate/oss_and_stack/management/config-file/[THREADED I/O section].
+
+Example if you have a 4 cores CPU, you can enable the following lines in the `redis.conf` file:
+....
+io-threads 3
+io-threads-do-reads yes
+....
+
+However, if your machine has only 1 CPU core or your Redis usage is not intensive, you will not benefit from this.
diff --git a/docs/modules/servers/partials/configure/remote-delivery-error-handling.adoc b/docs/modules/servers/partials/configure/remote-delivery-error-handling.adoc
new file mode 100644
index 00000000000..25d7c121bcc
--- /dev/null
+++ b/docs/modules/servers/partials/configure/remote-delivery-error-handling.adoc
@@ -0,0 +1,117 @@
+The advanced server mailQueue implemented by combining RabbitMQ for messaging and {mailet-repository-path-prefix} for administrative operation
+does not support delays.
+
+Delays are an important feature for Mail Exchange servers, allowing to defer in time the retries, potentially letting the
+time for the remote server to recover. Furthermore, they enable implementation of advanced features like throttling and
+rate limiting of emails sent to a given domain.
+
+As such, the use of the distributed server as a Mail Exchange server is currently discouraged.
+
+However, for operators willing to inter-operate with a limited set of well-identified, trusted remote mail servers, such
+limitation can be reconsidered. The main concern then becomes error handling for remote mail server failures. The following
+document will present a well tested strategy for Remote Delivery error handling leveraging standards Mail Processing components
+and mechanisms.
+
+== Expectations
+
+Such a solution should:
+
+- Attempt delivery a single time
+- Store transient and permanent failure in different mail repositories
+- After a given number of tries, transient failures should be considered permanent
+
+== Design
+
+image::remote-delivery-error-handling.png[Schema detailing the proposed solution]
+
+- Remote Delivery is configured for performing a single retry.
+- Remote Delivery attaches the error code and if the failure is permanent/temporary when transferring failed emails to the
+bounce processor.
+- The specified bounce processor will categorise the failure, and store temporary and permanent failures in different
+mail repositories.
+- A reprocessing of the temporary delivery errors mailRepository needs to be scheduled on a recurring basis. For
+instance via a CRON job calling the right webadmin endpoint.
+- A counter ensures that a configured number of delivery tries is not exceeded.
+
+=== Limitation
+
+MailRepositories are not meant for transient data storage, and thus are prone to tombstone issues.
+
+This might be acceptable if you need to send mail to well-known peers. For instance handling your mail gateway failures.
+However a Mail Exchange server doing relay on the internet would quickly hit this limitation.
+
+Also note that external triggering of the retry process is needed.
+
+== Operation
+
+Here is an example of configuration achieving the proposed solution:
+
+[subs=attributes+,xml]
+----
+
+
+
+ outgoing
+ 0
+ 0
+ 10
+ true
+
+ remote-delivery-error
+
+
+
+ {mailet-repository-path-prefix}://var/mail/error/remote-delivery/permanent/
+
+
+
+
+
+
+ {mailet-repository-path-prefix}://var/mail/error/remote-delivery/temporary/
+
+
+
+ {mailet-repository-path-prefix}://var/mail/error/remote-delivery/permanent/
+
+
+
+ {mailet-repository-path-prefix}://var/mail/error/
+
+
+----
+
+Note:
+
+- The *relay* processor holds a RemoteDelivery mailet configured to do a single try, at most 5 times (see the AtMost matcher).
+Mails exceeding the AtMost condition are considered as permanent delivery errors. Delivery errors are sent to the
+*remote-delivery-error* processor.
+- The *remote-delivery-error* stores temporary and permanent errors.
+- Permanent relay errors are stored in `{mailet-repository-path-prefix}://var/mail/error/remote-delivery/permanent/`.
+- Temporary relay errors are stored in `{mailet-repository-path-prefix}://var/mail/error/remote-delivery/temporary/`.
+
+In order to retry the relay of temporary failed emails, operators will have to configure a cron job for reprocessing
+emails from *{mailet-repository-path-prefix}://var/mail/error/remote-delivery/temporary/* mailRepository into the *relay* processor.
+
+This can be achieved via the following webAdmin call :
+
+[subs=attributes+]
+----
+curl -XPATCH 'http://ip:8000/mailRepositories/{mailet-repository-path-prefix}%3A%2F%2Fvar%2Fmail%2Ferror%2Fremote-delivery%2Ftemporary%2F/mails?action=reprocess&processor=relay'
+----
+
+See xref:{pages-path}/operate/webadmin.adoc#_reprocessing_mails_from_a_mail_repository[the documentation].
+
+Administrators need to keep a close eye on permanent errors (that might require audit, and potentially contacting the remote
+service supplier).
+
+To do so, one should regularly audit the content of *{mailet-repository-path-prefix}://var/mail/error/remote-delivery/permanent/*. This can be done
+via webAdmin calls:
+
+[subs=attributes+]
+----
+curl -XGET 'http://ip:8000/mailRepositories/{mailet-repository-path-prefix}%3A%2F%2Fvar%2Fmail%2Ferror%2Fremote-delivery%2Fpermanent%2F/mails'
+----
+
+See xref:{pages-path}/operate/webadmin.adoc#_listing_mails_contained_in_a_mail_repository[the documentation].
diff --git a/docs/modules/servers/partials/configure/search.adoc b/docs/modules/servers/partials/configure/search.adoc
new file mode 100644
index 00000000000..239e266c21e
--- /dev/null
+++ b/docs/modules/servers/partials/configure/search.adoc
@@ -0,0 +1,15 @@
+This configuration helps you configure the components used to back search.
+
+.search.properties content
+|===
+| Property name | explanation
+
+| implementation
+| The implementation to be used for search. Should be one of:
+ - *opensearch* : Index and search mails into OpenSearch.
+ - *scanning* : Do not index documents and perform scanning search, scrolling mailbox for matching contents.
+ This implementation can have a prohibitive cost.
+ - *opensearch-disabled* : Saves events to index into event dead letter. Makes searches fail.
+ This is useful to start James without OpenSearch while still tracking messages to index for later recovery. This
+ can be used in order to ease delays for disaster recovery action plans.
+|===
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/sieve.adoc b/docs/modules/servers/partials/configure/sieve.adoc
new file mode 100644
index 00000000000..7ecd4c452f7
--- /dev/null
+++ b/docs/modules/servers/partials/configure/sieve.adoc
@@ -0,0 +1,89 @@
+James servers are able to evaluate and execute Sieve scripts.
+
+Sieve is an extensible mail filtering language. Its limited
+expressiveness (no loops or variables, no tests with side
+effects) allows user created scripts to be run safely on email
+servers. Sieve is targeted at the final delivery phase (where
+an incoming email is transferred to a user's mailbox).
+
+The following Sieve capabilities are supported by Apache James:
+
+ - link:https://www.ietf.org/rfc/rfc2234.txt[RFC 2234 ABNF]
+ - link:https://www.ietf.org/rfc/rfc2244.txt[RFC 2244 ACAP]
+ - link:https://www.ietf.org/rfc/rfc2298.txt[RFC 2298 MDN]
+ - link:https://tools.ietf.org/html/rfc5228[RFC 5228 Sieve]
+ - link:https://tools.ietf.org/html/rfc4790[RFC 4790 IAPCR]
+ - link:https://tools.ietf.org/html/rfc5173[RFC 5173 Body Extension]
+ - link:https://datatracker.ietf.org/doc/html/rfc5230[RFC 5230 Vacations]
+
+To be correctly executed, please note that the *Sieve* mailet is required to be positioned prior to the
+*LocalDelivery* mailet.
+
+== Managing Sieve scripts
+
+A user willing to manage his Sieve scripts on the server can do so via several means:
+
+He can ask an admin to upload his script via the xref:{pages-path}/operate/cli.adoc[CLI]
+
+As James supports ManageSieve (link:https://datatracker.ietf.org/doc/html/rfc5804[RFC-5804]) a user
+can thus use compatible software to manage his Sieve scripts.
+
+== ManageSieve protocol
+
+*WARNING*: ManageSieve protocol should be considered experimental.
+
+Consult link:{sample-configuration-prefix-url}/managesieveserver.xml[managesieveserver.xml]
+in GIT to get some examples and hints.
+
+The service is controlled by a configuration block in the managesieveserver.xml.
+The managesieveserver tag defines the boundaries of the configuration block. It encloses
+all the relevant configuration for the ManageSieve server. The behavior of the ManageSieve service is
+controlled by the attributes and children of this tag.
+
+This tag has an optional boolean attribute - *enabled* - that defines whether the service is active or not.
+The value defaults to "false" if
+not present.
+
+The standard children of the managesieveserver tag are:
+
+.managesieveserver.xml content
+|===
+| Property name | explanation
+
+| bind
+| Configure this to bind to a specific inetaddress. This is an optional integer value. This value is the port on which this ManageSieve server is configured to listen. If the tag or value is absent then the service
+will bind to all network interfaces for the machine. If the tag or value is omitted, the value will default to the standard ManageSieve port (port 4190 is the well-known/IANA registered port for ManageSieve).
+
+| tls
+| Set to true to support STARTTLS or SSL for the Socket.
+To use this you need to copy sunjce_provider.jar to /path/james/lib directory. To create a new keystore execute:
+`keytool -genkey -alias james -keyalg RSA -storetype PKCS12 -keystore /path/to/james/conf/keystore`.
+Please note that each ManageSieve server exposed on different port can specify its own keystore, independently from any other
+TLS based protocols.
+
+| connectionBacklog
+| Number of connection backlog of the server (maximum number of queued connection requests)
+
+| connectiontimeout
+| Connection timeout in seconds
+
+| connectionLimit
+| Set the maximum simultaneous incoming connections for this service
+
+| connectionLimitPerIP
+| Set the maximum simultaneous incoming connections per IP for this service
+
+| bossWorkerCount
+| Set the maximum count of boss threads. Boss threads are responsible for accepting incoming ManageSieve connections
+and initializing associated resources. Optional integer, by default, boss threads are not used and this responsibility is being dealt with
+by IO threads.
+
+| ioWorkerCount
+| Set the maximum count of IO threads. IO threads are responsible for receiving incoming ManageSieve messages and framing them
+(split line by line). IO threads also take care of compression and SSL encryption. Their tasks are short-lived and non-blocking.
+Optional integer, defaults to 2 times the count of CPUs.
+
+| maxExecutorCount
+| Set the maximum count of worker threads. Worker threads takes care of potentially blocking tasks like executing ManageSieve commands.
+Optional integer, defaults to 16.
+|===
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/smtp-hooks.adoc b/docs/modules/servers/partials/configure/smtp-hooks.adoc
new file mode 100644
index 00000000000..d2ba36c2718
--- /dev/null
+++ b/docs/modules/servers/partials/configure/smtp-hooks.adoc
@@ -0,0 +1,382 @@
+This documentation page lists and documents SMTP hooks that can be used within the
+{server-name} SMTP protocol stack in order to customize the way your SMTP server
+behaves without of the box components.
+
+== DNSRBLHandler
+
+This command handler check against https://www.wikiwand.com/en/Domain_Name_System-based_Blackhole_List[RBL-Lists]
+(Real-time Blackhole List).
+
+If getDetail is set to true, it tries to retrieve information from the TXT record
+explaining why the IP was blocked. Defaults to false.
+
+Before you enable the DNS RBL handler documented as an example below,
+please take a moment to review each block in the list.
+We have included some that various JAMES committers use,
+but you must decide which, if any, are appropriate
+for your environment.
+
+The mail servers hosting
+@apache.org mailing lists, for example, use a
+slightly different list than we have included below.
+And it is likely that most JAMES committers also have
+slightly different sets of lists.
+
+The SpamAssassin user's list would be one good place to discuss the
+measured quality of various block lists.
+
+NOTA BENE: the domain names, below, are terminated
+with '.' to ensure that they are absolute names in
+DNS lookups. Under some circumstances, names that
+are not explicitly absolute could be treated as
+relative names, leading to incorrect results. This
+has been observed on *nix and MS-Windows platforms
+by users of multiple mail servers, and is not JAMES
+specific. If you are unsure what this means for you,
+please speak with your local system/network admins.
+
+This handler should be considered experimental.
+
+Example configuration:
+
+[source,xml]
+....
+
+
+
+ false
+
+ query.bondedsender.org.
+ sbl-xbl.spamhaus.org.
+ dul.dnsbl.sorbs.net.
+ list.dsbl.org.
+
+
+
+....
+
+== DSN hooks
+
+The {server-name} has optional support for DSN (link:https://tools.ietf.org/html/rfc3461[RFC-3461])
+
+Please read carefully xref:{pages-path}/configure/dsn.adoc[this page].
+
+[source,xml]
+....
+
+ <...>
+
+
+
+
+
+ <...>
+
+
+
+....
+
+Note that a specific configuration of xref:{pages-path}/configure/mailetcontainer.adoc[mailetcontainer.xml] is
+required as well to be spec compliant.
+
+== MailPriorityHandler
+
+This handler can add a hint to the mail which tells the MailQueue which email should get processed first.
+
+Normally the MailQueue will just handle Mails in FIFO manner.
+
+Valid priority values are 1,5,9 where 9 is the highest.
+
+This handler should be considered experimental.
+
+Example configuration:
+
+[source,xml]
+....
+
+
+
+
+
+ yourdomain1
+ 1
+
+
+ yourdomain2
+ 9
+
+
+
+
+....
+
+== MaxRcptHandler
+If activated you can limit the maximum number of recipients.
+
+This handler should be considered experimental.
+
+Example configuration:
+
+[source,xml]
+....
+
+
+
+ 10
+
+
+....
+
+== POP3BeforeSMTPHandler
+
+This connect handler can be used to enable POP3 before SMTP support.
+
+Please note that only the IP address gets stored to identify an authenticated client.
+
+The expireTime is the time after which an ipAddress is handled as expired.
+
+This handler should be considered as unsupported.
+
+Example configuration:
+
+[source,xml]
+....
+
+
+
+ 1 hour
+
+
+....
+
+== ResolvableEhloHeloHandler
+
+Checks for a resolvable HELO/EHLO before accepting the HELO/EHLO.
+
+If checkAuthNetworks is set to true sender domain will be checked also for clients that
+are allowed to relay. Default is false.
+
+This handler should be considered experimental.
+
+Example configuration:
+
+[source,xml]
+....
+
+
+
+
+....
+
+== ReverseEqualsEhloHeloHandler
+
+Checks that the HELO/EHLO is equal to the reverse DNS entry of the connecting client before accepting it.
+If checkAuthNetworks is set to true sender domain will be checked also for clients that
+are allowed to relay. Default is false.
+
+This handler should be considered experimental.
+
+Example configuration:
+
+[source,xml]
+....
+
+
+
+
+....
+
+== SetMimeHeaderHandler
+
+This handler allows you to add mime headers to the processed mails.
+
+This handler should be considered experimental.
+
+Example configuration:
+
+[source,xml]
+....
+
+
+
+ SPF-test
+ passed
+
+
+....
+
+== SpamAssassinHandler
+
+This MessageHandler could be used to check messages against spamd before
+accepting the email. So it's possible to reject a message at the SMTP level if a
+configured hits amount is reached.
+
+This handler should be considered experimental.
+
+Example configuration:
+
+[source,xml]
+....
+
+
+
+ 127.0.0.1
+ 783
+ 10
+
+
+....
+
+== SPFHandler
+
+This command handler can be used to reject emails which do not match the SPF record of the sender domain.
+
+If checkAuthNetworks is set to true sender domain will be checked also for clients that
+are allowed to relay. Default is false.
+
+This handler should be considered experimental.
+
+Example configuration:
+
+[source,xml]
+....
+
+
+
+ false
+ true
+
+
+....
+
+== URIRBLHandler
+
+This MessageHandler could be used to extract domains out of the message and check
+these domains against URI RBL lists. See http://www.surbl.org for more information.
+The message gets rejected if a domain matches.
+
+This handler should be considered experimental.
+
+Example configuration:
+
+[source,xml]
+....
+
+
+
+ reject
+ true
+
+ multi.surbl.org
+
+
+
+....
+
+== ValidRcptHandler
+
+With ValidRcptHandler, all emails which have no valid user will get rejected.
+
+You need to add the recipient to the validRecipient list if you want
+to accept email for a recipient which does not exist on the server.
+
+If you want James to act as a spamtrap or honeypot, you may comment ValidRcptHandler
+and implement the needed processors in spoolmanager.xml.
+
+This handler should be considered stable.
+
+Example configuration:
+
+[source,xml]
+....
+
+
+
+
+....
+
+== ValidSenderDomainHandler
+
+If activated mail is only accepted if the sender contains
+a resolvable domain having a valid MX Record or A Record associated!
+
+If checkAuthNetworks is set to true sender domain will be checked also for clients that
+are allowed to relay. Default is false.
+
+Example configuration:
+
+[source,xml]
+....
+
+
+
+
+....
+
+== FUTURERELEASE hooks
+
+The {server-name} has optional support for FUTURERELEASE (link:https://www.rfc-editor.org/rfc/rfc4865.html[RFC-4865])
+
+[source,xml]
+....
+
+ <...>
+
+
+
+
+
+
+....
+
+== Message Transfer Priorities hooks
+
+The Distributed server has optional support for SMTP Extension for Message Transfer Priorities (link:https://www.rfc-editor.org/rfc/rfc6710.html[RFC-6710])
+
+The SMTP server does not allow positive priorities from unauthorized sources and sets the priority to the default value (0).
+
+....
+
+ <...>
+
+
+
+
+
+
+
+....
+
+== DKIM checks hooks
+
+Hook for verifying DKIM signatures of incoming mails.
+
+This hook can be restricted to specific sender domains and authenticate those emails against
+their DKIM signature. Given a signed outgoing traffic this hook can use operators to accept legitimate
+emails emitted by their infrastructure but redirected without envelope changes to their own domains by
+some intermediate third parties. See link:https://issues.apache.org/jira/browse/JAMES-4032[JAMES-4032].
+
+Supported configuration elements:
+
+- *forceCRLF*: Should CRLF be forced when computing body hashes.
+- *onlyForSenderDomain*: If specified, the DKIM checks are applied just for the emails whose MAIL FROM specifies this domain. If unspecified, all emails are checked (default).
+- *signatureRequired*: If DKIM signature is checked, the absence of signature will generate failure. Defaults to false.
+- *expectedDToken*: If DKIM signature is checked, the body should contain at least one DKIM signature with this d token. If unspecified, all d tokens are considered valid (default).
+
+Example handlerchain configuration for `smtpserver.xml`:
+
+[source,xml]
+....
+
+
+ true
+ apache.org
+ true
+ apache.org
+
+
+
+....
+
+Would allow emails using `apache.org` as a MAIL FROM domain if, and only if they contain a
+valid DKIM signature for the `apache.org` domain.
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/smtp.adoc b/docs/modules/servers/partials/configure/smtp.adoc
new file mode 100644
index 00000000000..d2d8d519c4e
--- /dev/null
+++ b/docs/modules/servers/partials/configure/smtp.adoc
@@ -0,0 +1,315 @@
+== Incoming SMTP
+
+Consult this link:{sample-configuration-prefix-url}/smtpserver.xml[example]
+to get some examples and hints.
+
+The SMTP service is controlled by a configuration block in the smtpserver.xml.
+The smtpserver tag defines the boundaries of the configuration block. It encloses
+all the relevant configuration for the SMTP server. The behavior of the SMTP service is
+controlled by the attributes and children of this tag.
+
+This tag has an optional boolean attribute - *enabled* - that defines whether the service is active or not. The value defaults to "true" if
+not present.
+
+The standard children of the smtpserver tag are:
+
+.smtpserver.xml content
+|===
+| Property name | explanation
+
+| bind
+| A list of address:port separated by commas - This is an optional value. If present, this value is a string describing
+the IP address to which this service should be bound. If the tag or value is absent then the service
+will bind to all network interfaces for the machine on port 25. Port 25 is the well-known/IANA registered port for SMTP.
+Port 465 is the well-known/IANA registered port for SMTP over TLS.
+
+| connectBacklog
+|The maximum number of queued connection requests (connection backlog) for this service.
+
+| tls
+| Set to true to support STARTTLS or SSL for the Socket.
+To use this you need to copy sunjce_provider.jar to /path/james/lib directory. To create a new keystore execute:
+`keytool -genkey -alias james -keyalg RSA -storetype PKCS12 -keystore /path/to/james/conf/keystore`.
+The algorithm is optional and only needs to be specified when using something other
+than the Sun JCE provider - You could use IbmX509 with IBM Java runtime.
+Please note that each SMTP/LMTP server exposed on different port can specify its own keystore, independently from any other
+TLS based protocols.
+
+| helloName
+| This is a required tag with an optional body that defines the server name
+used in the initial service greeting. The tag may have an optional attribute - *autodetect*. If
+the autodetect attribute is present and true, the service will use the local hostname
+returned by the Java libraries. If autodetect is absent or false, the body of the tag will be used. In
+this case, if no body is present, the value "localhost" will be used.
+
+| connectionTimeout
+| This is an optional tag with a non-negative integer body. Connection timeout in seconds.
+
+| connectionLimit
+| Set the maximum simultaneous incoming connections for this service.
+
+| connectionLimitPerIP
+| Set the maximum simultaneous incoming connections per IP for this service.
+
+| proxyRequired
+| Enables proxy support for this service for incoming connections. HAProxy's protocol
+(https://www.haproxy.org/download/2.7/doc/proxy-protocol.txt) is used and might be compatible
+with other proxies (e.g. traefik). If enabled, it is *required* to initiate the connection
+using HAProxy's proxy protocol.
+
+| authRequired
+| (deprecated) use auth.announce instead.
+
+This is an optional tag with a boolean body. If true, then the server will
+announce authentication after HELO command. If this tag is absent, or the value
+is false then the client will not be prompted for authentication. Only simple user/password authentication is
+supported at this time. Supported values:
+
+ * true: announced only to not authorizedAddresses
+
+ * false: don't announce AUTH. If absent, *authorizedAddresses* are set to a wildcard to accept all remote hosts.
+
+ * announce: like true, but always announce AUTH capability to clients
+
+Please note that emails are only relayed if, and only if, the user did authenticate, or is in an authorized network,
+regardless of this option.
+
+| auth.announce
+| This is an optional tag. Possible values are:
+
+* never: Don't announce auth.
+
+* always: always announce AUTH capability to clients.
+
+* forUnauthorizedAddresses: announced only to not authorizedAddresses
+
+Please note that emails are only relayed if, and only if, the user did authenticate, or is in an authorized network,
+regardless of this option.
+
+| auth.requireSSL
+| This is an optional tag, defaults to true. If true, authentication is not advertised via capabilities on unencrypted
+channels.
+
+| auth.plainAuthEnabled
+| This is an optional tag, defaults to true. If false, AUTH PLAIN and AUTH LOGIN will not be exposed. This setting
+can be used to enforce strong authentication mechanisms.
+
+| auth.oidc.oidcConfigurationURL
+| Provide OIDC url address for information to user. Only configure this when you want to authenticate SMTP server using a OIDC provider.
+
+| auth.oidc.jwksURL
+| Provide url to get OIDC's JSON Web Key Set to validate user token. Only configure this when you want to authenticate SMTP server using a OIDC provider.
+
+| auth.oidc.claim
+| Claim string uses to identify user. E.g: "email_address". Only configure this when you want to authenticate SMTP server using a OIDC provider.
+
+| auth.oidc.scope
+| An OAuth scope that is valid to access the service (RF: RFC7628). Only configure this when you want to authenticate SMTP server using a OIDC provider.
+
+| auth.oidc.introspection.url
+| Optional. An OAuth introspection token URL will be called to validate the token (RF: RFC7662).
+Only configure this when you want to validate the revocation token by the OIDC provider.
+Note that James always verifies the signature of the token even whether this configuration is provided or not.
+
+| auth.oidc.introspection.auth
+| Optional. Provide Authorization in header request when introspecting token.
+Eg: `Basic xyz`
+
+| auth.oidc.userinfo.url
+| Optional. An Userinfo URL will be called to validate the token (RF: OpenId.Core https://openid.net/specs/openid-connect-core-1_0.html).
+Only configure this when you want to validate the revocation token by the OIDC provider.
+Note that James always verifies the signature of the token even whether this configuration is provided or not.
+James will ignore check token by userInfo if the `auth.oidc.introspection.url` is already configured
+
+| authorizedAddresses
+| Authorize specific addresses/networks.
+
+If you use SMTP AUTH, addresses that match those specified here will
+be permitted to relay without SMTP AUTH. If you do not use SMTP
+AUTH, and you specify addresses here, then only addresses that match
+those specified will be permitted to relay.
+
+Addresses may be specified as a IP address or domain name, with an
+optional netmask, e.g.,
+
+127.*, 127.0.0.0/8, 127.0.0.0/255.0.0.0, and localhost/8 are all the same
+
+See also the RemoteAddrNotInNetwork matcher in the transport processor.
+You would generally use one OR the other approach.
+
+| verifyIdentity
+| This is an optional tag. This options governs MAIL FROM verifications, and prevents spoofing of the MAIL FROM
+envelop field.
+
+The following values are supported:
+
+ - `strict`: use of a local domain in MAIL FROM requires the SMTP client to be authenticated with a matching user or one
+ of its aliases. It will verify that the sender address matches the address of the user or one of its alias (from user or domain aliases).
+ This prevents a user of your mail server from acting as someone else
+ - `disabled`: no check is performed and third party are free to send emails as local users. Note that relaying emails will
+ need third party to be authenticated thus preventing open relays.
+ - `relaxed`: Based on a simple heuristic to determine if the SMTP client is a MUA or a MX (use of a valid domain in EHLO),
+ we do act as `strict` for MUAs thus prompting them early for the need of authentication, but accept use of local MAIL FROM for
+ MX. Authentication can then be delayed to later, eg after DATA transaction with the DKIMHook which might allow email looping through
+ third party domains via mail redirection, effectively enforcing that the mail originates from our servers. See
+ link:https://issues.apache.org/jira/browse/JAMES-4032[JAMES-4032] for detailed explanation.
+
+Backward compatibility is provided and thus the following values are supported:
+
+ - `true`: act as `strict`
+ - `false`: act as `disabled`
+
+| maxmessagesize
+| This is an optional tag with a non-negative integer body. It specifies the maximum
+size, in kbytes, of any message that will be transmitted by this SMTP server. It is a service-wide, as opposed to
+a per user, limit. If the value is zero then there is no limit. If the tag isn't specified, the service will
+default to an unlimited message size. Must be a positive integer, optionally with a unit: B, K, M, G.
+
+| heloEhloEnforcement
+| This sets whether to enforce the use of HELO/EHLO salutation before a
+MAIL command is accepted. If unspecified, the value defaults to true.
+
+| smtpGreeting
+| This sets the SMTPGreeting which will be used when connect to the smtpserver
+If none is specified a default is generated
+
+| handlerchain
+| The configuration handler chain. See xref:{pages-path}/configure/smtp-hooks.adoc[this page] for configuring out-of the
+box extra SMTP handlers and hooks.
+
+| bossWorkerCount
+| Set the maximum count of boss threads. Boss threads are responsible for accepting incoming SMTP connections
+and initializing associated resources. Optional integer, by default, boss threads are not used and this responsibility is being dealt with
+by IO threads.
+
+| ioWorkerCount
+| Set the maximum count of IO threads. IO threads are responsible for receiving incoming SMTP messages and framing them
+(split line by line). IO threads also take care of compression and SSL encryption. Their tasks are short-lived and non-blocking.
+Optional integer, defaults to 2 times the count of CPUs.
+
+| maxExecutorCount
+| Set the maximum count of worker threads. Worker threads takes care of potentially blocking tasks like executing SMTP commands.
+Optional integer, defaults to 16.
+
+| useEpoll
+| true or false - If true uses native EPOLL implementation for Netty otherwise uses NIO. Defaults to false.
+
+| gracefulShutdown
+| true or false - If true attempts a graceful shutdown, which is safer but can take time. Defaults to true.
+
+| disabledFeatures
+| Extended SMTP features to hide in EHLO responses.
+|===
+
+=== OIDC setup
+James SMTP support XOAUTH2 authentication mechanism which allow authenticating against a OIDC providers.
+Please configure `auth.oidc` part to use this.
+
+We do supply an link:https://github.com/apache/james-project/tree/master/examples/oidc[example] of such a setup.
+It uses the Keycloak OIDC provider, but usage of similar technologies is definitely doable.
+
+== About open relays
+
+Authenticated SMTP is a method of securing your SMTP server. With SMTP AUTH enabled senders who wish to
+relay mail through the SMTP server (that is, send mail that is eventually to be delivered to another SMTP
+server) must authenticate themselves to Apache James Server before sending their message. Mail that is to be delivered
+locally does not require authentication. This method ensures that spammers cannot use your SMTP server
+to send unauthorized mail, while still enabling users who may not have fixed IP addresses to send their
+messages.
+
+Mail servers that allow spammers to send unauthorized email are known as open relays. So SMTP AUTH
+is a mechanism for ensuring that your server is not an open relay.
+
+It is extremely important that your server not be configured as an open relay. Aside from potential
+costs associated with usage by spammers, connections from servers that are determined to be open relays
+are routinely rejected by SMTP servers. This can severely impede the ability of your mail server to
+send mail.
+
+At this time Apache James Server only supports simple user name / password authentication.
+
+As mentioned above, SMTP AUTH requires that Apache James Server be able to distinguish between mail intended
+for local delivery and mail intended for remote delivery. Apache James Server makes this determination by matching the
+domain to which the mail was sent against the *DomainList* component, configured by
+xref:{pages-path}/configure/domainlist.adoc[*domainlist.xml*].
+
+The {server-name} is configured out of the box so as to not serve as an open relay for spammers. This is done
+by ensuring relayed emails originate from a trusted source. This includes:
+
+* Authenticated SMTP/JMAP users
+* Mails generated by the server (eg: bounces)
+* Mails originating from a trusted network as configured in *smtpserver.xml*
+
+If you wish to ensure that authenticated users can only send email from their own account, you may
+optionally set the verifyIdentity element of the smtpserver configuration block to "true".
+
+=== Verification
+
+Verify that you have not inadvertently configured your server as an open relay. This is most easily
+accomplished by using the service provided at https://mxtoolbox.com/diagnostic.aspx[mxtoolbox.com]. mxtoolbox.com will
+check your mail server and inform you if it is an open relay. This tool further more verifies additional properties like:
+
+* Your DNS configuration, especially that you mail server IP has a valid reverse DNS entry
+* That your SMTP connection is secured
+* That you are not an OpenRelay
+* This website also allow a quick lookup to ensure your mail server is not in public blacklists.
+
+Of course it is also necessary to confirm that users can log in and send
+mail through your server. This can be accomplished using any standard mail client (i.e. Thunderbird, Outlook,
+Eudora, Evolution).
+
+== LMTP Configuration
+
+Consult this link:{sample-configuration-prefix-url}/lmtpserver.xml[example]
+to get some examples and hints.
+
+The configuration is the same as for SMTP.
+
+By default, it is deactivated. You can activate it alongside SMTP and bind for example on port 24.
+
+The default LMTP server stores directly emails in user mailboxes, without further treatment.
+
+However we do ship an alternative handler chain allowing to execute the mailet container, thus achieving a behaviour similar
+to the default SMTP protocol. Here is how to achieve this:
+
+[source,xml]
+....
+
+
+ lmtpserver
+ 0.0.0.0:24
+ 200
+ 1200
+ 0
+ 0
+ 0
+
+
+
+
+
+....
+
+Note that by default the mailet container is executed with all recipients at once and do not allow per recipient
+error reporting. An option splitExecution allows executing the mailet container for each recipient separately and mitigates this
+limitation at the cost of performance.
+
+[source,xml]
+....
+
+
+ lmtpserver
+ 0.0.0.0:24
+ 200
+ 1200
+ 0
+ 0
+ 0
+
+
+
+ true
+
+
+
+
+....
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/spam.adoc b/docs/modules/servers/partials/configure/spam.adoc
new file mode 100644
index 00000000000..5e5b8b2d6f0
--- /dev/null
+++ b/docs/modules/servers/partials/configure/spam.adoc
@@ -0,0 +1,191 @@
+Anti-Spam system can be configured via two main different mechanisms:
+
+* SMTP Hooks;
+* Mailets;
+
+== AntiSpam SMTP Hooks
+
+"FastFail" SMTP Hooks acts to reject before spooling
+on the SMTP level. The Spam detector hook can be used as a fastfail hook, therefore
+Spam filtering system must run as a server on the same machine as the Apache James Server.
+
+SMTP Hooks for non-existent users, DSN filter, domains with invalid MX record,
+can also be configured.
+
+*SpamAssassinHandler* (experimental) also enables classifying the messages as spam or not
+with a fixed score threshold (`0.0`, non-configurable). Only a global database is supported. Per-user spam
+detection is not supported by this hook.
+
+== AntiSpam Mailets
+
+James' repository provide two AntiSpam mailets: SpamAssassin and RspamdScanner.
+We can select one of them for filtering spam mail.
+
+* *SpamAssassin and RspamdScanner* Mailet is designed to classify the messages as spam or not
+with a configurable score threshold. Usually a message will only be
+considered as spam if it matches multiple criteria; matching just a single test
+will not usually be enough to reach the threshold. Note that this mailet is executed on a per-user basis.
+
+=== Rspamd
+
+The Rspamd extension (optional) requires an extra configuration file `rspamd.properties` to configure RSpamd connection
+
+.rspamd.properties content
+|===
+| Property name | explanation
+
+| rSpamdUrl
+| URL defining the Rspamd's server. Eg: http://rspamd:11334
+
+| rSpamdPassword
+| Password for pass authentication when request to Rspamd's server. Eg: admin
+
+| rspamdTimeout
+| Integer. Timeout for http requests to Rspamd. Default to 15 seconds.
+
+| perUserBayes
+| Boolean. Whether to scan/learn mails using per-user Bayes. Default to false.
+|===
+
+`RspamdScanner` supports the following options:
+
+* You can specify the `virusProcessor` if you want to enable virus scanning for mail. Upon configuring `virusProcessor`
+you can specify how James processes mail viruses. We provide a sample Rspamd mailet and `virusProcessor` configuration:
+
+* You can specify the `rejectSpamProcessor`. Emails marked as `rejected` by Rspamd will be redirected to this
+processor. This corresponds to emails with the highest spam score, thus delivering them to users as marked as spam
+might not even be desirable.
+
+* The `rewriteSubject` option allows to rewrite subjects when asked by Rspamd.
+
+This mailet can scan mails against per-user Bayes by configuring `perUserBayes` in `rspamd.properties`. This is achieved
+through the use of the Rspamd `Deliver-To` HTTP header. If true, Rspamd will be called for each recipient of the mail, which comes at a performance cost. If true, subjects are not rewritten.
+If true, `virusProcessor` and `rejectSpamProcessor` are honored per user, at the cost of email copies. Defaults to false.
+
+Here is an example of a mailet pipeline carrying out RspamdScanner execution:
+
+[subs=attributes+,xml]
+----
+
+
+ true
+ virus
+ spam
+
+
+ Spam
+
+
+
+
+
+
+
+ file://var/mail/virus/
+
+
+
+
+
+ all
+ .*
+
+
+ [VIRUS]
+
+
+
+
+
+
+ {mailet-repository-path-prefix}://var/mail/spam
+
+
+----
+
+==== Feedback for Rspamd
+If enabled, the `RspamdListener` will rely on mailbox events to detect whether a message is spam or not, then James will report `spam` or `ham` to Rspamd.
+This listener can report mails to per-user Bayes by configuring `perUserBayes` in `rspamd.properties`.
+The Rspamd listener needs to explicitly be registered with xref:{pages-path}/configure/listeners.adoc[listeners.xml].
+
+Example:
+
+[source,xml]
+....
+
+
+ org.apache.james.rspamd.RspamdListener
+
+
+....
+
+For more details about how to use the Rspamd extension, see `third-party/rspamd/index.md`
+
+Alternatively, batch reports can be triggered on user mailbox content via webAdmin. link:https://github.com/apache/james-project/tree/master/third-party/rspamd#additional-webadmin-endpoints[Read more].
+
+
+=== SpamAssassin
+Here is an example of a mailet pipeline carrying out SpamAssassin execution:
+
+[source,xml]
+....
+
+ ignore
+ spamassassin
+ 783
+
+
+
+ org.apache.james.spamassassin.status; X-JAMES-SPAMASSASSIN-STATUS
+ org.apache.james.spamassassin.flag; X-JAMES-SPAMASSASSIN-FLAG
+
+
+ Spam
+
+....
+
+* *BayesianAnalysis* (unsupported) in the Mailet uses Bayesian probability to classify mail as
+spam or not spam. It relies on the training data coming from the users’ judgment.
+Users need to manually judge mail as spam and send it to spam@thisdomain.com; conversely,
+if not spam they send it to not.spam@thisdomain.com. BayesianAnalysisFeeder learns
+from this training dataset, and build predictive models based on Bayesian probability.
+There will be a certain table for maintaining the frequency of Corpus for keywords
+in the database. Every 10 mins a thread in the BayesianAnalysis will check and update
+the table. Also, the correct approach is to send the original spam or non-spam
+as an attachment to another message sent to the feeder in order to avoid bias from the
+current sender's email header.
+
+==== Feedback for SpamAssassin
+
+If enabled, the `SpamAssassinListener` will asynchronously report users mails moved to the `Spam` mailbox as Spam,
+and other mails as `Ham`, effectively populating the user database for per user spam detection. This enables a per-user
+Spam categorization to be carried out by the SpamAssassin mailet, the SpamAssassin hook being unaffected.
+
+The SpamAssassin listener requires an extra configuration file `spamassassin.properties` to configure SpamAssassin connection (optional):
+
+.spamassassin.properties content
+|===
+| Property name | explanation
+
+| spamassassin.host
+| Hostname of the SpamAssassin server. Defaults to 127.0.0.1.
+
+| spamassassin.port
+| Port of the SpamAssassin server. Defaults to 783.
+|===
+
+Note that this configuration file only affects the listener, and not the hook or mailet.
+
+The SpamAssassin listener needs to explicitly be registered with xref:{pages-path}/configure/listeners.adoc[listeners.xml].
+
+Example:
+
+[source,xml]
+....
+
+
+ org.apache.james.mailbox.spamassassin.SpamAssassinListener
+ true
+
+
+....
diff --git a/docs/modules/servers/partials/configure/ssl.adoc b/docs/modules/servers/partials/configure/ssl.adoc
new file mode 100644
index 00000000000..df740c26bb4
--- /dev/null
+++ b/docs/modules/servers/partials/configure/ssl.adoc
@@ -0,0 +1,253 @@
+This document explains how to enable James 3.0 servers to use Transport Layer Security (TLS)
+for encrypted client-server communication.
+
+== Configure a Server to Use SSL/TLS
+
+Each of the servers xref:{pages-path}/configure/smtp.adoc[SMTP - LMTP],
+xref:{pages-path}/configure/pop3.adoc[POP3] and xref:{pages-path}/configure/imap.adoc[IMAP]
+supports use of SSL/TLS.
+
+TLS (Transport Layer Security) and SSL (Secure Sockets Layer) are protocols that provide
+data encryption and authentication between applications in scenarios where that data is
+being sent across an insecure network, such as checking your email
+(How does the Secure Socket Layer work?). The terms SSL and TLS are often used
+interchangeably or in conjunction with each other (TLS/SSL),
+but one is in fact the predecessor of the other — SSL 3.0 served as the basis
+for TLS 1.0 which, as a result, is sometimes referred to as SSL 3.1.
+
+You need to add a block in the corresponding configuration file (smtpserver.xml, pop3server.xml, imapserver.xml,..)
+
+[source,xml]
+....
+
+ file://conf/keystore
+ PKCS12
+ yoursecret
+ org.bouncycastle.jce.provider.BouncyCastleProvider
+
+....
+
+Alternatively TLS keys can be supplied via PEM files:
+
+[source,xml]
+....
+
+ file://conf/private.key
+ file://conf/certs.self-signed.csr
+
+....
+
+An optional secret might be specified for the private key:
+
+[source,xml]
+....
+
+ file://conf/private.key
+ file://conf/certs.self-signed.csr
+ yoursecret
+
+....
+
+Optionally, TLS protocols and/or cipher suites can be specified explicitly (smtpserver.xml, pop3server.xml, imapserver.xml,..).
+Otherwise, the default protocols and cipher suites of the used JDK will be used:
+
+[source,xml]
+....
+
+
+ TLSv1.2
+ TLSv1.1
+ TLSv1
+ SSLv3
+
+
+ TLS_AES_256_GCM_SHA384
+ TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
+
+
+....
+
+Each of these blocks has an optional boolean configuration element socketTLS and startTLS which is used to toggle
+use of SSL or TLS for the service.
+
+With socketTLS (SSL/TLS in Thunderbird), all the communication is encrypted.
+
+With startTLS (STARTTLS in Thunderbird), the preamble is readable, but the rest is encrypted.
+
+....
+* OK JAMES IMAP4rev1 Server Server 192.168.1.4 is ready.
+* CAPABILITY IMAP4rev1 LITERAL+ CHILDREN WITHIN STARTTLS IDLE NAMESPACE UIDPLUS UNSELECT AUTH=PLAIN
+1 OK CAPABILITY completed.
+2 OK STARTTLS Begin TLS negotiation now.
+... rest is encrypted...
+....
+
+You can only enable one of the two at the same time for a service.
+
+It is also recommended to change the port number on which the service will listen:
+
+* POP3 - port 110, Secure POP3 - port 995
+* IMAP - port 143, Secure IMAP4 - port 993
+* SMTP - port 25, Secure SMTP - port 465
+
+You will now need to create your certificate store and place it in the james/conf/ folder with the name you defined in the keystore tag.
+
+Please note `JKS` keystore format is also supported (default value if no keystore type is specified):
+
+[source,xml]
+....
+
+ file://conf/keystore
+ JKS
+ yoursecret
+ org.bouncycastle.jce.provider.BouncyCastleProvider
+
+....
+
+
+=== Client authentication via certificates
+
+When you enable TLS, you may also configure the server to require a client certificate for authentication:
+
+[source,xml]
+....
+
+ file://conf/keystore
+ JKS
+ yoursecret
+
+
+ file://conf/truststore
+ JKS
+ yoursecret
+ false
+
+
+....
+
+James verifies client certificates against the provided truststore. You can fill it with trusted peer certificates directly, or an issuer certificate (CA) if you trust all certificates created by it. If you omit the truststore configuration, James will use the Java default truststore instead, effectively trusting any known CA.
+
+James can optionally enable OCSP verifications for client certificates against Certificate Revocation List referenced
+in the certificate itself.
+
+== Creating your own PEM keys
+
+The following commands can be used to create self signed PEM keys:
+
+[source,xml]
+....
+# Generating your private key
+openssl genrsa -des3 -out private.key 2048
+
+# Creating your certificates
+openssl req -new -key private.key -out certs.csr
+
+# Signing the certificate yourself
+openssl x509 -req -days 365 -in certs.csr -signkey private.key -out certs.self-signed.csr
+
+# Removing the password from the private key
+# Not necessary if you supply the secret in the configuration
+openssl rsa -in private.key -out private.nopass.key
+....
+
+You may then supply this TLS configuration:
+
+[source,xml]
+....
+
+ file://conf/private.nopass.key
+ file://conf/certs.self-signed.csr
+
+....
+
+== Certificate Keystores
+
+This section gives more indication for users relying on keystores.
+
+=== Creating your own Certificate Keystore
+
+(Adapted from the Tomcat 4.1 documentation)
+
+James currently operates only on JKS or PKCS12 format keystores. This is Java's standard "Java KeyStore" format, and is
+the format created by the keytool command-line utility. This tool is included in the JDK.
+
+To import an existing certificate into a JKS keystore, please read the documentation (in your JDK documentation package)
+about keytool.
+
+To create a new keystore from scratch, containing a single self-signed Certificate, execute the following from a terminal
+command line:
+
+....
+keytool -genkey -alias james -keyalg RSA -storetype PKCS12 -keystore your_keystore_filename
+....
+
+(The RSA algorithm should be preferred as a secure algorithm, and this also ensures general compatibility with other
+servers and components.)
+
+As a suggested standard, create the keystore in the james/conf directory, with a name like james.keystore.
+
+After executing this command, you will first be prompted for the keystore password.
+
+Next, you will be prompted for general information about this Certificate, such as company, contact name, and so on.
+This information may be displayed to users when importing into the certificate store of the client, so make sure that
+the information provided here matches what they will expect.
+
+Important: in the "distinguished name", set the "common name" (CN) to the DNS name of your James server, the one
+you will use to access it from your mail client (like "mail.xyz.com").
+
+Finally, you will be prompted for the key password, which is the password specifically for this Certificate
+(as opposed to any other Certificates stored in the same keystore file).
+
+If everything was successful, you now have a keystore file with a Certificate that can be used by your server.
+
+You MUST have only one certificate in the keystore file used by James.
+
+=== Installing a Certificate provided by a Certificate Authority
+
+(Adapted from the Tomcat 4.1 documentation)
+
+To obtain and install a Certificate from a Certificate Authority (like verisign.com, thawte.com or trustcenter.de)
+you should have read the previous section and then follow these instructions:
+
+==== Create a local Certificate Signing Request (CSR)
+
+In order to obtain a Certificate from the Certificate Authority of your choice you have to create a so called
+Certificate Signing Request (CSR). That CSR will be used by the Certificate Authority to create a Certificate
+that will identify your James server as "secure". To create a CSR follow these steps:
+
+* Create a local Certificate as described in the previous section.
+
+The CSR is then created with:
+
+....
+ keytool -certreq -keyalg RSA -alias james -file certreq.csr -keystore your_keystore_filename
+....
+
+Now you have a file called certreq.csr. The file is encoded in PEM format. You can submit it to the Certificate Authority
+(look at the documentation of the Certificate Authority website on how to do this). In return you get a Certificate.
+
+Now that you have your Certificate you can import it into your local keystore. First of all you may have to import a so
+called Chain Certificate or Root Certificate into your keystore (the major Certificate Authorities are already in place,
+so it's unlikely that you will need to perform this step). After that you can proceed with importing your Certificate.
+
+==== Optionally Importing a so called Chain Certificate or Root Certificate
+
+Download a Chain Certificate from the Certificate Authority you obtained the Certificate from.
+
+* For Verisign.com go to: http://www.verisign.com/support/install/intermediate.html
+* For Trustcenter.de go to: http://www.trustcenter.de/certservices/cacerts/en/en.htm#server
+* For Thawte.com go to: http://www.thawte.com/certs/trustmap.html (seems no longer valid)
+
+==== Import the Chain Certificate into your keystore
+
+....
+keytool -import -alias root -keystore your_keystore_filename -trustcacerts -file filename_of_the_chain_certificate
+....
+
+And finally import your new Certificate (It must be in X509 format):
+
+....
+keytool -import -alias james -keystore your_keystore_filename -trustcacerts -file your_certificate_filename
+....
+
+See also http://www.agentbob.info/agentbob/79.html[this page]
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/systemPropertiesPartial.adoc b/docs/modules/servers/partials/configure/systemPropertiesPartial.adoc
new file mode 100644
index 00000000000..40648e9b9df
--- /dev/null
+++ b/docs/modules/servers/partials/configure/systemPropertiesPartial.adoc
@@ -0,0 +1,23 @@
+== System properties
+
+Some tuning can be done via system properties. This includes:
+
+.System properties
+|===
+| Property name | explanation
+
+| james.message.memory.threshold
+| (Optional). String (size, integer + size units, example: `12 KIB`, supported units are bytes KIB MIB GIB TIB). Defaults to 100KIB.
+This governs the threshold MimeMessageInputStreamSource relies on for storing MimeMessage content on disk.
+Below, data is stored in memory. Above data is stored on disk.
+Lower values will lead to longer processing time but will minimize heap memory usage. Modern SSD hardware
+should however support a high throughput. Higher values will lead to faster single mail processing at the cost
+of higher heap usage.
+
+
+| james.message.usememorycopy
+|Optional. Boolean. Defaults to false. Recommended value is false.
+Should MimeMessageWrapper use a copy of the message in memory? Or should bigger message exceeding james.message.memory.threshold
+be copied to temporary files?
+
+|===
\ No newline at end of file
diff --git a/docs/modules/servers/partials/configure/tika.adoc b/docs/modules/servers/partials/configure/tika.adoc
new file mode 100644
index 00000000000..4e2ae166620
--- /dev/null
+++ b/docs/modules/servers/partials/configure/tika.adoc
@@ -0,0 +1,48 @@
+When using OpenSearch, you can configure an external Tika server for extracting and indexing text from attachments.
+Thus you can significantly improve user experience upon text searches.
+
+Note: You can launch a tika server using this command line:
+
+....
+docker run --name tika linagora/docker-tikaserver:1.24
+....
+
+Here are the different properties:
+
+.tika.properties content
+|===
+| Property name | explanation
+
+| tika.enabled
+| Should Tika text extractor be used?
+If true, the TikaTextExtractor will be used behind a cache.
+If false, the DefaultTextExtractor will be used (naive implementation only supporting text).
+Defaults to false.
+
+| tika.host
+| IP or domain name of your Tika server. The default value is 127.0.0.1
+
+| tika.port
+| Port of your tika server. The default value is 9998
+
+| tika.timeoutInMillis
+| Timeout when issuing request to the tika server. The default value is 3 seconds.
+
+| tika.cache.eviction.period
+| A cache is used to avoid, when possible, querying Tika multiple times for the same attachments.
+This entry determines how long after the last read an entry vanishes.
+Please note that units are supported (ms - millisecond, s - second, m - minute, h - hour, d - day). Default unit is seconds.
+Default value is *1 day*
+
+| tika.cache.enabled
+| Should the cache be used? False by default
+
+| tika.cache.weight.max
+| Maximum weight of the cache.
+A value of *0* disables the cache
+Please note that units are supported (K for KB, M for MB, G for GB). Defaults is no units, so in bytes.
+Default value is *100 MB*.
+
+| tika.contentType.blacklist
+| Blacklist of content type is known-to-be-failing with Tika. Specify the list with comma separator.
+|===
diff --git a/docs/modules/servers/partials/configure/usersrepository.adoc b/docs/modules/servers/partials/configure/usersrepository.adoc
new file mode 100644
index 00000000000..390a772528d
--- /dev/null
+++ b/docs/modules/servers/partials/configure/usersrepository.adoc
@@ -0,0 +1,138 @@
+User repositories are required to store James user information and authentication data.
+
+Consult this link:{sample-configuration-prefix-url}/usersrepository.xml[example]
+to get some examples and hints.
+
+== The user data model
+
+A user has two attributes: username and password.
+
+A valid user should satisfy these criteria:
+
+* username and password cannot be null or empty
+* username should not be longer than 255 characters
+* username can not contain '/'
+* username can not contain multiple domain delimiters ('@')
+* A username can have only a local part when virtualHosting is disabled. E.g.'myUser'
+* When virtualHosting is enabled, a username should have a domain part, and the domain part should be concatenated
+after a domain delimiter('@'). E.g. 'myuser@james.org'
+
+A user is always considered lower cased, so 'myUser' and 'myuser' are the same user, and can be used both as a
+recipient local part and as a login for different protocols.
+
+== Configuration
+
+.usersrepository.xml content
+|===
+| Property name | explanation
+
+| enableVirtualHosting
+| true or false. Add domain support for users (default: false, except for Cassandra Users Repository)
+
+| administratorId
+|user's name. Allows a user to access the https://tools.ietf.org/html/rfc4616#section-2[impersonation command],
+acting on the behalf of any user.
+
+| verifyFailureDelay
+| Delay after a failed authentication attempt with an invalid user name or password. Duration string defaulting to seconds, e.g. `2`, `2s`, `2000ms`. Default `0s` (disabled).
+
+| algorithm
+| use a specific hash algorithm to compute passwords, with optional mode `plain` (default) or `salted`; e.g. `SHA-512`, `SHA-512/plain`, `SHA-512/salted`, `PBKDF2`, `PBKDF2-SHA512` (default).
+Note: When using `PBKDF2` or `PBKDF2-SHA512` one can specify the iteration count and the key size in bytes. You can specify it as part of the algorithm. EG: `PBKDF2-SHA512-2000-512` will use
+2000 iterations with a key size of 512 bytes.
+
+| hashingMode
+| specify the hashing mode to use if there is none recorded in the database: `plain` (default) for newer installations or `legacy` for older ones
+
+|===
+
+== Configuring a LDAP
+
+Alternatively you can authenticate your users against a LDAP server. You need to configure
+the properties for accessing your LDAP server in this file.
+
+Consult this link:{sample-configuration-prefix-url}/usersrepository.xml[example]
+to get some examples and hints.
+
+Example:
+
+[source,xml]
+....
+
+ true
+
+....
+
+SSL can be enabled by using `ldaps` scheme. `trustAllCerts` option can be used to trust all LDAP client certificates
+(optional, defaults to false).
+
+Example:
+
+[source,xml]
+....
+
+ true
+
+....
+
+Moreover, per domain base DN can be configured:
+
+[source,xml]
+....
+true
+
+ ou=People,o=other.com,ou=system
+
+
+....
+
+You can connect to multiple LDAP servers for better availability by using `ldapHosts` option (fallback to `ldapHost` is supported) to specify the list of LDAP Server URL with the comma `,` delimiter. We do support different schemas for LDAP servers.
+
+Example:
+
+[source,xml]
+....
+
+ true
+
+....
+
+When VirtualHosting is on, you can enable local part as login username by configuring the `resolveLocalPartAttribute`.
+This is the LDAP attribute that allows to retrieve the local part of users. Optional, default to empty, which disables login with local part as username.
+
+Example:
+
+[source,xml]
+....
+
+ true
+
+....
+
+The "userListBase" configuration option is used to differentiate users that can log in from those that are listed
+ as regular users. This is useful for deactivating users, for instance.
+
+A different value from "userBase" can be used for setting up virtual logins,
+for instance in conjunction with "resolveLocalPartAttribute". This can also be used to manage
+deactivated users (in "userListBase" but not in "userBase").
+
+Note that "userListBase" can not be specified on a per-domain-basis.
+
+=== LDAP connection pool size tuning
+
+Apache James offers some options for configuring the LDAP connection pool used by unboundid:
+
+* *poolSize*: (optional, default = 4) The maximum number of connection in the pool. Note that if the pool is exhausted,
+extra connections will be created on the fly as needed.
+* *maxWaitTime*: (optional, default = 1000) the number of milliseconds to wait before creating off-pool connections,
+using a pool connection if released in time. This effectively smooths out traffic bursts, thus in some cases can help
+avoid overloading the LDAP server.
+* *connectionTimeout:* (optional) Sets the connection timeout on the underlying connection to the specified integer value
+* *readTimeout:* (optional) Sets the read timeout property to the specified integer value.
diff --git a/docs/modules/servers/partials/configure/vault.adoc b/docs/modules/servers/partials/configure/vault.adoc
new file mode 100644
index 00000000000..89496861750
--- /dev/null
+++ b/docs/modules/servers/partials/configure/vault.adoc
@@ -0,0 +1,35 @@
+Deleted Messages Vault is the component in charge of retaining messages before they are going to be deleted.
+Messages stored in the Deleted Messages Vault could be deleted after exceeding their retentionPeriod (explained below).
+It also supports to restore or export messages matching with defined criteria in
+xref:{pages-path}/operate/webadmin.adoc#_deleted_messages_vault[WebAdmin deleted messages vault document] by using
+xref:{pages-path}/operate/webadmin.adoc#_deleted_messages_vault[WebAdmin endpoints].
+
+== Deleted Messages Vault Configuration
+
+Once the vault is active, James will start moving deleted messages to it asynchronously.
+
+The Deleted Messages Vault also stores and manages deleted messages into a BlobStore. The BlobStore can be either
+based on an object storage or on {backend-name}. For configuring the BlobStore the vault will use, you can look at
+xref:{pages-path}/configure/blobstore.adoc[*blobstore.properties*] BlobStore Configuration section.
+
+== deletedMessageVault.properties
+
+Consult this link:{sample-configuration-prefix-url}/deletedMessageVault.properties[example]
+to get some examples and hints.
+
+.deletedMessageVault.properties content
+|===
+| Property name | explanation
+
+| enabled
+| Allows to enable or disable usage of the Deleted Message Vault. Default to false.
+
+| workQueueEnabled
+| Enable work queue to be used with deleted message vault. Default to false.
+
+| retentionPeriod
+| Deleted messages stored in the Deleted Messages Vault are expired after this period (default: 1 year). It can be expressed in *y* years, *d* days, *h* hours, ...
+
+| restoreLocation
+| Messages restored from the Deleted Messages Vault are placed in a mailbox with this name (default: ``Restored-Messages``). The mailbox will be created if it does not exist yet.
+|===
diff --git a/docs/modules/servers/partials/configure/webadmin.adoc b/docs/modules/servers/partials/configure/webadmin.adoc
new file mode 100644
index 00000000000..6a9a6fb79c9
--- /dev/null
+++ b/docs/modules/servers/partials/configure/webadmin.adoc
@@ -0,0 +1,104 @@
+The web administration supports for now the CRUD operations on:
+
+- The domains
+- The users
+- Their mailboxes
+- Their quotas
+- Managing mail repositories
+- Performing cassandra migrations [small]*_(only for Distributed James Server that uses cassandra as backend)_*
+- And much more, as described in the following sections.
+
+*WARNING*: This API allows authentication only via the use of JWT. If not
+configured with JWT, an administrator should ensure an attacker can not
+use this API.
+
+Note that some endpoints are not protected by authentication. Those endpoints are not related to data stored in James,
+for example: Swagger documentation & James health checks.
+
+== Configuration
+
+Consult this link:{sample-configuration-prefix-url}/webadmin.properties[example]
+to get some examples and hints.
+
+.webadmin.properties content
+|===
+| Property name | explanation
+
+| enabled
+| Define if WebAdmin is launched (default: false)
+
+| port
+| Define WebAdmin's port (default: 8080)
+
+| host
+| Define WebAdmin's host (default: localhost, use 0.0.0.0 to listen on all addresses)
+
+| cors.enable
+| Allow the Cross-origin resource sharing (default: false)
+
+| cors.origin
+| Specify the CORS origin (default: null)
+
+| jwt.enable
+| Allow JSON Web Token as an authentication mechanism (default: false)
+
+| https.enable
+| Use https (default: false)
+
+| https.keystore
+| Specify a keystore file for https (default: null)
+
+| https.password
+| Specify the keystore password (default: null)
+
+| https.trust.keystore
+| Specify a truststore file for https (default: null)
+
+| https.trust.password
+| Specify the truststore password (default: null)
+
+| jwt.publickeypem.url
+| Optional. JWT tokens allow request to bypass authentication. Path to the JWT public key.
+Defaults to the `jwt.publickeypem.url` value of `jmap.properties` file if unspecified
+(legacy behaviour)
+
+| extensions.routes
+| List of Routes specified as fully qualified class names that should be loaded in addition to your product routes list. Routes
+need to be on the classpath or in the ./extensions-jars folder. Read more about
+xref:customization:webadmin-routes.adoc[creating your own webadmin routes].
+
+| maxThreadCount
+| Maximum threads used by the underlying Jetty server. Optional.
+
+| minThreadCount
+| Minimum threads used by the underlying Jetty server. Optional.
+
+|===
+
+== Generating a JWT key pair
+
+The {server-name} enforces the use of RSA-SHA-256.
+
+One can use OpenSSL to generate a JWT key pair :
+
+ # private key
+ openssl genrsa -out rs256-4096-private.rsa 4096
+ # public key
+ openssl rsa -in rs256-4096-private.rsa -pubout > rs256-4096-public.pem
+
+The private key can be used to generate JWT tokens, for instance
+using link:https://github.com/vandium-io/jwtgen[jwtgen]:
+
+ jwtgen -a RS256 -p rs256-4096-private.rsa -c "sub=bob@domain.tld" -c "admin=true" -e 3600 -V
+
+This token can then be passed as `Bearer` of the `Authorization` header :
+
+ curl -H "Authorization: Bearer $token" -XGET http://127.0.0.1:8000/domains
+
+The public key can be referenced as `jwt.publickeypem.url` of the `jmap.properties` configuration file.
+
+== Reverse-proxy set up
+
+WebAdmin adds the value of `X-Real-IP` header as part of the logging MDC.
+
+This allows reverse proxies to carry over the IP address of the client down to the WebAdmin server for diagnostic purposes.
diff --git a/docs/modules/servers/partials/operate/cli.adoc b/docs/modules/servers/partials/operate/cli.adoc
new file mode 100644
index 00000000000..32f4731cda9
--- /dev/null
+++ b/docs/modules/servers/partials/operate/cli.adoc
@@ -0,0 +1,332 @@
+The {server-name} is packed with a command line client.
+
+To run this command line client simply execute:
+
+....
+java -jar /root/james-cli.jar -h 127.0.0.1 -p 9999 COMMAND
+....
+
+The following document will explain you which are the available options
+for *COMMAND*.
+
+Note: the above command line before *COMMAND* will be documented as _\{cli}_.
+
+== Manage Domains
+
+Domains represent the domain names handled by your server.
+
+You can add a domain:
+
+....
+{cli} AddDomain domain.tld
+....
+
+You can remove a domain:
+
+....
+{cli} RemoveDomain domain.tld
+....
+
+(Note: associated users are not removed automatically)
+
+Check if a domain is handled:
+
+....
+{cli} ContainsDomain domain.tld
+....
+
+And list your domains:
+
+....
+{cli} ListDomains
+....
+
+== Managing users
+
+Note: the following commands are explained with virtual hosting turned
+on.
+
+Users are accounts on the mail server. James can maintain mailboxes for
+them.
+
+You can add a user:
+
+....
+{cli} AddUser user@domain.tld password
+....
+
+Note: the domain used should have been previously created.
+
+You can delete a user:
+
+....
+{cli} RemoveUser user@domain.tld
+....
+
+(Note: associated mailboxes are not removed automatically)
+
+And change a user password:
+
+....
+{cli} SetPassword user@domain.tld password
+....
+
+Note: All these write operations can not be performed on LDAP backend,
+as the implementation is read-only.
+
+Finally, you can list users:
+
+....
+{cli} ListUsers
+....
+
+=== Virtual hosting
+
+James supports virtualhosting.
+
+* If set to true in the configuration, then the username is the full
+mail address.
+
+The domains then become a part of the user.
+
+_usera@domaina.com_ and _usera@domainb.com_ on a mail server with
+_domaina.com_ and _domainb.com_ configured are mail addresses that
+belong to different users.
+
+* If set to false in the configurations, then the username is the mail
+address local part.
+
+It means that a user is automatically created for all the domains
+configured on your server.
+
+_usera@domaina.com_ and _usera@domainb.com_ on a mail server with
+_domaina.com_ and _domainb.com_ configured are mail addresses that
+belong to the same user.
+
+Here are some sample commands for managing users when virtual hosting is
+turned off:
+
+....
+{cli} AddUser user password
+{cli} RemoveUser user
+{cli} SetPassword user password
+....
+
+== Managing mailboxes
+
+An administrator can perform some basic operation on user mailboxes.
+
+Note on mailbox formatting: mailboxes are composed of three parts.
+
+* The namespace, indicating what kind of mailbox it is. (Shared or
+not?). The value for users mailboxes is #private . Note that for now no
+other values are supported as James do not support shared mailboxes.
+* The username as stated above, depending on the virtual hosting value.
+* And finally mailbox name. Be aware that `.' serves as mailbox
+hierarchy delimiter.
+
+An administrator can delete all of the mailboxes of a user, which is not
+done automatically when removing a user (to avoid data loss):
+
+....
+{cli} DeleteUserMailboxes user@domain.tld
+....
+
+He can delete a specific mailbox:
+
+....
+{cli} DeleteMailbox #private user@domain.tld INBOX.toBeDeleted
+....
+
+He can list the mailboxes of a specific user:
+
+....
+{cli} ListUserMailboxes user@domain.tld
+....
+
+And finally can create a specific mailbox:
+
+....
+{cli} CreateMailbox #private user@domain.tld INBOX.newFolder
+....
+
+== Adding a message in a mailbox
+
+The administrator can use the CLI to add a message in a mailbox. This
+can be done using:
+
+....
+{cli} ImportEml #private user@domain.tld INBOX.newFolder /full/path/to/file.eml
+....
+
+This command will add a message having the content specified in file.eml
+(that needs to be in the EML format). It will get added in the
+INBOX.newFolder mailbox belonging to user user@domain.tld.
+
+== Managing mappings
+
+A mapping is a recipient rewriting rule. There are several kinds of
+rewriting rules:
+
+* address mapping: rewrite a given mail address into another one.
+* regex mapping.
+
+You can manage address mapping like (redirects email from
+fromUser@fromDomain.tld to redirected@domain.new, then deletes the
+mapping):
+
+....
+{cli} AddAddressMapping fromUser fromDomain.tld redirected@domain.new
+{cli} RemoveAddressMapping fromUser fromDomain.tld redirected@domain.new
+....
+
+You can manage regex mapping like this:
+
+....
+{cli} AddRegexMapping redirected domain.new .*@domain.tld
+{cli} RemoveRegexMapping redirected domain.new .*@domain.tld
+....
+
+You can view mapping for a mail address:
+
+....
+{cli} ListUserDomainMappings user domain.tld
+....
+
+And all mappings defined on the server:
+
+....
+{cli} ListMappings
+....
+
+== Manage quotas
+
+Quotas are limitations on a group of mailboxes. They can limit the
+*size* or the *messages count* in a group of mailboxes.
+
+James groups mailboxes by user by default (but it can be overridden),
+and labels each group with a quotaroot.
+
+To get the quotaroot a given mailbox belongs to:
+
+....
+{cli} GetQuotaroot #private user@domain.tld INBOX
+....
+
+Then you can get the specific quotaroot limitations.
+
+For the number of messages:
+
+....
+{cli} GetMessageCountQuota quotaroot
+....
+
+And for the storage space available:
+
+....
+{cli} GetStorageQuota quotaroot
+....
+
+You can see the maximum allowed for these values:
+
+For the number of messages:
+
+....
+{cli} GetMaxMessageCountQuota quotaroot
+....
+
+And for the storage space available:
+
+....
+{cli} GetMaxStorageQuota quotaroot
+....
+
+You can also specify maximum for these values.
+
+For the number of messages:
+
+....
+{cli} SetMaxMessageCountQuota quotaroot value
+....
+
+And for the storage space available:
+
+....
+{cli} SetMaxStorageQuota quotaroot value
+....
+
+With value being an integer. Please note the use of units for storage
+(K, M, G). For instance:
+
+....
+{cli} SetMaxStorageQuota someone@apache.org 4G
+....
+
+Moreover, James allows to specify global maximum values, at the server
+level. Note: syntax is similar to what was exposed previously.
+
+....
+{cli} SetGlobalMaxMessageCountQuota value
+{cli} GetGlobalMaxMessageCountQuota
+{cli} SetGlobalMaxStorageQuota value
+{cli} GetGlobalMaxStorageQuota
+....
+
+== Re-indexing
+
+James allows you to index your emails in a search engine, for making
+search faster.
+
+For some reasons, you might want to re-index your mails (inconsistencies
+across datastore, migrations).
+
+To re-index all mails of all mailboxes of all users, type:
+
+....
+{cli} ReindexAll
+....
+
+And for a specific mailbox:
+
+....
+{cli} Reindex #private user@domain.tld INBOX
+....
+
+== Sieve scripts quota
+
+James implements Sieve (RFC-5228). Your users can then write scripts
+and upload them to the server. Thus they can define the desired behavior
+upon email reception. James defines a Sieve mailet for this, and stores
+Sieve scripts. You can update them via the ManageSieve protocol, or via
+the ManageSieveMailet.
+
+You can define quota for the total size of Sieve scripts, per user.
+
+Syntax is similar to what was exposed for quotas. For defaults values:
+
+....
+{cli} GetSieveQuota
+{cli} SetSieveQuota value
+{cli} RemoveSieveQuota
+....
+
+And for specific user quotas:
+
+....
+{cli} GetSieveUserQuota user@domain.tld
+{cli} SetSieveUserQuota user@domain.tld value
+{cli} RemoveSieveUserQuota user@domain.tld
+....
+
+== Switching of mailbox implementation
+
+Migration is experimental for now. You would need to customize *Spring*
+configuration to add a new mailbox manager with a different bean name.
+
+You can then copy data across mailbox managers using:
+
+....
+{cli} CopyMailbox srcBean dstBean
+....
+
+You will then need to reconfigure James to use the new mailbox manager.
\ No newline at end of file
diff --git a/docs/modules/servers/partials/operate/guide.adoc b/docs/modules/servers/partials/operate/guide.adoc
new file mode 100644
index 00000000000..cdf2f4a6d4b
--- /dev/null
+++ b/docs/modules/servers/partials/operate/guide.adoc
@@ -0,0 +1,270 @@
+This guide aims to be an entry-point to the James documentation for user
+managing a {server-name}.
+
+It includes:
+
+* Simple architecture explanations
+* Propose some diagnostics for some common issues
+* Present procedures that can be set up to address these issues
+
+In order to not duplicate information, existing documentation will be
+linked.
+
+Please note that this product is under active development, should be
+considered experimental and thus targets advanced users.
+
+== Basic Monitoring
+
+A toolbox is available to help an administrator diagnose issues:
+
+* xref:{xref-base}/operate/logging.adoc[Structured logging into Kibana]
+* xref:{xref-base}/operate/metrics.adoc[Metrics graphs into Grafana]
+* xref:{xref-base}/operate/webadmin.adoc#_healthcheck[WebAdmin HealthChecks]
+
+== Mail processing
+
+Currently, an administrator can monitor mail processing failure through `ERROR` log
+review. We also recommend watching in Kibana INFO logs using the
+`org.apache.james.transport.mailets.ToProcessor` value as their `logger`. Metrics about
+mail repository size, and the corresponding Grafana boards are yet to be contributed.
+
+Furthermore, given the default mailet container configuration, we recommend monitoring
+`{mailet-repository-path-prefix}://var/mail/error/` to be empty.
+
+WebAdmin exposes all utilities for
+xref:{xref-base}/operate/webadmin.adoc#_reprocessing_mails_from_a_mail_repository[reprocessing
+all mails in a mail repository] or
+xref:{xref-base}/operate/webadmin.adoc#_reprocessing_a_specific_mail_from_a_mail_repository[reprocessing
+a single mail in a mail repository].
+
+In order to prevent unbounded processing that could consume unbounded resources, we can provide a CRON with a `limit` parameter.
+Ex: 10 mails reprocessed per minute
+Note that it only supports reprocessing all mails.
+
+Also, one can decide to
+xref:{xref-base}/operate/webadmin.adoc#_removing_all_mails_from_a_mail_repository[delete
+all the mails of a mail repository] or
+xref:{xref-base}/operate/webadmin.adoc#_removing_a_mail_from_a_mail_repository[delete
+a single mail of a mail repository].
+
+Performance of mail processing can be monitored via the
+https://github.com/apache/james-project/blob/d2cf7c8e229d9ed30125871b3de5af3cb1553649/server/grafana-reporting/es-datasource/MAILET-1490071694187-dashboard.json[mailet
+grafana board] and
+https://github.com/apache/james-project/blob/d2cf7c8e229d9ed30125871b3de5af3cb1553649/server/grafana-reporting/es-datasource/MATCHER-1490071813409-dashboard.json[matcher
+grafana board].
+
+=== Recipient rewriting
+
+Given the default configuration, errors (like loops) upon recipient rewriting will lead
+to emails being stored in `{mailet-repository-path-prefix}://var/mail/rrt-error/`.
+
+We recommend monitoring the content of this mail repository to be empty.
+
+If it is not empty, we recommend
+verifying user mappings via xref:{xref-base}/operate/webadmin.adoc#_listing_user_mappings_[User Mappings webadmin API] then once identified break the loop by removing
+some Recipient Rewrite Table entry via the
+xref:{xref-base}/operate/webadmin.adoc#_removing_an_alias_of_an_user[Delete Alias],
+xref:{xref-base}/operate/webadmin.adoc#_removing_a_group_member[Delete Group member],
+xref:{xref-base}/operate/webadmin.adoc#_removing_a_destination_of_a_forward[Delete forward],
+xref:{xref-base}/operate/webadmin.adoc#_remove_an_address_mapping[Delete Address mapping],
+xref:{xref-base}/operate/webadmin.adoc#_removing_a_domain_mapping[Delete Domain mapping]
+or xref:{xref-base}/operate/webadmin.adoc#_removing_a_regex_mapping[Delete Regex mapping]
+APIs (as needed).
+
+The `Mail.error` field can help diagnose the issue as well. Then once
+the root cause has been addressed, the mail can be reprocessed.
+
+== Mailbox Event Bus
+
+It is possible for the administrator of James to define the mailbox
+listeners he wants to use, by adding them in the
+{sample-configuration-prefix-url}/listeners.xml[listeners.xml]
+configuration file. It’s possible also to add your own custom mailbox
+listeners. This enables to enhance capabilities of James as a Mail
+Delivery Agent. You can get more information about those
+ xref:{xref-base}/configure/listeners.adoc[here].
+
+Currently, an administrator can monitor listeners failures through
+`ERROR` log review. Metrics regarding mailbox listeners can be monitored
+via
+https://github.com/apache/james-project/blob/d2cf7c8e229d9ed30125871b3de5af3cb1553649/server/grafana-reporting/es-datasource/MailboxListeners-1528958667486-dashboard.json[mailbox_listeners
+grafana board] and
+https://github.com/apache/james-project/blob/d2cf7c8e229d9ed30125871b3de5af3cb1553649/server/grafana-reporting/es-datasource/MailboxListeners%20rate-1552903378376.json[mailbox_listeners_rate
+grafana board].
+
+Upon exceptions, a bounded number of retries are performed (with
+exponential backoff delays). If after those retries the listener is
+still failing to perform its operation, then the event will be stored in
+the xref:{xref-base}/operate/webadmin.adoc#_event_dead_letter[Event Dead Letter]. This
+API allows diagnosing issues, as well as redelivering the events.
+
+To check that you have undelivered events in your system, you can first
+run the associated
+xref:{xref-base}/operate/webadmin.adoc#_healthcheck[event dead letter health check].
+You can explore Event DeadLetter content through WebAdmin. For
+this, xref:{xref-base}/operate/webadmin.adoc#_listing_mailbox_listener_groups[list mailbox listener groups]
+you will get a list of groups back, allowing
+you to check if those contain registered events in each by
+xref:{xref-base}/operate/webadmin.adoc#_listing_failed_events[listing their failed events].
+
+If you get failed events IDs back, you can as well
+xref:{xref-base}/operate/webadmin.adoc#_getting_event_details[check their details].
+
+An easy way to solve this is to then trigger the
+xref:{xref-base}/operate/webadmin.adoc#_redeliver_all_events[redeliver all events]
+task. It will start reprocessing all the failed events registered in
+event dead letters.
+
+In order to prevent unbounded processing that could consume unbounded resources, we can provide a CRON with a `limit` parameter.
+Ex: 10 redeliveries per minute
+
+If for some other reason you don’t need to redeliver all events, you
+have more fine-grained operations allowing you to
+xref:{xref-base}/operate/webadmin.adoc#_redeliver_group_events[redeliver group events]
+or even just
+xref:{xref-base}/operate/webadmin.adoc#_redeliver_a_single_event[redeliver a single event].
+
+== OpenSearch Indexing
+
+A projection of messages is maintained in OpenSearch via a listener
+plugged into the mailbox event bus in order to enable search features.
+
+You can find more information about OpenSearch configuration
+xref:{xref-base}/configure/opensearch.adoc[here].
+
+=== Usual troubleshooting procedures
+
+As explained in the link:#_mailbox_event_bus[Mailbox Event Bus] section,
+processing those events can fail sometimes.
+
+Currently, an administrator can monitor indexation failures through
+`ERROR` log review. You can as well
+xref:{xref-base}/operate/webadmin.adoc#_listing_failed_events[list failed events] by
+looking with the group called
+`org.apache.james.mailbox.opensearch.events.OpenSearchListeningMessageSearchIndex$OpenSearchListeningMessageSearchIndexGroup`.
+A first on-the-fly solution could be to just
+link:#_mailbox_event_bus[redeliver those group events with event dead letter].
+
+If the event storage in dead-letters fails (for instance in the face of
+{backend-name} storage exceptions), then you might need to use our WebAdmin
+reIndexing tasks.
+
+From there, you have multiple choices. You can
+xref:{xref-base}/operate/webadmin.adoc#_reindexing_all_mails[reIndex all mails],
+xref:{xref-base}/operate/webadmin.adoc#_reindexing_a_mailbox_mails[reIndex mails from a mailbox] or even just
+xref:{xref-base}/operate/webadmin.adoc#_reindexing_a_single_mail_by_messageid[reIndex a single mail].
+
+When checking the result of a reIndexing task, you might have failed
+reprocessed mails. You can still use the task ID to
+xref:{xref-base}/operate/webadmin.adoc#_fixing_previously_failed_reindexing[reprocess previously failed reIndexing mails].
+
+=== On the fly OpenSearch Index setting update
+
+Sometimes you might need to update index settings. Cases when an
+administrator might want to update index settings include:
+
+* Scaling out: increasing the shard count might be needed.
+* Changing string analysers, for instance to target another language
+* etc.
+
+In order to achieve such a procedure, you need to:
+
+* https://www.elastic.co/guide/en/elasticsearch/reference/7.10/indices-create-index.html[Create
+the new index] with the right settings and mapping
+* James uses two aliases on the mailbox index: one for reading
+(`mailboxReadAlias`) and one for writing (`mailboxWriteAlias`). First
+https://www.elastic.co/guide/en/elasticsearch/reference/7.10/indices-aliases.html[add
+an alias] `mailboxWriteAlias` to that new index, so that now James
+writes on the old and new indexes, while only keeping reading on the
+first one
+* Now trigger a
+https://www.elastic.co/guide/en/elasticsearch/reference/7.10/docs-reindex.html[reindex]
+from the old index to the new one (this actively relies on `_source`
+field being present)
+* When this is done, add the `mailboxReadAlias` alias to the new index
+* Now that the migration to the new index is done, you can
+https://www.elastic.co/guide/en/elasticsearch/reference/7.10/indices-delete-index.html[drop
+the old index]
+* You might want as well modify the James configuration file
+{sample-configuration-prefix-url}/opensearch.properties[opensearch.properties]
+by setting the parameter `opensearch.index.mailbox.name` to the name
+of your new index. This is to avoid that James re-creates index upon
+restart
+
+_Note_: keep in mind that reindexing can be a very long operation
+depending on the volume of mails you have stored.
+
+== Mail Queue
+
+=== Fine tune configuration for RabbitMQ
+
+In order to adapt mail queue settings to the actual traffic load, an
+administrator needs to perform fine configuration tuning as explained
+in
+https://github.com/apache/james-project/blob/master/src/site/xdoc/server/config-rabbitmq.xml[rabbitmq.properties].
+
+Be aware that `MailQueue::getSize` is currently performing a browse and
+thus is expensive. Size recurring metric reporting thus introduces
+performance issues. As such, we advise setting
+`mailqueue.size.metricsEnabled=false`.
+
+=== Managing email queues
+
+Managing an email queue is an easy task if you follow this procedure:
+
+* First, xref:{xref-base}/operate/webadmin.adoc#_listing_mail_queues[List mail queues]
+and xref:{xref-base}/operate/webadmin.adoc#_getting_a_mail_queue_details[get a mail queue details].
+* And then
+xref:{xref-base}/operate/webadmin.adoc#_listing_the_mails_of_a_mail_queue[List the mails of a mail queue].
+
+In case, you need to clear an email queue because there are only spam or
+trash emails in the email queue you have this procedure to follow:
+
+* All mails from the given mail queue will be deleted with
+xref:{xref-base}/operate/webadmin.adoc#_clearing_a_mail_queue[Clearing a mail queue].
+
+== Deleted Message Vault
+
+We recommend the administrator to
+xref:#_cleaning_expired_deleted_messages[run it] in cron job to save
+storage volume.
+
+=== How to configure deleted messages vault
+
+To setup James with Deleted Messages Vault, you need to follow those
+steps:
+
+* Enable Deleted Messages Vault by configuring Pre Deletion Hooks.
+* Configuring the retention time for the Deleted Messages Vault.
+
+==== Enable Deleted Messages Vault by configuring Pre Deletion Hooks
+
+You need to configure this hook in
+{sample-configuration-prefix-url}/listeners.xml[listeners.xml]
+configuration file. More details about configuration & example can be
+found at http://james.apache.org/server/config-listeners.html[Pre
+Deletion Hook Configuration]
+
+==== Configuring the retention time for the Deleted Messages Vault
+
+In order to configure the retention time for the Deleted Messages Vault,
+an administrator needs to perform fine configuration tuning as
+explained in
+{sample-configuration-prefix-url}/deletedMessageVault.properties[deletedMessageVault.properties].
+Mails are not retained forever as you have to configure a retention
+period (by `retentionPeriod`) before using it (with one-year retention
+by default if not defined).
+
+=== Restore deleted messages after deletion
+
+After users deleted their mails and emptied the trash, the admin can use
+xref:{xref-base}/operate/webadmin.adoc#_restore_deleted_messages[Restore Deleted Messages]
+to restore all the deleted mails.
+
+=== Cleaning expired deleted messages
+
+You can delete all deleted messages older than the configured
+`retentionPeriod` by using
+xref:{xref-base}/operate/webadmin.adoc#_deleted_messages_vault[Purge Deleted Messages].
+We recommend calling this API in CRON job on 1st day each
+month.
diff --git a/docs/modules/servers/partials/operate/index.adoc b/docs/modules/servers/partials/operate/index.adoc
new file mode 100644
index 00000000000..e3b5ec8b67c
--- /dev/null
+++ b/docs/modules/servers/partials/operate/index.adoc
@@ -0,0 +1,24 @@
+The following pages detail how to operate the {server-name}.
+
+Once you have a {server-name} up and running you then need to ensure it operates correctly and has a decent performance.
+You may also need to perform some operation maintenance or recover from incidents. This section covers
+these topics.
+
+Read more about xref:{xref-base}/operate/logging.adoc[Logging].
+
+The xref:{xref-base}/operate/webadmin.adoc[WebAdmin RESTful administration API] is the
+recommended way to operate the {server-name}. It allows managing and interacting with most
+server components.
+
+The xref:{xref-base}/operate/cli.adoc[Command line interface] allows interacting with some
+server components. However, it relies on JMX technologies and its use is discouraged.
+
+The xref:{xref-base}/operate/metrics.adoc[metrics] allows to build latency and throughput
+graphs, that can be visualized, for instance in *Grafana*.
+
+We did put together a xref:{xref-base}/operate/guide.adoc[detailed guide] for
+{server-tag} James operators. We also propose a xref:{xref-base}/operate/performanceChecklist.adoc[performance checklist].
+
+We also included a guide for xref:{xref-base}/operate/migrating.adoc[migrating existing data] into the {server-tag} server.
+
+Additional functional visualisations can be set up using OpenSearch dashboards as documented in link:https://github.com/apache/james-project/tree/master/examples/opensearch-dahsboard[this example].
diff --git a/docs/modules/servers/partials/operate/logging.adoc b/docs/modules/servers/partials/operate/logging.adoc
new file mode 100644
index 00000000000..f48f35d92ae
--- /dev/null
+++ b/docs/modules/servers/partials/operate/logging.adoc
@@ -0,0 +1,173 @@
+We recommend closely monitoring *ERROR* and *WARNING* logs. Those
+logs should not be considered normal.
+
+If you encounter some suspicious logs:
+
+* If you have any doubt about the log being caused by a bug in James
+source code, please reach us via the bug tracker, the user mailing list or our Gitter channel (see our
+http://james.apache.org/#second[community page])
+* They can be due to insufficient performance from tier applications (eg
+{backend-name} timeouts). In such case we advise you to conduct a close
+review of performances at the tier level.
+
+Leveraging filters in Kibana discover view can help to filter out
+''already known'' frequently occurring logs.
+
+When reporting ERROR or WARNING logs, consider adding the full logs, and
+related data (eg the raw content of a mail triggering an issue) to the
+bug report in order to ease resolution.
+
+== Logging configuration
+
+{server-name} uses link:http://logback.qos.ch/[logback] as a logging library
+and link:https://docs.fluentbit.io/[FluentBit] for centralized logging.
+
+Information about logback configuration can be found
+link:http://logback.qos.ch/manual/configuration.html[here].
+
+== Structured logging
+
+=== Using FluentBit as a log forwarder
+
+==== Using Docker
+
+{server-name} leverages the use of MDC in order to achieve structured logging, and better add context to the logged information. We furthermore ship json logs to file with RollingFileAppender on the classpath to easily allow FluentBit to directly tail the log file.
+Here is a sample conf/logback.xml configuration file for logback with the following pre-requisites:
+
+Logging is done in a structured json fashion and written to a file, for centralized logging.
+A centralized logging third party like FluentBit can then tail the log file, filter/process the entries and push them to OpenSearch.
+
+....
+
+
+
+
+ true
+
+
+
+
+ logs/james.%d{yyyy-MM-dd}.%i.log
+ 1
+ 200MB
+ 100MB
+
+
+
+
+ yyyy-MM-dd'T'HH:mm:ss.SSSX
+ Etc/UTC
+
+
+ true
+
+
+ false
+
+
+
+
+
+
+
+
+
+
+....
+
+First you need to create a `logs` folder, then mount it to James container and to FluentBit.
+
+docker-compose:
+
+include::{docker-compose-code-block-sample}[]
+
+FluentBit config as:
+the `Host opensearch` pointing to `opensearch` service in docker-compose file.
+....
+[SERVICE]
+ Parsers_File /fluent-bit/etc/parsers.conf
+
+[INPUT]
+ name tail
+ path /fluent-bit/log/*.log
+ Parser docker
+ docker_mode on
+ buffer_chunk_size 1MB
+ buffer_max_size 1MB
+ mem_buf_limit 64MB
+ Refresh_Interval 30
+
+[OUTPUT]
+ Name stdout
+ Match *
+
+
+[OUTPUT]
+ Name es
+ Match *
+ Host opensearch
+ Port 9200
+ Index fluentbit
+ Logstash_Format On
+ Logstash_Prefix fluentbit-james
+ Type docker
+....
+
+FluentBit Parser config:
+....
+[PARSER]
+ Name docker
+ Format json
+ Time_Key timestamp
+ Time_Format %Y-%m-%dT%H:%M:%S.%LZ
+ Time_Keep On
+ Decode_Field_As escaped_utf8 log do_next
+ Decode_Field_As escaped log do_next
+ Decode_Field_As json log
+....
+
+==== Using Kubernetes
+
+If using James in a Kubernetes environment, you can just append the logs to the console in a JSON formatted way
+using Jackson to easily allow FluentBit to directly tail them.
+
+Here is a sample conf/logback.xml configuration file for achieving this:
+
+....
+
+
+
+
+ true
+
+
+
+
+
+ yyyy-MM-dd'T'HH:mm:ss.SSSX
+ Etc/UTC
+
+
+ true
+
+
+ false
+
+
+
+
+
+
+
+
+
+
+....
+
+Regarding FluentBit on Kubernetes, you need to install it as a DaemonSet. Some official template exist
+with FluentBit outputting logs to OpenSearch. For more information on how to install it,
+with your cluster, you can look at this https://docs.fluentbit.io/manual/installation/kubernetes[documentation].
+
+As stated by the https://docs.fluentbit.io/manual/installation/kubernetes#details[detail] of the
+official documentation, FluentBit is configured to consume out of the box logs from containers
+on the same running node. So it should scrap your James logs without extra configuration.
diff --git a/docs/modules/servers/partials/operate/metrics.adoc b/docs/modules/servers/partials/operate/metrics.adoc
new file mode 100644
index 00000000000..4c8e105aa2d
--- /dev/null
+++ b/docs/modules/servers/partials/operate/metrics.adoc
@@ -0,0 +1,179 @@
+James relies on the https://metrics.dropwizard.io/4.1.2/manual/core.html[Dropwizard metric library]
+for keeping track of some core metrics of James.
+
+Such metrics are made available via JMX. You can connect for instance using VisualVM and the associated
+mbean plugins.
+
+We also support displaying them via https://grafana.com/[Grafana]. Two methods can be used to back grafana display:
+
+ - Prometheus metric collection - Data are exposed on a HTTP endpoint for Prometheus scrape.
+ - ElasticSearch metric collection - This method is deprecated and will be removed in the next version.
+
+== Expose metrics for Prometheus collection
+
+To enable James metrics, add ``extensions.routes`` to xref:{xref-base}/operate/webadmin.adoc[webadmin.properties] file:
+
+```
+extensions.routes=org.apache.james.webadmin.dropwizard.MetricsRoutes
+```
+Connect to james-admin url to test the result:
+....
+http://james-admin-url/metrics
+....
+
+== Configure Prometheus Data source
+You need to set up https://prometheus.io/docs/prometheus/latest/getting_started/[Prometheus] first to scrape James metrics. +
+Add Apache James WebAdmin Url or IP address to ``prometheus.yaml`` configuration file:
+....
+scrape_configs:
+ # The job name is added as a label `job=` to any timeseries scraped from this config.
+ - job_name: 'WebAdmin url Example'
+ scrape_interval: 5s
+ metrics_path: /metrics
+ static_configs:
+ - targets: ['james-webadmin-url']
+ - job_name: 'WebAdmin IP Example'
+ scrape_interval: 5s
+ metrics_path: /metrics
+ static_configs:
+ - targets: ['192.168.100.10:8000']
+....
+
+== Connect Prometheus to Grafana
+
+You can do this either from https://prometheus.io/docs/visualization/grafana/[Grafana UI] or from a https://grafana.com/docs/grafana/latest/datasources/prometheus/[configuration file]. +
+The following `docker-compose.yaml` will help you install a simple Prometheus/ Grafana stack :
+
+```
+version: '3'
+#Metric monitoring
+ grafana:
+ image: grafana/grafana:latest
+ container_name: grafana
+ ports:
+ - "3000:3000"
+
+ prometheus:
+ image: prom/prometheus:latest
+ restart: unless-stopped
+ ports:
+ - "9090:9090"
+ volumes:
+ - ./conf/prometheus.yml:/etc/prometheus/prometheus.yml
+```
+
+== Getting dashboards
+Now that the Prometheus/Grafana servers are up, go to this https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/[link] to get all dashboards JSON file. Import the different JSON files in this directory to Grafana via UI.
+
+
+image::preload-dashboards.png[Pre-loaded dashboards]
+
+*Note*: For communication between multiple docker-compose projects, see https://stackoverflow.com/questions/38088279/communication-between-multiple-docker-compose-projects[here] for example. An easier approach is to merge James and Metric docker-compose files together.
+
+== Available metrics
+
+Here are the available metrics :
+
+ - James JVM metrics
+ - Number of active SMTP connections
+ - Number of SMTP commands received
+ - Number of active IMAP connections
+ - Number of IMAP commands received
+ - Number of active LMTP connections
+ - Number of LMTP commands received
+ - Number of per queue number of enqueued mails
+ - Number of sent emails
+ - Number of delivered emails
+ - Diverse Response time percentiles, counts and rates for JMAP
+ - Diverse Response time percentiles, counts and rates for IMAP
+ - Diverse Response time percentiles, counts and rates for SMTP
+ - Diverse Response time percentiles, counts and rates for WebAdmin
+ - Diverse Response time percentiles, counts and rates for each Mail Queue
+ - Per mailet and per matcher Response time percentiles
+ - Diverse Response time percentiles, counts and rates for DNS
+ - Tika HTTP client statistics
+ - SpamAssassin TCP client statistics
+ - Mailbox listeners statistics time percentiles
+ - Mailbox listeners statistics requests rate
+ - Pre-deletion hooks execution statistics time percentiles
+ - {other-metrics}
+
+== Available Grafana boards
+
+Here are the various relevant Grafana boards for the {server-name}:
+
+- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_BlobStore.json[BlobStore] :
+Rates and percentiles for the BlobStore component
+- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_DNS_Dashboard.json[DNS] :
+Latencies and query counts for DNS resolution.
+- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_IMAP_Board.json[IMAP] :
+Latencies for the IMAP protocol
+- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_IMAP_CountBoard.json[IMAP counts] :
+Request counts for the IMAP protocol
+- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_JMAP_Board.json[JMAP] :
+Latencies for the JMAP protocol
+- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_JMAP_CountBoard.json[JMAP counts] :
+Request counts for the JMAP protocol
+- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_JVM.json[JVM] :
+JVM statistics (heap, gcs, etc...)
+- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_MAILET.json[Mailets] :
+Per-mailet execution timings.
+- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_MATCHER.json[Matchers] :
+Per-matcher execution timings
+- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_MailQueue.json[MailQueue] :
+MailQueue statistics
+- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_SMTP_Board.json[SMTP] :
+SMTP latencies reports
+- https://github.com/apache/james-project/tree/master/server/grafana-reporting/prometheus-datasource/James_SMTP_CountBoard.json[SMTP count] :
+Request count for the SMTP protocol
+
+=== Dashboard samples
+Latencies for the JMAP protocol +
+
+image::JMAP_board.png[JMAP]
+
+Latencies for the IMAP protocol +
+
+image::IMAP_board.png[IMAP]
+
+JVM Statistics +
+
+image::JVM_board.png[JVM]
+
+BlobStore Statistics +
+
+image::BlobStore.png[BlobStore]
+
+webAdmin Statistics +
+
+image::webAdmin.png[webAdmin]
+
+== Expose metrics for Elasticsearch collection
+
+The following command allow you to run a fresh grafana server :
+
+....
+docker run -i -p 3000:3000 grafana/grafana
+....
+
+Once running, you need to set up an ElasticSearch data-source:
+
+* select proxy mode
+* select version 2.x of ElasticSearch
+* make the URL point to your ES node
+* specify the index name. By default, it should be:
+
+....
+[james-metrics-]YYYY-MM
+....
+
+Import the different dashboards you want.
+
+You then need to enable reporting through ElasticSearch. Modify your
+James ElasticSearch configuration file accordingly. To help you doing
+this, you can take a look to
+link:https://github.com/apache/james-project/blob/3.7.x/server/apps/distributed-app/sample-configuration/elasticsearch.properties[elasticsearch.properties].
+
+If some metrics seem abnormally slow despite in depth database
+performance tuning, feedback is appreciated as well on the bug tracker,
+the user mailing list or our Gitter channel (see our
+http://james.apache.org/#second[community page]) . Any additional
+details categorizing the slowness are appreciated as well (details of
+the slow requests for instance).
diff --git a/docs/modules/servers/partials/operate/migrating.adoc b/docs/modules/servers/partials/operate/migrating.adoc
new file mode 100644
index 00000000000..643f9f5a9dd
--- /dev/null
+++ b/docs/modules/servers/partials/operate/migrating.adoc
@@ -0,0 +1,31 @@
+This page presents how operators can migrate your user mailbox and mails into the {server-name} in order to adopt it.
+
+We assume you have a xref:{xref-base}/configure/index.adoc[well configured] running {server-name}
+at hand. We also assume existing mails are hosted on a tier mail server which can be accessed via IMAP and supports
+impersonation.
+
+First, you want to create the domains handled by your server, as well as the users you will be hosting. This operation
+can be performed via WebAdmin or the CLI.
+
+ * Using webadmin :
+ ** Read xref:{xref-base}/operate/webadmin.adoc#_create_a_domain[this section] for creating domains
+ ** Read xref:{xref-base}/operate/webadmin.adoc#_create_a_user[this section] for creating users
+ * Using the CLI :
+ ** Read xref:{xref-base}/operate/cli.adoc#_manage_domains[this section] for creating domains
+ ** Read xref:{xref-base}/operate/cli.adoc#_managing_users[this section] for creating users
+
+Second, you want to allow an administrator account of your {server-name} to have write access on other user mailboxes.
+This can be set up with the *administratorId* configuration option of the xref:{xref-base}/configure/usersrepository.adoc[usersrepository.xml] configuration file.
+
+Then, it is time to run https://github.com/imapsync/imapsync[imapsync] script to copy the emails from the previous mail server
+into the {server-name}. Here is an example migrating a single user, relying on impersonation:
+
+....
+imapsync --host1 previous.server.domain.tld \
+ --user1 user@domain.tld --authuser1 adminOldServer@domain.tld \
+ --proxyauth1 --password1 passwordOfTheOldAdmin \
+ --host2 distributed.james.domain.tld \
+ --user2 use1@domain.tld \
+ --authuser2 adminNewServer@domain.tld --proxyauth2 \
+ --password2 passwordOfTheNewAdmin
+....
\ No newline at end of file
diff --git a/docs/modules/servers/partials/operate/performanceChecklist.adoc b/docs/modules/servers/partials/operate/performanceChecklist.adoc
new file mode 100644
index 00000000000..2216d514444
--- /dev/null
+++ b/docs/modules/servers/partials/operate/performanceChecklist.adoc
@@ -0,0 +1,80 @@
+This guide aims to help James operators refine their James configuration and set up to achieve better performance.
+
+== Database setup
+
+{backend-name}, OpenSearch and RabbitMQ are large topics in themselves that we do not intend to cover here. Yet, here are some
+very basic recommendations that are always beneficial to keep in mind.
+
+We recommend:
+
+* Running {backend-name}, OpenSearch on commodity hardware with attached SSD. SAN disks are known to cause performance
+issues for these technologies. HDD disks are to be banned for these performance related applications.
+* We recommend getting an Object Storage SaaS offering that suits your needs. Most generalist S3 offerings will suit
+James' needs.
+* We do provide a guide on xref:[Database benchmarks] that can help identify and fix issues.
+
+== James configuration
+
+=== JMAP protocol
+
+If you are not using JMAP, disabling it will avoid you the cost of populating related projections and thus is recommended.
+Within `jmap.properties`:
+
+....
+enabled=false
+....
+
+We recommend turning on EmailQueryView as it enables resolution of mailbox listing against {backend-name}, thus unlocking massive
+stability / performance gains. Within `jmap.properties`:
+
+....
+view.email.query.enabled=true
+....
+
+=== IMAP / SMTP
+
+We recommend against resolving client connection DNS names. This behaviour can be disabled via a system property within
+`jvm.properties`:
+
+....
+james.protocols.mdc.hostname=false
+....
+
+Concurrent IMAP request count is the critical setting. In `imapServer.xml`:
+
+....
+200
+4096
+....
+
+Other recommendations include avoiding unnecessary work upon IMAP IDLE, not starting dedicated BOSS threads:
+
+....
+false
+0
+....
+
+=== Other generic recommendations
+
+* Remove unneeded listeners / mailets
+* Reduce duplication of Matchers within mailetcontainer.xml
+* Limit usage of "DEBUG" loglevel. INFO should be more than decent in most cases.
+* While GC tuning is a science in itself, we had good results with G1GC and a low pause time:
+
+....
+-Xlog:gc*:file=/root/gc.log -XX:MaxGCPauseMillis=20 -XX:ParallelGCThreads=2
+....
+
+* We recommend tuning batch sizes: `batchsizes.properties`. This allows limiting parallel S3 reads while loading many
+messages concurrently on {backend-name}, and improves support for massive IMAP operations.
+
+....
+fetch.metadata=200
+fetch.headers=30
+fetch.body=30
+fetch.full=30
+
+copy=8192
+
+move=8192
+....
\ No newline at end of file
diff --git a/docs/modules/servers/partials/operate/security.adoc b/docs/modules/servers/partials/operate/security.adoc
new file mode 100644
index 00000000000..7f84aeb5ded
--- /dev/null
+++ b/docs/modules/servers/partials/operate/security.adoc
@@ -0,0 +1,246 @@
+This document aims as summarizing threats, security best practices as well as recommendations.
+
+== Threats
+
+Operating an email server exposes you to the following threats:
+
+ - Spammers might attempt to use your servers to send their spam messages on their behalf. We speak of
+*open relay*. In addition to the resources consumed being an open relay will affect the trust other mail
+installations have in you, and thus will cause legitimate traffic to be rejected.
+ - Emails mostly consist of private data, which shall only be accessed by their legitimate user. Failure
+to do so might result in *information disclosure*.
+ - *Email forgery*. An attacker might craft an email on the behalf of legitimate users.
+ - Email protocols allow users to authenticate and thus can be used as *oracles* to guess user passwords.
+ - *Spam*. Non legitimate traffic can be a real burden to your users.
+ - *Phishing*: Crafted emails that tricks the user into doing unintended actions.
+ - *Viruses*: An attacker sends an attachment that contains an exploit that could run if a user opens it.
+ - *Denial of service*: A small request may result in a very large response and require considerable work on the server...
+ - *Denial of service*: A malicious JMAP client may use the JMAP push subscription to attempt to flood a third party
+server with requests, creating a denial-of-service attack and masking the attacker’s true identity.
+ - *Dictionary Harvest Attacks*: An attacker can rely on SMTP command reply codes to know if a user exists or not. This
+ can be used to obtain the list of local users and later use those addresses as targets for other attacks.
+
+== Best practices
+
+The following sections ranks best practices.
+
+=== Best practices: Must
+
+ - 1. Configure James in order not to be an xref:{xref-base}/configure/smtp.adoc#_about_open_relays[open relay]. This should be the
+case with the default configuration.
+
+Be sure in xref:{xref-base}/configure/smtp.adoc[smtpserver.xml] to activate the following options: `verifyIdentity`.
+
+We then recommend to manually test your installation in order to ensure that:
+
+ - Unauthenticated SMTP users cannot send mails to external email addresses (they are not relayed)
+ - Unauthenticated SMTP users can send mails to internal email addresses
+ - Unauthenticated SMTP users cannot use local addresses in their mail from, and send emails both locally and to distant targets.
+
+ - 2. Avoid *STARTTLS* usage and favor SSL. Upgrade from a non encrypted channel into an encrypted channel is an opportunity
+for additional vulnerabilities. This is easily prevented by requiring SSL connection upfront. link:https://nostarttls.secvuln.info/[Read more...]
+
+Please note that STARTTLS is still beneficial in the context of email relaying, which happens on SMTP port 25 unencrypted,
+and enables opportunistic encryption upgrades that would not otherwise be possible. We recommend keeping STARTTLS activated
+for SMTP port 25.
+
+ - 3. Use SSL for xref:{xref-base}/configure/mailets.adoc#_remotedelivery[remote delivery] whenever you are using a gateway relaying SMTP server.
+
+ - 4. Rely on an external identity service, dedicated to user credential storage. James supports xref:{xref-base}/configure/usersrepository.adoc#_configuring_a_ldap[LDAP]. If you are
+forced to store users in James be sure to choose `PBKDF2` as a hashing algorithm. Also, delays on authentication failures
+are supported via the `verifyFailureDelay` property. Note that IMAP / SMTP connections are closed after 3 authentication
+failures.
+
+ - 5. Ensure that xref:{xref-base}/configure/webadmin.adoc[WebAdmin] is not exposed unencrypted to the outer world. Doing so trivially
+exposes yourself. You can either disable it, activate JWT security, or restrict it to listen only on localhost.
+
+ - 6. Set up `HTTPS` for http based protocols, namely *JMAP* and *WebAdmin*. We recommend the use of a reverse proxy like Nginx.
+
+ - 7. Set up link:https://james.apache.org/howTo/spf.html[SPF] and link:https://james.apache.org/howTo/dkim.html[DKIM]
+for your outgoing emails to be trusted.
+
+ - 8. Prevent access to JMX. This can be achieved through a strict firewalling policy
+(link:https://nickbloor.co.uk/2017/10/22/analysis-of-cve-2017-12628/[blocking port 9999 is not enough])
+or xref:{xref-base}/configure/jmx.adoc[disabling JMX]. JMX is needed to use the existing CLI application but webadmin do offer similar
+features. Set the `jmx.remote.x.mlet.allow.getMBeansFromURL` to `false` to disable JMX remote code execution feature.
+
+ - 9. If JMAP is enabled, be sure that JMAP PUSH cannot be used for server side request forgery. This can be
+xref:{xref-base}/configure/jmap.adoc[configured] using the `push.prevent.server.side.request.forgery=true` property,
+forbidding push to private addresses.
+
+=== Best practice: Should
+
+ - 1. Avoid advertising login/authenticate capabilities in clear channels. This might prevent some clients from attempting
+login on clear channels, and can be configured for both xref:{xref-base}/configure/smtp.adoc[SMTP] and xref:{xref-base}/configure/imap.adoc[IMAP]
+using `auth.plainAuthEnabled=false`.
+
+ - 2. Verify link:https://james.apache.org/howTo/spf.html[SPF] and xref:{xref-base}/configure/mailets.adoc#_dkimverify[DKIM] for your incoming emails.
+
+ - 3. Set up reasonable xref:{xref-base}/operate/webadmin.adoc#_administrating_quotas[storage quota] for your users.
+
+ - 4. We recommend setting up anti-spam and anti-virus solutions. James comes with some xref:{xref-base}/configure/spam.adoc[Rspamd and SpamAssassin]
+integration, and some xref:{xref-base}/configure/mailets.adoc#_clamavscan[ClamAV] tooling exists.
+Rspamd supports anti-phishing modules.
+Filtering with third party systems upstream is also possible.
+
+ - 5. In order to limit your attack surface, disable protocols you or your users do not use. This includes the JMAP protocol,
+POP3, ManagedSieve, etc... Be conservative on what you expose.
+
+ - 6. If operating behind a load-balancer, set up the link:https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt[PROXY protocol] for
+TCP based protocols (IMAP and SMTP `proxyRequired` option)
+
+=== Best practice: Could
+
+ - 1. Set up link:https://openid.net/connect/[OIDC] for IMAP, SMTP and JMAP. Disable login/plain/basic authentication.
+
+ - 2. You can configure xref:{xref-base}/configure/ssl.adoc#_client_authentication_via_certificates[Client authentication via certificates].
+
+ - 3. You can xref:{xref-base}/configure/mailets.adoc#_smimesign[sign], xref:{xref-base}/configure/mailets.adoc#_smimechecksignature[verify]
+and xref:{xref-base}/configure/mailets.adoc#_smimedecrypt[decrypt] your email traffic using link:https://datatracker.ietf.org/doc/html/rfc5751[SMIME].
+
+== Known vulnerabilities
+
+Several vulnerabilities have been reported for previous releases of Apache James server.
+
+Be sure not to run those! We highly recommend running the latest release, into which we put great effort to avoid
+using outdated dependencies.
+
+=== Reporting vulnerabilities
+
+We follow the standard procedures within the ASF regarding link:https://apache.org/security/committers.html#vulnerability-handling[vulnerability handling]
+
+=== CVE-2024-21742: Mime4J DOM header injection
+
+Apache JAMES MIME4J prior to version 0.8.10 allows attackers able to specify the value of a header field to craft other header fields.
+
+*Severity*: Moderate
+
+*Mitigation*: Release 0.8.10 rejects the use of LF inside a header field thus preventing the issue.
+
+Upgrading to Apache James MIME4J 0.8.10 is thus advised.
+
+=== CVE-2023-51747: SMTP smuggling in Apache James
+
+Apache James distribution prior to release 3.7.5 and release 3.8.1 is subject to SMTP smuggling, when used in combination
+with another vulnerable server, which can result in SPF bypass, leading to email forgery.
+
+*Severity*: High
+
+*Mitigation*: Release 3.7.5 and 3.8.1 interpret strictly the CRLF delimiter and thus prevent the issue.
+
+Upgrading to Apache James 3.7.5 or 3.8.1 is thus advised.
+
+=== CVE-2023-51518: Privilege escalation via JMX pre-authentication deserialisation
+
+Apache James distribution prior to release 3.7.5 and 3.8.1 allows privilege escalation via JMX pre-authentication deserialisation.
+An attacker would need to identify a deserialization glitch before triggering an exploit.
+
+*Severity*: Moderate
+
+*Mitigation*: We recommend turning off JMX whenever possible.
+
+Release 3.7.5 and 3.8.1 disable deserialization on unauthenticated channels.
+
+Upgrading to Apache James 3.7.5 or 3.8.1 is thus advised.
+
+
+=== CVE-2023-26269: Privilege escalation through unauthenticated JMX
+
+Apache James distribution prior to release 3.7.4 allows privilege escalation through the use of JMX.
+
+*Severity*: Moderate
+
+*Mitigation*: We recommend turning authentication on. If the CLI is unused we recommend turning JMX off.
+
+Release 3.7.4 implicitly sets up JMX authentication for Guice based products and addresses the underlying JMX exploits.
+
+Upgrading to Apache James 3.7.4 is thus advised.
+
+=== CVE-2022-45935: Temporary File Information Disclosure in Apache JAMES
+
+Apache James distribution prior to release 3.7.3 is vulnerable to a temporary File Information Disclosure.
+
+*Severity*: Moderate
+
+*Mitigation*: We recommend to upgrade to Apache James 3.7.3 or higher, which fixes this vulnerability.
+
+
+=== CVE-2021-44228: STARTTLS command injection in Apache JAMES
+
+Apache James distribution prior to release 3.7.1 is vulnerable to a buffering attack relying on the use of the STARTTLS command.
+
+The fix of CVE-2021-38542, which solved a similar problem in Apache James 3.6.1, is subject to a parser differential and does not take into account concurrent requests.
+
+*Severity*: Moderate
+
+*Mitigation*: We recommend to upgrade to Apache James 3.7.1 or higher, which fixes this vulnerability.
+
+=== CVE-2021-38542: Apache James vulnerable to STARTTLS command injection (IMAP and POP3)
+
+Apache James prior to release 3.6.1 is vulnerable to a buffering attack relying on the use of the STARTTLS
+command. This can result in Man-in-the-middle command injection attacks, leading potentially to leakage
+of sensitive information.
+
+*Severity*: Moderate
+
+This issue is being tracked as link:https://issues.apache.org/jira/browse/JAMES-1862[JAMES-1862]
+
+*Mitigation*: We recommend upgrading to Apache James 3.6.1, which fixes this vulnerability.
+
+Furthermore, we recommend, if possible, to deactivate STARTTLS and rely solely on explicit TLS for mail protocols, including SMTP, IMAP and POP3.
+
+Read more link:https://nostarttls.secvuln.info/[about STARTTLS security here].
+
+=== CVE-2021-40110: Apache James IMAP vulnerable to a ReDoS
+
+Using Jazzer fuzzer, we identified that an IMAP user can craft IMAP LIST commands to orchestrate a Denial
+Of Service using a vulnerable Regular expression. This affected Apache James prior to 3.6.1
+
+*Severity*: Moderate
+
+This issue is being tracked as link:https://issues.apache.org/jira/browse/JAMES-3635[JAMES-3635]
+
+*Mitigation*: We recommend upgrading to Apache James 3.6.1, which enforces the use of the RE2J regular
+expression engine to execute regexes in linear time without back-tracking.
+
+=== CVE-2021-40111: Apache James IMAP parsing Denial Of Service
+
+While fuzzing the IMAP parsing stack with Jazzer we discovered that crafted APPEND and STATUS IMAP commands
+could be used to trigger infinite loops resulting in expensive CPU computations and OutOfMemory exceptions.
+This can be used for a Denial Of Service attack. The IMAP user needs to be authenticated to exploit this
+vulnerability. This affected Apache James prior to version 3.6.1.
+
+*Severity*: Moderate
+
+This issue is being tracked as link:https://issues.apache.org/jira/browse/JAMES-3634[JAMES-3634]
+
+*Mitigation*: We recommend upgrading to Apache James 3.6.1, which fixes this vulnerability.
+
+=== CVE-2021-40525: Apache James: Sieve file storage vulnerable to path traversal attacks
+
+Apache James ManagedSieve implementation alongside the file storage for sieve scripts is vulnerable
+to path traversal, allowing reading and writing any file.
+
+*Severity*: Moderate
+
+This issue is being tracked as link:https://issues.apache.org/jira/browse/JAMES-3646[JAMES-3646]
+
+*Mitigation*: This vulnerability has been patched in Apache James 3.6.1 and higher. We recommend the upgrade.
+
+This could also be mitigated by ensuring manageSieve is disabled, which is the case by default.
+
+Distributed and {backend-name} based products are also not impacted.
+
+=== CVE-2017-12628 Privilege escalation using JMX
+
+The Apache James Server prior version 3.0.1 is vulnerable to Java deserialization issues.
+One can use this for privilege escalation.
+This issue can be mitigated by:
+
+ - Upgrading to James 3.0.1 onward
+ - Using a recent JRE (Exploit could not be reproduced on OpenJdk 8 u141)
+ - Exposing JMX socket only to localhost (default behaviour)
+ - Possibly running James in a container
+ - Disabling JMX all-together (Guice only)
+
+Read more link:http://james.apache.org//james/update/2017/10/20/james-3.0.1.html[here].
\ No newline at end of file
diff --git a/docs/modules/servers/partials/operate/webadmin.adoc b/docs/modules/servers/partials/operate/webadmin.adoc
new file mode 100644
index 00000000000..ddbc85df079
--- /dev/null
+++ b/docs/modules/servers/partials/operate/webadmin.adoc
@@ -0,0 +1,4517 @@
+The web administration supports for now the CRUD operations on the domains, the users, their mailboxes and their quotas,
+managing mail repositories, performing {backend-name} migrations, and much more, as described in the following sections.
+
+*WARNING*: This API allow authentication only via the use of JWT. If not
+configured with JWT, an administrator should ensure an attacker can not
+use this API.
+
+Note that some endpoints are not protected by authentication. Those endpoints are not related to data stored in James,
+for example: Swagger documentation & James health checks.
+
+In case of any error, the system will return an error message in JSON
+format, like this:
+
+....
+{
+ statusCode: ,
+ type: ,
+ message:
+ cause:
+}
+....
+
+Also be aware that, in case things go wrong, all endpoints might return
+a 500 internal error (with a JSON body formatted as exposed above). To
+avoid information duplication, this is omitted on endpoint specific
+documentation.
+
+Finally, please note that in case of a malformed URL the 400 bad request
+response will contain an HTML body.
+
+== HealthCheck
+
+=== Check all components
+
+This endpoint is simple for now and is just returning the http status
+code corresponding to the state of checks (see below). The user has to
+check in the logs in order to have more information about failing
+checks.
+
+....
+curl -XGET http://ip:port/healthcheck
+....
+
+Will return a list of health check execution results, with an aggregated
+result:
+
+....
+{
+ "status": "healthy",
+ "checks": [
+ {
+ "componentName": "{backend-name} backend",
+ "escapedComponentName": "{backend-name}%20backend",
+ "status": "healthy"
+ "cause": null
+ }
+ ]
+}
+....
+
+*status* field can be:
+
+* *healthy*: Component works normally
+* *degraded*: Component works in degraded mode. Some non-critical
+services may not be working, or latencies are high, for example. Cause
+contains explanations.
+* *unhealthy*: The component is currently not working. Cause contains
+explanations.
+
+Supported health checks include:
+
+* *{backend-name} backend*: {backend-name} storage.
+* *OpenSearch Backend*: OpenSearch storage.
+* *EventDeadLettersHealthCheck*
+* *Guice application lifecycle*
+* *JPA Backend*: JPA storage.
+* *MailReceptionCheck* We rely on a configured user, send an email to him and
+assert that the email is well received, and can be read within the given configured
+period. Unhealthy means that the email could not be received before reaching the timeout.
+* *MessageFastViewProjection* Health check of the component storing JMAP properties
+which are fast to retrieve. Those properties are computed in advance
+from messages and persisted in order to achieve better performance.
+There are some latencies between a source update and its projections
+updates. Incoherency problems arise when reads are performed in this
+time-window. We piggyback the projection update on missed JMAP read in
+order to decrease the outdated time window for a given entry. The health
+is determined by the ratio of missed projection reads. (lower than 10%
+causes `degraded`)
+* *RabbitMQ backend*: RabbitMQ messaging.
+
+Response codes:
+
+* 200: All checks have answered with a Healthy or Degraded status. James
+services can still be used.
+* 503: At least one check has answered with an Unhealthy status
+
+=== Check single component
+
+Performs a health check for the given component. The component is
+referenced by its URL encoded name.
+
+....
+curl -XGET http://ip:port/healthcheck/checks/{backend-name}%20backend
+....
+
+Will return the component’s name, the component’s escaped name, the
+health status and a cause.
+
+....
+{
+ "componentName": "{backend-name} backend",
+ "escapedComponentName": "{backend-name}%20backend",
+ "status": "healthy"
+ "cause": null
+}
+....
+
+Response codes:
+
+* 200: The check has answered with a Healthy or Degraded status.
+* 404: A component with the given name was not found.
+* 503: The check has answered with an Unhealthy status.
+
+=== List all health checks
+
+This endpoint lists all the available health checks.
+
+....
+curl -XGET http://ip:port/healthcheck/checks
+....
+
+Will return the list of all available health checks.
+
+....
+[
+ {
+ "componentName": "{backend-name} backend",
+ "escapedComponentName": "{backend-name}%20backend"
+ }
+]
+....
+
+Response codes:
+
+* 200: List of available health checks
+
+== Task management
+
+Some webadmin features schedule tasks. The task management API allow to
+monitor and manage the execution of the following tasks.
+
+Note that the `taskId` used in the following APIs is returned by other
+WebAdmin APIs scheduling tasks.
+
+=== Getting a task details
+
+....
+curl -XGET http://ip:port/tasks/3294a976-ce63-491e-bd52-1b6f465ed7a2
+....
+
+An Execution Report will be returned:
+
+....
+{
+ "submitDate": "2017-12-27T15:15:24.805+0700",
+ "startedDate": "2017-12-27T15:15:24.809+0700",
+ "completedDate": "2017-12-27T15:15:24.815+0700",
+ "cancelledDate": null,
+ "failedDate": null,
+ "taskId": "3294a976-ce63-491e-bd52-1b6f465ed7a2",
+ "additionalInformation": {},
+ "status": "completed",
+ "type": "type-of-the-task"
+}
+....
+
+Note that:
+
+* `status` can have the value:
+** `waiting`: The task is scheduled but its execution did not start yet
+** `inProgress`: The task is currently executed
+** `cancelled`: The task had been cancelled
+** `completed`: The task execution is finished, and this execution is a
+success
+** `failed`: The task execution is finished, and this execution is a
+failure
+* `additionalInformation` is a task specific object giving additional
+information and context about that task. The structure of this
+`additionalInformation` field is provided along the specific task
+submission endpoint.
+
+Response codes:
+
+* 200: The specific task was found and the execution report exposed
+above is returned
+* 400: Invalid task ID
+* 404: Task ID was not found
+
+=== Awaiting a task
+
+One can await the end of a task, then receive its final execution
+report.
+
+That feature is especially useful for testing purposes but can still
+serve real-life scenarios.
+
+....
+curl -XGET http://ip:port/tasks/3294a976-ce63-491e-bd52-1b6f465ed7a2/await?timeout=duration
+....
+
+An Execution Report will be returned.
+
+`timeout` is optional. By default it is set to 365 days (the maximum
+value). The expected value is expressed in the following format:
+`Nunit`. `N` should be strictly positive. `unit` could be either in the
+short form (`s`, `m`, `h`, etc.), or in the long form (`day`, `week`,
+`month`, etc.).
+
+Examples:
+
+* `30s`
+* `5m`
+* `7d`
+* `1y`
+
+Response codes:
+
+* 200: The specific task was found and the execution report exposed
+above is returned
+* 400: Invalid task ID or invalid timeout
+* 404: Task ID was not found
+* 408: The timeout has been reached
+
+=== Cancelling a task
+
+You can cancel a task by calling:
+
+....
+curl -XDELETE http://ip:port/tasks/3294a976-ce63-491e-bd52-1b6f465ed7a2
+....
+
+Response codes:
+
+* 204: Task had been cancelled
+* 400: Invalid task ID
+
+=== Listing tasks
+
+A list of all tasks can be retrieved:
+
+....
+curl -XGET http://ip:port/tasks
+....
+
+Will return a list of Execution reports
+
+One can filter the above results by status. For example:
+
+....
+curl -XGET http://ip:port/tasks?status=inProgress
+....
+
+Will return a list of Execution reports that are currently in progress. This list is sorted by
+reverse submitted date (recent tasks go first).
+
+Response codes:
+
+* 200: A list of corresponding tasks is returned
+* 400: Invalid status value
+
+Additional optional task parameters are supported:
+
+- `status` one of `waiting`, `inProgress`, `canceledRequested`, `completed`, `canceled`, `failed`. Only
+tasks with the given status are returned.
+- `type`: only tasks with the given type are returned.
+- `submittedBefore`: Date. Returns only tasks submitted before this date.
+- `submittedAfter`: Date. Returns only tasks submitted after this date.
+- `startedBefore`: Date. Returns only tasks started before this date.
+- `startedAfter`: Date. Returns only tasks started after this date.
+- `completedBefore`: Date. Returns only tasks completed before this date.
+- `completedAfter`: Date. Returns only tasks completed after this date.
+- `failedBefore`: Date. Returns only tasks failed before this date.
+- `failedAfter`: Date. Returns only tasks failed after this date.
+- `offset`: Integer, number of tasks to skip in the response. Useful for paging.
+- `limit`: Integer, maximum number of tasks to return in one call
+
+Example of date format: `2023-04-15T07:23:27.541254+07:00` and `2023-04-15T07%3A23%3A27.541254%2B07%3A00` once URL encoded.
+
+=== Endpoints returning a task
+
+Many endpoints do generate a task.
+
+Example:
+
+....
+curl -XPOST /endpoint?action={action}
+....
+
+The response to these requests will be the scheduled `taskId` :
+
+....
+{"taskId":"5641376-02ed-47bd-bcc7-76ff6262d92a"}
+....
+
+Positioned headers:
+
+* Location header indicates the location of the resource associated with
+the scheduled task. Example:
+
+....
+Location: /tasks/3294a976-ce63-491e-bd52-1b6f465ed7a2
+....
+
+Response codes:
+
+* 201: Task generation succeeded. Corresponding task id is returned.
+* Other response codes might be returned depending on the endpoint
+
+The additional information returned depends on the scheduled task type
+and is documented in the endpoint documentation.
+
+== Administrating domains
+
+=== Create a domain
+
+....
+curl -XPUT http://ip:port/domains/domainToBeCreated
+....
+
+Resource name domainToBeCreated:
+
+* can not be null or empty
+* can not contain `@'
+* can not be more than 255 characters
+* can not contain `/'
+
+Response codes:
+
+* 204: The domain was successfully added
+* 400: The domain name is invalid
+
+=== Delete a domain
+
+....
+curl -XDELETE http://ip:port/domains/{domainToBeDeleted}
+....
+
+Note: Deletion of an auto-detected domain, default domain or of an
+auto-detected ip is not supported. We encourage you instead to review
+your https://james.apache.org/server/config-domainlist.html[domain list
+configuration].
+
+Response codes:
+
+* 204: The domain was successfully removed
+
+=== Test if a domain exists
+
+....
+curl -XGET http://ip:port/domains/{domainName}
+....
+
+Response codes:
+
+* 204: The domain exists
+* 404: The domain does not exist
+
+=== Get the list of domains
+
+....
+curl -XGET http://ip:port/domains
+....
+
+Possible response:
+
+....
+["domain1", "domain2"]
+....
+
+Response codes:
+
+* 200: The domain list was successfully retrieved
+
+=== Get the list of aliases for a domain
+
+....
+curl -XGET http://ip:port/domains/destination.domain.tld/aliases
+....
+
+Possible response:
+
+....
+[
+ {"source": "source1.domain.tld"},
+ {"source": "source2.domain.tld"}
+]
+....
+
+When sending an email to an email address having `source1.domain.tld` or
+`source2.domain.tld` as a domain part (example:
+`user@source1.domain.tld`), then the domain part will be rewritten into
+destination.domain.tld (so into `user@destination.domain.tld`).
+
+Response codes:
+
+* 200: The domain aliases were successfully retrieved
+* 400: destination.domain.tld has an invalid syntax
+* 404: destination.domain.tld is not part of handled domains and does
+not have local domains as aliases.
+
+=== Create an alias for a domain
+
+To create a domain alias execute the following query:
+
+....
+curl -XPUT http://ip:port/domains/destination.domain.tld/aliases/source.domain.tld
+....
+
+When sending an email to an email address having `source.domain.tld` as
+a domain part (example: `user@source.domain.tld`), then the domain part
+will be rewritten into `destination.domain.tld` (so into
+`user@destination.domain.tld`).
+
+Response codes:
+
+* 204: The redirection now exists
+* 400: `source.domain.tld` or `destination.domain.tld` have an invalid
+syntax
+* 400: `source, domain` and `destination domain` are the same
+* 404: `source.domain.tld` are not part of handled domains.
+
+Be aware that no checks to find possible loops that would result from this creation will be performed.
+
+=== Delete an alias for a domain
+
+To delete a domain alias execute the following query:
+
+....
+curl -XDELETE http://ip:port/domains/destination.domain.tld/aliases/source.domain.tld
+....
+
+When sending an email to an email address having `source.domain.tld` as
+a domain part (example: `user@source.domain.tld`), then the domain part
+will be rewritten into `destination.domain.tld` (so into
+`user@destination.domain.tld`).
+
+Response codes:
+
+* 204: The redirection now no longer exists
+* 400: `source.domain.tld` or destination.domain.tld have an invalid
+syntax
+* 400: source, domain and destination domain are the same
+* 404: `source.domain.tld` are not part of handled domains.
+
+=== Delete all users data of a domain
+
+....
+curl -XPOST http://ip:port/domains/{domainToBeUsed}?action=deleteData
+....
+
+Would create a task that deletes data of all users of the domain.
+
+More details about <<_endpoints_returning_a_task,endpoints returning a task>>.
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Error in the request. Details can be found in the reported error.
+
+The scheduled task will have the following type `DeleteUsersDataOfDomainTask` and the following `additionalInformation`:
+
+....
+{
+ "type": "DeleteUsersDataOfDomainTask",
+ "domain": "domain.tld",
+ "successfulUsersCount": 2,
+ "failedUsersCount": 1,
+ "failedUsers": ["faileduser@domain.tld"],
+ "timestamp": "2023-05-22T08:52:47.076261Z"
+}
+....
+
+Notes: `failedUsers` only lists maximum 100 failed users.
+
+== Administrating users
+
+=== Create a user
+
+....
+curl -XPUT http://ip:port/users/usernameToBeUsed \
+ -d '{"password":"passwordToBeUsed"}' \
+ -H "Content-Type: application/json"
+....
+
+Resource name `usernameToBeUsed` represents a valid user, hence it should
+match the criteria at xref:{xref-base}/configure/usersrepository.adoc[User Repositories documentation]
+
+Response codes:
+
+* 204: The user was successfully created
+* 400: The user name or the payload is invalid
+* 409: The user name already exists
+
+Note: If the user exists already, its password cannot be updated using this.
+If you want to update a user's password, please have a look at *Update a user password* below.
+
+=== Updating a user password
+
+....
+curl -XPUT http://ip:port/users/usernameToBeUsed?force \
+ -d '{"password":"passwordToBeUsed"}' \
+ -H "Content-Type: application/json"
+....
+
+Response codes:
+
+- 204: The user's password was successfully updated
+- 400: The user name or the payload is invalid
+
+This also can be used to create a new user.
+
+=== Verifying a user password
+
+....
+curl -XPOST http://ip:port/users/usernameToBeUsed/verify \
+ -d '{"password":"passwordToBeVerified"}' \
+ -H "Content-Type: application/json"
+....
+
+Response codes:
+
+- 204: The user's password was correct
+- 401: Wrong password or user does not exist
+- 400: The user name or the payload is invalid
+
+This intentionally treats non-existing users as unauthenticated, to prevent a username oracle attack.
+
+=== Testing a user existence
+
+....
+curl -XHEAD http://ip:port/users/usernameToBeUsed
+....
+
+Resource name ``usernameToBeUsed'' represents a valid user, hence it
+should match the criteria at xref:{xref-base}/configure/usersrepository.adoc[User Repositories documentation]
+
+Response codes:
+
+* 200: The user exists
+* 400: The user name is invalid
+* 404: The user does not exist
+
+=== Deleting a user
+
+....
+curl -XDELETE http://ip:port/users/{userToBeDeleted}
+....
+
+Response codes:
+
+* 204: The user was successfully deleted
+
+=== Retrieving the user list
+
+....
+curl -XGET http://ip:port/users
+....
+
+The answer looks like:
+
+....
+[{"username":"username@domain-jmapauthentication.tld"},{"username":"username@domain.tld"}]
+....
+
+Response codes:
+
+* 200: The user name list was successfully retrieved
+
+=== Retrieving the list of allowed `From` headers for a given user
+
+This endpoint allows to know which From headers a given user is allowed to use when sending mails.
+
+....
+curl -XGET http://ip:port/users/givenUser/allowedFromHeaders
+....
+
+The answer looks like:
+
+....
+["user@domain.tld","alias@domain.tld"]
+....
+
+Response codes:
+
+* 200: The list was successfully retrieved
+* 400: The user is invalid
+* 404: The user is unknown
+
+=== Add a delegated user of a base user
+
+....
+curl -XPUT http://ip:port/users/baseUser/authorizedUsers/delegatedUser
+....
+
+Response codes:
+
+* 200: Addition of the delegated user succeeded
+* 404: The base user does not exist
+* 400: The delegated user does not exist
+
+Note: Delegation is only available on top of {backend-name} products and not implemented yet on top of JPA backends.
+
+=== Remove a delegated user of a base user
+
+....
+curl -XDELETE http://ip:port/users/baseUser/authorizedUsers/delegatedUser
+....
+
+Response codes:
+
+* 200: Removal of the delegated user succeeded
+* 404: The base user does not exist
+* 400: The delegated user does not exist
+
+Note: Delegation is only available on top of {backend-name} products and not implemented yet on top of JPA backends.
+
+=== Retrieving the list of delegated users of a base user
+
+....
+curl -XGET http://ip:port/users/baseUser/authorizedUsers
+....
+
+The answer looks like:
+
+....
+["alice@domain.tld","bob@domain.tld"]
+....
+
+Response codes:
+
+* 200: The list was successfully retrieved
+* 404: The base user does not exist
+
+Note: Delegation is only available on top of {backend-name} products and not implemented yet on top of JPA backends.
+
+=== Remove all delegated users of a base user
+
+....
+curl -XDELETE http://ip:port/users/baseUser/authorizedUsers
+....
+
+Response codes:
+
+* 200: Removal of the delegated users succeeded
+* 404: The base user does not exist
+
+Note: Delegation is only available on top of {backend-name} products and not implemented yet on top of JPA backends.
+
+=== Change a username
+
+....
+curl -XPOST http://ip:port/users/oldUser/rename/newUser?action=rename
+....
+
+Would migrate account data from `oldUser` to `newUser`.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Implemented migration steps are:
+
+- `ForwardUsernameChangeTaskStep`: creates forward from old user to new user and migrates existing forwards
+- `FilterUsernameChangeTaskStep`: migrates users filtering rules
+- `DelegationUsernameChangeTaskStep`: migrates delegations where the impacted user is either delegatee or delegator
+- `MailboxUsernameChangeTaskStep`: migrates mailboxes belonging to the old user to the account of the new user. It also
+migrates user's mailbox subscriptions.
+- `ACLUsernameChangeTaskStep`: migrates ACLs on mailboxes the migrated user has access to and updates subscriptions accordingly.
+- `QuotaUsernameChangeTaskStep`: migrates quotas user from old user to new user.
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Error in the request. Details can be found in the reported error. If you encounter the error "'oldUser' parameter should be an existing user," please note that this validation can be bypassed by specifying the `force` query parameter.
+
+The `fromStep` query parameter allows skipping previous steps, allowing to resume the username change from a failed step.
+
+The scheduled task will have the following type `UsernameChangeTask` and the following `additionalInformation`:
+
+....
+{
+ "type": "UsernameChangeTask",
+ "oldUser": "jessy.jones@domain.tld",
+ "newUser": "jessy.smith@domain.tld",
+ "status": {
+ "A": "DONE",
+ "B": "FAILED",
+ "C": "ABORTED"
+ },
+ "fromStep": null,
+ "timestamp": "2023-02-17T02:54:01.246477Z"
+}
+....
+
+Valid status includes:
+
+- `SKIPPED`: bypassed via `fromStep` setting
+- `WAITING`: Awaits execution
+- `IN_PROGRESS`: Currently executed
+- `FAILED`: Error encountered while executing this step. Check the logs.
+- `ABORTED`: Won't be executed because of previous step failures.
+
+=== Delete data of a user
+
+....
+curl -XPOST http://ip:port/users/usernameToBeUsed?action=deleteData
+....
+
+Would create a task that deletes data of the user.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning a task].
+
+Implemented deletion steps are:
+
+- `RecipientRewriteTableUserDeletionTaskStep`: deletes all rewriting rules related to this user.
+- `FilterUserDeletionTaskStep`: deletes all filters belonging to the user.
+- `DelegationUserDeletionTaskStep`: deletes all delegations from / to the user.
+- `MailboxUserDeletionTaskStep`: deletes mailboxes of this user, all ACLs of this user, as well as his subscriptions.
+- `WebPushUserDeletionTaskStep`: deletes push data registered for this user.
+- `IdentityUserDeletionTaskStep`: deletes identities registered for this user.
+- `VacationUserDeletionTaskStep`: deletes vacations registered for this user.
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Error in the request. Details can be found in the reported error.
+
+The `fromStep` query parameter allows skipping previous steps, allowing to resume the user data deletion from a failed step.
+
+The scheduled task will have the following type `DeleteUserDataTask` and the following `additionalInformation`:
+
+....
+{
+ "type": "DeleteUserDataTask",
+ "username": "jessy.jones@domain.tld",
+ "status": {
+ "A": "DONE",
+ "B": "FAILED",
+ "C": "ABORTED"
+ },
+ "fromStep": null,
+ "timestamp": "2023-02-17T02:54:01.246477Z"
+}
+....
+
+Valid status includes:
+
+- `SKIPPED`: bypassed via `fromStep` setting
+- `WAITING`: Awaits execution
+- `IN_PROGRESS`: Currently executed
+- `FAILED`: Error encountered while executing this step. Check the logs.
+- `ABORTED`: Won't be executed because of previous step failures.
+
+=== Retrieving the user identities
+
+....
+curl -XGET http://ip:port/users/{baseUser}/identities?default=true
+....
+
+API to get the list of identities of a user
+
+The response will look like:
+
+```
+[
+ {
+ "name":"identity name 1",
+ "email":"bob@domain.tld",
+ "id":"4c039533-75b9-45db-becc-01fb0e747aa8",
+ "mayDelete":true,
+ "textSignature":"textSignature 1",
+ "htmlSignature":"htmlSignature 1",
+ "sortOrder":1,
+ "bcc":[
+ {
+ "emailerName":"bcc name 1",
+ "mailAddress":"bcc1@domain.org"
+ }
+ ],
+ "replyTo":[
+ {
+ "emailerName":"reply name 1",
+ "mailAddress":"reply1@domain.org"
+ }
+ ]
+ }
+]
+```
+
+Query parameters:
+
+* default: (Optional) allows getting the default identity of a user. In order to do that: `default=true`
+
+Response codes:
+
+* 200: The list was successfully retrieved
+* 400: The user is invalid
+* 404: The user is unknown or the default identity can not be found.
+
+The optional `default` query parameter allows getting the default identity of a user.
+In order to do that: `default=true`
+
+The web-admin server will return `404` response code when the default identity can not be found.
+
+=== Creating a JMAP user identity
+
+API to create a new JMAP user identity
+....
+curl -XPOST http://ip:port/users/{username}/identities \
+-d '{
+ "name": "Bob",
+ "email": "bob@domain.tld",
+ "mayDelete": true,
+ "htmlSignature": "a html signature",
+ "textSignature": "a text signature",
+ "bcc": [{
+ "email": "boss2@domain.tld",
+ "name": "My Boss 2"
+ }],
+ "replyTo": [{
+ "email": "boss@domain.tld",
+ "name": "My Boss"
+ }],
+ "sortOrder": 0
+ }' \
+-H "Content-Type: application/json"
+....
+
+Response codes:
+
+* 201: The new identity was successfully created
+* 404: The username is unknown
+* 400: The payload is invalid
+
+Resource name ``username'' represents a valid user
+
+=== Updating a JMAP user identity
+
+API to update an existing JMAP user identity
+....
+curl -XPUT http://ip:port/users/{username}/identities/{identityId} \
+-d '{
+ "name": "Bob",
+ "htmlSignature": "a html signature",
+ "textSignature": "a text signature",
+ "bcc": [{
+ "email": "boss2@domain.tld",
+ "name": "My Boss 2"
+ }],
+ "replyTo": [{
+ "email": "boss@domain.tld",
+ "name": "My Boss"
+ }],
+ "sortOrder": 1
+ }' \
+-H "Content-Type: application/json"
+....
+
+Response codes:
+
+* 204: The identity was successfully updated
+* 404: The username is unknown
+* 400: The payload is invalid
+
+Resource name ``username'' represents a valid user
+Resource name ``identityId'' represents an existing user identity
+
+== Administrating vacation settings
+
+=== Get vacation settings
+
+....
+curl -XGET http://ip:port/vacation/usernameToBeUsed
+....
+
+Resource name `usernameToBeUsed` represents a valid user, hence it should
+match the criteria at xref:{xref-base}/configure/usersrepository.adoc[User Repositories documentation]
+
+The response will look like this:
+
+....
+{
+ "enabled": true,
+ "fromDate": "2021-09-20T10:00:00Z",
+ "toDate": "2021-09-27T18:00:00Z",
+ "subject": "Out of office",
+ "textBody": "I am on vacation, will be back soon.",
+ "htmlBody": "<p>I am on vacation, will be back soon.</p>"
+}
+....
+
+Response codes:
+
+* 200: The vacation settings were successfully retrieved
+* 404: The user name is unknown
+
+=== Update vacation settings
+
+....
+curl -XPOST http://ip:port/vacation/usernameToBeUsed
+....
+
+Request body must be a JSON structure as described above.
+
+If any field is not set in the request, the corresponding field in the existing vacation message is left unchanged.
+
+Response codes:
+
+* 204: The vacation settings were successfully updated
+* 404: The user name is unknown
+* 400: The payload is invalid
+
+=== Delete vacation settings
+
+....
+curl -XDELETE http://ip:port/vacation/usernameToBeUsed
+....
+
+For convenience, this disables and clears the existing vacation settings of the user.
+
+Response codes:
+
+* 204: The vacation settings were successfully disabled
+* 404: The user name is unknown
+
+== Administrating mailboxes
+
+=== All mailboxes
+
+Several actions can be performed on the server mailboxes.
+
+Request pattern is:
+
+....
+curl -XPOST /mailboxes?action={action1},...
+....
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Error in the request. Details can be found in the reported error.
+
+The kind of task scheduled depends on the action parameter. See below
+for details.
+
+
+==== Recomputing Global JMAP fast message view projection
+
+Message fast view projection stores message properties expected to be
+fast to fetch but are actually expensive to compute, in order for
+GetMessages operation to be fast to execute for these properties.
+
+These projection items are asynchronously computed on mailbox events.
+
+You can force the full projection recomputation by calling the following
+endpoint:
+
+....
+curl -XPOST /mailboxes?task=recomputeFastViewProjectionItems
+....
+
+Will schedule a task for recomputing the fast message view projection
+for all mailboxes.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+An admin can specify the concurrency that should be used when running
+the task:
+
+* `messagesPerSecond` rate at which messages should be processed, per
+second. Defaults to 10.
+
+This optional parameter must have a strictly positive integer as a value
+and be passed as query parameters.
+
+Example:
+
+....
+curl -XPOST /mailboxes?task=recomputeFastViewProjectionItems&messagesPerSecond=20
+....
+
+The scheduled task will have the following type
+`RecomputeAllFastViewProjectionItemsTask` and the following
+`additionalInformation`:
+
+....
+{
+ "type":"RecomputeAllPreviewsTask",
+ "processedUserCount": 3,
+ "processedMessageCount": 3,
+ "failedUserCount": 2,
+ "failedMessageCount": 1,
+ "runningOptions": {
+ "messagesPerSecond":20
+ }
+}
+....
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Error in the request. Details can be found in the reported error.
+
+==== Populate email query view
+
+Email query view is an optional projection to offload common JMAP `Email/query` requests used for listing mails on {backend-name}
+and not on the search index thus improving the overall reliability / performance on this operation.
+
+These projection items are asynchronously computed on mailbox events.
+
+You can populate this projection with the following request:
+
+....
+curl -XPOST /mailboxes?task=populateEmailQueryView
+....
+
+Will schedule a task for recomputing the fast message view projection
+for all mailboxes.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+An admin can specify the concurrency that should be used when running
+the task:
+
+* `messagesPerSecond` rate at which messages should be processed, per
+second. Defaults to 10.
+
+This optional parameter must have a strictly positive integer as a value
+and be passed as query parameters.
+
+Example:
+
+....
+curl -XPOST /mailboxes?task=populateEmailQueryView&messagesPerSecond=20
+....
+
+The scheduled task will have the following type
+`PopulateEmailQueryViewTask` and the following
+`additionalInformation`:
+
+....
+{
+ "type":"PopulateEmailQueryViewTask",
+ "processedUserCount": 3,
+ "processedMessageCount": 3,
+ "failedUserCount": 2,
+ "failedMessageCount": 1,
+ "runningOptions": {
+ "messagesPerSecond":20
+ }
+}
+....
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Error in the request. Details can be found in the reported error.
+
+==== Recomputing {backend-name} filtering projection
+
+You can force the reset of the {backend-name} filtering projection by calling the following
+endpoint:
+
+....
+curl -XPOST /mailboxes?task=populateFilteringProjection
+....
+
+Will schedule a task.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+The scheduled task will have the following type
+`PopulateFilteringProjectionTask` and the following
+`additionalInformation`:
+
+....
+{
+ "type":"PopulateFilteringProjectionTask",
+ "processedUserCount": 3,
+ "failedUserCount": 2
+}
+....
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Error in the request. Details can be found in the reported error.
+
+==== ReIndexing action
+
+Be also aware of the limits of this API:
+
+Warning: During the re-indexing, the result of search operations might
+be altered.
+
+Warning: Canceling this task should be considered unsafe as it will
+leave the currently reIndexed mailbox as partially indexed.
+
+Warning: While we have been trying to reduce the inconsistency window to
+a maximum (by keeping track of ongoing events), concurrent changes done
+during the reIndexing might be ignored.
+
+===== ReIndexing all mails
+
+....
+curl -XPOST http://ip:port/mailboxes?task=reIndex
+....
+
+Will schedule a task for reIndexing all the mails stored on this James
+server.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+An admin can specify the concurrency that should be used when running
+the task:
+
+* `messagesPerSecond` rate at which messages should be processed per
+second. Default is 50.
+
+This optional parameter must have a strictly positive integer as a value
+and be passed as query parameter.
+
+An admin can also specify the reindexing mode it wants to use when
+running the task:
+
+* `mode` the reindexing mode used. There are 2 modes for the moment:
+** `rebuildAll` allows to rebuild all indexes. This is the default mode.
+** `fixOutdated` will check for outdated indexed document and reindex
+only those.
+
+This optional parameter must be passed as query parameter.
+
+It’s good to note as well that there is a limitation with the
+`fixOutdated` mode. As we first collect metadata of stored messages to
+compare them with the ones in the index, a failed `expunged` operation
+might not be well corrected (as the message might not exist anymore but
+still be indexed).
+
+Example:
+
+ curl -XPOST http://ip:port/mailboxes?task=reIndex&messagesPerSecond=200&mode=rebuildAll
+
+The scheduled task will have the following type `full-reindexing` and
+the following `additionalInformation`:
+
+....
+{
+ "type":"full-reindexing",
+ "runningOptions":{
+ "messagesPerSecond":200,
+ "mode":"REBUILD_ALL"
+ },
+ "successfullyReprocessedMailCount":18,
+ "failedReprocessedMailCount": 3,
+ "mailboxFailures": ["12", "23" ],
+ "messageFailures": [
+ {
+ "mailboxId": "1",
+ "uids": [1, 36]
+ }]
+}
+....
+
+===== Fixing previously failed ReIndexing
+
+Will schedule a task for reIndexing all the mails which had failed to be
+indexed from the ReIndexingAllMails task.
+
+Given `bbdb69c9-082a-44b0-a85a-6e33e74287a5` being a `taskId` generated
+for a reIndexing tasks
+
+....
+curl -XPOST 'http://ip:port/mailboxes?task=reIndex&reIndexFailedMessagesOf=bbdb69c9-082a-44b0-a85a-6e33e74287a5'
+....
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+An admin can specify the concurrency that should be used when running
+the task:
+
+* `messagesPerSecond` rate at which messages should be processed per
+second. Default is 50.
+
+This optional parameter must have a strictly positive integer as a value
+and be passed as query parameter.
+
+An admin can also specify the reindexing mode it wants to use when
+running the task:
+
+* `mode` the reindexing mode used. There are 2 modes for the moment:
+** `rebuildAll` allows to rebuild all indexes. This is the default mode.
+** `fixOutdated` will check for outdated indexed document and reindex
+only those.
+
+This optional parameter must be passed as query parameter.
+
+It’s good to note as well that there is a limitation with the
+`fixOutdated` mode. As we first collect metadata of stored messages to
+compare them with the ones in the index, a failed `expunged` operation
+might not be well corrected (as the message might not exist anymore but
+still be indexed).
+
+Example:
+
+....
+curl -XPOST http://ip:port/mailboxes?task=reIndex&reIndexFailedMessagesOf=bbdb69c9-082a-44b0-a85a-6e33e74287a5&messagesPerSecond=200&mode=rebuildAll
+....
+
+The scheduled task will have the following type
+`error-recovery-indexation` and the following `additionalInformation`:
+
+....
+{
+ "type":"error-recovery-indexation",
+ "runningOptions":{
+ "messagesPerSecond":200,
+ "mode":"REBUILD_ALL"
+ },
+ "successfullyReprocessedMailCount":18,
+ "failedReprocessedMailCount": 3,
+ "mailboxFailures": ["12", "23" ],
+ "messageFailures": [{
+ "mailboxId": "1",
+ "uids": [1, 36]
+ }]
+}
+....
+
+===== Create missing parent mailboxes
+
+Will schedule a task for creating all the missing parent mailboxes in a hierarchical mailbox tree, which is the result
+of a partially failed rename operation of a child mailbox.
+
+....
+curl -XPOST http://ip:port/mailboxes?task=createMissingParents
+....
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Error in the request. Details can be found in the reported error.
+
+The scheduled task will have the following type `createMissingParents` and the following `additionalInformation`:
+
+....
+{
+ "type":"createMissingParents",
+ "created": ["1", "2" ],
+ "totalCreated": 2,
+ "failures": [],
+ "totalFailure": 0
+}
+....
+
+=== Single mailbox
+
+==== ReIndexing a mailbox mails
+
+....
+curl -XPOST http://ip:port/mailboxes/{mailboxId}?task=reIndex
+....
+
+Will schedule a task for reIndexing all the mails in one mailbox.
+
+Note that `mailboxId' path parameter needs to be a (implementation
+dependent) valid mailboxId.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+An admin can specify the concurrency that should be used when running
+the task:
+
+* `messagesPerSecond` rate at which messages should be processed per
+second. Default is 50.
+
+This optional parameter must have a strictly positive integer as a value
+and be passed as query parameter.
+
+An admin can also specify the reindexing mode it wants to use when
+running the task:
+
+* `mode` the reindexing mode used. There are 2 modes for the moment:
+** `rebuildAll` allows to rebuild all indexes. This is the default mode.
+** `fixOutdated` will check for outdated indexed document and reindex
+only those.
+
+This optional parameter must be passed as query parameter.
+
+It’s good to note as well that there is a limitation with the
+`fixOutdated` mode. As we first collect metadata of stored messages to
+compare them with the ones in the index, a failed `expunged` operation
+might not be well corrected (as the message might not exist anymore but
+still be indexed).
+
+Example:
+
+....
+curl -XPOST http://ip:port/mailboxes/{mailboxId}?task=reIndex&messagesPerSecond=200&mode=fixOutdated
+....
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Error in the request. Details can be found in the reported error.
+
+The scheduled task will have the following type `mailbox-reindexing` and
+the following `additionalInformation`:
+
+....
+{
+ "type":"mailbox-reindexing",
+ "runningOptions":{
+ "messagesPerSecond":200,
+ "mode":"FIX_OUTDATED"
+ },
+ "mailboxId":"{mailboxId}",
+ "successfullyReprocessedMailCount":18,
+ "failedReprocessedMailCount": 3,
+ "mailboxFailures": ["12"],
+ "messageFailures": [
+ {
+ "mailboxId": "1",
+ "uids": [1, 36]
+ }]
+}
+....
+
+Warning: During the re-indexing, the result of search operations might
+be altered.
+
+Warning: Canceling this task should be considered unsafe as it will
+leave the currently reIndexed mailbox as partially indexed.
+
+Warning: While we have been trying to reduce the inconsistency window to
+a maximum (by keeping track of ongoing events), concurrent changes done
+during the reIndexing might be ignored.
+
+include::{admin-mailboxes-extend}[]
+
+== Administrating Messages
+
+=== ReIndexing a single mail by messageId
+
+....
+curl -XPOST http://ip:port/messages/{messageId}?task=reIndex
+....
+
+Will schedule a task for reIndexing a single email in all the mailboxes
+containing it.
+
+Note that `messageId' path parameter needs to be a (implementation
+dependent) valid messageId.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Error in the request. Details can be found in the reported error.
+
+The scheduled task will have the following type `messageId-reindexing`
+and the following `additionalInformation`:
+
+....
+{
+ "messageId":"18"
+}
+....
+
+Warning: During the re-indexing, the result of search operations might
+be altered.
+
+=== Deleting old messages of all users
+
+*Note:*
+Consider enabling the xref:{xref-base}/configure/vault.adoc[Deleted Messages Vault]
+if you use this feature.
+
+Old messages tend to pile up in user INBOXes. An admin might want to delete
+these on behalf of the users, e.g. all messages older than 30 days:
+....
+curl -XDELETE http://ip:port/messages?olderThan=30d
+....
+
+link:#_endpoints_returning_a_task[More details about endpoints returning a task].
+
+The `olderThan` parameter should be expressed in the following format: `Nunit`.
+`N` should be strictly positive. `unit` could be either in the short form
+(`d`, `w`, `y` etc.), or in the long form (`days`, `weeks`, `months`, `years`).
+The default unit is `days`.
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Error in the request. Details can be found in the reported error.
+
+The scheduled task will have the type `ExpireMailboxTask` and the following `additionalInformation`:
+
+....
+{
+ "type": "ExpireMailboxTask",
+ "mailboxesExpired": 5,
+ "mailboxesFailed": 2,
+ "mailboxesProcessed": 10,
+ "messagesDeleted": 23
+}
+....
+
+To delete old mails from a different mailbox than INBOX, e.g. a mailbox
+named "Archived" :
+....
+curl -XDELETE http://ip:port/messages?mailbox=Archived&olderThan=30d
+....
+
+Since this is a somewhat expensive operation, the task is throttled to one user
+per second. You may speed it up via `usersPerSecond=10` for example. But keep
+in mind that a high rate might overwhelm your database or blob store.
+
+*Scanning search only:* (unsupported for Lucene and OpenSearch search implementations) +
+Some mail clients can add an `Expires` header (RFC 4021) to their messages.
+Instead of specifying an absolute age, you may choose to delete only such
+messages where the expiration date from this header lies in the past:
+....
+curl -XDELETE http://ip:port/messages?byExpiresHeader
+....
+In this case you should also add the xref:{xref-base}/configure/mailets.adoc[mailet]
+`Expires` to your mailet container, which can sanitize expiration date headers.
+
+include::{admin-messages-extend}[]
+
+== Administrating user mailboxes
+
+=== Creating a mailbox
+
+....
+curl -XPUT http://ip:port/users/{usernameToBeUsed}/mailboxes/{mailboxNameToBeCreated}
+....
+
+Resource name `usernameToBeUsed` should be an existing user Resource
+name `mailboxNameToBeCreated` should not be empty, nor contain % * characters, nor starting with #.
+
+Response codes:
+
+* 204: The mailbox now exists on the server
+* 400: Invalid mailbox name
+* 404: The user name does not exist. Note that this check can be bypassed by specifying the `force` query parameter.
+
+To create nested mailboxes, for instance a work mailbox inside the INBOX
+mailbox, people should use the . separator. The sample query is:
+
+....
+curl -XPUT http://ip:port/users/{usernameToBeUsed}/mailboxes/INBOX.work
+....
+
+=== Deleting a mailbox and its children
+
+....
+curl -XDELETE http://ip:port/users/{usernameToBeUsed}/mailboxes/{mailboxNameToBeDeleted}
+....
+
+Resource name `usernameToBeUsed` should be an existing user Resource
+name `mailboxNameToBeDeleted` should not be empty
+
+Response codes:
+
+* 204: The mailbox now does not exist on the server
+* 400: Invalid mailbox name
+* 404: The user name does not exist. Note that this check can be bypassed by specifying the `force` query parameter.
+
+=== Testing existence of a mailbox
+
+....
+curl -XGET http://ip:port/users/{usernameToBeUsed}/mailboxes/{mailboxNameToBeTested}
+....
+
+Resource name `usernameToBeUsed` should be an existing user Resource
+name `mailboxNameToBeTested` should not be empty
+
+Response codes:
+
+* 204: The mailbox exists
+* 400: Invalid mailbox name
+* 404: The user name does not exist, the mailbox does not exist
+
+=== Listing user mailboxes
+
+....
+curl -XGET http://ip:port/users/{usernameToBeUsed}/mailboxes
+....
+
+The answer looks like:
+
+....
+[{"mailboxName":"INBOX"},{"mailboxName":"outbox"}]
+....
+
+Resource name `usernameToBeUsed` should be an existing user
+
+Response codes:
+
+* 200: The mailboxes list was successfully retrieved
+* 404: The user name does not exist, the mailbox does not exist. Note that this check can be bypassed by specifying the `force` query parameter.
+
+
+=== Deleting user mailboxes
+
+....
+curl -XDELETE http://ip:port/users/{usernameToBeUsed}/mailboxes
+....
+
+Resource name `usernameToBeUsed` should be an existing user
+
+Response codes:
+
+* 204: The user does not have mailboxes anymore
+* 404: The user name does not exist. Note that this check can be bypassed by specifying the `force` query parameter.
+
+=== Exporting user mailboxes
+
+....
+curl -XPOST http://ip:port/users/{usernameToBeUsed}/mailboxes?action=export
+....
+
+Resource name `usernameToBeUsed` should be an existing user
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned
+* 404: The user name does not exist
+
+The scheduled task will have the following type `MailboxesExportTask`
+and the following `additionalInformation`:
+
+....
+{
+ "type":"MailboxesExportTask",
+ "timestamp":"2007-12-03T10:15:30Z",
+ "username": "user",
+ "stage": "STARTING"
+}
+....
+
+=== ReIndexing a user mails
+
+....
+curl -XPOST http://ip:port/users/{usernameToBeUsed}/mailboxes?task=reIndex
+....
+
+Will schedule a task for reIndexing all the mails in ``user@domain.com''
+mailboxes (encoded above).
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+An admin can specify the concurrency that should be used when running
+the task:
+
+* `messagesPerSecond` rate at which messages should be processed per
+second. Default is 50.
+
+This optional parameter must have a strictly positive integer as a value
+and be passed as query parameter.
+
+An admin can also specify the reindexing mode it wants to use when
+running the task:
+
+* `mode` the reindexing mode used. There are 2 modes for the moment:
+** `rebuildAll` allows to rebuild all indexes. This is the default mode.
+** `fixOutdated` will check for outdated indexed document and reindex
+only those.
+
+This optional parameter must be passed as query parameter.
+
+It’s good to note as well that there is a limitation with the
+`fixOutdated` mode. As we first collect metadata of stored messages to
+compare them with the ones in the index, a failed `expunged` operation
+might not be well corrected (as the message might not exist anymore but
+still be indexed).
+
+Example:
+
+....
+curl -XPOST http://ip:port/users/{usernameToBeUsed}/mailboxes?task=reIndex&messagesPerSecond=200&mode=fixOutdated
+....
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Error in the request. Details can be found in the reported error.
+
+The scheduled task will have the following type `user-reindexing` and
+the following `additionalInformation`:
+
+....
+{
+ "type":"user-reindexing",
+ "runningOptions":{
+ "messagesPerSecond":200,
+ "mode":"FIX_OUTDATED"
+ },
+ "user":"user@domain.com",
+ "successfullyReprocessedMailCount":18,
+ "failedReprocessedMailCount": 3,
+ "mailboxFailures": ["12", "23" ],
+ "messageFailures": [
+ {
+ "mailboxId": "1",
+ "uids": [1, 36]
+ }]
+}
+....
+
+Warning: During the re-indexing, the result of search operations might
+be altered.
+
+Warning: Canceling this task should be considered unsafe as it will
+leave the currently reIndexed mailbox as partially indexed.
+
+Warning: While we have been trying to reduce the inconsistency window to
+a maximum (by keeping track of ongoing events), concurrent changes done
+during the reIndexing might be ignored.
+
+=== Counting emails
+
+....
+curl -XGET http://ip:port/users/{usernameToBeUsed}/mailboxes/{mailboxName}/messageCount
+....
+
+Will return the total count of messages within the mailbox of that user.
+
+Resource name `usernameToBeUsed` should be an existing user.
+
+Resource name `mailboxName` should not be empty, nor contain `% *` characters, nor starting with `#`.
+
+Response codes:
+
+* 200: The number of emails in a given mailbox
+* 400: Invalid mailbox name
+* 404: Invalid get on user mailboxes. The `usernameToBeUsed` or `mailboxName` does not exist
+
+=== Counting unseen emails
+
+....
+curl -XGET http://ip:port/users/{usernameToBeUsed}/mailboxes/{mailboxName}/unseenMessageCount
+....
+
+Will return the total count of unseen messages within the mailbox of that user.
+
+Resource name `usernameToBeUsed` should be an existing user.
+
+Resource name `mailboxName` should not be empty, nor contain `% *` characters, nor starting with `#`.
+
+Response codes:
+
+* 200: The number of unseen emails in a given mailbox
+* 400: Invalid mailbox name
+* 404: Invalid get on user mailboxes. The `usernameToBeUsed` or `mailboxName` does not exist
+
+=== Clearing mailbox content
+
+....
+curl -XDELETE http://ip:port/users/{usernameToBeUsed}/mailboxes/{mailboxName}/messages
+....
+
+Will schedule a task for clearing all the mails in ``mailboxName`` mailbox of ``usernameToBeUsed``.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Resource name `usernameToBeUsed` should be an existing user.
+
+Resource name `mailboxName` should not be empty, nor contain `% *` characters, nor starting with `#`.
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Invalid mailbox name
+* 404: Invalid get on user mailboxes. The `username` or `mailboxName` does not exist
+
+The scheduled task will have the following type `ClearMailboxContentTask` and
+the following `additionalInformation`:
+
+....
+{
+ "mailboxName": "mbx1",
+ "messagesFailCount": 9,
+ "messagesSuccessCount": 10,
+ "timestamp": "2007-12-03T10:15:30Z",
+ "type": "ClearMailboxContentTask",
+ "username": "bob@domain.tld"
+}
+....
+
+=== Subscribing a user to all of its mailboxes
+
+....
+curl -XPOST http://ip:port/users/{usernameToBeUsed}/mailboxes?task=subscribeAll
+....
+
+Will schedule a task for subscribing a user to all of its mailboxes.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Most users are unaware of what an IMAP subscription is, nor how they can manage it. If the subscription list gets out
+of sync with the mailbox list, it could result in downgraded user experience (see MAILBOX-405). This task allows
+resetting the subscription list to the mailbox list on a per-user basis, thus working around the aforementioned issues.
+
+Response codes:
+
+- 201: Success. Corresponding task id is returned.
+- 404: No such user
+
+The scheduled task will have the following type `SubscribeAllTask` and the following `additionalInformation`:
+
+....
+{
+ "type":"SubscribeAllTask",
+ "username":"user@domain.com",
+ "subscribedCount":18,
+ "unsubscribedCount": 3
+}
+....
+
+=== Recomputing User JMAP fast message view projection
+
+This action is only available for backends supporting JMAP protocol.
+
+Message fast view projection stores message properties expected to be
+fast to fetch but are actually expensive to compute, in order for
+GetMessages operation to be fast to execute for these properties.
+
+These projection items are asynchronously computed on mailbox events.
+
+You can force the full projection recomputation by calling the following
+endpoint:
+
+....
+curl -XPOST /users/{usernameToBeUsed}/mailboxes?task=recomputeFastViewProjectionItems
+....
+
+Will schedule a task for recomputing the fast message view projection
+for all mailboxes of `usernameToBeUsed`.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+An admin can specify the concurrency that should be used when running
+the task:
+
+* `messagesPerSecond` rate at which messages should be processed, per
+second. Defaults to 10.
+
+This optional parameter must have a strictly positive integer as a value
+and be passed as query parameters.
+
+Example:
+
+....
+curl -XPOST /users/{usernameToBeUsed}/mailboxes?task=recomputeFastViewProjectionItems&messagesPerSecond=20
+....
+
+The scheduled task will have the following type
+`RecomputeUserFastViewProjectionItemsTask` and the following
+`additionalInformation`:
+
+....
+{
+ "type":"RecomputeUserFastViewProjectionItemsTask",
+ "username": "{usernameToBeUsed}",
+ "processedMessageCount": 3,
+ "failedMessageCount": 1,
+ "runningOptions": {
+ "messagesPerSecond":20
+ }
+}
+....
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Error in the request. Details can be found in the reported error.
+* 404: User not found.
+
+== Administrating quotas
+
+=== Administrating quotas by users
+
+==== Getting the quota for a user
+
+....
+curl -XGET http://ip:port/quota/users/{usernameToBeUsed}
+....
+
+Resource name `usernameToBeUsed` should be an existing user
+
+The answer is the details of the quota of that user.
+
+....
+{
+ "global": {
+ "count":252,
+ "size":242
+ },
+ "domain": {
+ "count":152,
+ "size":142
+ },
+ "user": {
+ "count":52,
+ "size":42
+ },
+ "computed": {
+ "count":52,
+ "size":42
+ },
+ "occupation": {
+ "size":13,
+ "count":21,
+ "ratio": {
+ "size":0.25,
+ "count":0.5,
+ "max":0.5
+ }
+ }
+}
+....
+
+* The `global` entry represent the quota limit allowed on this James
+server.
+* The `domain` entry represent the quota limit allowed for the user of
+that domain.
+* The `user` entry represent the quota limit allowed for this specific
+user.
+* The `computed` entry represent the quota limit applied for this user,
+resolved from the upper values.
+* The `occupation` entry represent the occupation of the quota for this
+user. This includes used count and size as well as occupation ratio
+(used / limit).
+
+Note that `quota` object can contain a fixed value, an empty value
+(null) or an unlimited value (-1):
+
+....
+{"count":52,"size":42}
+
+{"count":null,"size":null}
+
+{"count":52,"size":-1}
+....
+
+Response codes:
+
+* 200: The user’s quota was successfully retrieved
+* 404: The user does not exist
+
+==== Updating the quota for a user
+
+....
+curl -XPUT http://ip:port/quota/users/{usernameToBeUsed}
+....
+
+Resource name `usernameToBeUsed` should be an existing user
+
+The body can contain a fixed value, an empty value (null) or an
+unlimited value (-1):
+
+....
+{"count":52,"size":42}
+
+{"count":null,"size":null}
+
+{"count":52,"size":-1}
+....
+
+Response codes:
+
+* 204: The quota has been updated
+* 400: The body is not a positive integer neither an unlimited value
+(-1).
+* 404: The user does not exist
+
+==== Getting the quota count for a user
+
+....
+curl -XGET http://ip:port/quota/users/{usernameToBeUsed}/count
+....
+
+Resource name `usernameToBeUsed` should be an existing user
+
+The answer looks like:
+
+....
+52
+....
+
+Response codes:
+
+* 200: The user’s quota was successfully retrieved
+* 204: No quota count limit is defined at the user level for this user
+* 404: The user does not exist
+
+==== Updating the quota count for a user
+
+....
+curl -XPUT http://ip:port/quota/users/{usernameToBeUsed}/count
+....
+
+Resource name `usernameToBeUsed` should be an existing user
+
+The body can contain a fixed value or an unlimited value (-1):
+
+....
+52
+....
+
+Response codes:
+
+* 204: The quota has been updated
+* 400: The body is not a positive integer neither an unlimited value
+(-1).
+* 404: The user does not exist
+
+==== Deleting the quota count for a user
+
+....
+curl -XDELETE http://ip:port/quota/users/{usernameToBeUsed}/count
+....
+
+Resource name `usernameToBeUsed` should be an existing user
+
+Response codes:
+
+* 204: The quota has been updated to unlimited value.
+* 404: The user does not exist
+
+==== Getting the quota size for a user
+
+....
+curl -XGET http://ip:port/quota/users/{usernameToBeUsed}/size
+....
+
+Resource name `usernameToBeUsed` should be an existing user
+
+The answer looks like:
+
+....
+52
+....
+
+Response codes:
+
+* 200: The user’s quota was successfully retrieved
+* 204: No quota size limit is defined at the user level for this user
+* 404: The user does not exist
+
+==== Updating the quota size for a user
+
+....
+curl -XPUT http://ip:port/quota/users/{usernameToBeUsed}/size
+....
+
+Resource name `usernameToBeUsed` should be an existing user
+
+The body can contain a fixed value or an unlimited value (-1):
+
+....
+52
+....
+
+Response codes:
+
+* 204: The quota has been updated
+* 400: The body is not a positive integer neither an unlimited value
+(-1).
+* 404: The user does not exist
+
+==== Deleting the quota size for a user
+
+....
+curl -XDELETE http://ip:port/quota/users/{usernameToBeUsed}/size
+....
+
+Resource name `usernameToBeUsed` should be an existing user
+
+Response codes:
+
+* 204: The quota has been updated to unlimited value.
+* 404: The user does not exist
+
+==== Searching user by quota ratio
+
+....
+curl -XGET 'http://ip:port/quota/users?minOccupationRatio=0.8&maxOccupationRatio=0.99&limit=100&offset=200&domain=domain.com'
+....
+
+Will return:
+
+....
+[
+ {
+ "username":"user@domain.com",
+ "detail": {
+ "global": {
+ "count":252,
+ "size":242
+ },
+ "domain": {
+ "count":152,
+ "size":142
+ },
+ "user": {
+ "count":52,
+ "size":42
+ },
+ "computed": {
+ "count":52,
+ "size":42
+ },
+ "occupation": {
+ "size":48,
+ "count":21,
+ "ratio": {
+ "size":0.9230,
+ "count":0.5,
+ "max":0.9230
+ }
+ }
+ }
+ },
+ ...
+]
+....
+
+Where:
+
+* *minOccupationRatio* is a query parameter determining the minimum
+occupation ratio of users to be returned.
+* *maxOccupationRatio* is a query parameter determining the maximum
+occupation ratio of users to be returned.
+* *domain* is a query parameter determining the domain of users to be
+returned.
+* *limit* is a query parameter determining the maximum number of users
+to be returned.
+* *offset* is a query parameter determining the number of users to skip.
+
+Please note that users are alphabetically ordered on username.
+
+The response is a list of usernames, with attached quota details as
+defined link:#_getting_the_quota_for_a_user[here].
+
+Response codes:
+
+* 200: List of users had successfully been returned.
+* 400: Validation issues with parameters
+
+==== Recomputing current quotas for users
+
+....
+curl -XPOST /quota/users?task=RecomputeCurrentQuotas
+....
+
+Will recompute current quotas (count and size) for all users stored in
+James.
+
+James maintains per quota a projection for current quota count and size.
+As with any projection, it can go out of sync, leading to inconsistent
+results being returned to the client.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+An admin can specify the concurrency that should be used when running
+the task:
+
+* `usersPerSecond` rate at which users quotas should be reprocessed, per
+second. Defaults to 1.
+
+This optional parameter must have a strictly positive integer as a value
+and be passed as query parameters.
+
+An admin can select which quota component he wants to recompute:
+
+* `quotaComponent` component whose quota need to be reprocessed. It could be one of values: MAILBOX, SIEVE, JMAP_UPLOADS.
+
+The admin can select several quota components. If none are selected, quotas of all components will be recomputed.
+
+Example:
+
+....
+curl -XPOST /quota/users?task=RecomputeCurrentQuotas&usersPerSecond=20&quotaComponent=MAILBOX&quotaComponent=JMAP_UPLOADS
+....
+
+The scheduled task will have the following type
+`recompute-current-quotas` and the following `additionalInformation`:
+
+....
+{
+ "type":"recompute-current-quotas",
+ "recomputeSingleQuotaComponentResults": [
+ {
+ "quotaComponent": "MAILBOX",
+ "processedIdentifierCount": 3,
+ "failedIdentifiers": ["#private&bob@localhost"]
+ },
+ {
+ "quotaComponent": "JMAP_UPLOADS",
+ "processedIdentifierCount": 3,
+ "failedIdentifiers": ["bob@localhost"]
+ }
+ ],
+ "runningOptions": {
+ "usersPerSecond":20
+ }
+}
+....
+
+*WARNING*: this task does not take into account concurrent modifications
+upon a single current quota re-computation. Rerunning the task will
+_eventually_ provide the consistent result.
+
+=== Administrating quotas by domains
+
+==== Getting the quota for a domain
+
+....
+curl -XGET http://ip:port/quota/domains/{domainToBeUsed}
+....
+
+Resource name `domainToBeUsed` should be an existing domain. For
+example:
+
+....
+curl -XGET http://ip:port/quota/domains/james.org
+....
+
+The answer will detail the default quota applied to users belonging to
+that domain:
+
+....
+{
+ "global": {
+ "count":252,
+ "size":null
+ },
+ "domain": {
+ "count":null,
+ "size":142
+ },
+ "computed": {
+ "count":252,
+ "size":142
+ }
+}
+....
+
+* The `global` entry represents the quota limit defined on this James
+server by default.
+* The `domain` entry represents the quota limit allowed for the user of
+that domain by default.
+* The `computed` entry represents the quota limit applied for the users
+of that domain, by default, resolved from the upper values.
+
+Note that `quota` object can contain a fixed value, an empty value
+(null) or an unlimited value (-1):
+
+....
+{"count":52,"size":42}
+
+{"count":null,"size":null}
+
+{"count":52,"size":-1}
+....
+
+Response codes:
+
+* 200: The domain’s quota was successfully retrieved
+* 404: The domain does not exist
+* 405: Domain Quota configuration not supported when virtual hosting is
+deactivated.
+
+==== Updating the quota for a domain
+
+....
+curl -XPUT http://ip:port/quota/domains/{domainToBeUsed}
+....
+
+Resource name `domainToBeUsed` should be an existing domain.
+
+The body can contain a fixed value, an empty value (null) or an
+unlimited value (-1):
+
+....
+{"count":52,"size":42}
+
+{"count":null,"size":null}
+
+{"count":52,"size":-1}
+....
+
+Response codes:
+
+* 204: The quota has been updated
+* 400: The body is not a positive integer neither an unlimited value
+(-1).
+* 404: The domain does not exist
+* 405: Domain Quota configuration not supported when virtual hosting is
+deactivated.
+
+==== Getting the quota count for a domain
+
+....
+curl -XGET http://ip:port/quota/domains/{domainToBeUsed}/count
+....
+
+Resource name `domainToBeUsed` should be an existing domain.
+
+The answer looks like:
+
+....
+52
+....
+
+Response codes:
+
+* 200: The domain’s quota was successfully retrieved
+* 204: No quota count limit is defined at the domain level for this
+domain
+* 404: The domain does not exist
+* 405: Domain Quota configuration not supported when virtual hosting is
+deactivated.
+
+==== Updating the quota count for a domain
+
+....
+curl -XPUT http://ip:port/quota/domains/{domainToBeUsed}/count
+....
+
+Resource name `domainToBeUsed` should be an existing domain.
+
+The body can contain a fixed value or an unlimited value (-1):
+
+....
+52
+....
+
+Response codes:
+
+* 204: The quota has been updated
+* 400: The body is not a positive integer neither an unlimited value
+(-1).
+* 404: The domain does not exist
+* 405: Domain Quota configuration not supported when virtual hosting is
+deactivated.
+
+==== Deleting the quota count for a domain
+
+....
+curl -XDELETE http://ip:port/quota/domains/{domainToBeUsed}/count
+....
+
+Resource name `domainToBeUsed` should be an existing domain.
+
+Response codes:
+
+* 204: The quota has been updated to unlimited value.
+* 404: The domain does not exist
+* 405: Domain Quota configuration not supported when virtual hosting is
+deactivated.
+
+==== Getting the quota size for a domain
+
+....
+curl -XGET http://ip:port/quota/domains/{domainToBeUsed}/size
+....
+
+Resource name `domainToBeUsed` should be an existing domain.
+
+The answer looks like:
+
+....
+52
+....
+
+Response codes:
+
+* 200: The domain’s quota was successfully retrieved
+* 204: No quota size limit is defined at the domain level for this
+domain
+* 404: The domain does not exist
+* 405: Domain Quota configuration not supported when virtual hosting is
+deactivated.
+
+==== Updating the quota size for a domain
+
+....
+curl -XPUT http://ip:port/quota/domains/{domainToBeUsed}/size
+....
+
+Resource name `domainToBeUsed` should be an existing domain.
+
+The body can contain a fixed value or an unlimited value (-1):
+
+....
+52
+....
+
+Response codes:
+
+* 204: The quota has been updated
+* 400: The body is not a positive integer neither an unlimited value
+(-1).
+* 404: The domain does not exist
+* 405: Domain Quota configuration not supported when virtual hosting is
+deactivated.
+
+==== Deleting the quota size for a domain
+
+....
+curl -XDELETE http://ip:port/quota/domains/{domainToBeUsed}/size
+....
+
+Resource name `domainToBeUsed` should be an existing domain.
+
+Response codes:
+
+* 204: The quota has been updated to unlimited value.
+* 404: The domain does not exist
+
+=== Administrating global quotas
+
+==== Getting the global quota
+
+....
+curl -XGET http://ip:port/quota
+....
+
+The answer is the details of the global quota.
+
+....
+{
+ "count":252,
+ "size":242
+}
+....
+
+Note that `quota` object can contain a fixed value, an empty value
+(null) or an unlimited value (-1):
+
+....
+{"count":52,"size":42}
+
+{"count":null,"size":null}
+
+{"count":52,"size":-1}
+....
+
+Response codes:
+
+* 200: The quota was successfully retrieved
+
+==== Updating global quota
+
+....
+curl -XPUT http://ip:port/quota
+....
+
+The body can contain a fixed value, an empty value (null) or an
+unlimited value (-1):
+
+....
+{"count":52,"size":42}
+
+{"count":null,"size":null}
+
+{"count":52,"size":-1}
+....
+
+Response codes:
+
+* 204: The quota has been updated
+* 400: The body is not a positive integer neither an unlimited value
+(-1).
+
+==== Getting the global quota count
+
+....
+curl -XGET http://ip:port/quota/count
+....
+
+No resource name is needed: this endpoint addresses the global quota count limit.
+
+The answer looks like:
+
+....
+52
+....
+
+Response codes:
+
+* 200: The quota was successfully retrieved
+* 204: No quota count limit is defined at the global level
+
+==== Updating the global quota count
+
+....
+curl -XPUT http://ip:port/quota/count
+....
+
+The body can contain a fixed value or an unlimited value (-1):
+
+....
+52
+....
+
+Response codes:
+
+* 204: The quota has been updated
+* 400: The body is not a positive integer neither an unlimited value
+(-1).
+
+==== Deleting the global quota count
+
+....
+curl -XDELETE http://ip:port/quota/count
+....
+
+Response codes:
+
+* 204: The quota has been updated to unlimited value.
+
+==== Getting the global quota size
+
+....
+curl -XGET http://ip:port/quota/size
+....
+
+The answer looks like:
+
+....
+52
+....
+
+Response codes:
+
+* 200: The quota was successfully retrieved
+* 204: No quota size limit is defined at the global level
+
+==== Updating the global quota size
+
+....
+curl -XPUT http://ip:port/quota/size
+....
+
+The body can contain a fixed value or an unlimited value (-1):
+
+....
+52
+....
+
+Response codes:
+
+* 204: The quota has been updated
+* 400: The body is not a positive integer neither an unlimited value
+(-1).
+
+==== Deleting the global quota size
+
+....
+curl -XDELETE http://ip:port/quota/size
+....
+
+Response codes:
+
+* 204: The quota has been updated to unlimited value.
+
+=== Administrating Sieve quotas
+
+Some limitations on space Users Sieve script can occupy can be
+configured by default, and overridden by user.
+
+==== Retrieving global sieve quota
+
+This endpoints allows to retrieve the global Sieve quota, which will be
+users default:
+
+....
+curl -XGET http://ip:port/sieve/quota/default
+....
+
+Will return the bytes count allowed by user per default on this server.
+
+....
+102400
+....
+
+Response codes:
+
+* 200: Request is a success and the value is returned
+* 204: No default quota is being configured
+
+==== Updating global sieve quota
+
+This endpoints allows to update the global Sieve quota, which will be
+users default:
+
+....
+curl -XPUT http://ip:port/sieve/quota/default
+....
+
+With the body being the bytes count allowed by user per default on this
+server.
+
+....
+102400
+....
+
+Response codes:
+
+* 204: Operation succeeded
+* 400: Invalid payload
+
+==== Removing global sieve quota
+
+This endpoints allows to remove the global Sieve quota. There will no
+more be users default:
+
+....
+curl -XDELETE http://ip:port/sieve/quota/default
+....
+
+Response codes:
+
+* 204: Operation succeeded
+
+==== Retrieving user sieve quota
+
+This endpoints allows to retrieve the Sieve quota of a user, which will
+be this users quota:
+
+....
+curl -XGET http://ip:port/sieve/quota/users/user@domain.com
+....
+
+Will return the bytes count allowed for this user.
+
+....
+102400
+....
+
+Response codes:
+
+* 200: Request is a success and the value is returned
+* 204: No quota is being configured for this user
+
+==== Updating user sieve quota
+
+This endpoints allows to update the Sieve quota of a user, which will be
+users default:
+
+....
+curl -XPUT http://ip:port/sieve/quota/users/user@domain.com
+....
+
+With the body being the bytes count allowed for this user on this
+server.
+
+....
+102400
+....
+
+Response codes:
+
+* 204: Operation succeeded
+* 400: Invalid payload
+
+==== Removing user sieve quota
+
+This endpoints allows to remove the Sieve quota of a user. There will no
+more quota for this user:
+
+....
+curl -XDELETE http://ip:port/sieve/quota/users/user@domain.com
+....
+
+Response codes:
+
+* 204: Operation succeeded
+
+== Administrating Jmap Uploads
+
+=== Cleaning upload repository
+
+....
+curl -XDELETE http://ip:port/jmap/uploads?scope=expired
+....
+
+Will schedule a task for clearing expired upload entries.
+
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+
+Query parameter `scope` is required and must have the value `expired`.
+
+Response codes:
+
+* 201: Success. Corresponding task id is returned.
+* 400: Scope invalid
+
+The scheduled task will have the following type `UploadRepositoryCleanupTask` and
+the following `additionalInformation`:
+
+....
+{
+ "scope": "expired",
+ "timestamp": "2007-12-03T10:15:30Z",
+ "type": "UploadRepositoryCleanupTask"
+}
+....
+
+== Running blob garbage collection
+
+When deduplication is enabled one needs to explicitly run a garbage collection in order to delete no longer referenced
+blobs.
+
+To do so:
+
+....
+curl -XDELETE http://ip:port/blobs?scope=unreferenced
+....
+
+link:#_endpoints_returning_a_task[More details about endpoints returning a task].
+
+Additional parameters include Bloom filter tuning parameters:
+
+- *associatedProbability*: Allow to define the targeted false positive rate. Note that subsequent runs do not have the
+same false-positives. Defaults to `0.01`.
+- *expectedBlobCount*: Expected count of blobs used to size the bloom filters. Defaults to `1.000.000`.
+
+These settings directly impacts the memory footprint of the bloom filter. link:https://hur.st/bloomfilter/[Simulators] can
+help understand those parameters.
+
+The created task has the following additional information:
+
+....
+{
+ "referenceSourceCount": 3456,
+ "blobCount": 5678,
+ "gcedBlobCount": 1234,
+ "bloomFilterExpectedBlobCount": 10000,
+ "bloomFilterAssociatedProbability": 0.01
+}
+....
+
+Where:
+
+- *bloomFilterExpectedBlobCount* correspond to the supplied *expectedBlobCount* query parameter.
+- *bloomFilterAssociatedProbability* correspond to the supplied *associatedProbability* query parameter.
+- *referenceSourceCount* is the count of distinct blob references encountered while populating the bloom filter.
+- *blobCount* is the count of blobs tried against the bloom filter. This value can be used to better size the bloom
+filter in later runs.
+- *gcedBlobCount* is the count of blobs that were garbage collected.
+
+== Administrating Recipient rewriting
+
+=== Address group
+
+You can use *webadmin* to define address groups.
+
+When a specific email is sent to the group mail address, every group
+member will receive it.
+
+Note that the group mail address is virtual: it does not correspond to
+an existing user.
+
+This feature uses xref:{xref-base}/architecture/index.adoc#_recipient_rewrite_tables[Recipients rewrite table]
+and requires the
+https://github.com/apache/james-project/blob/master/server/mailet/mailets/src/main/java/org/apache/james/transport/mailets/RecipientRewriteTable.java[RecipientRewriteTable
+mailet] to be configured.
+
+Note that email addresses are restricted to ASCII character set. Mail
+addresses not matching this criteria will be rejected.
+
+==== Listing groups
+
+....
+curl -XGET http://ip:port/address/groups
+....
+
+Will return the groups as a list of JSON Strings representing mail
+addresses. For instance:
+
+....
+["group1@domain.com", "group2@domain.com"]
+....
+
+Response codes:
+
+* 200: Success
+
+==== Listing members of a group
+
+....
+curl -XGET http://ip:port/address/groups/group@domain.com
+....
+
+Will return the group members as a list of JSON Strings representing
+mail addresses. For instance:
+
+....
+["member1@domain.com", "member2@domain.com"]
+....
+
+Response codes:
+
+* 200: Success
+* 400: Group structure is not valid
+* 404: The group does not exist
+
+==== Adding a group member
+
+....
+curl -XPUT http://ip:port/address/groups/group@domain.com/member@domain.com
+....
+
+Will add member@domain.com to group@domain.com, creating the group if
+needed
+
+Response codes:
+
+* 204: Success
+* 400: Group structure or member is not valid
+* 400: Domain in the source is not managed by the DomainList
+* 409: Requested group address is already used for another purpose
+* 409: The addition of the group member would lead to a loop and thus cannot be performed
+
+==== Removing a group member
+
+....
+curl -XDELETE http://ip:port/address/groups/group@domain.com/member@domain.com
+....
+
+Will remove member@domain.com from group@domain.com, removing the group
+if group is empty after deletion
+
+Response codes:
+
+* 204: Success
+* 400: Group structure or member is not valid
+
+=== Address forwards
+
+You can use *webadmin* to define address forwards.
+
+When a specific email is sent to the base mail address, every forward
+destination addresses will receive it.
+
+Please note that the base address can optionally be part of the forward
+destinations. In that case, the base recipient also receives a copy of the
+mail. Otherwise it is omitted.
+
+Forwards can be defined for existing users. It then defers from
+``groups''.
+
+This feature uses xref:{xref-base}/architecture/index.adoc#_recipient_rewrite_tables[Recipients rewrite table]
+and requires the
+https://github.com/apache/james-project/blob/master/server/mailet/mailets/src/main/java/org/apache/james/transport/mailets/RecipientRewriteTable.java[RecipientRewriteTable
+mailet] to be configured.
+
+Note that email addresses are restricted to ASCII character set. Mail
+addresses not matching this criteria will be rejected.
+
+==== Listing Forwards
+
+....
+curl -XGET http://ip:port/address/forwards
+....
+
+Will return the users having forwards configured as a list of JSON
+Strings representing mail addresses. For instance:
+
+....
+["user1@domain.com", "user2@domain.com"]
+....
+
+Response codes:
+
+* 200: Success
+
+==== Listing destinations in a forward
+
+....
+curl -XGET http://ip:port/address/forwards/user@domain.com
+....
+
+Will return the destination addresses of this forward as a list of JSON
+Strings representing mail addresses. For instance:
+
+....
+[
+ {"mailAddress":"destination1@domain.com"},
+ {"mailAddress":"destination2@domain.com"}
+]
+....
+
+Response codes:
+
+* 200: Success
+* 400: Forward structure is not valid
+* 404: The given user does not have forwards or does not exist
+
+==== Adding a new destination to a forward
+
+....
+curl -XPUT http://ip:port/address/forwards/user@domain.com/targets/destination@domain.com
+....
+
+Will add destination@domain.com to user@domain.com, creating the forward
+if needed
+
+Response codes:
+
+* 204: Success
+* 400: Forward structure or member is not valid
+* 400: Domain in the source is not managed by the DomainList
+* 404: Requested forward address does not match an existing user
+* 409: The creation of the forward would lead to a loop and thus cannot be performed
+
+==== Removing a destination of a forward
+
+....
+curl -XDELETE http://ip:port/address/forwards/user@domain.com/targets/destination@domain.com
+....
+
+Will remove destination@domain.com from user@domain.com, removing the
+forward if forward is empty after deletion
+
+Response codes:
+
+* 204: Success
+* 400: Forward structure or member is not valid
+
+=== Address aliases
+
+You can use *webadmin* to define aliases for an user.
+
+When a specific email is sent to the alias address, the destination
+address of the alias will receive it.
+
+Aliases can be defined for existing users.
+
+This feature uses xref:{xref-base}/architecture/index.adoc#_recipient_rewrite_tables[Recipients rewrite table]
+and requires the
+https://github.com/apache/james-project/blob/master/server/mailet/mailets/src/main/java/org/apache/james/transport/mailets/RecipientRewriteTable.java[RecipientRewriteTable
+mailet] to be configured.
+
+Note that email addresses are restricted to ASCII character set. Mail
+addresses not matching this criteria will be rejected.
+
+==== Listing users with aliases
+
+....
+curl -XGET http://ip:port/address/aliases
+....
+
+Will return the users having aliases configured as a list of JSON
+Strings representing mail addresses. For instance:
+
+....
+["user1@domain.com", "user2@domain.com"]
+....
+
+Response codes:
+
+* 200: Success
+
+==== Listing alias sources of an user
+
+....
+curl -XGET http://ip:port/address/aliases/user@domain.com
+....
+
+Will return the aliases of this user as a list of JSON Strings
+representing mail addresses. For instance:
+
+....
+[
+ {"source":"alias1@domain.com"},
+ {"source":"alias2@domain.com"}
+]
+....
+
+Response codes:
+
+* 200: Success
+* 400: Alias structure is not valid
+
+==== Adding a new alias to an user
+
+....
+curl -XPUT http://ip:port/address/aliases/user@domain.com/sources/alias@domain.com
+....
+
+Will add alias@domain.com to user@domain.com, creating the alias if
+needed
+
+Response codes:
+
+* 204: OK
+* 400: Alias structure or member is not valid
+* 400: Source and destination can’t be the same!
+* 400: Domain in the destination or source is not managed by the
+DomainList
+* 409: The alias source exists as an user already
+* 409: The addition of the alias would lead to a loop and thus cannot be performed
+
+==== Removing an alias of an user
+
+....
+curl -XDELETE http://ip:port/address/aliases/user@domain.com/sources/alias@domain.com
+....
+
+Will remove alias@domain.com from user@domain.com, removing the alias if
+needed
+
+Response codes:
+
+* 204: OK
+* 400: Alias structure or member is not valid
+
+=== Domain mappings
+
+You can use *webadmin* to define domain mappings.
+
+Given a configured source (from) domain and a destination (to) domain,
+when an email is sent to an address belonging to the source domain, then
+the domain part of this address is overwritten, the destination domain
+is then used. A source (from) domain can have many destination (to)
+domains.
+
+For example: with a source domain `james.apache.org` maps to two
+destination domains `james.org` and `apache-james.org`, when a mail is
+sent to `admin@james.apache.org`, then it will be routed to
+`admin@james.org` and `admin@apache-james.org`
+
+This feature uses xref:{xref-base}/architecture/index.adoc#_recipient_rewrite_tables[Recipients rewrite table]
+and requires the
+https://github.com/apache/james-project/blob/master/server/mailet/mailets/src/main/java/org/apache/james/transport/mailets/RecipientRewriteTable.java[RecipientRewriteTable
+mailet] to be configured.
+
+Note that email addresses are restricted to ASCII character set. Mail
+addresses not matching this criteria will be rejected.
+
+==== Listing all domain mappings
+
+....
+curl -XGET http://ip:port/domainMappings
+....
+
+Will return all configured domain mappings
+
+....
+{
+ "firstSource.org" : ["firstDestination.com", "secondDestination.net"],
+ "secondSource.com" : ["thirdDestination.com", "fourthDestination.net"]
+}
+....
+
+Response codes:
+
+* 200: OK
+
+==== Listing all destination domains for a source domain
+
+....
+curl -XGET http://ip:port/domainMappings/sourceDomain.tld
+....
+
+With `sourceDomain.tld` as the value passed to `fromDomain` resource
+name, the API will return all destination domains configured to that
+domain
+
+....
+["firstDestination.com", "secondDestination.com"]
+....
+
+Response codes:
+
+* 200: OK
+* 400: The `fromDomain` resource name is invalid
+* 404: The `fromDomain` resource name is not found
+
+==== Adding a domain mapping
+
+....
+curl -XPUT http://ip:port/domainMappings/sourceDomain.tld
+....
+
+Body:
+
+....
+destination.tld
+....
+
+With `sourceDomain.tld` as the value passed to `fromDomain` resource
+name, the API will add a destination domain specified in the body to
+that domain
+
+Response codes:
+
+* 204: OK
+* 400: The `fromDomain` resource name is invalid
+* 400: The destination domain specified in the body is invalid
+
+Be aware that no checks to find possible loops that would result of this creation will be performed.
+
+==== Removing a domain mapping
+
+....
+curl -XDELETE http://ip:port/domainMappings/sourceDomain.tld
+....
+
+Body:
+
+....
+destination.tld
+....
+
+With `sourceDomain.tld` as the value passed to `fromDomain` resource
+name, the API will remove a destination domain specified in the body
+mapped to that domain
+
+Response codes:
+
+* 204: OK
+* 400: The `fromDomain` resource name is invalid
+* 400: The destination domain specified in the body is invalid
+
+=== Regex mapping
+
+You can use *webadmin* to create regex mappings.
+
+A regex mapping contains a mapping source and a Java Regular Expression
+(regex) in String as the mapping value. Every time a mail contains a
+recipient matching the mapping source, that mail will be re-routed to a
+new recipient address rewritten by the regex.
+
+This feature uses xref:{xref-base}/architecture/index.adoc#_recipient_rewrite_tables[Recipients rewrite table]
+and requires the
+https://github.com/apache/james-project/blob/master/server/mailet/mailets/src/main/java/org/apache/james/transport/mailets/RecipientRewriteTable.java[RecipientRewriteTable
+API] to be configured.
+
+==== Adding a regex mapping
+
+....
+POST /mappings/regex/mappingSource/targets/regex
+....
+
+Where:
+
+* the `mappingSource` is the path parameter represents for the Regex
+Mapping mapping source
+* the `regex` is the path parameter represents for the Regex Mapping
+regex
+
+The route will add a regex mapping made from `mappingSource` and `regex`
+to RecipientRewriteTable.
+
+Example:
+
+....
+curl -XPOST http://ip:port/mappings/regex/james@domain.tld/targets/james@.*:james-intern@james.org
+....
+
+Response codes:
+
+* 204: Mapping added successfully.
+* 400: Invalid `mappingSource` path parameter.
+* 400: Invalid `regex` path parameter.
+
+Be aware that no checks to find possible loops that would result of this creation will be performed.
+
+==== Removing a regex mapping
+
+....
+DELETE /mappings/regex/{mappingSource}/targets/{regex}
+....
+
+Where:
+
+* the `mappingSource` is the path parameter representing the Regex
+Mapping mapping source
+* the `regex` is the path parameter representing the Regex Mapping regex
+
+The route will remove the regex mapping made from `regex` from the
+mapping source `mappingSource` to RecipientRewriteTable.
+
+Example:
+
+....
+curl -XDELETE http://ip:port/mappings/regex/james@domain.tld/targets/[O_O]:james-intern@james.org
+....
+
+Response codes:
+
+* 204: Mapping deleted successfully.
+* 400: Invalid `mappingSource` path parameter.
+* 400: Invalid `regex` path parameter.
+
+=== Address Mappings
+
+You can use *webadmin* to define address mappings.
+
+When a specific email is sent to the base mail address, every
+destination addresses will receive it.
+
+This feature uses xref:{xref-base}/architecture/index.adoc#_recipient_rewrite_tables[Recipients rewrite table]
+and requires the
+https://github.com/apache/james-project/blob/master/server/mailet/mailets/src/main/java/org/apache/james/transport/mailets/RecipientRewriteTable.java[RecipientRewriteTable
+mailet] to be configured.
+
+Note that email addresses are restricted to ASCII character set. Mail
+addresses not matching this criteria will be rejected.
+
+Please use address mappings with caution, as it’s not a typed address.
+If you know the type of your address (forward, alias, domain, group,
+etc), prefer using the corresponding routes to those types.
+
+Here are the following actions available on address mappings:
+
+==== Add an address mapping
+
+....
+curl -XPOST http://ip:port/mappings/address/{mappingSource}/targets/{destinationAddress}
+....
+
+Add an address mapping to the Recipients rewrite table
+Mapping source is the value of \{mappingSource} Mapping destination is
+the value of \{destinationAddress} Type of mapping destination is
+Address
+
+Response codes:
+
+* 204: Action successfully performed
+* 400: Invalid parameters
+* 409: The addition of the address mapping would lead to a loop and thus cannot be performed
+
+==== Remove an address mapping
+
+....
+curl -XDELETE http://ip:port/mappings/address/{mappingSource}/targets/{destinationAddress}
+....
+
+* Remove an address mapping from the Recipients rewrite table
+* Mapping source is the value of `mappingSource`
+* Mapping destination is the value of `destinationAddress`
+* Type of mapping destination is Address
+
+Response codes:
+
+* 204: Action successfully performed
+* 400: Invalid parameters
+
+=== List all mappings
+
+....
+curl -XGET http://ip:port/mappings
+....
+
+Get all mappings from the
+xref:{xref-base}/architecture/index.adoc#_recipient_rewrite_tables[Recipients rewrite table].
+
+Response body:
+
+....
+{
+ "alias@domain.tld": [
+ {
+ "type": "Alias",
+ "mapping": "user@domain.tld"
+ },
+ {
+ "type": "Group",
+ "mapping": "group-user@domain.tld"
+ }
+ ],
+ "aliasdomain.tld": [
+ {
+ "type": "Domain",
+ "mapping": "realdomain.tld"
+ }
+ ],
+ "group@domain.tld": [
+ {
+ "type": "Address",
+ "mapping": "user@domain.tld"
+ }
+ ]
+}
+....
+
+Response code:
+
+* 200: OK
+
+=== Listing User Mappings
+
+This endpoint allows receiving all mappings of a corresponding user.
+
+....
+curl -XGET http://ip:port/mappings/user/{userAddress}
+....
+
+Return all mappings of a user where:
+
+* `userAddress`: is the selected user
+
+Response body:
+
+....
+[
+ {
+ "type": "Address",
+ "mapping": "user123@domain.tld"
+ },
+ {
+ "type": "Alias",
+ "mapping": "aliasuser123@domain.tld"
+ },
+ {
+ "type": "Group",
+ "mapping": "group123@domain.tld"
+ }
+]
+....
+
+Response codes:
+
+* 200: OK
+* 400: Invalid parameter value
+
+== Administrating mail repositories
+
+=== Create a mail repository
+
+....
+curl -XPUT http://ip:port/mailRepositories/{encodedPathOfTheRepository}?protocol={someProtocol}
+....
+
+Resource name `encodedPathOfTheRepository` should be the resource path
+of the created mail repository. Example:
+
+....
+curl -XPUT http://ip:port/mailRepositories/mailRepo?protocol=file
+....
+
+Response codes:
+
+* 204: The repository is created
+
+=== Listing mail repositories
+
+....
+curl -XGET http://ip:port/mailRepositories
+....
+
+The answer looks like:
+
+....
+[
+ {
+ "repository": "var/mail/error/",
+ "path": "var%2Fmail%2Ferror%2F"
+ },
+ {
+ "repository": "var/mail/relay-denied/",
+ "path": "var%2Fmail%2Frelay-denied%2F"
+ },
+ {
+ "repository": "var/mail/spam/",
+ "path": "var%2Fmail%2Fspam%2F"
+ },
+ {
+ "repository": "var/mail/address-error/",
+ "path": "var%2Fmail%2Faddress-error%2F"
+ }
+]
+....
+
+You can use `path`, the encoded URL of the repository, to access it in
+later requests.
+
+Response codes:
+
+* 200: The list of mail repositories
+
+=== Getting additional information for a mail repository
+
+....
+curl -XGET http://ip:port/mailRepositories/{encodedPathOfTheRepository}
+....
+
+Resource name `encodedPathOfTheRepository` should be the resource path
+of an existing mail repository. Example:
+
+....
+curl -XGET http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F
+....
+
+The answer looks like:
+
+....
+{
+ "repository": "var/mail/error/",
+ "path": "mail%2Ferror%2F",
+ "size": 243
+}
+....
+
+Response codes:
+
+* 200: Additional information for that repository
+* 404: This repository can not be found
+
+=== Listing mails contained in a mail repository
+
+....
+curl -XGET http://ip:port/mailRepositories/{encodedPathOfTheRepository}/mails
+....
+
+Resource name `encodedPathOfTheRepository` should be the resource path
+of an existing mail repository. Example:
+
+....
+curl -XGET http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails
+....
+
+The answer will contain all the mail keys of that repository.
+
+....
+[
+ "mail-key-1",
+ "mail-key-2",
+ "mail-key-3"
+]
+....
+
+Note that this can be used to read mail details.
+
+You can pass additional URL parameters to this call in order to limit
+the output:
+
+- A limit: no more elements than the specified limit will be returned.
+This needs to be strictly positive. If no value is specified, no limit
+will be applied.
+- An offset: allows skipping elements. This needs to be positive.
+Default value is zero.
+
+Example:
+
+....
+curl -XGET 'http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails?limit=100&offset=500'
+....
+
+Response codes:
+
+* 200: The list of mail keys contained in that mail repository
+* 400: Invalid parameters
+* 404: This repository can not be found
+
+=== Reading/downloading a mail details
+
+....
+curl -XGET http://ip:port/mailRepositories/{encodedPathOfTheRepository}/mails/mailKey
+....
+
+Resource name `encodedPathOfTheRepository` should be the resource path
+of an existing mail repository. Resource name `mailKey` should be the
+key of a mail stored in that repository. Example:
+
+....
+curl -XGET http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails/mail-key-1
+....
+
+If the Accept header in the request is ``application/json'', then the
+response looks like:
+
+....
+{
+ "name": "mail-key-1",
+ "sender": "sender@domain.com",
+ "recipients": ["recipient1@domain.com", "recipient2@domain.com"],
+ "state": "address-error",
+ "error": "A small message explaining what happened to that mail...",
+ "remoteHost": "111.222.333.444",
+ "remoteAddr": "127.0.0.1",
+ "lastUpdated": null
+}
+....
+
+If the Accept header in the request is ``message/rfc822'', then the
+response will be the _eml_ file itself.
+
+Additional query parameter `additionalFields` add the existing
+information to the response for the supported values (only work with
+``application/json'' Accept header):
+
+* attributes
+* headers
+* textBody
+* htmlBody
+* messageSize
+* perRecipientsHeaders
+
+....
+curl -XGET http://ip:port/mailRepositories/file%3A%2F%2Fvar%2Fmail%2Ferror%2F/mails/mail-key-1?additionalFields=attributes,headers,textBody,htmlBody,messageSize,perRecipientsHeaders
+....
+
+Give the following kind of response:
+
+....
+{
+ "name": "mail-key-1",
+ "sender": "sender@domain.com",
+ "recipients": ["recipient1@domain.com", "recipient2@domain.com"],
+ "state": "address-error",
+ "error": "A small message explaining what happened to that mail...",
+ "remoteHost": "111.222.333.444",
+ "remoteAddr": "127.0.0.1",
+ "lastUpdated": null,
+ "attributes": {
+ "name2": "value2",
+ "name1": "value1"
+ },
+ "perRecipientsHeaders": {
+ "third@party": {
+ "headerName1": [
+ "value1",
+ "value2"
+ ],
+ "headerName2": [
+ "value3",
+ "value4"
+ ]
+ }
+ },
+ "headers": {
+ "headerName4": [
+ "value6",
+ "value7"
+ ],
+ "headerName3": [
+ "value5",
+ "value8"
+ ]
+ },
+ "textBody": "My body!!",
+ "htmlBody": "My body!!",
+ "messageSize": 42424242
+}
+....
+
+Response codes:
+
+* 200: Details of the mail
+* 404: This repository or mail can not be found
+
+=== Removing a mail from a mail repository
+
+....
+curl -XDELETE http://ip:port/mailRepositories/{encodedPathOfTheRepository}/mails/mailKey
+....
+
+Resource name `encodedPathOfTheRepository` should be the resource path
+of an existing mail repository. Resource name `mailKey` should be the
+key of a mail stored in that repository. Example:
+
+....
+curl -XDELETE http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails/mail-key-1
+....
+
+Response codes:
+
+* 204: This mail no longer exists in this repository
+* 404: This repository can not be found
+
+=== Removing all mails from a mail repository
+
+....
+curl -XDELETE http://ip:port/mailRepositories/{encodedPathOfTheRepository}/mails
+....
+
+Resource name `encodedPathOfTheRepository` should be the resource path
+of an existing mail repository. Example:
+
+....
+curl -XDELETE http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails
+....
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Response codes:
+
+* 201: Task generation succeeded. Corresponding task id is returned.
+* 404: Could not find that mail repository
+
+The scheduled task will have the following type `clear-mail-repository`
+and the following `additionalInformation`:
+
+....
+{
+ "mailRepositoryPath":"var/mail/error/",
+ "initialCount": 243,
+ "remainingCount": 17
+}
+....
+
+=== Reprocessing mails from a mail repository
+
+Sometime, you want to re-process emails stored in a mail repository. For
+instance, you can make a configuration error, or there can be a James
+bug that makes processing of some mails fail. Those mail will be stored
+in a mail repository. Once you solved the problem, you can reprocess
+them.
+
+To reprocess mails from a repository:
+
+....
+curl -XPATCH http://ip:port/mailRepositories/{encodedPathOfTheRepository}/mails?action=reprocess
+....
+
+Resource name `encodedPathOfTheRepository` should be the resource path
+of an existing mail repository. Example:
+
+For instance:
+
+....
+curl -XPATCH http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails?action=reprocess
+....
+
+Additional query parameters are supported:
+
+- `queue` allows you to
+target the mail queue you want to enqueue the mails in. Defaults to
+`spool`.
+- `processor` allows you to overwrite the state of the
+reprocessing mails, and thus select the processors they will start their
+processing in. Defaults to the `state` field of each processed email.
+- `consume` (boolean defaulting to `true`) whether the reprocessing should consume the mail in its originating mail repository. Passing
+this value to `false` allows non destructive reprocessing as you keep a copy of the email in the mail repository and can be valuable
+when debugging.
+- `limit` (integer value. Optional, default is empty). It enables to limit the count of elements reprocessed.
+If unspecified the count of the processed elements is unbounded.
+- `maxRetries` (integer value. Optional, defaults to no limit). Only emails that have been
+retried fewer times than this value will be processed.
+
+For instance:
+
+....
+curl -XPATCH 'http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails?action=reprocess&processor=transport&queue=spool'
+....
+
+Note that the `action` query parameter is compulsory and can only take
+value `reprocess`.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Response codes:
+
+* 201: Task generation succeeded. Corresponding task id is returned.
+* 404: Could not find that mail repository
+
+The scheduled task will have the following type `reprocessing-all` and
+the following `additionalInformation`:
+
+....
+{
+ "mailRepositoryPath":"var/mail/error/",
+ "targetQueue":"spool",
+ "targetProcessor":"transport",
+ "initialCount": 243,
+ "remainingCount": 17
+}
+....
+
+=== Reprocessing a specific mail from a mail repository
+
+To reprocess a specific mail from a mail repository:
+
+....
+curl -XPATCH http://ip:port/mailRepositories/{encodedPathOfTheRepository}/mails/mailKey?action=reprocess
+....
+
+Resource name `encodedPathOfTheRepository` should be the resource id of
+an existing mail repository. Resource name `mailKey` should be the key
+of a mail stored in that repository. Example:
+
+For instance:
+
+....
+curl -XPATCH http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails/name1?action=reprocess
+....
+
+Additional query parameters are supported:
+
+- `queue` allows you to
+target the mail queue you want to enqueue the mails in. Defaults to
+`spool`.
+- `processor` allows you to overwrite the state of the
+reprocessing mails, and thus select the processors they will start their
+processing in. Defaults to the `state` field of each processed email.
+- `consume` (boolean defaulting to `true`) whether the reprocessing should consume the mail in its originating mail repository. Passing
+this value to `false` allows non destructive reprocessing as you keep a copy of the email in the mail repository and can be valuable
+when debugging.
+
+While `processor` is an optional parameter, not specifying it will
+result in reprocessing the mails in their current state
+(https://james.apache.org/server/feature-mailetcontainer.html#Processors[see
+documentation about processors and state]). Consequently, only few cases
+will give a different result, definitively storing them out of the mail
+repository.
+
+For instance:
+
+....
+curl -XPATCH 'http://ip:port/mailRepositories/var%2Fmail%2Ferror%2F/mails/name1?action=reprocess&processor=transport&queue=spool'
+....
+
+Note that the `action` query parameter is compulsory and can only take
+value `reprocess`.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Response codes:
+
+* 201: Task generation succeeded. Corresponding task id is returned.
+* 404: Could not find that mail repository
+
+The scheduled task will have the following type `reprocessing-one` and
+the following `additionalInformation`:
+
+....
+{
+ "mailRepositoryPath":"var/mail/error/",
+ "targetQueue":"spool",
+ "targetProcessor":"transport",
+ "mailKey":"name1"
+}
+....
+
+== Administrating mail queues
+
+=== Listing mail queues
+
+....
+curl -XGET http://ip:port/mailQueues
+....
+
+The answer looks like:
+
+....
+["outgoing","spool"]
+....
+
+Response codes:
+
+* 200: The list of mail queues
+
+=== Getting a mail queue details
+
+....
+curl -XGET http://ip:port/mailQueues/{mailQueueName}
+....
+
+Resource name `mailQueueName` is the name of a mail queue, this command
+will return the details of the given mail queue. For instance:
+
+....
+{"name":"outgoing","size":0}
+....
+
+Response codes:
+
+* 200: Success
+* 400: Mail queue is not valid
+* 404: The mail queue does not exist
+
+=== Listing the mails of a mail queue
+
+....
+curl -XGET http://ip:port/mailQueues/{mailQueueName}/mails
+....
+
+Additional URL query parameters:
+
+* `limit`: Maximum number of mails returned in a single call. Only
+strictly positive integer values are accepted. Example:
+
+....
+curl -XGET http://ip:port/mailQueues/{mailQueueName}/mails?limit=100
+....
+
+The answer looks like:
+
+....
+[{
+ "name": "Mail1516976156284-8b3093b9-eebf-4c40-9c26-1450f4fcdc3c-to-test.com",
+ "sender": "user@james.linagora.com",
+ "recipients": ["someone@test.com"],
+ "nextDelivery": "1969-12-31T23:59:59.999Z"
+}]
+....
+
+Response codes:
+
+* 200: Success
+* 400: Mail queue is not valid or limit is invalid
+* 404: The mail queue does not exist
+
+=== Deleting mails from a mail queue
+
+....
+curl -XDELETE http://ip:port/mailQueues/{mailQueueName}/mails?sender=senderMailAddress
+....
+
+This request should have exactly one query parameter from the following
+list:
+
+* sender: which is a mail address (i.e. sender@james.org)
+* name: which is a string
+* recipient: which is a mail address (i.e. recipient@james.org)
+
+The mails from the given mail queue matching the query parameter will be
+deleted.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Response codes:
+
+* 201: Task generation succeeded. Corresponding task id is returned.
+* 400: Invalid request
+* 404: The mail queue does not exist
+
+The scheduled task will have the following type
+`delete-mails-from-mail-queue` and the following
+`additionalInformation`:
+
+....
+{
+ "queue":"outgoing",
+ "initialCount":10,
+ "remainingCount": 5,
+ "sender": "sender@james.org",
+ "name": "Java Developer",
+ "recipient": "recipient@james.org"
+}
+....
+
+=== Clearing a mail queue
+
+....
+curl -XDELETE http://ip:port/mailQueues/{mailQueueName}/mails
+....
+
+All mails from the given mail queue will be deleted.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Response codes:
+
+* 201: Task generation succeeded. Corresponding task id is returned.
+* 400: Invalid request
+* 404: The mail queue does not exist
+
+The scheduled task will have the following type `clear-mail-queue` and
+the following `additionalInformation`:
+
+....
+{
+ "queue":"outgoing",
+ "initialCount":10,
+ "remainingCount": 0
+}
+....
+
+=== Flushing mails from a mail queue
+
+....
+curl -XPATCH http://ip:port/mailQueues/{mailQueueName}?delayed=true \
+ -d '{"delayed": false}' \
+ -H "Content-Type: application/json"
+....
+
+This request should have the query parameter _delayed_ set to _true_, in
+order to indicate only delayed mails are affected. The payload should
+set the `delayed` field to false in order to remove the delay. This is
+the only supported combination, and it performs a flush.
+
+The mails delayed in the given mail queue will be flushed.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Response codes:
+
+* 204: Success (No content)
+* 400: Invalid request
+* 404: The mail queue does not exist
+
+=== RabbitMQ republishing a mail queue from {backend-name}
+
+....
+curl -XPOST 'http://ip:port/mailQueues/{mailQueueName}?action=RepublishNotProcessedMails&olderThan=1d'
+....
+
+This method is specific to the distributed flavor of James, which relies
+on {backend-name} and RabbitMQ for implementing a mail queue. In case of a
+RabbitMQ crash resulting in a loss of messages, this task can be
+launched to repopulate the `mailQueueName` queue in RabbitMQ using the
+information stored in {backend-name}.
+
+The `olderThan` parameter is mandatory. It filters the mails to be
+restored, by taking into account only the mails older than the given
+value. The expected value should be expressed in the following format:
+`Nunit`. `N` should be strictly positive. `unit` could be either in the
+short form (`h`, `d`, `w`, etc.), or in the long form (`day`, `week`,
+`month`, etc.).
+
+Examples:
+
+* `5h`
+* `7d`
+* `1y`
+
+Response codes:
+
+* 201: Task created
+* 400: Invalid request
+
+The response body contains the id of the republishing task.
+`{ "taskId": "a650a66a-5984-431e-bdad-f1baad885856" }`
+
+include::{admin-mail-queues-extend}[]
+
+== Sending email over webAdmin
+
+....
+curl -XPOST /mail-transfer-service
+
+{MIME message}
+....
+
+Will send the following email to the recipients specified in the MIME message.
+
+The `{MIME message}` payload must match `message/rfc822` format.
+
+== Event Dead Letter
+
+The EventBus allows to register `group listeners' that are called in a
+distributed fashion. These group listeners enable the implementation of
+some advanced mailbox manager feature like indexing, spam reporting,
+quota management and the like.
+
+Upon exceptions, a bounded number of retries are performed (with
+exponential backoff delays). If after those retries the listener is
+still failing, then the event will be stored in the ``Event Dead
+Letter''. This API allows diagnosing issues, as well as performing event
+replay.
+
+=== Listing mailbox listener groups
+
+This endpoint allows discovering the list of mailbox listener groups.
+
+....
+curl -XGET http://ip:port/events/deadLetter/groups
+....
+
+Will return a list of group names that can be further used to interact
+with the dead letter API:
+
+....
+["org.apache.james.mailbox.events.EventBusTestFixture$GroupA", "org.apache.james.mailbox.events.GenericGroup-abc"]
+....
+
+Response codes:
+
+* 200: Success. A list of group names is returned.
+
+=== Listing failed events
+
+This endpoint allows listing failed events for a given group:
+
+....
+curl -XGET http://ip:port/events/deadLetter/groups/org.apache.james.mailbox.events.EventBusTestFixture$GroupA
+....
+
+Will return a list of insertionIds:
+
+....
+["6e0dd59d-660e-4d9b-b22f-0354479f47b4", "58a8f59d-660e-4d9b-b22f-0354486322a2"]
+....
+
+Response codes:
+
+* 200: Success. A list of insertion ids is returned.
+* 400: Invalid group name
+
+=== Getting event details
+
+....
+curl -XGET http://ip:port/events/deadLetter/groups/org.apache.james.mailbox.events.EventBusTestFixture$GroupA/6e0dd59d-660e-4d9b-b22f-0354479f47b4
+....
+
+Will return the full JSON associated with this event.
+
+Response codes:
+
+* 200: Success. A JSON representing this event is returned.
+* 400: Invalid group name or `insertionId`
+* 404: No event with this `insertionId`
+
+=== Deleting an event
+
+....
+curl -XDELETE http://ip:port/events/deadLetter/groups/org.apache.james.mailbox.events.EventBusTestFixture$GroupA/6e0dd59d-660e-4d9b-b22f-0354479f47b4
+....
+
+Will delete this event.
+
+Response codes:
+
+* 204: Success
+* 400: Invalid group name or `insertionId`
+
+=== Deleting all events of a group
+
+....
+curl -XDELETE http://ip:port/events/deadLetter/groups/org.apache.james.mailbox.events.EventBusTestFixture$GroupA
+....
+
+Will delete all events of this group.
+
+Response codes:
+
+* 204: Success
+* 400: Invalid group name
+
+=== Redeliver all events
+
+....
+curl -XPOST http://ip:port/events/deadLetter?action=reDeliver
+....
+
+Additional query parameters are supported:
+
+- `limit` (integer value. Optional, default is empty). It enables to limit the count of elements redelivered.
+If unspecified the count of the processed elements is unbounded
+
+For instance:
+
+....
+curl -XPOST 'http://ip:port/events/deadLetter?action=reDeliver&limit=10'
+....
+
+Will create a task that will attempt to redeliver all events stored in
+``Event Dead Letter''. If successful, redelivered events will then be
+removed from ``Dead Letter''.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Response codes:
+
+* 201: the taskId of the created task
+* 400: Invalid action argument
+
+=== Redeliver group events
+
+....
+curl -XPOST http://ip:port/events/deadLetter/groups/org.apache.james.mailbox.events.EventBusTestFixture$GroupA?action=reDeliver
+....
+
+Will create a task that will attempt to redeliver all events of a
+particular group stored in ``Event Dead Letter''. If successful,
+redelivered events will then be removed from ``Dead Letter''.
+
+Additional query parameters are supported:
+
+- `limit` (integer value. Optional, default is empty). It enables to limit the count of elements redelivered.
+If unspecified the count of the processed elements is unbounded
+
+For instance:
+
+....
+curl -XPOST 'http://ip:port/events/deadLetter/groups/org.apache.james.mailbox.events.EventBusTestFixture$GroupA?action=reDeliver&limit=10'
+....
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Response codes:
+
+* 201: the taskId of the created task
+* 400: Invalid group name or action argument
+
+=== Redeliver a single event
+
+....
+curl -XPOST http://ip:port/events/deadLetter/groups/org.apache.james.mailbox.events.EventBusTestFixture$GroupA/6e0dd59d-660e-4d9b-b22f-0354479f47b4?action=reDeliver
+....
+
+Will create a task that will attempt to redeliver a single event of a
+particular group stored in ``Event Dead Letter''. If successful,
+redelivered event will then be removed from ``Dead Letter''.
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Response codes:
+
+* 201: the taskId of the created task
+* 400: Invalid group name, insertion id or action argument
+* 404: No event with this insertionId
+
+== Deleted Messages Vault
+
+The `Deleted Message Vault plugin' allows you to keep users deleted
+messages during a given retention time. This set of routes allow you to
+_restore_ users deleted messages or export them in an archive.
+
+To move deleted messages in the vault, you need to specifically
+configure the DeletedMessageVault PreDeletionHook.
+
+=== Restore Deleted Messages
+
+Deleted messages of a specific user can be restored by calling the
+following endpoint:
+
+....
+curl -XPOST http://ip:port/deletedMessages/users/userToRestore@domain.ext?action=restore
+
+{
+ "combinator": "and",
+ "criteria": [
+ {
+ "fieldName": "subject",
+ "operator": "containsIgnoreCase",
+ "value": "Apache James"
+ },
+ {
+ "fieldName": "deliveryDate",
+ "operator": "beforeOrEquals",
+ "value": "2014-10-30T14:12:00Z"
+ },
+ {
+ "fieldName": "deletionDate",
+ "operator": "afterOrEquals",
+ "value": "2015-10-20T09:08:00Z"
+ },
+ {
+ "fieldName": "recipients",
+ "operator": "contains",
+ "value": "recipient@james.org"
+ },
+ {
+ "fieldName": "hasAttachment",
+ "operator": "equals",
+ "value": "false"
+ },
+ {
+ "fieldName": "sender",
+ "operator": "equals",
+ "value": "sender@apache.org"
+ },
+ {
+ "fieldName": "originMailboxes",
+ "operator": "contains",
+ "value": "02874f7c-d10e-102f-acda-0015176f7922"
+ }
+ ]
+}
+....
+
+The requested Json body is made from a list of criterion objects which
+have the following structure:
+
+....
+{
+ "fieldName": "supportedFieldName",
+ "operator": "supportedOperator",
+ "value": "A plain string representing the matching value of the corresponding field"
+}
+....
+
+Deleted Messages which match *all* the criteria in the query
+body will be restored. Here is a list of supported fieldName values for
+the restoring:
+
+* subject: represents for deleted message `subject` field matching.
+Supports below string operators:
+** contains
+** containsIgnoreCase
+** equals
+** equalsIgnoreCase
+* deliveryDate: represents for deleted message `deliveryDate` field
+matching. Tested value should follow the right date time with zone
+offset format (ISO-8601) like `2008-09-15T15:53:00+05:00` or
+`2008-09-15T15:53:00Z` Supports below date time operators:
+** beforeOrEquals: is the deleted message’s `deliveryDate` before or
+equals the time of tested value.
+** afterOrEquals: is the deleted message’s `deliveryDate` after or
+equals the time of tested value
+* deletionDate: represents for deleted message `deletionDate` field
+matching. Tested value & Supports operators: similar to `deliveryDate`
+* sender: represents for deleted message `sender` field matching. Tested
+value should be a valid mail address. Supports mail address operator:
+** equals: does the tested sender equal to the sender of the tested
+deleted message ? +
+* recipients: represents for deleted message `recipients` field
+matching. Tested value should be a valid mail address. Supports list
+mail address operator:
+** contains: does the tested deleted message’s recipients contain tested
+recipient ?
+* hasAttachment: represents for deleted message `hasAttachment` field
+matching. Tested value could be `false` or `true`. Supports boolean
+operator:
+** equals: does the tested deleted message’s hasAttachment property
+equal to the tested hasAttachment value?
+* originMailboxes: represents for deleted message `originMailboxes`
+field matching. Tested value is a string serialized of mailbox id.
+Supports list mailbox id operators:
+** contains: does the tested deleted message’s originMailbox ids contain
+tested mailbox id ?
+
+Messages in the Deleted Messages Vault of a specified user that are
+matched with Query Json Object in the body will be appended to his
+`Restored-Messages` mailbox, which will be created if needed.
+
+*Note*:
+
+* Query parameter `action` is required and should have the value
+`restore` to represent the restoring feature. Otherwise, a bad request
+response will be returned
+* Query parameter `action` is case sensitive
+* fieldName & operator passed to the routes are case sensitive
+* Currently, we only support query combinator `and` value, otherwise,
+requests will be rejected
+* If you only want to restore by only one criterion, the json body could
+be simplified to a single criterion:
+
+....
+{
+ "fieldName": "subject",
+ "operator": "containsIgnoreCase",
+ "value": "Apache James"
+}
+....
+
+* For restoring all deleted messages, passing a query json with an empty
+criterion list to represent `matching all deleted messages`:
+
+....
+{
+ "combinator": "and",
+ "criteria": []
+}
+....
+
+* For limiting the number of restored messages, you can use the `limit` query property:
+
+....
+{
+ "combinator": "and",
+ "limit": 99,
+ "criteria": []
+}
+....
+
+*Warning*: Current web-admin uses `US` locale as the default. Therefore,
+there might be some conflicts when using String `containsIgnoreCase`
+comparators to apply on the String data of other special locales stored
+in the Vault. More details at
+https://issues.apache.org/jira/browse/MAILBOX-384[JIRA]
+
+Response code:
+
+* 201: Task for restoring deleted messages has been created
+* 400: Bad request:
+** action query param is not present
+** action query param is not a valid action
+** user parameter is invalid
+** can not parse the JSON body
+** Json query object contains unsupported operator, fieldName
+** Json query object values violate parsing rules
+* 404: User not found
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+The scheduled task will have the following type
+`deleted-messages-restore` and the following `additionalInformation`:
+
+....
+{
+ "successfulRestoreCount": 47,
+ "errorRestoreCount": 0,
+ "user": "userToRestore@domain.ext"
+}
+....
+
+while:
+
+* successfulRestoreCount: number of restored messages
+* errorRestoreCount: number of messages that failed to restore
+* user: owner of the deleted messages to restore
+
+=== Export Deleted Messages
+
+Retrieve deleted messages matched with requested query from an user then
+share the content to a targeted mail address (exportTo)
+
+....
+curl -XPOST 'http://ip:port/deletedMessages/users/userExportFrom@domain.ext?action=export&exportTo=userReceiving@domain.ext'
+
+BODY: a json query with the same structure as in the Restore Deleted Messages section
+....
+
+*Note*: Json query passing into the body follows the same rules &
+restrictions like in link:#_restore_deleted_messages[Restore Deleted
+Messages]
+
+Response code:
+
+* 201: Task for exporting has been created
+* 400: Bad request:
+** exportTo query param is not present
+** exportTo query param is not a valid mail address
+** action query param is not present
+** action query param is not a valid action
+** user parameter is invalid
+** can not parse the JSON body
+** Json query object contains unsupported operator, fieldName
+** Json query object values violate parsing rules
+* 404: User not found
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+The scheduled task will have the following type
+`deleted-messages-export` and the following `additionalInformation`:
+
+....
+{
+ "userExportFrom": "userToRestore@domain.ext",
+ "exportTo": "userReceiving@domain.ext",
+ "totalExportedMessages": 1432
+}
+....
+
+while:
+
+* userExportFrom: export deleted messages from this user
+* exportTo: content of deleted messages have been shared to this mail
+address
+* totalExportedMessages: number of deleted messages matching the
+json query, which were then shared with the sharee.
+
+=== Purge Deleted Messages
+
+You can overwrite the `retentionPeriod` configuration in the
+`deletedMessageVault` configuration file or use the default value of 1
+year.
+
+Purge all deleted messages older than the configured `retentionPeriod`
+
+....
+curl -XDELETE http://ip:port/deletedMessages?scope=expired
+....
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Response code:
+
+* 201: Task for purging has been created
+* 400: Bad request:
+** action query param is not present
+** action query param is not a valid action
+
+You may want to call this endpoint on a regular basis.
+
+=== Permanently Remove Deleted Message
+
+Delete a Deleted Message with `MessageId`
+
+....
+curl -XDELETE http://ip:port/deletedMessages/users/user@domain.ext/messages/3294a976-ce63-491e-bd52-1b6f465ed7a2
+....
+
+link:#_endpoints_returning_a_task[More details about endpoints returning
+a task].
+
+Response code:
+
+* 201: Task for deleting message has been created
+* 400: Bad request:
+** user parameter is invalid
+** messageId parameter is invalid
+* 404: User not found
+
+The scheduled task will have the following type
+`deleted-messages-delete` and the following `additionalInformation`:
+
+....
+ {
+ "userName": "user@domain.ext",
+ "messageId": "3294a976-ce63-491e-bd52-1b6f465ed7a2"
+ }
+....
+
+while: - userName: the user whose deleted message is deleted - messageId:
+messageId of the deleted message that will be deleted
+
+== Administrating DLP Configuration
+
+DLP (stands for Data Leak Prevention) is supported by James. A DLP
+matcher will, on incoming emails, execute regular expressions on email
+sender, recipients or content, in order to report suspicious emails to
+an administrator. WebAdmin can be used to manage these DLP rules on a
+per `senderDomain` basis.
+
+`senderDomain` is domain of the sender of incoming emails, for example:
+`apache.org`, `james.org`,… Each `senderDomain` correspond to a distinct
+DLP configuration.
+
+=== List DLP configuration by sender domain
+
+Retrieve a DLP configuration for corresponding `senderDomain`, a
+configuration contains list of configuration items
+
+....
+curl -XGET http://ip:port/dlp/rules/{senderDomain}
+....
+
+Response codes:
+
+* 200: A list of dlp configuration items is returned
+* 400: Invalid `senderDomain` or payload in request
+* 404: The domain does not exist.
+
+This is an example of returned body. The rules field is a list of rules
+as described below.
+
+....
+{"rules": [
+ {
+ "id": "1",
+ "expression": "james.org",
+ "explanation": "Find senders or recipients containing james[any char]org",
+ "targetsSender": true,
+ "targetsRecipients": true,
+ "targetsContent": false
+ },
+ {
+ "id": "2",
+ "expression": "apache.org",
+ "explanation": "Find senders containing apache[any char]org",
+ "targetsSender": true,
+ "targetsRecipients": false,
+ "targetsContent": false
+ }
+]}
+....
+
+=== Store DLP configuration by sender domain
+
+Store a DLP configuration for corresponding `senderDomain`, if any item
+of DLP configuration in the request is stored before, it will not be
+stored anymore
+
+....
+curl -XPUT http://ip:port/dlp/rules/{senderDomain}
+....
+
+The body can contain a list of DLP configuration items formed by those
+fields: - `id`(String) is mandatory, unique identifier of the
+configuration item - `expression`(String) is mandatory, regular
+expression to match contents of targets - `explanation`(String) is
+optional, description of the configuration item -
+`targetsSender`(boolean) is optional and defaults to false. If true,
+`expression` will be applied to Sender and to From headers of the mail -
+`targetsContent`(boolean) is optional and defaults to false. If true,
+`expression` will be applied to Subject headers and textual bodies
+(text/plain and text/html) of the mail - `targetsRecipients`(boolean) is
+optional and defaults to false. If true, `expression` will be applied to
+recipients of the mail
+
+This is an example of the request body. The rules field is a list of rules
+as described below.
+
+....
+{"rules": [
+ {
+ "id": "1",
+ "expression": "james.org",
+ "explanation": "Find senders or recipients containing james[any char]org",
+ "targetsSender": true,
+ "targetsRecipients": true,
+ "targetsContent": false
+ },
+ {
+ "id": "2",
+ "expression": "apache.org",
+ "explanation": "Find senders containing apache[any char]org",
+ "targetsSender": true,
+ "targetsRecipients": false,
+ "targetsContent": false
+ }
+]}
+....
+
+Response codes:
+
+* 204: List of dlp configuration items is stored
+* 400: Invalid `senderDomain` or payload in request
+* 404: The domain does not exist.
+
+=== Remove DLP configuration by sender domain
+
+Remove a DLP configuration for corresponding `senderDomain`
+
+....
+curl -XDELETE http://ip:port/dlp/rules/{senderDomain}
+....
+
+Response codes:
+
+* 204: DLP configuration is removed
+* 400: Invalid `senderDomain` or payload in request
+* 404: The domain does not exist.
+
+=== Fetch a DLP configuration item by sender domain and rule id
+
+Retrieve a DLP configuration rule for corresponding `senderDomain` and a
+`ruleId`
+
+....
+curl -XGET http://ip:port/dlp/rules/{senderDomain}/rules/{ruleId}
+....
+
+Response codes:
+
+* 200: A dlp configuration item is returned
+* 400: Invalid `senderDomain` or payload in request
+* 404: The domain and/or the rule does not exist.
+
+This is an example of returned body.
+
+....
+{
+ "id": "1",
+ "expression": "james.org",
+ "explanation": "Find senders or recipients containing james[any char]org",
+ "targetsSender": true,
+ "targetsRecipients": true,
+ "targetsContent": false
+}
+....
+
+== Reloading server certificates
+
+Certificates for TCP based protocols (IMAP, SMTP, POP3, LMTP and ManageSieve) can be updated at
+runtime, without service interruption and without closing existing connections.
+
+In order to do so:
+
+- Generate / retrieve your cryptographic materials and replace the ones specified in James configuration.
+- Then call the following endpoint:
+
+....
+curl -XPOST http://ip:port/servers?reload-certificate
+....
+
+Optional query parameters:
+
+- `port`: positive integer (valid port number). Only reload certificates for the specific port.
+
+Return code:
+
+- 204: the certificate is reloaded
+- 400: Invalid request.
\ No newline at end of file
diff --git a/event-bus/pom.xml b/event-bus/pom.xml
index 64b10dcded6..16a649f4322 100644
--- a/event-bus/pom.xml
+++ b/event-bus/pom.xml
@@ -34,5 +34,6 @@
cassandradistributedin-vm
+ postgres
diff --git a/event-bus/postgres/pom.xml b/event-bus/postgres/pom.xml
new file mode 100644
index 00000000000..033ab6dafc1
--- /dev/null
+++ b/event-bus/postgres/pom.xml
@@ -0,0 +1,70 @@
+
+
+
+ 4.0.0
+
+ org.apache.james
+ event-bus
+ 3.9.0-SNAPSHOT
+
+
+ dead-letter-postgres
+ Apache James :: Event Bus :: Dead Letter :: Postgres
+ In Postgres implementation for the eventDeadLetter API
+
+
+
+ ${james.groupId}
+ apache-james-backends-postgres
+
+
+ ${james.groupId}
+ apache-james-backends-postgres
+ test-jar
+ test
+
+
+ ${james.groupId}
+ event-bus-api
+
+
+ ${james.groupId}
+ event-bus-api
+ test-jar
+ test
+
+
+ ${james.groupId}
+ james-server-guice-common
+ test-jar
+ test
+
+
+ ${james.groupId}
+ testing-base
+ test
+
+
+ org.testcontainers
+ postgresql
+ test
+
+
+
diff --git a/event-bus/postgres/src/main/java/org/apache/james/events/PostgresEventDeadLetters.java b/event-bus/postgres/src/main/java/org/apache/james/events/PostgresEventDeadLetters.java
new file mode 100644
index 00000000000..01d7271cb71
--- /dev/null
+++ b/event-bus/postgres/src/main/java/org/apache/james/events/PostgresEventDeadLetters.java
@@ -0,0 +1,118 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.events;
+
+import static org.apache.james.events.PostgresEventDeadLettersModule.PostgresEventDeadLettersTable.EVENT;
+import static org.apache.james.events.PostgresEventDeadLettersModule.PostgresEventDeadLettersTable.GROUP;
+import static org.apache.james.events.PostgresEventDeadLettersModule.PostgresEventDeadLettersTable.INSERTION_ID;
+import static org.apache.james.events.PostgresEventDeadLettersModule.PostgresEventDeadLettersTable.TABLE_NAME;
+
+import jakarta.inject.Inject;
+
+import org.apache.james.backends.postgres.utils.PostgresExecutor;
+import org.jooq.Record;
+
+import com.github.fge.lambdas.Throwing;
+import com.google.common.base.Preconditions;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+public class PostgresEventDeadLetters implements EventDeadLetters {
+ private final PostgresExecutor postgresExecutor;
+ private final EventSerializer eventSerializer;
+
+ @Inject
+ public PostgresEventDeadLetters(PostgresExecutor postgresExecutor, EventSerializer eventSerializer) {
+ this.postgresExecutor = postgresExecutor;
+ this.eventSerializer = eventSerializer;
+ }
+
+ @Override
+ public Mono store(Group registeredGroup, Event failDeliveredEvent) {
+ Preconditions.checkArgument(registeredGroup != null, REGISTERED_GROUP_CANNOT_BE_NULL);
+ Preconditions.checkArgument(failDeliveredEvent != null, FAIL_DELIVERED_EVENT_CANNOT_BE_NULL);
+
+ InsertionId insertionId = InsertionId.random();
+ return postgresExecutor.executeVoid(dslContext -> Mono.from(dslContext.insertInto(TABLE_NAME)
+ .set(INSERTION_ID, insertionId.getId())
+ .set(GROUP, registeredGroup.asString())
+ .set(EVENT, eventSerializer.toJson(failDeliveredEvent))))
+ .thenReturn(insertionId);
+ }
+
+ @Override
+ public Mono remove(Group registeredGroup, InsertionId failDeliveredInsertionId) {
+ Preconditions.checkArgument(registeredGroup != null, REGISTERED_GROUP_CANNOT_BE_NULL);
+ Preconditions.checkArgument(failDeliveredInsertionId != null, FAIL_DELIVERED_ID_INSERTION_CANNOT_BE_NULL);
+
+ return postgresExecutor.executeVoid(dslContext -> Mono.from(dslContext.deleteFrom(TABLE_NAME)
+ .where(INSERTION_ID.eq(failDeliveredInsertionId.getId()))));
+ }
+
+ @Override
+ public Mono remove(Group registeredGroup) {
+ Preconditions.checkArgument(registeredGroup != null, REGISTERED_GROUP_CANNOT_BE_NULL);
+
+ return postgresExecutor.executeVoid(dslContext -> Mono.from(dslContext.deleteFrom(TABLE_NAME)
+ .where(GROUP.eq(registeredGroup.asString()))));
+ }
+
+ @Override
+ public Mono failedEvent(Group registeredGroup, InsertionId failDeliveredInsertionId) {
+ Preconditions.checkArgument(registeredGroup != null, REGISTERED_GROUP_CANNOT_BE_NULL);
+ Preconditions.checkArgument(failDeliveredInsertionId != null, FAIL_DELIVERED_ID_INSERTION_CANNOT_BE_NULL);
+
+ return postgresExecutor.executeRow(dslContext -> Mono.from(dslContext.select(EVENT)
+ .from(TABLE_NAME)
+ .where(INSERTION_ID.eq(failDeliveredInsertionId.getId()))))
+ .map(this::deserializeEvent);
+ }
+
+ private Event deserializeEvent(Record record) {
+ return eventSerializer.asEvent(record.get(EVENT));
+ }
+
+ @Override
+ public Flux failedIds(Group registeredGroup) {
+ Preconditions.checkArgument(registeredGroup != null, REGISTERED_GROUP_CANNOT_BE_NULL);
+
+ return postgresExecutor.executeRows(dslContext -> Flux.from(dslContext
+ .select(INSERTION_ID)
+ .from(TABLE_NAME)
+ .where(GROUP.eq(registeredGroup.asString()))))
+ .map(record -> InsertionId.of(record.get(INSERTION_ID)));
+ }
+
+ @Override
+ public Flux groupsWithFailedEvents() {
+ return postgresExecutor.executeRows(dslContext -> Flux.from(dslContext
+ .selectDistinct(GROUP)
+ .from(TABLE_NAME)))
+ .map(Throwing.function(record -> Group.deserialize(record.get(GROUP))));
+ }
+
+ @Override
+ public Mono containEvents() {
+ return postgresExecutor.executeExists(dslContext -> dslContext.selectOne()
+ .from(TABLE_NAME)
+ .where());
+ }
+}
diff --git a/event-bus/postgres/src/main/java/org/apache/james/events/PostgresEventDeadLettersModule.java b/event-bus/postgres/src/main/java/org/apache/james/events/PostgresEventDeadLettersModule.java
new file mode 100644
index 00000000000..28d5809c26a
--- /dev/null
+++ b/event-bus/postgres/src/main/java/org/apache/james/events/PostgresEventDeadLettersModule.java
@@ -0,0 +1,59 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.events;
+
+import java.util.UUID;
+
+import org.apache.james.backends.postgres.PostgresIndex;
+import org.apache.james.backends.postgres.PostgresModule;
+import org.apache.james.backends.postgres.PostgresTable;
+import org.jooq.Field;
+import org.jooq.Record;
+import org.jooq.Table;
+import org.jooq.impl.DSL;
+import org.jooq.impl.SQLDataType;
+
+public interface PostgresEventDeadLettersModule {
+ interface PostgresEventDeadLettersTable {
+ Table TABLE_NAME = DSL.table("event_dead_letters");
+
+ Field INSERTION_ID = DSL.field("insertion_id", SQLDataType.UUID.notNull());
+ Field GROUP = DSL.field("\"group\"", SQLDataType.VARCHAR.notNull());
+ Field EVENT = DSL.field("event", SQLDataType.VARCHAR.notNull());
+
+ PostgresTable TABLE = PostgresTable.name(TABLE_NAME.getName())
+ .createTableStep(((dsl, tableName) -> dsl.createTableIfNotExists(tableName)
+ .column(INSERTION_ID)
+ .column(GROUP)
+ .column(EVENT)
+ .primaryKey(INSERTION_ID)))
+ .disableRowLevelSecurity()
+ .build();
+
+ PostgresIndex GROUP_INDEX = PostgresIndex.name("event_dead_letters_group_index")
+ .createIndexStep((dsl, indexName) -> dsl.createIndexIfNotExists(indexName)
+ .on(TABLE_NAME, GROUP));
+ }
+
+ PostgresModule MODULE = PostgresModule.builder()
+ .addTable(PostgresEventDeadLettersTable.TABLE)
+ .addIndex(PostgresEventDeadLettersTable.GROUP_INDEX)
+ .build();
+}
diff --git a/event-bus/postgres/src/test/java/org/apache/james/events/PostgresEventDeadLettersTest.java b/event-bus/postgres/src/test/java/org/apache/james/events/PostgresEventDeadLettersTest.java
new file mode 100644
index 00000000000..6dff2be8e11
--- /dev/null
+++ b/event-bus/postgres/src/test/java/org/apache/james/events/PostgresEventDeadLettersTest.java
@@ -0,0 +1,35 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.events;
+
+import org.apache.james.backends.postgres.PostgresExtension;
+import org.apache.james.backends.postgres.PostgresModule;
+import org.junit.jupiter.api.extension.RegisterExtension;
+
+public class PostgresEventDeadLettersTest implements EventDeadLettersContract.AllContracts {
+ @RegisterExtension
+ static PostgresExtension postgresExtension = PostgresExtension.withoutRowLevelSecurity(
+ PostgresModule.aggregateModules(PostgresEventDeadLettersModule.MODULE));
+
+ @Override
+ public EventDeadLetters eventDeadLetters() {
+ return new PostgresEventDeadLetters(postgresExtension.getDefaultPostgresExecutor(), new EventBusTestFixture.TestEventSerializer());
+ }
+}
diff --git a/event-sourcing/event-store-postgres/pom.xml b/event-sourcing/event-store-postgres/pom.xml
new file mode 100644
index 00000000000..daf4273b9c6
--- /dev/null
+++ b/event-sourcing/event-store-postgres/pom.xml
@@ -0,0 +1,103 @@
+
+
+
+ 4.0.0
+
+
+ org.apache.james
+ event-sourcing
+ 3.9.0-SNAPSHOT
+
+
+ event-sourcing-event-store-postgres
+
+ Apache James :: Event sourcing :: Event Store :: Postgres
+ Postgres implementation for James Event Store
+
+
+
+ ${james.groupId}
+ apache-james-backends-postgres
+
+
+ ${james.groupId}
+ apache-james-backends-postgres
+ test-jar
+ test
+
+
+ ${james.groupId}
+ event-sourcing-core
+ test-jar
+ test
+
+
+ ${james.groupId}
+ event-sourcing-event-store-api
+
+
+ ${james.groupId}
+ event-sourcing-event-store-api
+ test-jar
+ test
+
+
+ ${james.groupId}
+ event-sourcing-pojo
+ test-jar
+ test
+
+
+ ${james.groupId}
+ james-json
+
+
+ ${james.groupId}
+ james-server-guice-common
+ test-jar
+ test
+
+
+ net.javacrumbs.json-unit
+ json-unit-assertj
+ test
+
+
+ org.assertj
+ assertj-core
+ test
+
+
+ org.junit.jupiter
+ junit-jupiter-engine
+ test
+
+
+ org.mockito
+ mockito-core
+ test
+
+
+ org.testcontainers
+ postgresql
+ test
+
+
+
diff --git a/event-sourcing/event-store-postgres/src/main/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStore.java b/event-sourcing/event-store-postgres/src/main/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStore.java
new file mode 100644
index 00000000000..5d408d0ab68
--- /dev/null
+++ b/event-sourcing/event-store-postgres/src/main/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStore.java
@@ -0,0 +1,81 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.eventsourcing.eventstore.postgres;
+
+import static org.apache.james.backends.postgres.utils.PostgresUtils.UNIQUE_CONSTRAINT_VIOLATION_PREDICATE;
+
+import java.util.List;
+import java.util.Optional;
+
+import jakarta.inject.Inject;
+
+import org.apache.james.eventsourcing.AggregateId;
+import org.apache.james.eventsourcing.Event;
+import org.apache.james.eventsourcing.EventId;
+import org.apache.james.eventsourcing.eventstore.EventStore;
+import org.apache.james.eventsourcing.eventstore.EventStoreFailedException;
+import org.apache.james.eventsourcing.eventstore.History;
+import org.reactivestreams.Publisher;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+
+import reactor.core.publisher.Mono;
+import scala.jdk.javaapi.CollectionConverters;
+
+public class PostgresEventStore implements EventStore {
+ private final PostgresEventStoreDAO eventStoreDAO;
+
+ @Inject
+ public PostgresEventStore(PostgresEventStoreDAO eventStoreDAO) {
+ this.eventStoreDAO = eventStoreDAO;
+ }
+
+ @Override
+ public Publisher appendAll(scala.collection.Iterable scalaEvents) {
+ if (scalaEvents.isEmpty()) {
+ return Mono.empty();
+ }
+ Preconditions.checkArgument(Event.belongsToSameAggregate(scalaEvents));
+ List events = ImmutableList.copyOf(CollectionConverters.asJava(scalaEvents));
+ Optional snapshotId = events.stream().filter(Event::isASnapshot).map(Event::eventId).findFirst();
+ return eventStoreDAO.appendAll(events, snapshotId)
+ .onErrorMap(UNIQUE_CONSTRAINT_VIOLATION_PREDICATE,
+ e -> new EventStoreFailedException("Concurrent update to the EventStore detected"));
+ }
+
+ @Override
+ public Publisher getEventsOfAggregate(AggregateId aggregateId) {
+ return eventStoreDAO.getSnapshot(aggregateId)
+ .flatMap(snapshotId -> eventStoreDAO.getEventsOfAggregate(aggregateId, snapshotId))
+ .flatMap(history -> {
+ if (history.getEventsJava().isEmpty()) {
+ return Mono.from(eventStoreDAO.getEventsOfAggregate(aggregateId));
+ } else {
+ return Mono.just(history);
+ }
+ }).defaultIfEmpty(History.empty());
+ }
+
+ @Override
+ public Publisher remove(AggregateId aggregateId) {
+ return eventStoreDAO.delete(aggregateId);
+ }
+}
diff --git a/event-sourcing/event-store-postgres/src/main/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStoreDAO.java b/event-sourcing/event-store-postgres/src/main/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStoreDAO.java
new file mode 100644
index 00000000000..cd5f8257a84
--- /dev/null
+++ b/event-sourcing/event-store-postgres/src/main/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStoreDAO.java
@@ -0,0 +1,124 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.eventsourcing.eventstore.postgres;
+
+import static org.apache.james.eventsourcing.eventstore.postgres.PostgresEventStoreModule.PostgresEventStoreTable.AGGREGATE_ID;
+import static org.apache.james.eventsourcing.eventstore.postgres.PostgresEventStoreModule.PostgresEventStoreTable.EVENT;
+import static org.apache.james.eventsourcing.eventstore.postgres.PostgresEventStoreModule.PostgresEventStoreTable.EVENT_ID;
+import static org.apache.james.eventsourcing.eventstore.postgres.PostgresEventStoreModule.PostgresEventStoreTable.SNAPSHOT;
+import static org.apache.james.eventsourcing.eventstore.postgres.PostgresEventStoreModule.PostgresEventStoreTable.TABLE_NAME;
+
+import java.util.List;
+import java.util.Optional;
+
+import jakarta.inject.Inject;
+
+import org.apache.james.backends.postgres.utils.PostgresExecutor;
+import org.apache.james.eventsourcing.AggregateId;
+import org.apache.james.eventsourcing.Event;
+import org.apache.james.eventsourcing.EventId;
+import org.apache.james.eventsourcing.eventstore.History;
+import org.apache.james.eventsourcing.eventstore.JsonEventSerializer;
+import org.apache.james.util.ReactorUtils;
+import org.jooq.JSON;
+import org.jooq.Record;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.google.common.collect.ImmutableList;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+import scala.jdk.javaapi.CollectionConverters;
+
+public class PostgresEventStoreDAO {
+ private PostgresExecutor postgresExecutor;
+ private JsonEventSerializer jsonEventSerializer;
+
+ @Inject
+ public PostgresEventStoreDAO(PostgresExecutor postgresExecutor, JsonEventSerializer jsonEventSerializer) {
+ this.postgresExecutor = postgresExecutor;
+ this.jsonEventSerializer = jsonEventSerializer;
+ }
+
+ public Mono appendAll(List events, Optional lastSnapshot) {
+ return postgresExecutor.executeVoid(dslContext -> Mono.from(dslContext.insertInto(TABLE_NAME, AGGREGATE_ID, EVENT_ID, EVENT)
+ .valuesOfRecords(events.stream().map(event -> dslContext.newRecord(AGGREGATE_ID, EVENT_ID, EVENT)
+ .value1(event.getAggregateId().asAggregateKey())
+ .value2(event.eventId().serialize())
+ .value3(convertToJooqJson(event)))
+ .collect(ImmutableList.toImmutableList()))))
+ .then(lastSnapshot.map(eventId -> insertSnapshot(events.iterator().next().getAggregateId(), eventId)).orElse(Mono.empty()));
+ }
+
+ private Mono insertSnapshot(AggregateId aggregateId, EventId snapshotId) {
+ return postgresExecutor.executeVoid(dslContext -> Mono.from(dslContext.update(TABLE_NAME)
+ .set(SNAPSHOT, snapshotId.serialize())
+ .where(AGGREGATE_ID.eq(aggregateId.asAggregateKey()))));
+ }
+
+ private JSON convertToJooqJson(Event event) {
+ try {
+ return JSON.json(jsonEventSerializer.serialize(event));
+ } catch (JsonProcessingException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public Mono getSnapshot(AggregateId aggregateId) {
+ return postgresExecutor.executeRow(dslContext -> Mono.from(dslContext.select(SNAPSHOT)
+ .from(TABLE_NAME)
+ .where(AGGREGATE_ID.eq(aggregateId.asAggregateKey()))
+ .limit(1)))
+ .map(record -> EventId.fromSerialized(Optional.ofNullable(record.get(SNAPSHOT)).orElse(0)));
+ }
+
+ public Mono getEventsOfAggregate(AggregateId aggregateId, EventId snapshotId) {
+ return postgresExecutor.executeRows(dslContext -> Flux.from(dslContext.selectFrom(TABLE_NAME)
+ .where(AGGREGATE_ID.eq(aggregateId.asAggregateKey()))
+ .and(EVENT_ID.greaterOrEqual(snapshotId.value()))
+ .orderBy(EVENT_ID)))
+ .concatMap(this::toEvent)
+ .collect(ImmutableList.toImmutableList())
+ .map(this::asHistory);
+ }
+
+ public Mono getEventsOfAggregate(AggregateId aggregateId) {
+ return postgresExecutor.executeRows(dslContext -> Flux.from(dslContext.selectFrom(TABLE_NAME)
+ .where(AGGREGATE_ID.eq(aggregateId.asAggregateKey()))
+ .orderBy(EVENT_ID)))
+ .concatMap(this::toEvent)
+ .collect(ImmutableList.toImmutableList())
+ .map(this::asHistory);
+ }
+
+ public Mono delete(AggregateId aggregateId) {
+ return postgresExecutor.executeVoid(dslContext -> Mono.from(dslContext.deleteFrom(TABLE_NAME)
+ .where(AGGREGATE_ID.eq(aggregateId.asAggregateKey()))));
+ }
+
+ private History asHistory(List events) {
+ return History.of(CollectionConverters.asScala(events).toList());
+ }
+
+ private Mono toEvent(Record record) {
+ return Mono.fromCallable(() -> jsonEventSerializer.deserialize(record.get(EVENT).data()))
+ .subscribeOn(ReactorUtils.BLOCKING_CALL_WRAPPER);
+ }
+}
diff --git a/event-sourcing/event-store-postgres/src/main/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStoreModule.java b/event-sourcing/event-store-postgres/src/main/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStoreModule.java
new file mode 100644
index 00000000000..f90eb5c1cc1
--- /dev/null
+++ b/event-sourcing/event-store-postgres/src/main/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStoreModule.java
@@ -0,0 +1,63 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.eventsourcing.eventstore.postgres;
+
+import static org.apache.james.eventsourcing.eventstore.postgres.PostgresEventStoreModule.PostgresEventStoreTable.INDEX;
+import static org.apache.james.eventsourcing.eventstore.postgres.PostgresEventStoreModule.PostgresEventStoreTable.TABLE;
+
+import org.apache.james.backends.postgres.PostgresIndex;
+import org.apache.james.backends.postgres.PostgresModule;
+import org.apache.james.backends.postgres.PostgresTable;
+import org.jooq.Field;
+import org.jooq.JSON;
+import org.jooq.Record;
+import org.jooq.Table;
+import org.jooq.impl.DSL;
+import org.jooq.impl.SQLDataType;
+
+public interface PostgresEventStoreModule {
+ interface PostgresEventStoreTable {
+ Table<Record> TABLE_NAME = DSL.table("event_store");
+
+ Field<String> AGGREGATE_ID = DSL.field("aggregate_id", SQLDataType.VARCHAR.notNull());
+ Field<Integer> EVENT_ID = DSL.field("event_id", SQLDataType.INTEGER.notNull());
+ Field<Integer> SNAPSHOT = DSL.field("snapshot", SQLDataType.INTEGER);
+ Field<JSON> EVENT = DSL.field("event", SQLDataType.JSON.notNull());
+
+ PostgresTable TABLE = PostgresTable.name(TABLE_NAME.getName())
+ .createTableStep(((dsl, tableName) -> dsl.createTableIfNotExists(tableName)
+ .column(AGGREGATE_ID)
+ .column(EVENT_ID)
+ .column(SNAPSHOT)
+ .column(EVENT)
+ .constraint(DSL.primaryKey(AGGREGATE_ID, EVENT_ID))))
+ .disableRowLevelSecurity()
+ .build();
+
+ PostgresIndex INDEX = PostgresIndex.name("event_store_aggregate_id_index")
+ .createIndexStep((dslContext, indexName) -> dslContext.createIndexIfNotExists(indexName)
+ .on(TABLE_NAME, AGGREGATE_ID));
+ }
+
+ PostgresModule MODULE = PostgresModule.builder()
+ .addTable(TABLE)
+ .addIndex(INDEX)
+ .build();
+}
diff --git a/event-sourcing/event-store-postgres/src/test/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventSourcingSystemTest.java b/event-sourcing/event-store-postgres/src/test/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventSourcingSystemTest.java
new file mode 100644
index 00000000000..1faf9842e2d
--- /dev/null
+++ b/event-sourcing/event-store-postgres/src/test/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventSourcingSystemTest.java
@@ -0,0 +1,27 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.eventsourcing.eventstore.postgres;
+
+import org.apache.james.eventsourcing.EventSourcingSystemTest;
+import org.junit.jupiter.api.extension.ExtendWith;
+
+@ExtendWith(PostgresEventStoreExtensionForTestEvents.class)
+public class PostgresEventSourcingSystemTest implements EventSourcingSystemTest {
+}
diff --git a/event-sourcing/event-store-postgres/src/test/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStoreExtension.java b/event-sourcing/event-store-postgres/src/test/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStoreExtension.java
new file mode 100644
index 00000000000..6f5ea91e1c7
--- /dev/null
+++ b/event-sourcing/event-store-postgres/src/test/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStoreExtension.java
@@ -0,0 +1,72 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.eventsourcing.eventstore.postgres;
+
+import org.apache.james.backends.postgres.PostgresExtension;
+import org.apache.james.eventsourcing.eventstore.EventStore;
+import org.apache.james.eventsourcing.eventstore.JsonEventSerializer;
+import org.junit.jupiter.api.extension.AfterAllCallback;
+import org.junit.jupiter.api.extension.AfterEachCallback;
+import org.junit.jupiter.api.extension.BeforeAllCallback;
+import org.junit.jupiter.api.extension.BeforeEachCallback;
+import org.junit.jupiter.api.extension.ExtensionContext;
+import org.junit.jupiter.api.extension.ParameterContext;
+import org.junit.jupiter.api.extension.ParameterResolutionException;
+import org.junit.jupiter.api.extension.ParameterResolver;
+
+public class PostgresEventStoreExtension implements AfterAllCallback, BeforeAllCallback, AfterEachCallback, BeforeEachCallback, ParameterResolver {
+ private PostgresExtension postgresExtension;
+ private JsonEventSerializer jsonEventSerializer;
+
+ public PostgresEventStoreExtension(JsonEventSerializer jsonEventSerializer) {
+ this.jsonEventSerializer = jsonEventSerializer;
+ this.postgresExtension = PostgresExtension.withoutRowLevelSecurity(PostgresEventStoreModule.MODULE);
+ }
+
+ @Override
+ public void afterAll(ExtensionContext extensionContext) {
+ postgresExtension.afterAll(extensionContext);
+ }
+
+ @Override
+ public void afterEach(ExtensionContext extensionContext) {
+ postgresExtension.afterEach(extensionContext);
+ }
+
+ @Override
+ public void beforeAll(ExtensionContext extensionContext) throws Exception {
+ postgresExtension.beforeAll(extensionContext);
+ }
+
+ @Override
+ public void beforeEach(ExtensionContext extensionContext) {
+ postgresExtension.beforeEach(extensionContext);
+ }
+
+ @Override
+ public boolean supportsParameter(ParameterContext parameterContext, ExtensionContext extensionContext) throws ParameterResolutionException {
+ return parameterContext.getParameter().getType() == EventStore.class;
+ }
+
+ @Override
+ public PostgresEventStore resolveParameter(ParameterContext parameterContext, ExtensionContext extensionContext) throws ParameterResolutionException {
+ return new PostgresEventStore(new PostgresEventStoreDAO(postgresExtension.getDefaultPostgresExecutor(), jsonEventSerializer));
+ }
+}
diff --git a/event-sourcing/event-store-postgres/src/test/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStoreExtensionForTestEvents.java b/event-sourcing/event-store-postgres/src/test/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStoreExtensionForTestEvents.java
new file mode 100644
index 00000000000..dcebb2932ad
--- /dev/null
+++ b/event-sourcing/event-store-postgres/src/test/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStoreExtensionForTestEvents.java
@@ -0,0 +1,29 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.eventsourcing.eventstore.postgres;
+
+import org.apache.james.eventsourcing.eventstore.JsonEventSerializer;
+import org.apache.james.eventsourcing.eventstore.dto.TestEventDTOModules;
+
+public class PostgresEventStoreExtensionForTestEvents extends PostgresEventStoreExtension {
+ public PostgresEventStoreExtensionForTestEvents() {
+ super(JsonEventSerializer.forModules(TestEventDTOModules.TEST_TYPE(), TestEventDTOModules.SNAPSHOT_TYPE()).withoutNestedType());
+ }
+}
diff --git a/event-sourcing/event-store-postgres/src/test/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStoreTest.java b/event-sourcing/event-store-postgres/src/test/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStoreTest.java
new file mode 100644
index 00000000000..a1a00f8a3d4
--- /dev/null
+++ b/event-sourcing/event-store-postgres/src/test/java/org/apache/james/eventsourcing/eventstore/postgres/PostgresEventStoreTest.java
@@ -0,0 +1,65 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.eventsourcing.eventstore.postgres;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import org.apache.james.eventsourcing.Event;
+import org.apache.james.eventsourcing.EventId;
+import org.apache.james.eventsourcing.TestEvent;
+import org.apache.james.eventsourcing.eventstore.EventStore;
+import org.apache.james.eventsourcing.eventstore.EventStoreContract;
+import org.apache.james.eventsourcing.eventstore.History;
+import org.apache.james.eventsourcing.eventstore.dto.SnapshotEvent;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+
+import reactor.core.publisher.Mono;
+
+@ExtendWith(PostgresEventStoreExtensionForTestEvents.class)
+public class PostgresEventStoreTest implements EventStoreContract {
+ @Test
+ void getEventsOfAggregateShouldResumeFromSnapshot(EventStore testee) {
+ Event event1 = new TestEvent(EventId.first(), EventStoreContract.AGGREGATE_1(), "first");
+ Event event2 = new SnapshotEvent(EventId.first().next(), EventStoreContract.AGGREGATE_1(), "second");
+ Event event3 = new TestEvent(EventId.first().next().next(), EventStoreContract.AGGREGATE_1(), "third");
+
+ Mono.from(testee.append(event1)).block();
+ Mono.from(testee.append(event2)).block();
+ Mono.from(testee.append(event3)).block();
+
+ assertThat(Mono.from(testee.getEventsOfAggregate(EventStoreContract.AGGREGATE_1())).block())
+ .isEqualTo(History.of(event2, event3));
+ }
+
+ @Test
+ void getEventsOfAggregateShouldResumeFromLatestSnapshot(EventStore testee) {
+ Event event1 = new SnapshotEvent(EventId.first(), EventStoreContract.AGGREGATE_1(), "first");
+ Event event2 = new TestEvent(EventId.first().next(), EventStoreContract.AGGREGATE_1(), "second");
+ Event event3 = new SnapshotEvent(EventId.first().next().next(), EventStoreContract.AGGREGATE_1(), "third");
+
+ Mono.from(testee.append(event1)).block();
+ Mono.from(testee.append(event2)).block();
+ Mono.from(testee.append(event3)).block();
+
+ assertThat(Mono.from(testee.getEventsOfAggregate(EventStoreContract.AGGREGATE_1())).block())
+ .isEqualTo(History.of(event3));
+ }
+}
\ No newline at end of file
diff --git a/event-sourcing/pom.xml b/event-sourcing/pom.xml
index f14f296631e..836edca1473 100644
--- a/event-sourcing/pom.xml
+++ b/event-sourcing/pom.xml
@@ -37,6 +37,7 @@
 <module>event-store-api</module>
 <module>event-store-cassandra</module>
 <module>event-store-memory</module>
+ <module>event-store-postgres</module>
diff --git a/mailbox/api/src/main/java/org/apache/james/mailbox/MessageManager.java b/mailbox/api/src/main/java/org/apache/james/mailbox/MessageManager.java
index bab6c535309..c87729aed68 100644
--- a/mailbox/api/src/main/java/org/apache/james/mailbox/MessageManager.java
+++ b/mailbox/api/src/main/java/org/apache/james/mailbox/MessageManager.java
@@ -38,7 +38,6 @@
import jakarta.mail.internet.SharedInputStream;
import org.apache.commons.io.IOUtils;
-import org.apache.james.mailbox.MailboxManager.MessageCapabilities;
import org.apache.james.mailbox.MessageManager.MailboxMetaData.RecentMode;
import org.apache.james.mailbox.exception.MailboxException;
import org.apache.james.mailbox.exception.UnsupportedCriteriaException;
@@ -441,7 +440,6 @@ default Publisher getMessagesReactive(MessageRange set, FetchGrou
*/
Mailbox getMailboxEntity() throws MailboxException;
- EnumSet<MessageCapabilities> getSupportedMessageCapabilities();
/**
* Gets the id of the referenced mailbox
diff --git a/mailbox/api/src/main/java/org/apache/james/mailbox/UuidBackedAttachmentIdFactory.java b/mailbox/api/src/main/java/org/apache/james/mailbox/UuidBackedAttachmentIdFactory.java
new file mode 100644
index 00000000000..3cebb5ab36e
--- /dev/null
+++ b/mailbox/api/src/main/java/org/apache/james/mailbox/UuidBackedAttachmentIdFactory.java
@@ -0,0 +1,34 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox;
+
+import org.apache.james.mailbox.model.UuidBackedAttachmentId;
+
+public class UuidBackedAttachmentIdFactory implements AttachmentIdFactory {
+ @Override
+ public UuidBackedAttachmentId random() {
+ return UuidBackedAttachmentId.random();
+ }
+
+ @Override
+ public UuidBackedAttachmentId from(String id) {
+ return UuidBackedAttachmentId.from(id);
+ }
+}
diff --git a/mailbox/api/src/main/java/org/apache/james/mailbox/model/UuidBackedAttachmentId.java b/mailbox/api/src/main/java/org/apache/james/mailbox/model/UuidBackedAttachmentId.java
new file mode 100644
index 00000000000..12186a28821
--- /dev/null
+++ b/mailbox/api/src/main/java/org/apache/james/mailbox/model/UuidBackedAttachmentId.java
@@ -0,0 +1,76 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+package org.apache.james.mailbox.model;
+
+import java.util.UUID;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.base.Objects;
+
+public class UuidBackedAttachmentId implements AttachmentId {
+ public static UuidBackedAttachmentId random() {
+ return new UuidBackedAttachmentId(UUID.randomUUID());
+ }
+
+ public static UuidBackedAttachmentId from(String id) {
+ return new UuidBackedAttachmentId(UUID.fromString(id));
+ }
+
+ public static UuidBackedAttachmentId from(UUID id) {
+ return new UuidBackedAttachmentId(id);
+ }
+
+ private final UUID id;
+
+ private UuidBackedAttachmentId(UUID id) {
+ this.id = id;
+ }
+
+ @Override
+ public String getId() {
+ return id.toString();
+ }
+
+ @Override
+ public UUID asUUID() {
+ return id;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj instanceof UuidBackedAttachmentId) {
+ UuidBackedAttachmentId other = (UuidBackedAttachmentId) obj;
+ return Objects.equal(id, other.id);
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hashCode(id);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects
+ .toStringHelper(this)
+ .add("id", id)
+ .toString();
+ }
+}
diff --git a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/Limits.java b/mailbox/api/src/main/java/org/apache/james/mailbox/quota/Limits.java
similarity index 97%
rename from mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/Limits.java
rename to mailbox/api/src/main/java/org/apache/james/mailbox/quota/Limits.java
index 3ef7aec0975..f278d03ed75 100644
--- a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/Limits.java
+++ b/mailbox/api/src/main/java/org/apache/james/mailbox/quota/Limits.java
@@ -17,7 +17,7 @@
* under the License. *
****************************************************************/
-package org.apache.james.mailbox.cassandra.quota;
+package org.apache.james.mailbox.quota;
import java.util.Optional;
diff --git a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/QuotaCodec.java b/mailbox/api/src/main/java/org/apache/james/mailbox/quota/QuotaCodec.java
similarity index 90%
rename from mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/QuotaCodec.java
rename to mailbox/api/src/main/java/org/apache/james/mailbox/quota/QuotaCodec.java
index 87b6cdcef79..d3d9b5cd67a 100644
--- a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/QuotaCodec.java
+++ b/mailbox/api/src/main/java/org/apache/james/mailbox/quota/QuotaCodec.java
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations *
* under the License. *
****************************************************************/
-package org.apache.james.mailbox.cassandra.quota;
+package org.apache.james.mailbox.quota;
import java.util.Optional;
import java.util.function.Function;
@@ -30,18 +30,18 @@ public class QuotaCodec {
private static final long INFINITE = -1;
private static final long NO_RIGHT = 0L;
- static Long quotaValueToLong(QuotaLimitValue<?> value) {
+ public static Long quotaValueToLong(QuotaLimitValue<?> value) {
if (value.isUnlimited()) {
return INFINITE;
}
return value.asLong();
}
- static Optional<QuotaSizeLimit> longToQuotaSize(Long value) {
+ public static Optional<QuotaSizeLimit> longToQuotaSize(Long value) {
return longToQuotaValue(value, QuotaSizeLimit.unlimited(), QuotaSizeLimit::size);
}
- static Optional<QuotaCountLimit> longToQuotaCount(Long value) {
+ public static Optional<QuotaCountLimit> longToQuotaCount(Long value) {
return longToQuotaValue(value, QuotaCountLimit.unlimited(), QuotaCountLimit::count);
}
diff --git a/mailbox/api/src/test/java/org/apache/james/mailbox/MailboxManagerTest.java b/mailbox/api/src/test/java/org/apache/james/mailbox/MailboxManagerTest.java
index 881fcaaf972..9ec14ef15c2 100644
--- a/mailbox/api/src/test/java/org/apache/james/mailbox/MailboxManagerTest.java
+++ b/mailbox/api/src/test/java/org/apache/james/mailbox/MailboxManagerTest.java
@@ -651,7 +651,9 @@ void getAllAnnotationsShouldRetrieveStoredAnnotations() throws Exception {
mailboxManager.updateAnnotations(inbox, session, annotations);
- assertThat(mailboxManager.getAllAnnotations(inbox, session)).isEqualTo(annotations);
+ assertThat(mailboxManager.getAllAnnotations(inbox, session))
+ .hasSize(annotations.size())
+ .containsAnyElementsOf(annotations);
}
@Test
diff --git a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/CassandraMailboxSessionMapperFactory.java b/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/CassandraMailboxSessionMapperFactory.java
index 6c8e39c5b3c..55a27497830 100644
--- a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/CassandraMailboxSessionMapperFactory.java
+++ b/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/CassandraMailboxSessionMapperFactory.java
@@ -180,12 +180,12 @@ public SubscriptionMapper createSubscriptionMapper(MailboxSession mailboxSession
}
@Override
- public ModSeqProvider getModSeqProvider() {
+ public ModSeqProvider getModSeqProvider(MailboxSession session) {
return modSeqProvider;
}
@Override
- public UidProvider getUidProvider() {
+ public UidProvider getUidProvider(MailboxSession session) {
return uidProvider;
}
diff --git a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraCurrentQuotaManagerV2.java b/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraCurrentQuotaManagerV2.java
index fba7cc01ab6..d455706f429 100644
--- a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraCurrentQuotaManagerV2.java
+++ b/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraCurrentQuotaManagerV2.java
@@ -26,7 +26,6 @@
import jakarta.inject.Inject;
import org.apache.james.backends.cassandra.components.CassandraQuotaCurrentValueDao;
-import org.apache.james.backends.cassandra.components.CassandraQuotaCurrentValueDao.QuotaKey;
import org.apache.james.core.quota.QuotaComponent;
import org.apache.james.core.quota.QuotaCountUsage;
import org.apache.james.core.quota.QuotaCurrentValue;
@@ -117,16 +116,16 @@ public Mono setCurrentQuotas(QuotaOperation quotaOperation) {
});
}
- private QuotaKey asQuotaKeyCount(QuotaRoot quotaRoot) {
+ private QuotaCurrentValue.Key asQuotaKeyCount(QuotaRoot quotaRoot) {
return asQuotaKey(quotaRoot, QuotaType.COUNT);
}
- private QuotaKey asQuotaKeySize(QuotaRoot quotaRoot) {
+ private QuotaCurrentValue.Key asQuotaKeySize(QuotaRoot quotaRoot) {
return asQuotaKey(quotaRoot, QuotaType.SIZE);
}
- private QuotaKey asQuotaKey(QuotaRoot quotaRoot, QuotaType quotaType) {
- return QuotaKey.of(
+ private QuotaCurrentValue.Key asQuotaKey(QuotaRoot quotaRoot, QuotaType quotaType) {
+ return QuotaCurrentValue.Key.of(
QuotaComponent.MAILBOX,
quotaRoot.asString(),
quotaType);
diff --git a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraGlobalMaxQuotaDao.java b/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraGlobalMaxQuotaDao.java
index 02e777d7f18..3be44c6c6d6 100644
--- a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraGlobalMaxQuotaDao.java
+++ b/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraGlobalMaxQuotaDao.java
@@ -39,6 +39,8 @@
import org.apache.james.backends.cassandra.utils.CassandraAsyncExecutor;
import org.apache.james.core.quota.QuotaCountLimit;
import org.apache.james.core.quota.QuotaSizeLimit;
+import org.apache.james.mailbox.quota.Limits;
+import org.apache.james.mailbox.quota.QuotaCodec;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
diff --git a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerDomainMaxQuotaDao.java b/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerDomainMaxQuotaDao.java
index c583a6a487d..53267376eda 100644
--- a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerDomainMaxQuotaDao.java
+++ b/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerDomainMaxQuotaDao.java
@@ -35,6 +35,8 @@
import org.apache.james.core.quota.QuotaCountLimit;
import org.apache.james.core.quota.QuotaSizeLimit;
import org.apache.james.mailbox.cassandra.table.CassandraDomainMaxQuota;
+import org.apache.james.mailbox.quota.Limits;
+import org.apache.james.mailbox.quota.QuotaCodec;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
diff --git a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerUserMaxQuotaDao.java b/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerUserMaxQuotaDao.java
index db1c36eeaa0..932da5ea912 100644
--- a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerUserMaxQuotaDao.java
+++ b/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerUserMaxQuotaDao.java
@@ -35,6 +35,8 @@
import org.apache.james.core.quota.QuotaSizeLimit;
import org.apache.james.mailbox.cassandra.table.CassandraMaxQuota;
import org.apache.james.mailbox.model.QuotaRoot;
+import org.apache.james.mailbox.quota.Limits;
+import org.apache.james.mailbox.quota.QuotaCodec;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
diff --git a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerUserMaxQuotaManagerV1.java b/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerUserMaxQuotaManagerV1.java
index d6dead22c40..6016288f5d5 100644
--- a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerUserMaxQuotaManagerV1.java
+++ b/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerUserMaxQuotaManagerV1.java
@@ -32,6 +32,7 @@
import org.apache.james.core.quota.QuotaSizeLimit;
import org.apache.james.mailbox.model.Quota;
import org.apache.james.mailbox.model.QuotaRoot;
+import org.apache.james.mailbox.quota.Limits;
import org.apache.james.mailbox.quota.MaxQuotaManager;
import com.google.common.collect.ImmutableMap;
diff --git a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerUserMaxQuotaManagerV2.java b/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerUserMaxQuotaManagerV2.java
index e310aa93430..e698d8c9df9 100644
--- a/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerUserMaxQuotaManagerV2.java
+++ b/mailbox/cassandra/src/main/java/org/apache/james/mailbox/cassandra/quota/CassandraPerUserMaxQuotaManagerV2.java
@@ -19,7 +19,6 @@
package org.apache.james.mailbox.cassandra.quota;
-import static org.apache.james.backends.cassandra.components.CassandraQuotaLimitDao.QuotaLimitKey;
import static org.apache.james.util.ReactorUtils.publishIfPresent;
import java.util.Map;
@@ -41,7 +40,9 @@
import org.apache.james.core.quota.QuotaType;
import org.apache.james.mailbox.model.Quota;
import org.apache.james.mailbox.model.QuotaRoot;
+import org.apache.james.mailbox.quota.Limits;
import org.apache.james.mailbox.quota.MaxQuotaManager;
+import org.apache.james.mailbox.quota.QuotaCodec;
import com.google.common.collect.ImmutableMap;
@@ -130,7 +131,7 @@ public void removeDomainMaxMessage(Domain domain) {
@Override
public Mono<Void> removeDomainMaxMessageReactive(Domain domain) {
- return cassandraQuotaLimitDao.deleteQuotaLimit(QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, domain.asString(), QuotaType.COUNT));
+ return cassandraQuotaLimitDao.deleteQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, domain.asString(), QuotaType.COUNT));
}
@Override
@@ -140,7 +141,7 @@ public void removeDomainMaxStorage(Domain domain) {
@Override
public Mono<Void> removeDomainMaxStorageReactive(Domain domain) {
- return cassandraQuotaLimitDao.deleteQuotaLimit(QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, domain.asString(), QuotaType.SIZE));
+ return cassandraQuotaLimitDao.deleteQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.DOMAIN, domain.asString(), QuotaType.SIZE));
}
@Override
@@ -170,7 +171,7 @@ public void removeMaxMessage(QuotaRoot quotaRoot) {
@Override
public Mono<Void> removeMaxMessageReactive(QuotaRoot quotaRoot) {
- return cassandraQuotaLimitDao.deleteQuotaLimit(QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.USER, quotaRoot.getValue(), QuotaType.COUNT));
+ return cassandraQuotaLimitDao.deleteQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.USER, quotaRoot.getValue(), QuotaType.COUNT));
}
@Override
@@ -180,7 +181,7 @@ public void removeMaxStorage(QuotaRoot quotaRoot) {
@Override
public Mono<Void> removeMaxStorageReactive(QuotaRoot quotaRoot) {
- return cassandraQuotaLimitDao.deleteQuotaLimit(QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.USER, quotaRoot.getValue(), QuotaType.SIZE));
+ return cassandraQuotaLimitDao.deleteQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.USER, quotaRoot.getValue(), QuotaType.SIZE));
}
@Override
@@ -205,7 +206,7 @@ public void removeGlobalMaxStorage() {
@Override
public Mono<Void> removeGlobalMaxStorageReactive() {
- return cassandraQuotaLimitDao.deleteQuotaLimit(QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.GLOBAL, GLOBAL_IDENTIFIER, QuotaType.SIZE));
+ return cassandraQuotaLimitDao.deleteQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.GLOBAL, GLOBAL_IDENTIFIER, QuotaType.SIZE));
}
@Override
@@ -230,7 +231,7 @@ public void removeGlobalMaxMessage() {
@Override
public Mono<Void> removeGlobalMaxMessageReactive() {
- return cassandraQuotaLimitDao.deleteQuotaLimit(QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.GLOBAL, GLOBAL_IDENTIFIER, QuotaType.COUNT));
+ return cassandraQuotaLimitDao.deleteQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, QuotaScope.GLOBAL, GLOBAL_IDENTIFIER, QuotaType.COUNT));
}
@Override
@@ -322,7 +323,7 @@ private Mono getLimits(QuotaScope quotaScope, String identifier) {
}
private Mono<QuotaCountLimit> getMaxMessageReactive(QuotaScope quotaScope, String identifier) {
- return cassandraQuotaLimitDao.getQuotaLimit(QuotaLimitKey.of(QuotaComponent.MAILBOX, quotaScope, identifier, QuotaType.COUNT))
+ return cassandraQuotaLimitDao.getQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, quotaScope, identifier, QuotaType.COUNT))
.map(QuotaLimit::getQuotaLimit)
.handle(publishIfPresent())
.map(QuotaCodec::longToQuotaCount)
@@ -330,7 +331,7 @@ private Mono getMaxMessageReactive(QuotaScope quotaScope, Strin
}
public Mono<QuotaSizeLimit> getMaxStorageReactive(QuotaScope quotaScope, String identifier) {
- return cassandraQuotaLimitDao.getQuotaLimit(QuotaLimitKey.of(QuotaComponent.MAILBOX, quotaScope, identifier, QuotaType.SIZE))
+ return cassandraQuotaLimitDao.getQuotaLimit(QuotaLimit.QuotaLimitKey.of(QuotaComponent.MAILBOX, quotaScope, identifier, QuotaType.SIZE))
.map(QuotaLimit::getQuotaLimit)
.handle(publishIfPresent())
.map(QuotaCodec::longToQuotaSize)
diff --git a/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/CassandraThreadIdGuessingAlgorithmTest.java b/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/CassandraThreadIdGuessingAlgorithmTest.java
index 546657676f9..1d7e8dd5a4e 100644
--- a/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/CassandraThreadIdGuessingAlgorithmTest.java
+++ b/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/CassandraThreadIdGuessingAlgorithmTest.java
@@ -52,8 +52,6 @@
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
-import reactor.core.publisher.Flux;
-
public class CassandraThreadIdGuessingAlgorithmTest extends ThreadIdGuessingAlgorithmContract {
private CassandraMailboxManager mailboxManager;
private CassandraThreadDAO threadDAO;
@@ -94,8 +92,10 @@ protected MessageId initOtherBasedMessageId() {
}
@Override
- protected Flux<Void> saveThreadData(Username username, Set<MimeMessageId> mimeMessageIds, MessageId messageId, ThreadId threadId, Optional<Subject> baseSubject) {
- return threadDAO.insertSome(username, hashMimeMessagesIds(mimeMessageIds), messageId, threadId, hashSubject(baseSubject));
+ protected void saveThreadData(Username username, Set<MimeMessageId> mimeMessageIds, MessageId messageId, ThreadId threadId, Optional<Subject> baseSubject) {
+ threadDAO.insertSome(username, hashMimeMessagesIds(mimeMessageIds), messageId, threadId, hashSubject(baseSubject))
+ .then()
+ .block();
}
@Test
diff --git a/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/mail/CassandraMapperProvider.java b/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/mail/CassandraMapperProvider.java
index e3e7cd9f2b8..4f5c310b181 100644
--- a/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/mail/CassandraMapperProvider.java
+++ b/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/mail/CassandraMapperProvider.java
@@ -40,8 +40,8 @@
import org.apache.james.mailbox.store.mail.MailboxMapper;
import org.apache.james.mailbox.store.mail.MessageIdMapper;
import org.apache.james.mailbox.store.mail.MessageMapper;
+import org.apache.james.mailbox.store.mail.UidProvider;
import org.apache.james.mailbox.store.mail.model.MapperProvider;
-import org.apache.james.mailbox.store.mail.model.MessageUidProvider;
import org.apache.james.utils.UpdatableTickingClock;
import com.google.common.collect.ImmutableList;
@@ -51,7 +51,7 @@ public class CassandraMapperProvider implements MapperProvider {
private static final Factory MESSAGE_ID_FACTORY = new CassandraMessageId.Factory();
private final CassandraCluster cassandra;
- private final MessageUidProvider messageUidProvider;
+ private final UidProvider messageUidProvider;
private final CassandraModSeqProvider cassandraModSeqProvider;
private final UpdatableTickingClock updatableTickingClock;
private final MailboxSession mailboxSession = MailboxSessionUtil.create(Username.of("benwa"));
@@ -60,7 +60,7 @@ public class CassandraMapperProvider implements MapperProvider {
public CassandraMapperProvider(CassandraCluster cassandra,
CassandraConfiguration cassandraConfiguration) {
this.cassandra = cassandra;
- messageUidProvider = new MessageUidProvider();
+ messageUidProvider = new CassandraUidProvider(this.cassandra.getConf(), cassandraConfiguration);
cassandraModSeqProvider = new CassandraModSeqProvider(
this.cassandra.getConf(),
cassandraConfiguration);
@@ -116,8 +116,12 @@ public List getSupportedCapabilities() {
}
@Override
- public MessageUid generateMessageUid() {
- return messageUidProvider.next();
+ public MessageUid generateMessageUid(Mailbox mailbox) {
+ try {
+ return messageUidProvider.nextUid(mailbox);
+ } catch (MailboxException e) {
+ throw new RuntimeException(e);
+ }
}
@Override
diff --git a/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/mail/CassandraMessageIdMapperTest.java b/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/mail/CassandraMessageIdMapperTest.java
index 00200d3b214..33e42502d04 100644
--- a/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/mail/CassandraMessageIdMapperTest.java
+++ b/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/mail/CassandraMessageIdMapperTest.java
@@ -152,7 +152,7 @@ void retrieveMessagesShouldNotReturnMessagesWhenFailToPersistInMessageDAO(Cassan
.whenQueryStartsWith("UPDATE messagev3"));
try {
- message1.setUid(mapperProvider.generateMessageUid());
+ message1.setUid(mapperProvider.generateMessageUid(benwaInboxMailbox));
message1.setModSeq(mapperProvider.generateModSeq(benwaInboxMailbox));
sut.save(message1);
} catch (Exception e) {
@@ -176,7 +176,7 @@ void retrieveMessagesShouldNotReturnMessagesWhenFailsToPersistBlobParts(Cassandr
.whenQueryStartsWith("INSERT INTO blobparts (id,chunknumber,data)"));
try {
- message1.setUid(mapperProvider.generateMessageUid());
+ message1.setUid(mapperProvider.generateMessageUid(benwaInboxMailbox));
message1.setModSeq(mapperProvider.generateModSeq(benwaInboxMailbox));
sut.save(message1);
} catch (Exception e) {
@@ -200,7 +200,7 @@ void retrieveMessagesShouldNotReturnMessagesWhenFailsToPersistBlobs(CassandraClu
.whenQueryStartsWith("INSERT INTO blobs (id,position) VALUES (:id,:position)"));
try {
- message1.setUid(mapperProvider.generateMessageUid());
+ message1.setUid(mapperProvider.generateMessageUid(benwaInboxMailbox));
message1.setModSeq(mapperProvider.generateModSeq(benwaInboxMailbox));
sut.save(message1);
} catch (Exception e) {
@@ -224,7 +224,7 @@ void retrieveMessagesShouldNotReturnMessagesWhenFailsToPersistInImapUidTable(Cas
.whenQueryStartsWith("INSERT INTO imapuidtable"));
try {
- message1.setUid(mapperProvider.generateMessageUid());
+ message1.setUid(mapperProvider.generateMessageUid(benwaInboxMailbox));
message1.setModSeq(mapperProvider.generateModSeq(benwaInboxMailbox));
sut.save(message1);
} catch (Exception e) {
@@ -248,7 +248,7 @@ void addShouldPersistInTableOfTruthWhenMessageIdTableWritesFails(CassandraCluste
.whenQueryStartsWith("INSERT INTO messageidtable"));
try {
- message1.setUid(mapperProvider.generateMessageUid());
+ message1.setUid(mapperProvider.generateMessageUid(benwaInboxMailbox));
message1.setModSeq(mapperProvider.generateModSeq(benwaInboxMailbox));
sut.save(message1);
} catch (Exception e) {
@@ -275,7 +275,7 @@ void addShouldRetryMessageDenormalization(CassandraCluster cassandra) throws Exc
.times(5)
.whenQueryStartsWith("INSERT INTO messageidtable"));
- message1.setUid(mapperProvider.generateMessageUid());
+ message1.setUid(mapperProvider.generateMessageUid(benwaInboxMailbox));
message1.setModSeq(mapperProvider.generateModSeq(benwaInboxMailbox));
sut.save(message1);
diff --git a/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/mail/CassandraMessageMapperRelaxedConsistencyTest.java b/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/mail/CassandraMessageMapperRelaxedConsistencyTest.java
index a71d7318973..202ce3797ac 100644
--- a/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/mail/CassandraMessageMapperRelaxedConsistencyTest.java
+++ b/mailbox/cassandra/src/test/java/org/apache/james/mailbox/cassandra/mail/CassandraMessageMapperRelaxedConsistencyTest.java
@@ -98,5 +98,20 @@ public void setFlagsShouldWorkWithConcurrencyWithRemove() throws Exception {
public void userFlagsUpdateShouldWorkInConcurrentEnvironment() throws Exception {
super.userFlagsUpdateShouldWorkInConcurrentEnvironment();
}
+
+ @Disabled("JAMES-3435 Without strong consistency flags update is not thread safe as long as it follows a read-before-write pattern")
+ @Override
+ public void updateFlagsWithRangeAllRangeShouldReturnUpdatedFlagsWithUidOrderAsc() {
+ }
+
+ @Disabled("JAMES-3435 Without strong consistency flags update is not thread safe as long as it follows a read-before-write pattern")
+ @Override
+ public void updateFlagsOnRangeShouldReturnUpdatedFlagsWithUidOrderAsc() {
+ }
+
+ @Disabled("JAMES-3435 Without strong consistency flags update is not thread safe as long as it follows a read-before-write pattern")
+ @Override
+ public void updateFlagsWithRangeFromShouldReturnUpdatedFlagsWithUidOrderAsc() {
+ }
}
}
diff --git a/mailbox/jpa/src/main/java/org/apache/james/mailbox/jpa/JPAMailboxSessionMapperFactory.java b/mailbox/jpa/src/main/java/org/apache/james/mailbox/jpa/JPAMailboxSessionMapperFactory.java
index 7f4f05d6c24..233d0e45a6d 100644
--- a/mailbox/jpa/src/main/java/org/apache/james/mailbox/jpa/JPAMailboxSessionMapperFactory.java
+++ b/mailbox/jpa/src/main/java/org/apache/james/mailbox/jpa/JPAMailboxSessionMapperFactory.java
@@ -102,12 +102,12 @@ public AnnotationMapper createAnnotationMapper(MailboxSession session) {
}
@Override
- public UidProvider getUidProvider() {
+ public UidProvider getUidProvider(MailboxSession session) {
return uidProvider;
}
@Override
- public ModSeqProvider getModSeqProvider() {
+ public ModSeqProvider getModSeqProvider(MailboxSession session) {
return modSeqProvider;
}
diff --git a/mailbox/jpa/src/test/java/org/apache/james/mailbox/jpa/mail/JPAMapperProvider.java b/mailbox/jpa/src/test/java/org/apache/james/mailbox/jpa/mail/JPAMapperProvider.java
index fdad8e414ac..8bbf83238c0 100644
--- a/mailbox/jpa/src/test/java/org/apache/james/mailbox/jpa/mail/JPAMapperProvider.java
+++ b/mailbox/jpa/src/test/java/org/apache/james/mailbox/jpa/mail/JPAMapperProvider.java
@@ -105,7 +105,7 @@ public MessageIdMapper createMessageIdMapper() throws MailboxException {
}
@Override
- public MessageUid generateMessageUid() {
+ public MessageUid generateMessageUid(Mailbox mailbox) {
throw new NotImplementedException("not implemented");
}
diff --git a/mailbox/memory/src/main/java/org/apache/james/mailbox/inmemory/InMemoryMailboxSessionMapperFactory.java b/mailbox/memory/src/main/java/org/apache/james/mailbox/inmemory/InMemoryMailboxSessionMapperFactory.java
index 84250a64512..bef77415878 100644
--- a/mailbox/memory/src/main/java/org/apache/james/mailbox/inmemory/InMemoryMailboxSessionMapperFactory.java
+++ b/mailbox/memory/src/main/java/org/apache/james/mailbox/inmemory/InMemoryMailboxSessionMapperFactory.java
@@ -103,12 +103,12 @@ public AnnotationMapper createAnnotationMapper(MailboxSession session) {
}
@Override
- public UidProvider getUidProvider() {
+ public UidProvider getUidProvider(MailboxSession session) {
return uidProvider;
}
@Override
- public ModSeqProvider getModSeqProvider() {
+ public ModSeqProvider getModSeqProvider(MailboxSession session) {
return modSeqProvider;
}
diff --git a/mailbox/memory/src/test/java/org/apache/james/mailbox/inmemory/mail/InMemoryMapperProvider.java b/mailbox/memory/src/test/java/org/apache/james/mailbox/inmemory/mail/InMemoryMapperProvider.java
index 286057ee167..e0eeeb3bfd0 100644
--- a/mailbox/memory/src/test/java/org/apache/james/mailbox/inmemory/mail/InMemoryMapperProvider.java
+++ b/mailbox/memory/src/test/java/org/apache/james/mailbox/inmemory/mail/InMemoryMapperProvider.java
@@ -90,7 +90,7 @@ public InMemoryId generateId() {
}
@Override
- public MessageUid generateMessageUid() {
+ public MessageUid generateMessageUid(Mailbox mailbox) {
return messageUidProvider.next();
}
@@ -119,13 +119,13 @@ public List getSupportedCapabilities() {
@Override
public ModSeq generateModSeq(Mailbox mailbox) throws MailboxException {
- return inMemoryMailboxSessionMapperFactory.getModSeqProvider()
+ return inMemoryMailboxSessionMapperFactory.getModSeqProvider(null)
.nextModSeq(mailbox);
}
@Override
public ModSeq highestModSeq(Mailbox mailbox) throws MailboxException {
- return inMemoryMailboxSessionMapperFactory.getModSeqProvider()
+ return inMemoryMailboxSessionMapperFactory.getModSeqProvider(null)
.highestModSeq(mailbox);
}
diff --git a/mailbox/plugin/deleted-messages-vault-postgres/pom.xml b/mailbox/plugin/deleted-messages-vault-postgres/pom.xml
new file mode 100644
index 00000000000..856b49aa565
--- /dev/null
+++ b/mailbox/plugin/deleted-messages-vault-postgres/pom.xml
@@ -0,0 +1,83 @@
+
+
+
+ 4.0.0
+
+ org.apache.james
+ apache-james-mailbox
+ 3.9.0-SNAPSHOT
+ ../../pom.xml
+
+
+ apache-james-mailbox-deleted-messages-vault-postgres
+ Apache James :: Mailbox :: Plugin :: Deleted Messages Vault :: Postgres
+ Apache James Mailbox Deleted Messages Vault metadata on top of Postgres
+
+
+
+ ${james.groupId}
+ apache-james-backends-postgres
+
+
+ ${james.groupId}
+ apache-james-backends-postgres
+ test-jar
+ test
+
+
+ ${james.groupId}
+ apache-james-mailbox-deleted-messages-vault
+
+
+ ${james.groupId}
+ apache-james-mailbox-deleted-messages-vault
+ test-jar
+ test
+
+
+ ${james.groupId}
+ apache-james-mailbox-memory
+ test
+
+
+ ${james.groupId}
+ apache-james-mailbox-postgres
+
+
+ ${james.groupId}
+ james-server-guice-common
+ test-jar
+ test
+
+
+ ${james.groupId}
+ james-server-testing
+ test
+
+
+ ${james.groupId}
+ testing-base
+ test
+
+
+ org.testcontainers
+ postgresql
+ test
+
+
+
diff --git a/mailbox/plugin/deleted-messages-vault-postgres/src/main/java/org/apache/james/vault/metadata/PostgresDeletedMessageMetadataModule.java b/mailbox/plugin/deleted-messages-vault-postgres/src/main/java/org/apache/james/vault/metadata/PostgresDeletedMessageMetadataModule.java
new file mode 100644
index 00000000000..de041482a47
--- /dev/null
+++ b/mailbox/plugin/deleted-messages-vault-postgres/src/main/java/org/apache/james/vault/metadata/PostgresDeletedMessageMetadataModule.java
@@ -0,0 +1,65 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.vault.metadata;
+
+import static org.apache.james.vault.metadata.PostgresDeletedMessageMetadataModule.DeletedMessageMetadataTable.OWNER_MESSAGE_ID_INDEX;
+import static org.apache.james.vault.metadata.PostgresDeletedMessageMetadataModule.DeletedMessageMetadataTable.TABLE;
+
+import org.apache.james.backends.postgres.PostgresIndex;
+import org.apache.james.backends.postgres.PostgresModule;
+import org.apache.james.backends.postgres.PostgresTable;
+import org.jooq.Field;
+import org.jooq.JSONB;
+import org.jooq.Record;
+import org.jooq.Table;
+import org.jooq.impl.DSL;
+import org.jooq.impl.SQLDataType;
+
+public interface PostgresDeletedMessageMetadataModule {
+ interface DeletedMessageMetadataTable {
+ Table<Record> TABLE_NAME = DSL.table("deleted_messages_metadata");
+
+ Field<String> BUCKET_NAME = DSL.field("bucket_name", SQLDataType.VARCHAR.notNull());
+ Field<String> OWNER = DSL.field("owner", SQLDataType.VARCHAR.notNull());
+ Field<String> MESSAGE_ID = DSL.field("messageId", SQLDataType.VARCHAR.notNull());
+ Field<String> BLOB_ID = DSL.field("blob_id", SQLDataType.VARCHAR.notNull());
+ Field<JSONB> METADATA = DSL.field("metadata", SQLDataType.JSONB.notNull());
+
+ PostgresTable TABLE = PostgresTable.name(TABLE_NAME.getName())
+ .createTableStep(((dsl, tableName) -> dsl.createTableIfNotExists(tableName)
+ .column(BUCKET_NAME)
+ .column(OWNER)
+ .column(MESSAGE_ID)
+ .column(BLOB_ID)
+ .column(METADATA)
+ .primaryKey(BUCKET_NAME, OWNER, MESSAGE_ID)))
+ .disableRowLevelSecurity()
+ .build();
+
+ PostgresIndex OWNER_MESSAGE_ID_INDEX = PostgresIndex.name("owner_messageId_index")
+ .createIndexStep((dsl, indexName) -> dsl.createUniqueIndexIfNotExists(indexName)
+ .on(TABLE_NAME, OWNER, MESSAGE_ID));
+ }
+
+ PostgresModule MODULE = PostgresModule.builder()
+ .addTable(TABLE)
+ .addIndex(OWNER_MESSAGE_ID_INDEX)
+ .build();
+}
diff --git a/mailbox/plugin/deleted-messages-vault-postgres/src/main/java/org/apache/james/vault/metadata/PostgresDeletedMessageMetadataVault.java b/mailbox/plugin/deleted-messages-vault-postgres/src/main/java/org/apache/james/vault/metadata/PostgresDeletedMessageMetadataVault.java
new file mode 100644
index 00000000000..7df316dc87e
--- /dev/null
+++ b/mailbox/plugin/deleted-messages-vault-postgres/src/main/java/org/apache/james/vault/metadata/PostgresDeletedMessageMetadataVault.java
@@ -0,0 +1,115 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.vault.metadata;
+
+import static org.apache.james.util.ReactorUtils.publishIfPresent;
+import static org.apache.james.vault.metadata.PostgresDeletedMessageMetadataModule.DeletedMessageMetadataTable.BLOB_ID;
+import static org.apache.james.vault.metadata.PostgresDeletedMessageMetadataModule.DeletedMessageMetadataTable.BUCKET_NAME;
+import static org.apache.james.vault.metadata.PostgresDeletedMessageMetadataModule.DeletedMessageMetadataTable.MESSAGE_ID;
+import static org.apache.james.vault.metadata.PostgresDeletedMessageMetadataModule.DeletedMessageMetadataTable.METADATA;
+import static org.apache.james.vault.metadata.PostgresDeletedMessageMetadataModule.DeletedMessageMetadataTable.OWNER;
+import static org.apache.james.vault.metadata.PostgresDeletedMessageMetadataModule.DeletedMessageMetadataTable.TABLE_NAME;
+import static org.jooq.JSONB.jsonb;
+
+import java.util.function.Function;
+
+import jakarta.inject.Inject;
+
+import org.apache.james.backends.postgres.utils.PostgresExecutor;
+import org.apache.james.blob.api.BlobId;
+import org.apache.james.blob.api.BucketName;
+import org.apache.james.core.Username;
+import org.apache.james.mailbox.model.MessageId;
+import org.jooq.Record;
+import org.reactivestreams.Publisher;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+public class PostgresDeletedMessageMetadataVault implements DeletedMessageMetadataVault {
+ private final PostgresExecutor postgresExecutor;
+ private final MetadataSerializer metadataSerializer;
+ private final BlobId.Factory blobIdFactory;
+
+ @Inject
+ public PostgresDeletedMessageMetadataVault(PostgresExecutor postgresExecutor,
+ MetadataSerializer metadataSerializer,
+ BlobId.Factory blobIdFactory) {
+ this.postgresExecutor = postgresExecutor;
+ this.metadataSerializer = metadataSerializer;
+ this.blobIdFactory = blobIdFactory;
+ }
+
+ @Override
+ public Publisher<Void> store(DeletedMessageWithStorageInformation deletedMessage) {
+ return postgresExecutor.executeVoid(context -> Mono.from(context.insertInto(TABLE_NAME)
+ .set(OWNER, deletedMessage.getDeletedMessage().getOwner().asString())
+ .set(MESSAGE_ID, deletedMessage.getDeletedMessage().getMessageId().serialize())
+ .set(BUCKET_NAME, deletedMessage.getStorageInformation().getBucketName().asString())
+ .set(BLOB_ID, deletedMessage.getStorageInformation().getBlobId().asString())
+ .set(METADATA, jsonb(metadataSerializer.serialize(deletedMessage)))));
+ }
+
+ @Override
+ public Publisher<Void> removeMetadataRelatedToBucket(BucketName bucketName) {
+ return postgresExecutor.executeVoid(context -> Mono.from(context.deleteFrom(TABLE_NAME)
+ .where(BUCKET_NAME.eq(bucketName.asString()))));
+ }
+
+ @Override
+ public Publisher<Void> remove(BucketName bucketName, Username username, MessageId messageId) {
+ return postgresExecutor.executeVoid(context -> Mono.from(context.deleteFrom(TABLE_NAME)
+ .where(BUCKET_NAME.eq(bucketName.asString()),
+ OWNER.eq(username.asString()),
+ MESSAGE_ID.eq(messageId.serialize()))));
+ }
+
+ @Override
+ public Publisher<StorageInformation> retrieveStorageInformation(Username username, MessageId messageId) {
+ return postgresExecutor.executeRow(context -> Mono.from(context.select(BUCKET_NAME, BLOB_ID)
+ .from(TABLE_NAME)
+ .where(OWNER.eq(username.asString()),
+ MESSAGE_ID.eq(messageId.serialize()))))
+ .map(toStorageInformation());
+ }
+
+ private Function<Record, StorageInformation> toStorageInformation() {
+ return record -> StorageInformation.builder()
+ .bucketName(BucketName.of(record.get(BUCKET_NAME)))
+ .blobId(blobIdFactory.from(record.get(BLOB_ID)));
+ }
+
+ @Override
+ public Publisher<DeletedMessageWithStorageInformation> listMessages(BucketName bucketName, Username username) {
+ return postgresExecutor.executeRows(context -> Flux.from(context.select(METADATA)
+ .from(TABLE_NAME)
+ .where(BUCKET_NAME.eq(bucketName.asString()),
+ OWNER.eq(username.asString()))))
+ .map(record -> metadataSerializer.deserialize(record.get(METADATA).data()))
+ .handle(publishIfPresent());
+ }
+
+ @Override
+ public Publisher<BucketName> listRelatedBuckets() {
+ return postgresExecutor.executeRows(context -> Flux.from(context.selectDistinct(BUCKET_NAME)
+ .from(TABLE_NAME)))
+ .map(record -> BucketName.of(record.get(BUCKET_NAME)));
+ }
+}
diff --git a/mailbox/plugin/deleted-messages-vault-postgres/src/main/java/org/apache/james/vault/metadata/PostgresDeletedMessageVaultDeletionCallback.java b/mailbox/plugin/deleted-messages-vault-postgres/src/main/java/org/apache/james/vault/metadata/PostgresDeletedMessageVaultDeletionCallback.java
new file mode 100644
index 00000000000..224d2a492ed
--- /dev/null
+++ b/mailbox/plugin/deleted-messages-vault-postgres/src/main/java/org/apache/james/vault/metadata/PostgresDeletedMessageVaultDeletionCallback.java
@@ -0,0 +1,123 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.vault.metadata;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.SequenceInputStream;
+import java.time.Clock;
+import java.time.ZoneOffset;
+import java.time.ZonedDateTime;
+import java.util.Optional;
+import java.util.Set;
+
+import jakarta.inject.Inject;
+
+import org.apache.james.blob.api.BlobStore;
+import org.apache.james.core.MailAddress;
+import org.apache.james.core.MaybeSender;
+import org.apache.james.core.Username;
+import org.apache.james.mailbox.model.MailboxId;
+import org.apache.james.mailbox.model.MessageId;
+import org.apache.james.mailbox.postgres.DeleteMessageListener;
+import org.apache.james.mailbox.postgres.mail.MessageRepresentation;
+import org.apache.james.mime4j.MimeIOException;
+import org.apache.james.mime4j.codec.DecodeMonitor;
+import org.apache.james.mime4j.dom.Message;
+import org.apache.james.mime4j.dom.address.Mailbox;
+import org.apache.james.mime4j.message.DefaultMessageBuilder;
+import org.apache.james.mime4j.stream.MimeConfig;
+import org.apache.james.server.core.Envelope;
+import org.apache.james.vault.DeletedMessage;
+import org.apache.james.vault.DeletedMessageVault;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.github.fge.lambdas.Throwing;
+import com.google.common.collect.ImmutableSet;
+
+import reactor.core.publisher.Mono;
+
+public class PostgresDeletedMessageVaultDeletionCallback implements DeleteMessageListener.DeletionCallback {
+ private static final Logger LOGGER = LoggerFactory.getLogger(PostgresDeletedMessageVaultDeletionCallback.class);
+
+ private final DeletedMessageVault deletedMessageVault;
+ private final BlobStore blobStore;
+ private final Clock clock;
+
+ @Inject
+ public PostgresDeletedMessageVaultDeletionCallback(DeletedMessageVault deletedMessageVault, BlobStore blobStore, Clock clock) {
+ this.deletedMessageVault = deletedMessageVault;
+ this.blobStore = blobStore;
+ this.clock = clock;
+ }
+
+ @Override
+ public Mono<Void> forMessage(MessageRepresentation message, MailboxId mailboxId, Username owner) {
+ return Mono.fromSupplier(Throwing.supplier(() -> message.getHeaderContent().getInputStream()))
+ .flatMap(headerStream -> {
+ Optional<Message> mimeMessage = parseMessage(headerStream, message.getMessageId());
+ DeletedMessage deletedMessage = DeletedMessage.builder()
+ .messageId(message.getMessageId())
+ .originMailboxes(mailboxId)
+ .user(owner)
+ .deliveryDate(ZonedDateTime.ofInstant(message.getInternalDate().toInstant(), ZoneOffset.UTC))
+ .deletionDate(ZonedDateTime.ofInstant(clock.instant(), ZoneOffset.UTC))
+ .sender(retrieveSender(mimeMessage))
+ .recipients(retrieveRecipients(mimeMessage))
+ .hasAttachment(!message.getAttachments().isEmpty())
+ .size(message.getSize())
+ .subject(mimeMessage.map(Message::getSubject))
+ .build();
+
+ return Mono.from(blobStore.readReactive(blobStore.getDefaultBucketName(), message.getBodyBlobId(), BlobStore.StoragePolicy.LOW_COST))
+ .map(bodyStream -> new SequenceInputStream(headerStream, bodyStream))
+ .flatMap(bodyStream -> Mono.from(deletedMessageVault.append(deletedMessage, bodyStream)));
+ });
+ }
+
+ private Optional<Message> parseMessage(InputStream inputStream, MessageId messageId) {
+ DefaultMessageBuilder messageBuilder = new DefaultMessageBuilder();
+ messageBuilder.setMimeEntityConfig(MimeConfig.PERMISSIVE);
+ messageBuilder.setDecodeMonitor(DecodeMonitor.SILENT);
+ try {
+ return Optional.ofNullable(messageBuilder.parseMessage(inputStream));
+ } catch (MimeIOException e) {
+ LOGGER.warn("Can not parse the message {}", messageId, e);
+ return Optional.empty();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private MaybeSender retrieveSender(Optional<Message> mimeMessage) {
+ return mimeMessage
+ .map(Message::getSender)
+ .map(Mailbox::getAddress)
+ .map(MaybeSender::getMailSender)
+ .orElse(MaybeSender.nullSender());
+ }
+
+ private Set<MailAddress> retrieveRecipients(Optional<Message> maybeMessage) {
+ return maybeMessage.map(message -> Envelope.fromMime4JMessage(message, Envelope.ValidationPolicy.IGNORE))
+ .map(Envelope::getRecipients)
+ .orElse(ImmutableSet.of());
+ }
+}
diff --git a/mailbox/plugin/deleted-messages-vault-postgres/src/test/java/org/apache/james/vault/metadata/PostgresDeletedMessageMetadataVaultTest.java b/mailbox/plugin/deleted-messages-vault-postgres/src/test/java/org/apache/james/vault/metadata/PostgresDeletedMessageMetadataVaultTest.java
new file mode 100644
index 00000000000..b765147f1ab
--- /dev/null
+++ b/mailbox/plugin/deleted-messages-vault-postgres/src/test/java/org/apache/james/vault/metadata/PostgresDeletedMessageMetadataVaultTest.java
@@ -0,0 +1,46 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.vault.metadata;
+
+import org.apache.james.backends.postgres.PostgresExtension;
+import org.apache.james.backends.postgres.PostgresModule;
+import org.apache.james.blob.api.HashBlobId;
+import org.apache.james.mailbox.inmemory.InMemoryId;
+import org.apache.james.mailbox.inmemory.InMemoryMessageId;
+import org.apache.james.vault.dto.DeletedMessageWithStorageInformationConverter;
+import org.junit.jupiter.api.extension.RegisterExtension;
+
+class PostgresDeletedMessageMetadataVaultTest implements DeletedMessageMetadataVaultContract {
+ @RegisterExtension
+ static PostgresExtension postgresExtension = PostgresExtension.withoutRowLevelSecurity(
+ PostgresModule.aggregateModules(PostgresDeletedMessageMetadataModule.MODULE));
+
+ @Override
+ public DeletedMessageMetadataVault metadataVault() {
+ HashBlobId.Factory blobIdFactory = new HashBlobId.Factory();
+ InMemoryMessageId.Factory messageIdFactory = new InMemoryMessageId.Factory();
+ DeletedMessageWithStorageInformationConverter dtoConverter = new DeletedMessageWithStorageInformationConverter(blobIdFactory,
+ messageIdFactory, new InMemoryId.Factory());
+
+ return new PostgresDeletedMessageMetadataVault(postgresExtension.getDefaultPostgresExecutor(),
+ new MetadataSerializer(dtoConverter),
+ blobIdFactory);
+ }
+}
diff --git a/mailbox/plugin/deleted-messages-vault-cassandra/src/main/java/org/apache/james/vault/metadata/MetadataSerializer.java b/mailbox/plugin/deleted-messages-vault/src/main/java/org/apache/james/vault/metadata/MetadataSerializer.java
similarity index 100%
rename from mailbox/plugin/deleted-messages-vault-cassandra/src/main/java/org/apache/james/vault/metadata/MetadataSerializer.java
rename to mailbox/plugin/deleted-messages-vault/src/main/java/org/apache/james/vault/metadata/MetadataSerializer.java
diff --git a/mailbox/plugin/quota-mailing-cassandra/src/test/java/org/apache/james/mailbox/quota/cassandra/dto/DTOTest.java b/mailbox/plugin/quota-mailing-cassandra/src/test/java/org/apache/james/mailbox/quota/cassandra/dto/DTOTest.java
index 2673f28d1a0..be60527068a 100644
--- a/mailbox/plugin/quota-mailing-cassandra/src/test/java/org/apache/james/mailbox/quota/cassandra/dto/DTOTest.java
+++ b/mailbox/plugin/quota-mailing-cassandra/src/test/java/org/apache/james/mailbox/quota/cassandra/dto/DTOTest.java
@@ -20,7 +20,7 @@
package org.apache.james.mailbox.quota.cassandra.dto;
import static net.javacrumbs.jsonunit.assertj.JsonAssertions.assertThatJson;
-import static org.apache.james.mailbox.quota.cassandra.dto.QuotaEventDTOModules.QUOTA_THRESHOLD_CHANGE;
+import static org.apache.james.mailbox.quota.mailing.events.QuotaEventDTOModules.QUOTA_THRESHOLD_CHANGE;
import static org.apache.james.mailbox.quota.model.QuotaThresholdFixture._75;
import static org.apache.james.mailbox.quota.model.QuotaThresholdFixture._80;
import static org.assertj.core.api.Assertions.assertThat;
@@ -36,7 +36,10 @@
import org.apache.james.eventsourcing.EventId;
import org.apache.james.mailbox.model.Quota;
import org.apache.james.mailbox.quota.mailing.aggregates.UserQuotaThresholds;
+import org.apache.james.mailbox.quota.mailing.events.HistoryEvolutionDTO;
+import org.apache.james.mailbox.quota.mailing.events.QuotaDTO;
import org.apache.james.mailbox.quota.mailing.events.QuotaThresholdChangedEvent;
+import org.apache.james.mailbox.quota.mailing.events.QuotaThresholdChangedEventDTO;
import org.apache.james.mailbox.quota.model.HistoryEvolution;
import org.apache.james.mailbox.quota.model.QuotaThresholdChange;
import org.apache.james.util.ClassLoaderUtils;
diff --git a/mailbox/plugin/quota-mailing-cassandra/src/test/java/org/apache/james/mailbox/quota/cassandra/listeners/CassandraQuotaMailingListenersIntegrationTest.java b/mailbox/plugin/quota-mailing-cassandra/src/test/java/org/apache/james/mailbox/quota/cassandra/listeners/CassandraQuotaMailingListenersIntegrationTest.java
index 3c9b8a4e217..0464f95b75c 100644
--- a/mailbox/plugin/quota-mailing-cassandra/src/test/java/org/apache/james/mailbox/quota/cassandra/listeners/CassandraQuotaMailingListenersIntegrationTest.java
+++ b/mailbox/plugin/quota-mailing-cassandra/src/test/java/org/apache/james/mailbox/quota/cassandra/listeners/CassandraQuotaMailingListenersIntegrationTest.java
@@ -21,7 +21,7 @@
import org.apache.james.eventsourcing.eventstore.JsonEventSerializer;
import org.apache.james.eventsourcing.eventstore.cassandra.CassandraEventStoreExtension;
-import org.apache.james.mailbox.quota.cassandra.dto.QuotaEventDTOModules;
+import org.apache.james.mailbox.quota.mailing.events.QuotaEventDTOModules;
import org.apache.james.mailbox.quota.mailing.listeners.QuotaThresholdMailingIntegrationTest;
import org.junit.jupiter.api.extension.RegisterExtension;
diff --git a/mailbox/plugin/quota-mailing-cassandra/src/main/java/org/apache/james/mailbox/quota/cassandra/dto/HistoryEvolutionDTO.java b/mailbox/plugin/quota-mailing/src/main/java/org/apache/james/mailbox/quota/mailing/events/HistoryEvolutionDTO.java
similarity index 66%
rename from mailbox/plugin/quota-mailing-cassandra/src/main/java/org/apache/james/mailbox/quota/cassandra/dto/HistoryEvolutionDTO.java
rename to mailbox/plugin/quota-mailing/src/main/java/org/apache/james/mailbox/quota/mailing/events/HistoryEvolutionDTO.java
index 2d1dda1cc08..afeaab89b2f 100644
--- a/mailbox/plugin/quota-mailing-cassandra/src/main/java/org/apache/james/mailbox/quota/cassandra/dto/HistoryEvolutionDTO.java
+++ b/mailbox/plugin/quota-mailing/src/main/java/org/apache/james/mailbox/quota/mailing/events/HistoryEvolutionDTO.java
@@ -17,7 +17,7 @@
* under the License. *
****************************************************************/
-package org.apache.james.mailbox.quota.cassandra.dto;
+package org.apache.james.mailbox.quota.mailing.events;
import java.time.Instant;
import java.util.Optional;
@@ -26,14 +26,17 @@
import org.apache.james.mailbox.quota.model.QuotaThreshold;
import org.apache.james.mailbox.quota.model.QuotaThresholdChange;
-import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Booleans;
-class HistoryEvolutionDTO {
+public record HistoryEvolutionDTO(@JsonProperty("change") HistoryEvolution.HistoryChangeType change,
+ @JsonProperty("recentness") Optional recentness,
+ @JsonProperty("threshold") Optional threshold,
+ @JsonProperty("instant") Optional instant) {
+ @JsonIgnore
public static HistoryEvolutionDTO toDto(HistoryEvolution historyEvolution) {
return new HistoryEvolutionDTO(
historyEvolution.getThresholdHistoryChange(),
@@ -46,43 +49,9 @@ public static HistoryEvolutionDTO toDto(HistoryEvolution historyEvolution) {
.map(Instant::toEpochMilli));
}
- private final HistoryEvolution.HistoryChangeType change;
- private final Optional recentness;
- private final Optional threshold;
- private final Optional instant;
-
- @JsonCreator
- public HistoryEvolutionDTO(
- @JsonProperty("changeType") HistoryEvolution.HistoryChangeType change,
- @JsonProperty("recentness") Optional recentness,
- @JsonProperty("threshold") Optional threshold,
- @JsonProperty("instant") Optional instant) {
- this.change = change;
- this.recentness = recentness;
- this.threshold = threshold;
- this.instant = instant;
- }
-
- public HistoryEvolution.HistoryChangeType getChange() {
- return change;
- }
-
- public Optional getRecentness() {
- return recentness;
- }
-
- public Optional getThreshold() {
- return threshold;
- }
-
- public Optional getInstant() {
- return instant;
- }
-
@JsonIgnore
public HistoryEvolution toHistoryEvolution() {
- Preconditions.checkState(Booleans.countTrue(
- threshold.isPresent(), instant.isPresent()) != 1,
+ Preconditions.checkState(Booleans.countTrue(threshold.isPresent(), instant.isPresent()) != 1,
"threshold and instant needs to be both set, or both unset. Mixed states not allowed.");
Optional quotaThresholdChange = threshold
@@ -93,6 +62,5 @@ public HistoryEvolution toHistoryEvolution() {
change,
recentness,
quotaThresholdChange);
-
}
-}
+}
\ No newline at end of file
diff --git a/mailbox/plugin/quota-mailing-cassandra/src/main/java/org/apache/james/mailbox/quota/cassandra/dto/QuotaDTO.java b/mailbox/plugin/quota-mailing/src/main/java/org/apache/james/mailbox/quota/mailing/events/QuotaDTO.java
similarity index 82%
rename from mailbox/plugin/quota-mailing-cassandra/src/main/java/org/apache/james/mailbox/quota/cassandra/dto/QuotaDTO.java
rename to mailbox/plugin/quota-mailing/src/main/java/org/apache/james/mailbox/quota/mailing/events/QuotaDTO.java
index 78e69cd5e8f..eff3a667e40 100644
--- a/mailbox/plugin/quota-mailing-cassandra/src/main/java/org/apache/james/mailbox/quota/cassandra/dto/QuotaDTO.java
+++ b/mailbox/plugin/quota-mailing/src/main/java/org/apache/james/mailbox/quota/mailing/events/QuotaDTO.java
@@ -17,7 +17,7 @@
* under the License. *
****************************************************************/
-package org.apache.james.mailbox.quota.cassandra.dto;
+package org.apache.james.mailbox.quota.mailing.events;
import java.util.Optional;
@@ -27,11 +27,13 @@
import org.apache.james.core.quota.QuotaSizeUsage;
import org.apache.james.mailbox.model.Quota;
-import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
-class QuotaDTO {
+public record QuotaDTO(@JsonProperty("used") long used,
+ @JsonProperty("limit") Optional limit) {
+
+ @JsonIgnore
public static QuotaDTO from(Quota, ?> quota) {
if (quota.getLimit().isUnlimited()) {
return new QuotaDTO(quota.getUsed().asLong(), Optional.empty());
@@ -39,24 +41,6 @@ public static QuotaDTO from(Quota, ?> quota) {
return new QuotaDTO(quota.getUsed().asLong(), Optional.of(quota.getLimit().asLong()));
}
- private final long used;
- private final Optional limit;
-
- @JsonCreator
- private QuotaDTO(@JsonProperty("used") long used,
- @JsonProperty("limit") Optional limit) {
- this.used = used;
- this.limit = limit;
- }
-
- public long getUsed() {
- return used;
- }
-
- public Optional getLimit() {
- return limit;
- }
-
@JsonIgnore
public Quota asSizeQuota() {
return Quota.builder()
@@ -72,4 +56,4 @@ public Quota asCountQuota() {
.computedLimit(QuotaCountLimit.count(limit))
.build();
}
-}
+}
\ No newline at end of file
diff --git a/mailbox/plugin/quota-mailing-cassandra/src/main/java/org/apache/james/mailbox/quota/cassandra/dto/QuotaEventDTOModules.java b/mailbox/plugin/quota-mailing/src/main/java/org/apache/james/mailbox/quota/mailing/events/QuotaEventDTOModules.java
similarity index 92%
rename from mailbox/plugin/quota-mailing-cassandra/src/main/java/org/apache/james/mailbox/quota/cassandra/dto/QuotaEventDTOModules.java
rename to mailbox/plugin/quota-mailing/src/main/java/org/apache/james/mailbox/quota/mailing/events/QuotaEventDTOModules.java
index 1295411bf6b..5a0bb983c5c 100644
--- a/mailbox/plugin/quota-mailing-cassandra/src/main/java/org/apache/james/mailbox/quota/cassandra/dto/QuotaEventDTOModules.java
+++ b/mailbox/plugin/quota-mailing/src/main/java/org/apache/james/mailbox/quota/mailing/events/QuotaEventDTOModules.java
@@ -17,13 +17,11 @@
* under the License. *
****************************************************************/
-package org.apache.james.mailbox.quota.cassandra.dto;
+package org.apache.james.mailbox.quota.mailing.events;
import org.apache.james.eventsourcing.eventstore.dto.EventDTOModule;
-import org.apache.james.mailbox.quota.mailing.events.QuotaThresholdChangedEvent;
public interface QuotaEventDTOModules {
-
EventDTOModule QUOTA_THRESHOLD_CHANGE =
EventDTOModule
.forEvent(QuotaThresholdChangedEvent.class)
diff --git a/mailbox/plugin/quota-mailing-cassandra/src/main/java/org/apache/james/mailbox/quota/cassandra/dto/QuotaThresholdChangedEventDTO.java b/mailbox/plugin/quota-mailing/src/main/java/org/apache/james/mailbox/quota/mailing/events/QuotaThresholdChangedEventDTO.java
similarity index 57%
rename from mailbox/plugin/quota-mailing-cassandra/src/main/java/org/apache/james/mailbox/quota/cassandra/dto/QuotaThresholdChangedEventDTO.java
rename to mailbox/plugin/quota-mailing/src/main/java/org/apache/james/mailbox/quota/mailing/events/QuotaThresholdChangedEventDTO.java
index 829feda3ae6..725a1f5ecdf 100644
--- a/mailbox/plugin/quota-mailing-cassandra/src/main/java/org/apache/james/mailbox/quota/cassandra/dto/QuotaThresholdChangedEventDTO.java
+++ b/mailbox/plugin/quota-mailing/src/main/java/org/apache/james/mailbox/quota/mailing/events/QuotaThresholdChangedEventDTO.java
@@ -17,23 +17,28 @@
* under the License. *
****************************************************************/
-package org.apache.james.mailbox.quota.cassandra.dto;
+package org.apache.james.mailbox.quota.mailing.events;
import org.apache.james.eventsourcing.EventId;
import org.apache.james.eventsourcing.eventstore.dto.EventDTO;
import org.apache.james.mailbox.quota.mailing.aggregates.UserQuotaThresholds;
-import org.apache.james.mailbox.quota.mailing.events.QuotaThresholdChangedEvent;
-import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
-class QuotaThresholdChangedEventDTO implements EventDTO {
+public record QuotaThresholdChangedEventDTO(@JsonProperty("type") String type,
+ @JsonProperty("eventId") int eventId,
+ @JsonProperty("aggregateId") String aggregateId,
+ @JsonProperty("sizeQuota") QuotaDTO sizeQuota,
+ @JsonProperty("countQuota") QuotaDTO countQuota,
+ @JsonProperty("sizeEvolution") HistoryEvolutionDTO sizeEvolution,
+ @JsonProperty("countEvolution") HistoryEvolutionDTO countEvolution) implements EventDTO {
@JsonIgnore
public static QuotaThresholdChangedEventDTO from(QuotaThresholdChangedEvent event, String type) {
return new QuotaThresholdChangedEventDTO(
- type, event.eventId().serialize(),
+ type,
+ event.eventId().serialize(),
event.getAggregateId().asAggregateKey(),
QuotaDTO.from(event.getSizeQuota()),
QuotaDTO.from(event.getCountQuota()),
@@ -41,60 +46,6 @@ public static QuotaThresholdChangedEventDTO from(QuotaThresholdChangedEvent even
HistoryEvolutionDTO.toDto(event.getCountHistoryEvolution()));
}
- private final String type;
- private final int eventId;
- private final String aggregateId;
- private final QuotaDTO sizeQuota;
- private final QuotaDTO countQuota;
- private final HistoryEvolutionDTO sizeEvolution;
- private final HistoryEvolutionDTO countEvolution;
-
- @JsonCreator
- private QuotaThresholdChangedEventDTO(
- @JsonProperty("type") String type,
- @JsonProperty("eventId") int eventId,
- @JsonProperty("aggregateId") String aggregateId,
- @JsonProperty("sizeQuota") QuotaDTO sizeQuota,
- @JsonProperty("countQuota") QuotaDTO countQuota,
- @JsonProperty("sizeEvolution") HistoryEvolutionDTO sizeEvolution,
- @JsonProperty("countEvolution") HistoryEvolutionDTO countEvolution) {
- this.type = type;
- this.eventId = eventId;
- this.aggregateId = aggregateId;
- this.sizeQuota = sizeQuota;
- this.countQuota = countQuota;
- this.sizeEvolution = sizeEvolution;
- this.countEvolution = countEvolution;
- }
-
- public String getType() {
- return type;
- }
-
- public long getEventId() {
- return eventId;
- }
-
- public String getAggregateId() {
- return aggregateId;
- }
-
- public QuotaDTO getSizeQuota() {
- return sizeQuota;
- }
-
- public QuotaDTO getCountQuota() {
- return countQuota;
- }
-
- public HistoryEvolutionDTO getSizeEvolution() {
- return sizeEvolution;
- }
-
- public HistoryEvolutionDTO getCountEvolution() {
- return countEvolution;
- }
-
@JsonIgnore
public QuotaThresholdChangedEvent toEvent() {
return new QuotaThresholdChangedEvent(
@@ -105,4 +56,10 @@ public QuotaThresholdChangedEvent toEvent() {
countQuota.asCountQuota(),
UserQuotaThresholds.Id.fromKey(aggregateId));
}
-}
+
+ @Override
+ @JsonIgnore
+ public String getType() {
+ return type;
+ }
+}
\ No newline at end of file
diff --git a/mailbox/pom.xml b/mailbox/pom.xml
index e79c7da284d..0fe0776bc09 100644
--- a/mailbox/pom.xml
+++ b/mailbox/pom.xml
@@ -49,6 +49,7 @@
         <module>plugin/deleted-messages-vault</module>
         <module>plugin/deleted-messages-vault-cassandra</module>
+        <module>plugin/deleted-messages-vault-postgres</module>
         <module>plugin/quota-mailing</module>
         <module>plugin/quota-mailing-cassandra</module>
@@ -58,6 +59,8 @@
         <module>plugin/quota-search-opensearch</module>
         <module>plugin/quota-search-scanning</module>
+        <module>postgres</module>
+
         <module>scanning-search</module>
         <module>spring</module>
         <module>store</module>
diff --git a/mailbox/postgres/pom.xml b/mailbox/postgres/pom.xml
new file mode 100644
index 00000000000..96f13038906
--- /dev/null
+++ b/mailbox/postgres/pom.xml
@@ -0,0 +1,185 @@
+
+
+
+ 4.0.0
+
+ org.apache.james
+ apache-james-mailbox
+ 3.9.0-SNAPSHOT
+ ../pom.xml
+
+
+ apache-james-mailbox-postgres
+ Apache James :: Mailbox :: Postgres
+
+
+ 5.3.7
+
+
+
+
+ ${james.groupId}
+ apache-james-backends-postgres
+
+
+ ${james.groupId}
+ apache-james-backends-postgres
+ test-jar
+ test
+
+
+ ${james.groupId}
+ apache-james-mailbox-api
+
+
+ ${james.groupId}
+ apache-james-mailbox-api
+ test-jar
+ test
+
+
+ ${james.groupId}
+ apache-james-mailbox-event-json
+
+
+ ${james.groupId}
+ apache-james-mailbox-store
+
+
+ ${james.groupId}
+ apache-james-mailbox-store
+ test-jar
+ test
+
+
+ ${james.groupId}
+ apache-james-mailbox-tools-quota-recompute
+ test
+
+
+ ${james.groupId}
+ apache-james-mailbox-tools-quota-recompute
+ test-jar
+ test
+
+
+ ${james.groupId}
+ blob-api
+
+        <dependency>
+            <groupId>${james.groupId}</groupId>
+            <artifactId>blob-memory</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>${james.groupId}</groupId>
+            <artifactId>blob-storage-strategy</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+ ${james.groupId}
+ event-bus-api
+ test-jar
+ test
+
+
+ ${james.groupId}
+ event-bus-in-vm
+ test
+
+
+ ${james.groupId}
+ james-json
+ test-jar
+ test
+
+
+ ${james.groupId}
+ james-server-data-postgres
+ test
+
+
+ ${james.groupId}
+ james-server-guice-common
+ test-jar
+ test
+
+
+ ${james.groupId}
+ james-server-testing
+ test
+
+
+ ${james.groupId}
+ james-server-util
+
+
+ ${james.groupId}
+ metrics-tests
+ test
+
+
+ ${james.groupId}
+ testing-base
+ test
+
+
+ com.fasterxml.jackson.datatype
+ jackson-datatype-jdk8
+
+
+ com.github.f4b6a3
+ uuid-creator
+ ${uuid-creator.version}
+
+
+ org.eclipse.angus
+ jakarta.mail
+
+
+ org.jasypt
+ jasypt
+
+
+ org.mockito
+ mockito-core
+ test
+
+
+ org.slf4j
+ slf4j-api
+
+
+ org.testcontainers
+ postgresql
+ test
+
+
+
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/DeleteMessageListener.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/DeleteMessageListener.java
new file mode 100644
index 00000000000..c6fc63c8684
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/DeleteMessageListener.java
@@ -0,0 +1,175 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres;
+
+import java.util.Set;
+import java.util.function.Function;
+
+import jakarta.inject.Inject;
+
+import org.apache.james.blob.api.BlobStore;
+import org.apache.james.core.Username;
+import org.apache.james.events.Event;
+import org.apache.james.events.EventListener;
+import org.apache.james.events.Group;
+import org.apache.james.mailbox.events.MailboxEvents.Expunged;
+import org.apache.james.mailbox.events.MailboxEvents.MailboxDeletion;
+import org.apache.james.mailbox.model.MailboxId;
+import org.apache.james.mailbox.model.MessageMetaData;
+import org.apache.james.mailbox.postgres.mail.MessageRepresentation;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresAttachmentDAO;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresMailboxMessageDAO;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresMessageDAO;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresThreadDAO;
+import org.apache.james.util.FunctionalUtils;
+import org.apache.james.util.ReactorUtils;
+import org.reactivestreams.Publisher;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+public class DeleteMessageListener implements EventListener.ReactiveGroupEventListener {
+ @FunctionalInterface
+ public interface DeletionCallback {
+ Mono forMessage(MessageRepresentation messageRepresentation, MailboxId mailboxId, Username owner);
+ }
+
+ public static class DeleteMessageListenerGroup extends Group {
+ }
+
+ public static final int LOW_CONCURRENCY = 4;
+
+ private final BlobStore blobStore;
+ private final Set deletionCallbackList;
+
+ private final PostgresMessageDAO.Factory messageDAOFactory;
+ private final PostgresMailboxMessageDAO.Factory mailboxMessageDAOFactory;
+ private final PostgresAttachmentDAO.Factory attachmentDAOFactory;
+ private final PostgresThreadDAO.Factory threadDAOFactory;
+
+ @Inject
+ public DeleteMessageListener(BlobStore blobStore,
+ PostgresMailboxMessageDAO.Factory mailboxMessageDAOFactory,
+ PostgresMessageDAO.Factory messageDAOFactory,
+ PostgresAttachmentDAO.Factory attachmentDAOFactory,
+ PostgresThreadDAO.Factory threadDAOFactory,
+ Set deletionCallbackList) {
+ this.messageDAOFactory = messageDAOFactory;
+ this.mailboxMessageDAOFactory = mailboxMessageDAOFactory;
+ this.blobStore = blobStore;
+ this.deletionCallbackList = deletionCallbackList;
+ this.attachmentDAOFactory = attachmentDAOFactory;
+ this.threadDAOFactory = threadDAOFactory;
+ }
+
+ @Override
+ public Group getDefaultGroup() {
+ return new DeleteMessageListenerGroup();
+ }
+
+ @Override
+ public boolean isHandling(Event event) {
+ return event instanceof Expunged || event instanceof MailboxDeletion;
+ }
+
+ @Override
+ public Publisher reactiveEvent(Event event) {
+ if (event instanceof Expunged) {
+ Expunged expunged = (Expunged) event;
+ return handleMessageDeletion(expunged);
+ }
+ if (event instanceof MailboxDeletion) {
+ MailboxDeletion mailboxDeletion = (MailboxDeletion) event;
+ return handleMailboxDeletion(mailboxDeletion);
+ }
+ return Mono.empty();
+ }
+
+ private Mono handleMailboxDeletion(MailboxDeletion event) {
+ PostgresMessageDAO postgresMessageDAO = messageDAOFactory.create(event.getUsername().getDomainPart());
+ PostgresMailboxMessageDAO postgresMailboxMessageDAO = mailboxMessageDAOFactory.create(event.getUsername().getDomainPart());
+ PostgresAttachmentDAO attachmentDAO = attachmentDAOFactory.create(event.getUsername().getDomainPart());
+ PostgresThreadDAO threadDAO = threadDAOFactory.create(event.getUsername().getDomainPart());
+
+ return postgresMailboxMessageDAO.deleteByMailboxId((PostgresMailboxId) event.getMailboxId())
+ .flatMap(msgId -> handleMessageDeletion(postgresMessageDAO, postgresMailboxMessageDAO, attachmentDAO, threadDAO, msgId, event.getMailboxId(), event.getMailboxPath().getUser()),
+ LOW_CONCURRENCY)
+ .then();
+ }
+
+ private Mono handleMessageDeletion(Expunged event) {
+ PostgresMessageDAO postgresMessageDAO = messageDAOFactory.create(event.getUsername().getDomainPart());
+ PostgresMailboxMessageDAO postgresMailboxMessageDAO = mailboxMessageDAOFactory.create(event.getUsername().getDomainPart());
+ PostgresAttachmentDAO attachmentDAO = attachmentDAOFactory.create(event.getUsername().getDomainPart());
+ PostgresThreadDAO threadDAO = threadDAOFactory.create(event.getUsername().getDomainPart());
+
+ return Flux.fromIterable(event.getExpunged()
+ .values())
+ .map(MessageMetaData::getMessageId)
+ .map(PostgresMessageId.class::cast)
+ .flatMap(msgId -> handleMessageDeletion(postgresMessageDAO, postgresMailboxMessageDAO, attachmentDAO, threadDAO, msgId, event.getMailboxId(), event.getMailboxPath().getUser()), LOW_CONCURRENCY)
+ .then();
+ }
+
+ private Mono handleMessageDeletion(PostgresMessageDAO postgresMessageDAO,
+ PostgresMailboxMessageDAO postgresMailboxMessageDAO,
+ PostgresAttachmentDAO attachmentDAO,
+ PostgresThreadDAO threadDAO,
+ PostgresMessageId messageId,
+ MailboxId mailboxId,
+ Username owner) {
+ return Mono.just(messageId)
+ .filterWhen(msgId -> isUnreferenced(msgId, postgresMailboxMessageDAO))
+ .flatMap(msgId -> postgresMessageDAO.retrieveMessage(messageId)
+ .flatMap(executeDeletionCallbacks(mailboxId, owner))
+ .then(deleteBodyBlob(msgId, postgresMessageDAO))
+ .then(deleteAttachment(msgId, attachmentDAO))
+ .then(threadDAO.deleteSome(owner, msgId))
+ .then(postgresMessageDAO.deleteByMessageId(msgId)));
+ }
+
+ private Function> executeDeletionCallbacks(MailboxId mailboxId, Username owner) {
+ return messageRepresentation -> Flux.fromIterable(deletionCallbackList)
+ .concatMap(callback -> callback.forMessage(messageRepresentation, mailboxId, owner))
+ .then();
+ }
+
+ private Mono deleteBodyBlob(PostgresMessageId id, PostgresMessageDAO postgresMessageDAO) {
+ return postgresMessageDAO.getBodyBlobId(id)
+ .flatMap(blobId -> Mono.from(blobStore.delete(blobStore.getDefaultBucketName(), blobId))
+ .then());
+ }
+
+ private Mono isUnreferenced(PostgresMessageId id, PostgresMailboxMessageDAO postgresMailboxMessageDAO) {
+ return postgresMailboxMessageDAO.existsByMessageId(id)
+ .map(FunctionalUtils.negate());
+ }
+
+ private Mono deleteAttachment(PostgresMessageId messageId, PostgresAttachmentDAO attachmentDAO) {
+ return deleteAttachmentBlobs(messageId, attachmentDAO)
+ .then(attachmentDAO.deleteByMessageId(messageId));
+ }
+
+ private Mono deleteAttachmentBlobs(PostgresMessageId messageId, PostgresAttachmentDAO attachmentDAO) {
+ return attachmentDAO.listBlobsByMessageId(messageId)
+ .flatMap(blobId -> Mono.from(blobStore.delete(blobStore.getDefaultBucketName(), blobId)), ReactorUtils.DEFAULT_CONCURRENCY)
+ .then();
+ }
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMailboxAggregateModule.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMailboxAggregateModule.java
new file mode 100644
index 00000000000..90a52df7c4b
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMailboxAggregateModule.java
@@ -0,0 +1,38 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres;
+
+import org.apache.james.backends.postgres.PostgresModule;
+import org.apache.james.mailbox.postgres.mail.PostgresAttachmentModule;
+import org.apache.james.mailbox.postgres.mail.PostgresMailboxModule;
+import org.apache.james.mailbox.postgres.mail.PostgresMessageModule;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresThreadModule;
+import org.apache.james.mailbox.postgres.user.PostgresSubscriptionModule;
+
+/**
+ * Aggregate of every Postgres schema module the mailbox subsystem needs
+ * (mailboxes, subscriptions, messages, annotations, attachments, threads),
+ * exposed as a single PostgresModule for one-shot table provisioning.
+ */
+public interface PostgresMailboxAggregateModule {
+
+ PostgresModule MODULE = PostgresModule.aggregateModules(
+ PostgresMailboxModule.MODULE,
+ PostgresSubscriptionModule.MODULE,
+ PostgresMessageModule.MODULE,
+ PostgresMailboxAnnotationModule.MODULE,
+ PostgresAttachmentModule.MODULE,
+ PostgresThreadModule.MODULE);
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMailboxAnnotationModule.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMailboxAnnotationModule.java
new file mode 100644
index 00000000000..64f0937ae81
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMailboxAnnotationModule.java
@@ -0,0 +1,57 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres;
+
+import static org.apache.james.mailbox.postgres.mail.PostgresMailboxModule.PostgresMailboxTable;
+
+import java.util.UUID;
+
+import org.apache.james.backends.postgres.PostgresModule;
+import org.apache.james.backends.postgres.PostgresTable;
+import org.jooq.Field;
+import org.jooq.Record;
+import org.jooq.Table;
+import org.jooq.impl.DSL;
+import org.jooq.impl.DefaultDataType;
+import org.jooq.impl.SQLDataType;
+import org.jooq.postgres.extensions.bindings.HstoreBinding;
+import org.jooq.postgres.extensions.types.Hstore;
+
+public interface PostgresMailboxAnnotationModule {
+ interface PostgresMailboxAnnotationTable {
+ Table TABLE_NAME = DSL.table("mailbox_annotations");
+
+ Field MAILBOX_ID = DSL.field("mailbox_id", SQLDataType.UUID.notNull());
+ Field ANNOTATIONS = DSL.field("annotations", DefaultDataType.getDefaultDataType("hstore").asConvertedDataType(new HstoreBinding()).notNull());
+
+ PostgresTable TABLE = PostgresTable.name(TABLE_NAME.getName())
+ .createTableStep(((dsl, tableName) -> dsl.createTableIfNotExists(tableName)
+ .column(MAILBOX_ID)
+ .column(ANNOTATIONS)
+ .primaryKey(MAILBOX_ID)
+ .constraints(DSL.constraint().foreignKey(MAILBOX_ID).references(PostgresMailboxTable.TABLE_NAME, PostgresMailboxTable.MAILBOX_ID).onDeleteCascade())))
+ .supportsRowLevelSecurity()
+ .build();
+ }
+
+ PostgresModule MODULE = PostgresModule.builder()
+ .addTable(PostgresMailboxAnnotationModule.PostgresMailboxAnnotationTable.TABLE)
+ .build();
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMailboxId.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMailboxId.java
new file mode 100644
index 00000000000..52111dd4cb6
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMailboxId.java
@@ -0,0 +1,86 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+package org.apache.james.mailbox.postgres;
+
+import java.io.Serializable;
+import java.util.Objects;
+import java.util.UUID;
+
+import org.apache.james.mailbox.model.MailboxId;
+
+import com.google.common.base.MoreObjects;
+
+public class PostgresMailboxId implements MailboxId, Serializable {
+
+ public static class Factory implements MailboxId.Factory {
+ @Override
+ public PostgresMailboxId fromString(String serialized) {
+ return of(serialized);
+ }
+ }
+
+ private final UUID id;
+
+ public static PostgresMailboxId generate() {
+ return of(UUID.randomUUID());
+ }
+
+ public static PostgresMailboxId of(UUID id) {
+ return new PostgresMailboxId(id);
+ }
+
+ public static PostgresMailboxId of(String serialized) {
+ return new PostgresMailboxId(UUID.fromString(serialized));
+ }
+
+ private PostgresMailboxId(UUID id) {
+ this.id = id;
+ }
+
+ @Override
+ public String serialize() {
+ return id.toString();
+ }
+
+ public UUID asUuid() {
+ return id;
+ }
+
+ @Override
+ public final boolean equals(Object o) {
+ if (o instanceof PostgresMailboxId) {
+ PostgresMailboxId other = (PostgresMailboxId) o;
+ return Objects.equals(id, other.id);
+ }
+ return false;
+ }
+
+ @Override
+ public final int hashCode() {
+ return Objects.hash(id);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this)
+ .add("id", id)
+ .toString();
+ }
+
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMailboxManager.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMailboxManager.java
new file mode 100644
index 00000000000..bce2f957de9
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMailboxManager.java
@@ -0,0 +1,102 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres;
+
+import java.time.Clock;
+import java.util.EnumSet;
+
+import jakarta.inject.Inject;
+
+import org.apache.james.events.EventBus;
+import org.apache.james.mailbox.MailboxSession;
+import org.apache.james.mailbox.SessionProvider;
+import org.apache.james.mailbox.model.Mailbox;
+import org.apache.james.mailbox.model.MessageId;
+import org.apache.james.mailbox.store.MailboxManagerConfiguration;
+import org.apache.james.mailbox.store.NoMailboxPathLocker;
+import org.apache.james.mailbox.store.PreDeletionHooks;
+import org.apache.james.mailbox.store.StoreMailboxAnnotationManager;
+import org.apache.james.mailbox.store.StoreMailboxManager;
+import org.apache.james.mailbox.store.StoreMessageManager;
+import org.apache.james.mailbox.store.StoreRightManager;
+import org.apache.james.mailbox.store.mail.ThreadIdGuessingAlgorithm;
+import org.apache.james.mailbox.store.mail.model.impl.MessageParser;
+import org.apache.james.mailbox.store.quota.QuotaComponents;
+import org.apache.james.mailbox.store.search.MessageSearchIndex;
+
+public class PostgresMailboxManager extends StoreMailboxManager {
+
+ public static final EnumSet MAILBOX_CAPABILITIES = EnumSet.of(
+ MailboxCapabilities.UserFlag,
+ MailboxCapabilities.Namespace,
+ MailboxCapabilities.Move,
+ MailboxCapabilities.Annotation,
+ MailboxCapabilities.ACL);
+
+ private final PostgresMailboxSessionMapperFactory mapperFactory;
+
+ @Inject
+ public PostgresMailboxManager(PostgresMailboxSessionMapperFactory mapperFactory,
+ SessionProvider sessionProvider,
+ MessageParser messageParser,
+ MessageId.Factory messageIdFactory,
+ EventBus eventBus,
+ StoreMailboxAnnotationManager annotationManager,
+ StoreRightManager storeRightManager,
+ QuotaComponents quotaComponents,
+ MessageSearchIndex index,
+ ThreadIdGuessingAlgorithm threadIdGuessingAlgorithm,
+ PreDeletionHooks preDeletionHooks,
+ Clock clock) {
+ super(mapperFactory, sessionProvider, new NoMailboxPathLocker(),
+ messageParser, messageIdFactory, annotationManager,
+ eventBus, storeRightManager, quotaComponents,
+ index, MailboxManagerConfiguration.DEFAULT, preDeletionHooks, threadIdGuessingAlgorithm, clock);
+ this.mapperFactory = mapperFactory;
+ }
+
+ @Override
+ protected StoreMessageManager createMessageManager(Mailbox mailboxRow, MailboxSession session) {
+ return new PostgresMessageManager(mapperFactory,
+ getMessageSearchIndex(),
+ getEventBus(),
+ getLocker(),
+ mailboxRow,
+ getQuotaComponents().getQuotaManager(),
+ getQuotaComponents().getQuotaRootResolver(),
+ getMessageParser(),
+ getMessageIdFactory(),
+ configuration.getBatchSizes(),
+ getStoreRightManager(),
+ getThreadIdGuessingAlgorithm(),
+ getClock(),
+ getPreDeletionHooks());
+ }
+
+ @Override
+ public EnumSet getSupportedMailboxCapabilities() {
+ return MAILBOX_CAPABILITIES;
+ }
+
+ @Override
+ public EnumSet getSupportedMessageCapabilities() {
+ return EnumSet.of(MessageCapabilities.UniqueID);
+ }
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMailboxSessionMapperFactory.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMailboxSessionMapperFactory.java
new file mode 100644
index 00000000000..8e157c514b0
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMailboxSessionMapperFactory.java
@@ -0,0 +1,152 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+package org.apache.james.mailbox.postgres;
+
+import java.time.Clock;
+
+import jakarta.inject.Inject;
+
+import org.apache.james.backends.postgres.PostgresConfiguration;
+import org.apache.james.backends.postgres.RowLevelSecurity;
+import org.apache.james.backends.postgres.utils.PostgresExecutor;
+import org.apache.james.blob.api.BlobId;
+import org.apache.james.blob.api.BlobStore;
+import org.apache.james.mailbox.MailboxSession;
+import org.apache.james.mailbox.postgres.mail.PostgresAnnotationMapper;
+import org.apache.james.mailbox.postgres.mail.PostgresAttachmentMapper;
+import org.apache.james.mailbox.postgres.mail.PostgresMailboxMapper;
+import org.apache.james.mailbox.postgres.mail.PostgresMailboxMemberDAO;
+import org.apache.james.mailbox.postgres.mail.PostgresMessageIdMapper;
+import org.apache.james.mailbox.postgres.mail.PostgresMessageMapper;
+import org.apache.james.mailbox.postgres.mail.PostgresModSeqProvider;
+import org.apache.james.mailbox.postgres.mail.PostgresUidProvider;
+import org.apache.james.mailbox.postgres.mail.RLSSupportPostgresMailboxMapper;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresAttachmentDAO;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresMailboxAnnotationDAO;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresMailboxDAO;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresMailboxMessageDAO;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresMessageDAO;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresThreadDAO;
+import org.apache.james.mailbox.postgres.user.PostgresSubscriptionDAO;
+import org.apache.james.mailbox.postgres.user.PostgresSubscriptionMapper;
+import org.apache.james.mailbox.store.MailboxSessionMapperFactory;
+import org.apache.james.mailbox.store.mail.AnnotationMapper;
+import org.apache.james.mailbox.store.mail.AttachmentMapperFactory;
+import org.apache.james.mailbox.store.mail.MailboxMapper;
+import org.apache.james.mailbox.store.mail.MessageIdMapper;
+import org.apache.james.mailbox.store.mail.MessageMapper;
+import org.apache.james.mailbox.store.user.SubscriptionMapper;
+
+import com.google.common.collect.ImmutableSet;
+
+public class PostgresMailboxSessionMapperFactory extends MailboxSessionMapperFactory implements AttachmentMapperFactory {
+
+ private final PostgresExecutor.Factory executorFactory;
+ private final BlobStore blobStore;
+ private final BlobId.Factory blobIdFactory;
+ private final Clock clock;
+ private final RowLevelSecurity rowLevelSecurity;
+
+ @Inject
+ public PostgresMailboxSessionMapperFactory(PostgresExecutor.Factory executorFactory,
+ Clock clock,
+ BlobStore blobStore,
+ BlobId.Factory blobIdFactory,
+ PostgresConfiguration postgresConfiguration) {
+ this.executorFactory = executorFactory;
+ this.blobStore = blobStore;
+ this.blobIdFactory = blobIdFactory;
+ this.clock = clock;
+ this.rowLevelSecurity = postgresConfiguration.getRowLevelSecurity();
+ }
+
+ @Override
+ public MailboxMapper createMailboxMapper(MailboxSession session) {
+ PostgresMailboxDAO mailboxDAO = new PostgresMailboxDAO(executorFactory.create(session.getUser().getDomainPart()));
+ if (rowLevelSecurity.isRowLevelSecurityEnabled()) {
+ return new RLSSupportPostgresMailboxMapper(mailboxDAO,
+ new PostgresMailboxMemberDAO(executorFactory.create(session.getUser().getDomainPart())));
+ } else {
+ return new PostgresMailboxMapper(mailboxDAO);
+ }
+ }
+
+ @Override
+ public MessageMapper createMessageMapper(MailboxSession session) {
+ return new PostgresMessageMapper(executorFactory.create(session.getUser().getDomainPart()),
+ getModSeqProvider(session),
+ getUidProvider(session),
+ blobStore,
+ clock,
+ blobIdFactory);
+ }
+
+ @Override
+ public MessageIdMapper createMessageIdMapper(MailboxSession session) {
+ return new PostgresMessageIdMapper(new PostgresMailboxDAO(executorFactory.create(session.getUser().getDomainPart())),
+ new PostgresMessageDAO(executorFactory.create(session.getUser().getDomainPart()), blobIdFactory),
+ new PostgresMailboxMessageDAO(executorFactory.create(session.getUser().getDomainPart())),
+ getModSeqProvider(session),
+ getAttachmentMapper(session),
+ blobStore,
+ blobIdFactory,
+ clock);
+ }
+
+ @Override
+ public SubscriptionMapper createSubscriptionMapper(MailboxSession session) {
+ return new PostgresSubscriptionMapper(new PostgresSubscriptionDAO(executorFactory.create(session.getUser().getDomainPart())));
+ }
+
+ @Override
+ public AnnotationMapper createAnnotationMapper(MailboxSession session) {
+ return new PostgresAnnotationMapper(new PostgresMailboxAnnotationDAO(executorFactory.create(session.getUser().getDomainPart())));
+ }
+
+ @Override
+ public PostgresUidProvider getUidProvider(MailboxSession session) {
+ return new PostgresUidProvider.Factory(executorFactory).create(session);
+ }
+
+ @Override
+ public PostgresModSeqProvider getModSeqProvider(MailboxSession session) {
+ return new PostgresModSeqProvider.Factory(executorFactory).create(session);
+ }
+
+ @Override
+ public PostgresAttachmentMapper createAttachmentMapper(MailboxSession session) {
+ PostgresAttachmentDAO postgresAttachmentDAO = new PostgresAttachmentDAO(executorFactory.create(session.getUser().getDomainPart()), blobIdFactory);
+ return new PostgresAttachmentMapper(postgresAttachmentDAO, blobStore);
+ }
+
+ @Override
+ public PostgresAttachmentMapper getAttachmentMapper(MailboxSession session) {
+ return createAttachmentMapper(session);
+ }
+
+ protected DeleteMessageListener deleteMessageListener() {
+ PostgresMessageDAO.Factory postgresMessageDAOFactory = new PostgresMessageDAO.Factory(blobIdFactory, executorFactory);
+ PostgresMailboxMessageDAO.Factory postgresMailboxMessageDAOFactory = new PostgresMailboxMessageDAO.Factory(executorFactory);
+ PostgresAttachmentDAO.Factory attachmentDAOFactory = new PostgresAttachmentDAO.Factory(executorFactory, blobIdFactory);
+ PostgresThreadDAO.Factory threadDAOFactory = new PostgresThreadDAO.Factory(executorFactory);
+
+ return new DeleteMessageListener(blobStore, postgresMailboxMessageDAOFactory, postgresMessageDAOFactory,
+ attachmentDAOFactory, threadDAOFactory, ImmutableSet.of());
+ }
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMessageId.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMessageId.java
new file mode 100644
index 00000000000..57594b45987
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMessageId.java
@@ -0,0 +1,89 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres;
+
+import java.util.Objects;
+import java.util.UUID;
+
+import org.apache.james.mailbox.model.MessageId;
+
+import com.github.f4b6a3.uuid.UuidCreator;
+import com.google.common.base.MoreObjects;
+
+public class PostgresMessageId implements MessageId {
+
+ public static class Factory implements MessageId.Factory {
+
+ @Override
+ public PostgresMessageId generate() {
+ return of(UuidCreator.getTimeOrderedEpoch());
+ }
+
+ public static PostgresMessageId of(UUID uuid) {
+ return new PostgresMessageId(uuid);
+ }
+
+ @Override
+ public PostgresMessageId fromString(String serialized) {
+ return of(UUID.fromString(serialized));
+ }
+ }
+
+ private final UUID uuid;
+
+ private PostgresMessageId(UUID uuid) {
+ this.uuid = uuid;
+ }
+
+ @Override
+ public String serialize() {
+ return uuid.toString();
+ }
+
+ public UUID asUuid() {
+ return uuid;
+ }
+
+ @Override
+ public boolean isSerializable() {
+ return true;
+ }
+
+ @Override
+ public final boolean equals(Object o) {
+ if (o instanceof PostgresMessageId) {
+ PostgresMessageId other = (PostgresMessageId) o;
+ return Objects.equals(uuid, other.uuid);
+ }
+ return false;
+ }
+
+ @Override
+ public final int hashCode() {
+ return Objects.hash(uuid);
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this)
+ .add("uuid", uuid)
+ .toString();
+ }
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMessageManager.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMessageManager.java
new file mode 100644
index 00000000000..ad2621b4aaf
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresMessageManager.java
@@ -0,0 +1,124 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres;
+
+import java.time.Clock;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Optional;
+
+import jakarta.mail.Flags;
+
+import org.apache.james.events.EventBus;
+import org.apache.james.mailbox.MailboxPathLocker;
+import org.apache.james.mailbox.MailboxSession;
+import org.apache.james.mailbox.MessageUid;
+import org.apache.james.mailbox.exception.MailboxException;
+import org.apache.james.mailbox.model.Mailbox;
+import org.apache.james.mailbox.model.MailboxACL;
+import org.apache.james.mailbox.model.MailboxCounters;
+import org.apache.james.mailbox.model.MessageId;
+import org.apache.james.mailbox.postgres.mail.PostgresMailbox;
+import org.apache.james.mailbox.quota.QuotaManager;
+import org.apache.james.mailbox.quota.QuotaRootResolver;
+import org.apache.james.mailbox.store.BatchSizes;
+import org.apache.james.mailbox.store.MailboxSessionMapperFactory;
+import org.apache.james.mailbox.store.MessageFactory;
+import org.apache.james.mailbox.store.MessageStorer;
+import org.apache.james.mailbox.store.PreDeletionHooks;
+import org.apache.james.mailbox.store.StoreMailboxManager;
+import org.apache.james.mailbox.store.StoreMessageManager;
+import org.apache.james.mailbox.store.StoreRightManager;
+import org.apache.james.mailbox.store.mail.MessageMapper;
+import org.apache.james.mailbox.store.mail.ThreadIdGuessingAlgorithm;
+import org.apache.james.mailbox.store.mail.model.impl.MessageParser;
+import org.apache.james.mailbox.store.search.MessageSearchIndex;
+
+import reactor.core.publisher.Mono;
+import reactor.util.function.Tuple2;
+
/**
 * Per-mailbox {@link StoreMessageManager} for the Postgres backend.
 *
 * NOTE(review): generic type parameters appear to have been stripped from this
 * hunk (raw {@code Mono}, {@code EnumSet}, and the syntactically broken
 * {@code Mono, List>>} below). Restore them (e.g. {@code Mono<MailboxMetaData>},
 * {@code EnumSet<MailboxMetaData.Item>}) when applying; also confirm that
 * {@code MailboxMetaData} is imported — it is not in the visible import list.
 */
public class PostgresMessageManager extends StoreMessageManager {

    private final MailboxSessionMapperFactory mapperFactory;
    private final StoreRightManager storeRightManager;
    private final Mailbox mailbox;

    public PostgresMessageManager(PostgresMailboxSessionMapperFactory mapperFactory,
                                  MessageSearchIndex index, EventBus eventBus,
                                  MailboxPathLocker locker, Mailbox mailbox,
                                  QuotaManager quotaManager, QuotaRootResolver quotaRootResolver,
                                  MessageParser messageParser,
                                  MessageId.Factory messageIdFactory, BatchSizes batchSizes,
                                  StoreRightManager storeRightManager, ThreadIdGuessingAlgorithm threadIdGuessingAlgorithm,
                                  Clock clock, PreDeletionHooks preDeletionHooks) {
        super(StoreMailboxManager.DEFAULT_NO_MESSAGE_CAPABILITIES, mapperFactory, index, eventBus, locker, mailbox,
            quotaManager, quotaRootResolver, batchSizes, storeRightManager, preDeletionHooks,
            new MessageStorer.WithAttachment(mapperFactory, messageIdFactory, new MessageFactory.StoreMessageFactory(), mapperFactory, messageParser, threadIdGuessingAlgorithm, clock));
        this.storeRightManager = storeRightManager;
        this.mapperFactory = mapperFactory;
        this.mailbox = mailbox;
    }


    /** Adds user-defined flag support on top of the store defaults. */
    @Override
    public Flags getPermanentFlags(MailboxSession session) {
        Flags flags = super.getPermanentFlags(session);
        flags.add(Flags.Flag.USER);
        return flags;
    }

    /**
     * Assembles mailbox metadata (uid validity, next uid, modseq, counters,
     * first unseen, recent uids, ACL) in one reactive pass.
     * Sessions without the Read right get a stripped-down, information-free
     * answer instead of an error.
     */
    public Mono getMetaDataReactive(MailboxMetaData.RecentMode recentMode, MailboxSession mailboxSession, EnumSet items) throws MailboxException {
        if (!storeRightManager.hasRight(mailbox, MailboxACL.Right.Read, mailboxSession)) {
            return Mono.just(MailboxMetaData.sensibleInformationFree(getResolvedAcl(mailboxSession), getMailboxEntity().getUidValidity(), isWriteable(mailboxSession)));
        }

        Flags permanentFlags = getPermanentFlags(mailboxSession);
        MessageMapper messageMapper = mapperFactory.getMessageMapper(mailboxSession);

        // Re-reads the mailbox row to obtain Postgres-specific metadata
        // (highest modseq, last uid, ACL) via the PostgresMailbox subtype.
        Mono postgresMailboxMetaDataPublisher = Mono.just(mapperFactory.getMailboxMapper(mailboxSession))
            .flatMap(postgresMailboxMapper -> postgresMailboxMapper.findMailboxById(getMailboxEntity().getMailboxId())
                .map(mailbox -> (PostgresMailbox) mailbox));

        // NOTE(review): broken by generics stripping — presumably
        // Mono<Tuple2<Optional<MessageUid>, List<MessageUid>>>; confirm against VCS.
        Mono, List>> firstUnseenAndRecentPublisher = Mono.zip(firstUnseen(messageMapper, items), recent(recentMode, mailboxSession));

        return messageMapper.executeReactive(Mono.zip(postgresMailboxMetaDataPublisher, mailboxCounters(messageMapper, items))
            .flatMap(metadataAndCounter -> {
                PostgresMailbox metadata = metadataAndCounter.getT1();
                MailboxCounters counters = metadataAndCounter.getT2();
                return firstUnseenAndRecentPublisher.map(firstUnseenAndRecent -> new MailboxMetaData(
                    firstUnseenAndRecent.getT2(),
                    permanentFlags,
                    metadata.getUidValidity(),
                    nextUid(metadata),
                    metadata.getHighestModSeq(),
                    counters.getCount(),
                    counters.getUnseen(),
                    firstUnseenAndRecent.getT1().orElse(null),
                    isWriteable(mailboxSession),
                    metadata.getACL()));
            }));
    }

    // Next uid = last allocated uid + 1; an empty mailbox starts at MIN_VALUE.
    private MessageUid nextUid(PostgresMailbox mailboxMetaData) {
        return Optional.ofNullable(mailboxMetaData.getLastUid())
            .map(MessageUid::next)
            .orElse(MessageUid.MIN_VALUE);
    }
}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresThreadIdGuessingAlgorithm.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresThreadIdGuessingAlgorithm.java
new file mode 100644
index 00000000000..77419e98fc1
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/PostgresThreadIdGuessingAlgorithm.java
@@ -0,0 +1,91 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import jakarta.inject.Inject;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.james.mailbox.MailboxSession;
+import org.apache.james.mailbox.exception.ThreadNotFoundException;
+import org.apache.james.mailbox.model.MessageId;
+import org.apache.james.mailbox.model.ThreadId;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresThreadDAO;
+import org.apache.james.mailbox.store.mail.ThreadIdGuessingAlgorithm;
+import org.apache.james.mailbox.store.mail.model.MimeMessageId;
+import org.apache.james.mailbox.store.mail.model.Subject;
+import org.apache.james.mailbox.store.search.SearchUtil;
+
+import com.google.common.hash.Hashing;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+public class PostgresThreadIdGuessingAlgorithm implements ThreadIdGuessingAlgorithm {
+ private final PostgresThreadDAO.Factory threadDAOFactory;
+
+ @Inject
+ public PostgresThreadIdGuessingAlgorithm(PostgresThreadDAO.Factory threadDAOFactory) {
+ this.threadDAOFactory = threadDAOFactory;
+ }
+
+ @Override
+ public Mono guessThreadIdReactive(MessageId messageId, Optional mimeMessageId, Optional inReplyTo,
+ Optional> references, Optional subject, MailboxSession session) {
+ PostgresThreadDAO threadDAO = threadDAOFactory.create(session.getUser().getDomainPart());
+
+ Set hashMimeMessageIds = buildMimeMessageIdSet(mimeMessageId, inReplyTo, references)
+ .stream()
+ .map(mimeMessageId1 -> Hashing.murmur3_32_fixed().hashBytes(mimeMessageId1.getValue().getBytes()).asInt())
+ .collect(Collectors.toSet());
+
+ Optional hashBaseSubject = subject.map(value -> new Subject(SearchUtil.getBaseSubject(value.getValue())))
+ .map(subject1 -> Hashing.murmur3_32_fixed().hashBytes(subject1.getValue().getBytes()).asInt());
+
+ return threadDAO.findThreads(session.getUser(), hashMimeMessageIds)
+ .filter(pair -> pair.getLeft().equals(hashBaseSubject))
+ .next()
+ .map(Pair::getRight)
+ .switchIfEmpty(Mono.just(ThreadId.fromBaseMessageId(messageId)))
+ .flatMap(threadId -> threadDAO
+ .insertSome(session.getUser(), hashMimeMessageIds, PostgresMessageId.class.cast(messageId), threadId, hashBaseSubject)
+ .then(Mono.just(threadId)));
+ }
+
+ @Override
+ public Flux getMessageIdsInThread(ThreadId threadId, MailboxSession session) {
+ PostgresThreadDAO threadDAO = threadDAOFactory.create(session.getUser().getDomainPart());
+ return threadDAO.findMessageIds(threadId, session.getUser())
+ .switchIfEmpty(Flux.error(new ThreadNotFoundException(threadId)));
+ }
+
+ private Set buildMimeMessageIdSet(Optional mimeMessageId, Optional inReplyTo, Optional> references) {
+ Set mimeMessageIds = new HashSet<>();
+ mimeMessageId.ifPresent(mimeMessageIds::add);
+ inReplyTo.ifPresent(mimeMessageIds::add);
+ references.ifPresent(mimeMessageIds::addAll);
+ return mimeMessageIds;
+ }
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/UnsupportAttachmentContentLoader.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/UnsupportAttachmentContentLoader.java
new file mode 100644
index 00000000000..e4954999eca
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/UnsupportAttachmentContentLoader.java
@@ -0,0 +1,34 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres;
+
+import java.io.InputStream;
+
+import org.apache.commons.lang3.NotImplementedException;
+import org.apache.james.mailbox.AttachmentContentLoader;
+import org.apache.james.mailbox.MailboxSession;
+import org.apache.james.mailbox.model.AttachmentMetadata;
+
+public class UnsupportAttachmentContentLoader implements AttachmentContentLoader {
+ @Override
+ public InputStream load(AttachmentMetadata attachment, MailboxSession mailboxSession) {
+ throw new NotImplementedException("Postgresql doesn't support loading attachment separately from Message");
+ }
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/MailboxDeleteDuringUpdateException.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/MailboxDeleteDuringUpdateException.java
new file mode 100644
index 00000000000..e738905441a
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/MailboxDeleteDuringUpdateException.java
@@ -0,0 +1,23 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres.mail;
+
/**
 * NOTE(review): no usage is visible in this hunk; from the name, this is
 * raised when a mailbox row is deleted concurrently with an update on it —
 * confirm against the mapper code. Checked, so update paths must handle the
 * race explicitly. Consider adding a serialVersionUID and a message/cause
 * constructor for diagnostics.
 */
public class MailboxDeleteDuringUpdateException extends Exception {
}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/MessageRepresentation.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/MessageRepresentation.java
new file mode 100644
index 00000000000..dd24c5dd60d
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/MessageRepresentation.java
@@ -0,0 +1,179 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres.mail;
+
+import java.util.Date;
+import java.util.List;
+import java.util.Optional;
+import java.util.stream.Collectors;
+
+import org.apache.james.blob.api.BlobId;
+import org.apache.james.mailbox.model.AttachmentId;
+import org.apache.james.mailbox.model.Cid;
+import org.apache.james.mailbox.model.Content;
+import org.apache.james.mailbox.model.MessageAttachmentMetadata;
+import org.apache.james.mailbox.model.MessageId;
+
+import com.google.common.base.Preconditions;
+
+public class MessageRepresentation {
+ public static class AttachmentRepresentation {
+ public static AttachmentRepresentation from(MessageAttachmentMetadata messageAttachmentMetadata) {
+ return new AttachmentRepresentation(
+ messageAttachmentMetadata.getAttachment().getAttachmentId(),
+ messageAttachmentMetadata.getName(),
+ messageAttachmentMetadata.getCid(),
+ messageAttachmentMetadata.isInline());
+ }
+
+ public static List from(List messageAttachmentMetadata) {
+ return messageAttachmentMetadata.stream()
+ .map(AttachmentRepresentation::from)
+ .collect(Collectors.toList());
+ }
+
+ private final AttachmentId attachmentId;
+ private final Optional name;
+ private final Optional cid;
+ private final boolean isInline;
+
+ public AttachmentRepresentation(AttachmentId attachmentId, Optional name, Optional cid, boolean isInline) {
+ Preconditions.checkNotNull(attachmentId, "attachmentId is required");
+ this.attachmentId = attachmentId;
+ this.name = name;
+ this.cid = cid;
+ this.isInline = isInline;
+ }
+
+ public AttachmentId getAttachmentId() {
+ return attachmentId;
+ }
+
+ public Optional getName() {
+ return name;
+ }
+
+ public Optional getCid() {
+ return cid;
+ }
+
+ public boolean isInline() {
+ return isInline;
+ }
+ }
+
+ public static MessageRepresentation.Builder builder() {
+ return new MessageRepresentation.Builder();
+ }
+
+ public static class Builder {
+ private MessageId messageId;
+ private Date internalDate;
+ private Long size;
+ private Content headerContent;
+ private BlobId bodyBlobId;
+
+ private List attachments = List.of();
+
+ public MessageRepresentation.Builder messageId(MessageId messageId) {
+ this.messageId = messageId;
+ return this;
+ }
+
+ public MessageRepresentation.Builder internalDate(Date internalDate) {
+ this.internalDate = internalDate;
+ return this;
+ }
+
+ public MessageRepresentation.Builder size(long size) {
+ Preconditions.checkArgument(size >= 0, "size can not be negative");
+ this.size = size;
+ return this;
+ }
+
+ public MessageRepresentation.Builder headerContent(Content headerContent) {
+ this.headerContent = headerContent;
+ return this;
+ }
+
+ public MessageRepresentation.Builder bodyBlobId(BlobId bodyBlobId) {
+ this.bodyBlobId = bodyBlobId;
+ return this;
+ }
+
+ public MessageRepresentation.Builder attachments(List attachments) {
+ this.attachments = attachments;
+ return this;
+ }
+
+ public MessageRepresentation build() {
+ Preconditions.checkNotNull(messageId, "messageId is required");
+ Preconditions.checkNotNull(internalDate, "internalDate is required");
+ Preconditions.checkNotNull(size, "size is required");
+ Preconditions.checkNotNull(headerContent, "headerContent is required");
+ Preconditions.checkNotNull(bodyBlobId, "mailboxId is required");
+
+ return new MessageRepresentation(messageId, internalDate, size, headerContent, bodyBlobId, attachments);
+ }
+ }
+
+ private final MessageId messageId;
+ private final Date internalDate;
+ private final Long size;
+ private final Content headerContent;
+ private final BlobId bodyBlobId;
+
+ private final List attachments;
+
+ private MessageRepresentation(MessageId messageId, Date internalDate, Long size,
+ Content headerContent, BlobId bodyBlobId,
+ List attachments) {
+ this.messageId = messageId;
+ this.internalDate = internalDate;
+ this.size = size;
+ this.headerContent = headerContent;
+ this.bodyBlobId = bodyBlobId;
+ this.attachments = attachments;
+ }
+
+ public Date getInternalDate() {
+ return internalDate;
+ }
+
+ public Long getSize() {
+ return size;
+ }
+
+ public MessageId getMessageId() {
+ return messageId;
+ }
+
+ public Content getHeaderContent() {
+ return headerContent;
+ }
+
+ public BlobId getBodyBlobId() {
+ return bodyBlobId;
+ }
+
+ public List getAttachments() {
+ return attachments;
+ }
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresAnnotationMapper.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresAnnotationMapper.java
new file mode 100644
index 00000000000..c58498be1f5
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresAnnotationMapper.java
@@ -0,0 +1,140 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres.mail;
+
+import java.util.List;
+import java.util.Set;
+
+import jakarta.inject.Inject;
+
+import org.apache.james.mailbox.model.MailboxAnnotation;
+import org.apache.james.mailbox.model.MailboxAnnotationKey;
+import org.apache.james.mailbox.model.MailboxId;
+import org.apache.james.mailbox.postgres.PostgresMailboxId;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresMailboxAnnotationDAO;
+import org.apache.james.mailbox.store.mail.AnnotationMapper;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+/**
+ * Postgres-backed {@link AnnotationMapper}.
+ *
+ * <p>Blocking variants delegate to the reactive ones, which are implemented on top of
+ * {@link PostgresMailboxAnnotationDAO}. Generic type parameters restored — the raw
+ * {@code List}/{@code Flux}/{@code Mono} signatures could not have matched the
+ * {@code AnnotationMapper} contract.
+ */
+public class PostgresAnnotationMapper implements AnnotationMapper {
+    private final PostgresMailboxAnnotationDAO annotationDAO;
+
+    @Inject
+    public PostgresAnnotationMapper(PostgresMailboxAnnotationDAO annotationDAO) {
+        this.annotationDAO = annotationDAO;
+    }
+
+    @Override
+    public List<MailboxAnnotation> getAllAnnotations(MailboxId mailboxId) {
+        return getAllAnnotationsReactive(mailboxId)
+            .collectList()
+            .block();
+    }
+
+    @Override
+    public Flux<MailboxAnnotation> getAllAnnotationsReactive(MailboxId mailboxId) {
+        return annotationDAO.getAllAnnotations((PostgresMailboxId) mailboxId);
+    }
+
+    @Override
+    public List<MailboxAnnotation> getAnnotationsByKeys(MailboxId mailboxId, Set<MailboxAnnotationKey> keys) {
+        return getAnnotationsByKeysReactive(mailboxId, keys)
+            .collectList()
+            .block();
+    }
+
+    @Override
+    public Flux<MailboxAnnotation> getAnnotationsByKeysReactive(MailboxId mailboxId, Set<MailboxAnnotationKey> keys) {
+        return annotationDAO.getAnnotationsByKeys((PostgresMailboxId) mailboxId, keys);
+    }
+
+    @Override
+    public List<MailboxAnnotation> getAnnotationsByKeysWithOneDepth(MailboxId mailboxId, Set<MailboxAnnotationKey> keys) {
+        return getAnnotationsByKeysWithOneDepthReactive(mailboxId, keys)
+            .collectList()
+            .block();
+    }
+
+    @Override
+    public Flux<MailboxAnnotation> getAnnotationsByKeysWithOneDepthReactive(MailboxId mailboxId, Set<MailboxAnnotationKey> keys) {
+        // One-depth: keep only annotations whose key is a direct child of (or equal to) the queried key
+        return Flux.fromIterable(keys).flatMap(mailboxAnnotationKey ->
+            annotationDAO.getAnnotationsByKeyLike((PostgresMailboxId) mailboxId, mailboxAnnotationKey)
+                .filter(annotation -> mailboxAnnotationKey.isParentOrIsEqual(annotation.getKey())));
+    }
+
+    @Override
+    public List<MailboxAnnotation> getAnnotationsByKeysWithAllDepth(MailboxId mailboxId, Set<MailboxAnnotationKey> keys) {
+        return getAnnotationsByKeysWithAllDepthReactive(mailboxId, keys)
+            .collectList()
+            .block();
+    }
+
+    @Override
+    public Flux<MailboxAnnotation> getAnnotationsByKeysWithAllDepthReactive(MailboxId mailboxId, Set<MailboxAnnotationKey> keys) {
+        // All-depth: keep annotations anywhere below (or equal to) the queried key
+        return Flux.fromIterable(keys).flatMap(mailboxAnnotationKey ->
+            annotationDAO.getAnnotationsByKeyLike((PostgresMailboxId) mailboxId, mailboxAnnotationKey)
+                .filter(annotation -> mailboxAnnotationKey.isAncestorOrIsEqual(annotation.getKey())));
+    }
+
+    @Override
+    public void deleteAnnotation(MailboxId mailboxId, MailboxAnnotationKey key) {
+        deleteAnnotationReactive(mailboxId, key)
+            .block();
+    }
+
+    @Override
+    public Mono<Void> deleteAnnotationReactive(MailboxId mailboxId, MailboxAnnotationKey key) {
+        return annotationDAO.deleteAnnotation((PostgresMailboxId) mailboxId, key);
+    }
+
+    @Override
+    public void insertAnnotation(MailboxId mailboxId, MailboxAnnotation mailboxAnnotation) {
+        insertAnnotationReactive(mailboxId, mailboxAnnotation)
+            .block();
+    }
+
+    @Override
+    public Mono<Void> insertAnnotationReactive(MailboxId mailboxId, MailboxAnnotation mailboxAnnotation) {
+        return annotationDAO.insertAnnotation((PostgresMailboxId) mailboxId, mailboxAnnotation);
+    }
+
+    @Override
+    public boolean exist(MailboxId mailboxId, MailboxAnnotation mailboxAnnotation) {
+        return existReactive(mailboxId, mailboxAnnotation)
+            .block();
+    }
+
+    @Override
+    public Mono<Boolean> existReactive(MailboxId mailboxId, MailboxAnnotation mailboxAnnotation) {
+        // Existence is keyed on the annotation key only; the value is irrelevant here
+        return annotationDAO.exist((PostgresMailboxId) mailboxId, mailboxAnnotation.getKey());
+    }
+
+    @Override
+    public int countAnnotations(MailboxId mailboxId) {
+        return countAnnotationsReactive(mailboxId)
+            .block();
+    }
+
+    @Override
+    public Mono<Integer> countAnnotationsReactive(MailboxId mailboxId) {
+        return annotationDAO.countAnnotations((PostgresMailboxId) mailboxId);
+    }
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresAttachmentBlobReferenceSource.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresAttachmentBlobReferenceSource.java
new file mode 100644
index 00000000000..3e64be72e31
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresAttachmentBlobReferenceSource.java
@@ -0,0 +1,53 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres.mail;
+
+import jakarta.inject.Inject;
+import jakarta.inject.Named;
+import jakarta.inject.Singleton;
+
+import org.apache.james.backends.postgres.utils.PostgresExecutor;
+import org.apache.james.blob.api.BlobId;
+import org.apache.james.blob.api.BlobReferenceSource;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresAttachmentDAO;
+
+import reactor.core.publisher.Flux;
+
+/**
+ * Lists the blob ids referenced by the Postgres attachment table, so the blob
+ * garbage collector can keep them alive.
+ *
+ * <p>Fixes: typo {@code bloIdFactory} → {@code blobIdFactory}; restored the
+ * {@code Flux<BlobId>} return type required by {@link BlobReferenceSource}.
+ */
+public class PostgresAttachmentBlobReferenceSource implements BlobReferenceSource {
+
+    private final PostgresAttachmentDAO postgresAttachmentDAO;
+
+    @Inject
+    @Singleton
+    public PostgresAttachmentBlobReferenceSource(@Named(PostgresExecutor.BY_PASS_RLS_INJECT) PostgresExecutor postgresExecutor,
+                                                 BlobId.Factory blobIdFactory) {
+        // Uses the RLS-bypassing executor: blob references must be listed across all domains
+        this(new PostgresAttachmentDAO(postgresExecutor, blobIdFactory));
+    }
+
+    public PostgresAttachmentBlobReferenceSource(PostgresAttachmentDAO postgresAttachmentDAO) {
+        this.postgresAttachmentDAO = postgresAttachmentDAO;
+    }
+
+    @Override
+    public Flux<BlobId> listReferencedBlobs() {
+        return postgresAttachmentDAO.listBlobs();
+    }
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresAttachmentMapper.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresAttachmentMapper.java
new file mode 100644
index 00000000000..1be53fa3a64
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresAttachmentMapper.java
@@ -0,0 +1,125 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres.mail;
+
+import static org.apache.james.blob.api.BlobStore.StoragePolicy.LOW_COST;
+
+import java.io.InputStream;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.james.blob.api.BlobStore;
+import org.apache.james.mailbox.exception.AttachmentNotFoundException;
+import org.apache.james.mailbox.model.AttachmentId;
+import org.apache.james.mailbox.model.AttachmentMetadata;
+import org.apache.james.mailbox.model.MessageAttachmentMetadata;
+import org.apache.james.mailbox.model.MessageId;
+import org.apache.james.mailbox.model.ParsedAttachment;
+import org.apache.james.mailbox.model.UuidBackedAttachmentId;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresAttachmentDAO;
+import org.apache.james.mailbox.store.mail.AttachmentMapper;
+
+import com.github.fge.lambdas.Throwing;
+import com.google.common.base.Preconditions;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+/**
+ * Postgres-backed {@link AttachmentMapper}: attachment metadata lives in Postgres
+ * ({@link PostgresAttachmentDAO}) while the attachment payload is stored in the
+ * {@link BlobStore} default bucket.
+ *
+ * <p>Fixes: restored the stripped generic type parameters, and
+ * {@code storeAttachmentAsync} now reuses the {@code content} captured by
+ * {@code Mono.fromCallable} instead of calling {@code parsedAttachment.getContent()}
+ * a second time.
+ */
+public class PostgresAttachmentMapper implements AttachmentMapper {
+
+    private final PostgresAttachmentDAO postgresAttachmentDAO;
+    private final BlobStore blobStore;
+
+    public PostgresAttachmentMapper(PostgresAttachmentDAO postgresAttachmentDAO, BlobStore blobStore) {
+        this.postgresAttachmentDAO = postgresAttachmentDAO;
+        this.blobStore = blobStore;
+    }
+
+    @Override
+    public InputStream loadAttachmentContent(AttachmentId attachmentId) {
+        return loadAttachmentContentReactive(attachmentId)
+            .block();
+    }
+
+    @Override
+    public Mono<InputStream> loadAttachmentContentReactive(AttachmentId attachmentId) {
+        return postgresAttachmentDAO.getAttachment(attachmentId)
+            .flatMap(pair -> Mono.from(blobStore.readReactive(blobStore.getDefaultBucketName(), pair.getRight(), LOW_COST)))
+            .switchIfEmpty(Mono.error(() -> new AttachmentNotFoundException(attachmentId.toString())));
+    }
+
+    @Override
+    public AttachmentMetadata getAttachment(AttachmentId attachmentId) throws AttachmentNotFoundException {
+        Preconditions.checkArgument(attachmentId != null);
+        return postgresAttachmentDAO.getAttachment(attachmentId)
+            .map(Pair::getLeft)
+            .blockOptional()
+            .orElseThrow(() -> new AttachmentNotFoundException(attachmentId.getId()));
+    }
+
+    @Override
+    public Mono<AttachmentMetadata> getAttachmentReactive(AttachmentId attachmentId) {
+        Preconditions.checkArgument(attachmentId != null);
+        return postgresAttachmentDAO.getAttachment(attachmentId)
+            .map(Pair::getLeft)
+            .switchIfEmpty(Mono.error(() -> new AttachmentNotFoundException(attachmentId.getId())));
+    }
+
+    public Flux<AttachmentMetadata> getAttachmentsReactive(Collection<AttachmentId> attachmentIds) {
+        Preconditions.checkArgument(attachmentIds != null);
+        return postgresAttachmentDAO.getAttachments(attachmentIds);
+    }
+
+    @Override
+    public List<AttachmentMetadata> getAttachments(Collection<AttachmentId> attachmentIds) {
+        return getAttachmentsReactive(attachmentIds)
+            .collectList()
+            .block();
+    }
+
+    @Override
+    public List<MessageAttachmentMetadata> storeAttachments(Collection<ParsedAttachment> attachments, MessageId ownerMessageId) {
+        return storeAttachmentsReactive(attachments, ownerMessageId)
+            .block();
+    }
+
+    @Override
+    public Mono<List<MessageAttachmentMetadata>> storeAttachmentsReactive(Collection<ParsedAttachment> attachments, MessageId ownerMessageId) {
+        // concatMap keeps the stored attachments in the order they were parsed
+        return Flux.fromIterable(attachments)
+            .concatMap(attachment -> storeAttachmentAsync(attachment, ownerMessageId))
+            .collectList();
+    }
+
+    /** Saves one attachment payload to the blob store, then records its metadata row. */
+    private Mono<MessageAttachmentMetadata> storeAttachmentAsync(ParsedAttachment parsedAttachment, MessageId ownerMessageId) {
+        return Mono.fromCallable(parsedAttachment::getContent)
+            .flatMap(content -> Mono.from(blobStore.save(blobStore.getDefaultBucketName(), content, BlobStore.StoragePolicy.LOW_COST))
+                .flatMap(blobId -> {
+                    AttachmentId attachmentId = UuidBackedAttachmentId.random();
+                    return postgresAttachmentDAO.storeAttachment(AttachmentMetadata.builder()
+                            .attachmentId(attachmentId)
+                            .type(parsedAttachment.getContentType())
+                            .size(Throwing.supplier(content::size).get())
+                            .messageId(ownerMessageId)
+                            .build(), blobId)
+                        .thenReturn(Throwing.supplier(() -> parsedAttachment.asMessageAttachment(attachmentId, ownerMessageId)).get());
+                }));
+    }
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresAttachmentModule.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresAttachmentModule.java
new file mode 100644
index 00000000000..2bc4e0b16b2
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresAttachmentModule.java
@@ -0,0 +1,64 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres.mail;
+
+import java.util.UUID;
+
+import org.apache.james.backends.postgres.PostgresIndex;
+import org.apache.james.backends.postgres.PostgresModule;
+import org.apache.james.backends.postgres.PostgresTable;
+import org.jooq.Field;
+import org.jooq.Record;
+import org.jooq.Table;
+import org.jooq.impl.DSL;
+import org.jooq.impl.SQLDataType;
+
+/**
+ * jOOQ table/index declarations for the Postgres {@code attachment} table.
+ *
+ * <p>Restored the stripped generic parameters on {@code Table}/{@code Field} — the
+ * existing {@code java.util.UUID} and {@code org.jooq.Record} imports are only used
+ * by those type arguments.
+ */
+public interface PostgresAttachmentModule {
+
+    interface PostgresAttachmentTable {
+
+        Table<Record> TABLE_NAME = DSL.table("attachment");
+        Field<UUID> ID = DSL.field("id", SQLDataType.UUID.notNull());
+        Field<String> BLOB_ID = DSL.field("blob_id", SQLDataType.VARCHAR);
+        Field<String> TYPE = DSL.field("type", SQLDataType.VARCHAR);
+        Field<UUID> MESSAGE_ID = DSL.field("message_id", SQLDataType.UUID);
+        Field<Long> SIZE = DSL.field("size", SQLDataType.BIGINT);
+
+        PostgresTable TABLE = PostgresTable.name(TABLE_NAME.getName())
+            .createTableStep(((dsl, tableName) -> dsl.createTableIfNotExists(tableName)
+                .column(ID)
+                .column(BLOB_ID)
+                .column(TYPE)
+                .column(MESSAGE_ID)
+                .column(SIZE)
+                .constraint(DSL.primaryKey(ID))))
+            .supportsRowLevelSecurity()
+            .build();
+
+        // Attachments are looked up by owning message, hence this secondary index
+        PostgresIndex MESSAGE_ID_INDEX = PostgresIndex.name("attachment_message_id_index")
+            .createIndexStep((dsl, indexName) -> dsl.createIndexIfNotExists(indexName)
+                .on(TABLE_NAME, MESSAGE_ID));
+    }
+
+    PostgresModule MODULE = PostgresModule.builder()
+        .addTable(PostgresAttachmentTable.TABLE)
+        .addIndex(PostgresAttachmentTable.MESSAGE_ID_INDEX)
+        .build();
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMailbox.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMailbox.java
new file mode 100644
index 00000000000..0485f5f49b9
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMailbox.java
@@ -0,0 +1,54 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres.mail;
+
+import org.apache.james.mailbox.MessageUid;
+import org.apache.james.mailbox.ModSeq;
+import org.apache.james.mailbox.model.Mailbox;
+
+/**
+ * {@link Mailbox} flavour enriched with two Postgres-maintained counters:
+ * the highest mod-sequence and the last allocated message uid.
+ */
+public class PostgresMailbox extends Mailbox {
+    private final ModSeq highestModSeq;
+    private final MessageUid lastUid;
+
+    public PostgresMailbox(Mailbox mailbox, ModSeq highestModSeq, MessageUid lastUid) {
+        super(mailbox);
+        this.highestModSeq = highestModSeq;
+        this.lastUid = lastUid;
+    }
+
+    /** @return the last message uid allocated in this mailbox */
+    public MessageUid getLastUid() {
+        return lastUid;
+    }
+
+    /** @return the highest mod-sequence recorded for this mailbox */
+    public ModSeq getHighestModSeq() {
+        return highestModSeq;
+    }
+
+    // Equality is deliberately inherited from Mailbox: the counters are derived state
+    // and must not influence identity. Declared final so subclasses cannot diverge.
+    @Override
+    public final boolean equals(Object other) {
+        return super.equals(other);
+    }
+
+    @Override
+    public final int hashCode() {
+        return super.hashCode();
+    }
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMailboxMapper.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMailboxMapper.java
new file mode 100644
index 00000000000..0974c0529ec
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMailboxMapper.java
@@ -0,0 +1,116 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres.mail;
+
+import java.util.function.Function;
+
+import org.apache.james.core.Username;
+import org.apache.james.mailbox.acl.ACLDiff;
+import org.apache.james.mailbox.model.Mailbox;
+import org.apache.james.mailbox.model.MailboxACL;
+import org.apache.james.mailbox.model.MailboxId;
+import org.apache.james.mailbox.model.MailboxPath;
+import org.apache.james.mailbox.model.UidValidity;
+import org.apache.james.mailbox.model.search.MailboxQuery;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresMailboxDAO;
+import org.apache.james.mailbox.store.mail.MailboxMapper;
+
+import com.github.fge.lambdas.Throwing;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+public class PostgresMailboxMapper implements MailboxMapper {
+ private final PostgresMailboxDAO postgresMailboxDAO;
+
+ public PostgresMailboxMapper(PostgresMailboxDAO postgresMailboxDAO) {
+ this.postgresMailboxDAO = postgresMailboxDAO;
+ }
+
+ @Override
+ public Mono create(MailboxPath mailboxPath, UidValidity uidValidity) {
+ return postgresMailboxDAO.create(mailboxPath,uidValidity);
+ }
+
+ @Override
+ public Mono rename(Mailbox mailbox) {
+ return postgresMailboxDAO.rename(mailbox);
+ }
+
+ @Override
+ public Mono delete(Mailbox mailbox) {
+ return postgresMailboxDAO.delete(mailbox.getMailboxId());
+ }
+
+ @Override
+ public Mono findMailboxByPath(MailboxPath mailboxName) {
+ return postgresMailboxDAO.findMailboxByPath(mailboxName)
+ .map(Function.identity());
+ }
+
+ @Override
+ public Mono findMailboxById(MailboxId mailboxId) {
+ return postgresMailboxDAO.findMailboxById(mailboxId)
+ .map(Function.identity());
+ }
+
+ @Override
+ public Flux findMailboxWithPathLike(MailboxQuery.UserBound query) {
+ return postgresMailboxDAO.findMailboxWithPathLike(query)
+ .map(Function.identity());
+ }
+
+ @Override
+ public Mono hasChildren(Mailbox mailbox, char delimiter) {
+ return postgresMailboxDAO.hasChildren(mailbox, delimiter);
+ }
+
+ @Override
+ public Flux list() {
+ return postgresMailboxDAO.getAll()
+ .map(Function.identity());
+ }
+
+ public Flux findNonPersonalMailboxes(Username userName, MailboxACL.Right right) {
+ return postgresMailboxDAO.findMailboxesByUsername(userName)
+ .filter(postgresMailbox -> postgresMailbox.getACL().getEntries().get(MailboxACL.EntryKey.createUserEntryKey(userName)).contains(right))
+ .map(Function.identity());
+ }
+
+ @Override
+ public Mono updateACL(Mailbox mailbox, MailboxACL.ACLCommand mailboxACLCommand) {
+ return upsertACL(mailbox,
+ mailbox.getACL(),
+ Throwing.supplier(() -> mailbox.getACL().apply(mailboxACLCommand)).get());
+ }
+
+ @Override
+ public Mono setACL(Mailbox mailbox, MailboxACL mailboxACL) {
+ return upsertACL(mailbox, mailbox.getACL(), mailboxACL);
+ }
+
+ private Mono upsertACL(Mailbox mailbox, MailboxACL oldACL, MailboxACL newACL) {
+ return postgresMailboxDAO.upsertACL(mailbox.getMailboxId(), newACL)
+ .then(Mono.fromCallable(() -> {
+ mailbox.setACL(newACL);
+ return ACLDiff.computeDiff(oldACL, newACL);
+ }));
+ }
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMailboxMemberDAO.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMailboxMemberDAO.java
new file mode 100644
index 00000000000..5cf73eb29f6
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMailboxMemberDAO.java
@@ -0,0 +1,65 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres.mail;
+
+import static org.apache.james.mailbox.postgres.mail.PostgresMailboxMemberModule.PostgresMailboxMemberTable.MAILBOX_ID;
+import static org.apache.james.mailbox.postgres.mail.PostgresMailboxMemberModule.PostgresMailboxMemberTable.TABLE_NAME;
+import static org.apache.james.mailbox.postgres.mail.PostgresMailboxMemberModule.PostgresMailboxMemberTable.USER_NAME;
+
+import java.util.List;
+
+import org.apache.james.backends.postgres.utils.PostgresExecutor;
+import org.apache.james.core.Username;
+import org.apache.james.mailbox.postgres.PostgresMailboxId;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+/**
+ * DAO over the {@code mailbox_member} join table mapping usernames to the
+ * mailbox ids they are members of. Generic type parameters restored.
+ */
+public class PostgresMailboxMemberDAO {
+    private final PostgresExecutor postgresExecutor;
+
+    public PostgresMailboxMemberDAO(PostgresExecutor postgresExecutor) {
+        this.postgresExecutor = postgresExecutor;
+    }
+
+    /** Streams the ids of all mailboxes the given user is a member of. */
+    public Flux<PostgresMailboxId> findMailboxIdByUsername(Username username) {
+        return postgresExecutor.executeRows(dslContext -> Flux.from(dslContext.select(MAILBOX_ID)
+                .from(TABLE_NAME)
+                .where(USER_NAME.eq(username.asString()))))
+            .map(record -> PostgresMailboxId.of(record.get(MAILBOX_ID)));
+    }
+
+    /** Registers all given users as members of the mailbox in a single multi-row insert. */
+    public Mono<Void> insert(PostgresMailboxId mailboxId, List<Username> usernames) {
+        return postgresExecutor.executeVoid(dslContext -> Mono.from(dslContext.insertInto(TABLE_NAME, USER_NAME, MAILBOX_ID)
+            .valuesOfRecords(usernames.stream()
+                .map(username -> dslContext.newRecord(USER_NAME, MAILBOX_ID)
+                    .value1(username.asString())
+                    .value2(mailboxId.asUuid()))
+                .toList())));
+    }
+
+    /** Removes the given users' membership rows for the mailbox as one batch. */
+    public Mono<Void> delete(PostgresMailboxId mailboxId, List<Username> usernames) {
+        return postgresExecutor.executeVoid(dslContext -> Mono.from(dslContext.batch(usernames.stream()
+            .map(username -> dslContext.deleteFrom(TABLE_NAME)
+                .where(USER_NAME.eq(username.asString())
+                    .and(MAILBOX_ID.eq(mailboxId.asUuid()))))
+            .toList())));
+    }
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMailboxMemberModule.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMailboxMemberModule.java
new file mode 100644
index 00000000000..abcd3bfde3e
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMailboxMemberModule.java
@@ -0,0 +1,57 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres.mail;
+
+import java.util.UUID;
+
+import org.apache.james.backends.postgres.PostgresIndex;
+import org.apache.james.backends.postgres.PostgresModule;
+import org.apache.james.backends.postgres.PostgresTable;
+import org.jooq.Field;
+import org.jooq.Record;
+import org.jooq.Table;
+import org.jooq.impl.DSL;
+import org.jooq.impl.SQLDataType;
+
+/**
+ * jOOQ table/index declarations for the {@code mailbox_member} join table.
+ * Generic parameters on {@code Table}/{@code Field} restored (the
+ * {@code java.util.UUID} and {@code org.jooq.Record} imports exist only for them).
+ */
+public interface PostgresMailboxMemberModule {
+    interface PostgresMailboxMemberTable {
+        Table<Record> TABLE_NAME = DSL.table("mailbox_member");
+
+        Field<String> USER_NAME = DSL.field("user_name", SQLDataType.VARCHAR(255));
+        Field<UUID> MAILBOX_ID = DSL.field("mailbox_id", SQLDataType.UUID.notNull());
+
+        PostgresTable TABLE = PostgresTable.name(TABLE_NAME.getName())
+            .createTableStep(((dsl, tableName) -> dsl.createTableIfNotExists(tableName)
+                .column(USER_NAME)
+                .column(MAILBOX_ID)
+                .constraint(DSL.primaryKey(USER_NAME, MAILBOX_ID))))
+            .supportsRowLevelSecurity()
+            .build();
+
+        // Memberships are queried per user, hence the secondary index on user_name
+        PostgresIndex MAILBOX_MEMBER_USERNAME_INDEX = PostgresIndex.name("mailbox_member_username_index")
+            .createIndexStep((dsl, indexName) -> dsl.createIndexIfNotExists(indexName)
+                .on(TABLE_NAME, USER_NAME));
+    }
+
+    PostgresModule MODULE = PostgresModule.builder()
+        .addTable(PostgresMailboxMemberTable.TABLE)
+        .addIndex(PostgresMailboxMemberTable.MAILBOX_MEMBER_USERNAME_INDEX)
+        .build();
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMailboxModule.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMailboxModule.java
new file mode 100644
index 00000000000..5b17924d018
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMailboxModule.java
@@ -0,0 +1,77 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres.mail;
+
+import static org.jooq.impl.SQLDataType.BIGINT;
+
+import java.util.UUID;
+
+import org.apache.james.backends.postgres.PostgresIndex;
+import org.apache.james.backends.postgres.PostgresModule;
+import org.apache.james.backends.postgres.PostgresTable;
+import org.jooq.Field;
+import org.jooq.Name;
+import org.jooq.Record;
+import org.jooq.Table;
+import org.jooq.impl.DSL;
+import org.jooq.impl.SQLDataType;
+import org.jooq.postgres.extensions.bindings.HstoreBinding;
+import org.jooq.postgres.extensions.types.Hstore;
+
+/**
+ * jOOQ table/index declarations for the Postgres {@code mailbox} table.
+ * Generic parameters on {@code Table}/{@code Field} restored; the hstore-typed
+ * ACL column is bound through {@link HstoreBinding}.
+ */
+public interface PostgresMailboxModule {
+    interface PostgresMailboxTable {
+        Table<Record> TABLE_NAME = DSL.table("mailbox");
+
+        Field<UUID> MAILBOX_ID = DSL.field("mailbox_id", SQLDataType.UUID.notNull());
+        Field<String> MAILBOX_NAME = DSL.field("mailbox_name", SQLDataType.VARCHAR(255).notNull());
+        Field<Long> MAILBOX_UID_VALIDITY = DSL.field("mailbox_uid_validity", BIGINT.notNull());
+        Field<String> USER_NAME = DSL.field("user_name", SQLDataType.VARCHAR(255));
+        Field<String> MAILBOX_NAMESPACE = DSL.field("mailbox_namespace", SQLDataType.VARCHAR(255).notNull());
+        Field<Long> MAILBOX_LAST_UID = DSL.field("mailbox_last_uid", BIGINT);
+        Field<Long> MAILBOX_HIGHEST_MODSEQ = DSL.field("mailbox_highest_modseq", BIGINT);
+        // ACL entries are stored as a Postgres hstore (key/value map) via a custom binding
+        Field<Hstore> MAILBOX_ACL = DSL.field("mailbox_acl", org.jooq.impl.DefaultDataType.getDefaultDataType("hstore").asConvertedDataType(new HstoreBinding()));
+
+        Name MAILBOX_NAME_USER_NAME_NAMESPACE_UNIQUE_CONSTRAINT = DSL.name("mailbox_mailbox_name_user_name_mailbox_namespace_key");
+
+        PostgresTable TABLE = PostgresTable.name(TABLE_NAME.getName())
+            .createTableStep(((dsl, tableName) -> dsl.createTableIfNotExists(tableName)
+                .column(MAILBOX_ID, SQLDataType.UUID)
+                .column(MAILBOX_NAME)
+                .column(MAILBOX_UID_VALIDITY)
+                .column(USER_NAME)
+                .column(MAILBOX_NAMESPACE)
+                .column(MAILBOX_LAST_UID)
+                .column(MAILBOX_HIGHEST_MODSEQ)
+                .column(MAILBOX_ACL)
+                .constraint(DSL.primaryKey(MAILBOX_ID))
+                // A mailbox path (name, user, namespace) must be unique
+                .constraint(DSL.constraint(MAILBOX_NAME_USER_NAME_NAMESPACE_UNIQUE_CONSTRAINT).unique(MAILBOX_NAME, USER_NAME, MAILBOX_NAMESPACE))))
+            .supportsRowLevelSecurity()
+            // GIN index on the hstore ACL column; only created when RLS is disabled
+            .addAdditionalAlterQueries(new PostgresTable.NonRLSOnlyAdditionalAlterQuery("CREATE INDEX mailbox_mailbox_acl_index ON " + TABLE_NAME.getName() + " USING GIN (" + MAILBOX_ACL.getName() + ")"))
+            .build();
+        PostgresIndex MAILBOX_USERNAME_NAMESPACE_INDEX = PostgresIndex.name("mailbox_username_namespace_index")
+            .createIndexStep((dsl, indexName) -> dsl.createIndexIfNotExists(indexName)
+                .on(TABLE_NAME, USER_NAME, MAILBOX_NAMESPACE));
+    }
+
+    PostgresModule MODULE = PostgresModule.builder()
+        .addTable(PostgresMailboxTable.TABLE)
+        .addIndex(PostgresMailboxTable.MAILBOX_USERNAME_NAMESPACE_INDEX)
+        .build();
+}
\ No newline at end of file
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMessageBlobReferenceSource.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMessageBlobReferenceSource.java
new file mode 100644
index 00000000000..3ea9032b298
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMessageBlobReferenceSource.java
@@ -0,0 +1,42 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres.mail;
+
+import jakarta.inject.Inject;
+
+import org.apache.james.blob.api.BlobId;
+import org.apache.james.blob.api.BlobReferenceSource;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresMessageDAO;
+
+import reactor.core.publisher.Flux;
+
+/**
+ * Enumerates every blob referenced by Postgres-stored messages, so that blob
+ * garbage collection can distinguish live body blobs from orphans.
+ *
+ * <p>Delegates entirely to {@link PostgresMessageDAO#listBlobs()}.
+ */
+public class PostgresMessageBlobReferenceSource implements BlobReferenceSource {
+    // final: injected once, never rebound.
+    private final PostgresMessageDAO postgresMessageDAO;
+
+    @Inject
+    public PostgresMessageBlobReferenceSource(PostgresMessageDAO postgresMessageDAO) {
+        this.postgresMessageDAO = postgresMessageDAO;
+    }
+
+    @Override
+    public Flux<BlobId> listReferencedBlobs() {
+        return postgresMessageDAO.listBlobs();
+    }
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMessageIdMapper.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMessageIdMapper.java
new file mode 100644
index 00000000000..961b51fb53b
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMessageIdMapper.java
@@ -0,0 +1,257 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres.mail;
+
+import static org.apache.james.blob.api.BlobStore.StoragePolicy.LOW_COST;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.time.Clock;
+import java.util.Collection;
+import java.util.Date;
+import java.util.List;
+import java.util.function.Function;
+
+import jakarta.mail.Flags;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.james.backends.postgres.utils.PostgresUtils;
+import org.apache.james.blob.api.BlobId;
+import org.apache.james.blob.api.BlobStore;
+import org.apache.james.mailbox.MessageManager;
+import org.apache.james.mailbox.MessageUid;
+import org.apache.james.mailbox.ModSeq;
+import org.apache.james.mailbox.exception.MailboxException;
+import org.apache.james.mailbox.exception.MailboxNotFoundException;
+import org.apache.james.mailbox.model.ComposedMessageIdWithMetaData;
+import org.apache.james.mailbox.model.Mailbox;
+import org.apache.james.mailbox.model.MailboxId;
+import org.apache.james.mailbox.model.MessageId;
+import org.apache.james.mailbox.model.UpdatedFlags;
+import org.apache.james.mailbox.postgres.PostgresMailboxId;
+import org.apache.james.mailbox.postgres.PostgresMessageId;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresMailboxDAO;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresMailboxMessageDAO;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresMessageDAO;
+import org.apache.james.mailbox.store.FlagsUpdateCalculator;
+import org.apache.james.mailbox.store.MailboxReactorUtils;
+import org.apache.james.mailbox.store.mail.MessageIdMapper;
+import org.apache.james.mailbox.store.mail.MessageMapper;
+import org.apache.james.mailbox.store.mail.model.MailboxMessage;
+import org.apache.james.mailbox.store.mail.model.impl.SimpleMailboxMessage;
+import org.apache.james.util.ReactorUtils;
+import org.jooq.Record;
+import org.reactivestreams.Publisher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableListMultimap;
+import com.google.common.collect.Multimap;
+import com.google.common.io.ByteSource;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+/**
+ * Postgres implementation of {@link MessageIdMapper}: looks up, saves, copies and
+ * deletes messages addressed by their {@link MessageId} across mailboxes.
+ *
+ * <p>Per-mailbox metadata (uid, modseq, flags) is kept in Postgres through the DAOs,
+ * while message bodies are persisted in the {@link BlobStore}. Flag updates are
+ * optimistic and retried up to {@link #NUM_RETRIES} times on concurrent modification.
+ */
+public class PostgresMessageIdMapper implements MessageIdMapper {
+    // Adapts a MailboxMessage body to a ByteSource so the blob store can open the
+    // content stream lazily; IOExceptions are tunnelled as unchecked (ByteSource
+    // contract gives openStream no throws clause for our case).
+    private static final Function<MailboxMessage, ByteSource> MESSAGE_BODY_CONTENT_LOADER = mailboxMessage -> new ByteSource() {
+        @Override
+        public InputStream openStream() {
+            try {
+                return mailboxMessage.getBodyContent();
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        }
+
+        @Override
+        public long size() {
+            return mailboxMessage.getBodyOctets();
+        }
+    };
+
+    public static final int NUM_RETRIES = 5;
+    public static final Logger LOGGER = LoggerFactory.getLogger(PostgresMessageIdMapper.class);
+
+    private final PostgresMailboxDAO mailboxDAO;
+    private final PostgresMessageDAO messageDAO;
+    private final PostgresMailboxMessageDAO mailboxMessageDAO;
+    private final PostgresModSeqProvider modSeqProvider;
+    private final BlobStore blobStore;
+    private final Clock clock;
+    private final PostgresMessageRetriever messageRetriever;
+
+    public PostgresMessageIdMapper(PostgresMailboxDAO mailboxDAO,
+                                   PostgresMessageDAO messageDAO,
+                                   PostgresMailboxMessageDAO mailboxMessageDAO,
+                                   PostgresModSeqProvider modSeqProvider,
+                                   PostgresAttachmentMapper attachmentMapper,
+                                   BlobStore blobStore,
+                                   BlobId.Factory blobIdFactory,
+                                   Clock clock) {
+        this.mailboxDAO = mailboxDAO;
+        this.messageDAO = messageDAO;
+        this.mailboxMessageDAO = mailboxMessageDAO;
+        this.modSeqProvider = modSeqProvider;
+        this.blobStore = blobStore;
+        this.clock = clock;
+        this.messageRetriever = new PostgresMessageRetriever(blobStore, blobIdFactory, attachmentMapper);
+    }
+
+    /** Blocking variant of {@link #findReactive(Collection, MessageMapper.FetchType)}. */
+    @Override
+    public List<MailboxMessage> find(Collection<MessageId> messageIds, MessageMapper.FetchType fetchType) {
+        return findReactive(messageIds, fetchType)
+            .collectList()
+            .block();
+    }
+
+    @Override
+    public Publisher<ComposedMessageIdWithMetaData> findMetadata(MessageId messageId) {
+        return mailboxMessageDAO.findMetadataByMessageId((PostgresMessageId) messageId);
+    }
+
+    @Override
+    public Flux<MailboxMessage> findReactive(Collection<MessageId> messageIds, MessageMapper.FetchType fetchType) {
+        // The DAO yields partially-built messages plus their raw jOOQ row; the
+        // retriever completes them (body blob, attachments) according to fetchType.
+        Flux<Pair<SimpleMailboxMessage.Builder, Record>> fetchMessagePublisher = mailboxMessageDAO.findMessagesByMessageIds(
+            messageIds.stream()
+                .map(PostgresMessageId.class::cast)
+                .collect(ImmutableList.toImmutableList()),
+            fetchType);
+        return messageRetriever.get(fetchType, fetchMessagePublisher);
+    }
+
+    @Override
+    public List<MailboxId> findMailboxes(MessageId messageId) {
+        return mailboxMessageDAO.findMailboxes((PostgresMessageId) messageId)
+            .collect(ImmutableList.toImmutableList())
+            .block();
+    }
+
+    /**
+     * Persists the message body to the blob store, then the message row and its
+     * mailbox linkage. Duplicate inserts (unique-constraint violations) are treated
+     * as already-saved and ignored.
+     *
+     * @throws MailboxException if the target mailbox does not exist
+     */
+    @Override
+    public void save(MailboxMessage mailboxMessage) throws MailboxException {
+        PostgresMailboxId mailboxId = (PostgresMailboxId) mailboxMessage.getMailboxId();
+        mailboxMessage.setSaveDate(Date.from(clock.instant()));
+        MailboxReactorUtils.block(mailboxDAO.findMailboxById(mailboxId)
+            .switchIfEmpty(Mono.error(() -> new MailboxNotFoundException(mailboxId)))
+            .then(saveBodyContent(mailboxMessage))
+            .flatMap(blobId -> messageDAO.insert(mailboxMessage, blobId.asString())
+                .onErrorResume(PostgresUtils.UNIQUE_CONSTRAINT_VIOLATION_PREDICATE, e -> Mono.empty()))
+            .then(mailboxMessageDAO.insert(mailboxMessage)));
+    }
+
+    @Override
+    public void copyInMailbox(MailboxMessage mailboxMessage, Mailbox mailbox) throws MailboxException {
+        MailboxReactorUtils.block(copyInMailboxReactive(mailboxMessage, mailbox));
+    }
+
+    /** Links an existing message into another mailbox; idempotent on duplicate copy. */
+    @Override
+    public Mono<Void> copyInMailboxReactive(MailboxMessage mailboxMessage, Mailbox mailbox) {
+        mailboxMessage.setSaveDate(Date.from(clock.instant()));
+        PostgresMailboxId mailboxId = (PostgresMailboxId) mailbox.getMailboxId();
+        return mailboxMessageDAO.insert(mailboxMessage, mailboxId)
+            .onErrorResume(PostgresUtils.UNIQUE_CONSTRAINT_VIOLATION_PREDICATE, e -> Mono.empty());
+    }
+
+    @Override
+    public void delete(MessageId messageId) {
+        mailboxMessageDAO.deleteByMessageId((PostgresMessageId) messageId).block();
+    }
+
+    @Override
+    public void delete(MessageId messageId, Collection<MailboxId> mailboxIds) {
+        mailboxMessageDAO.deleteByMessageIdAndMailboxIds((PostgresMessageId) messageId,
+            mailboxIds.stream().map(PostgresMailboxId.class::cast).collect(ImmutableList.toImmutableList())).block();
+    }
+
+    /**
+     * Applies a flag update to the message in each of the given mailboxes, returning
+     * per-mailbox {@link UpdatedFlags}. Mailboxes deleted mid-update are skipped.
+     */
+    @Override
+    public Mono<Multimap<MailboxId, UpdatedFlags>> setFlags(MessageId messageId, List<MailboxId> mailboxIds, Flags newState, MessageManager.FlagsUpdateMode updateMode) {
+        return Flux.fromIterable(mailboxIds)
+            .distinct()
+            .map(PostgresMailboxId.class::cast)
+            .concatMap(mailboxId -> flagsUpdateWithRetry(newState, updateMode, mailboxId, messageId))
+            .collect(ImmutableListMultimap.toImmutableListMultimap(Pair::getLeft, Pair::getRight));
+    }
+
+    private Flux<Pair<MailboxId, UpdatedFlags>> flagsUpdateWithRetry(Flags newState, MessageManager.FlagsUpdateMode updateMode, MailboxId mailboxId, MessageId messageId) {
+        return updateFlags(mailboxId, messageId, newState, updateMode)
+            .retry(NUM_RETRIES)
+            .onErrorResume(MailboxDeleteDuringUpdateException.class, e -> {
+                LOGGER.info("Mailbox {} was deleted during flag update", mailboxId);
+                return Mono.empty();
+            })
+            .flatMapIterable(Function.identity())
+            .map(pair -> buildUpdatedFlags(pair.getRight(), pair.getLeft()));
+    }
+
+    private Pair<MailboxId, UpdatedFlags> buildUpdatedFlags(ComposedMessageIdWithMetaData composedMessageIdWithMetaData, Flags oldFlags) {
+        return Pair.of(composedMessageIdWithMetaData.getComposedMessageId().getMailboxId(),
+            UpdatedFlags.builder()
+                .uid(composedMessageIdWithMetaData.getComposedMessageId().getUid())
+                .messageId(composedMessageIdWithMetaData.getComposedMessageId().getMessageId())
+                .modSeq(composedMessageIdWithMetaData.getModSeq())
+                .oldFlags(oldFlags)
+                .newFlags(composedMessageIdWithMetaData.getFlags())
+                .build());
+    }
+
+    // Empty metadata lookup means the mailbox vanished under us: signalled as
+    // MailboxDeleteDuringUpdateException so the caller can skip it after retries.
+    private Mono<List<Pair<Flags, ComposedMessageIdWithMetaData>>> updateFlags(MailboxId mailboxId, MessageId messageId, Flags newState, MessageManager.FlagsUpdateMode updateMode) {
+        PostgresMailboxId postgresMailboxId = (PostgresMailboxId) mailboxId;
+        PostgresMessageId postgresMessageId = (PostgresMessageId) messageId;
+        return mailboxMessageDAO.findMetadataByMessageId(postgresMessageId, postgresMailboxId)
+            .flatMap(oldComposedId -> updateFlags(newState, updateMode, postgresMailboxId, oldComposedId), ReactorUtils.DEFAULT_CONCURRENCY)
+            .switchIfEmpty(Mono.error(MailboxDeleteDuringUpdateException::new))
+            .collectList();
+    }
+
+    private Mono<Pair<Flags, ComposedMessageIdWithMetaData>> updateFlags(Flags newState, MessageManager.FlagsUpdateMode updateMode, PostgresMailboxId mailboxId, ComposedMessageIdWithMetaData oldComposedId) {
+        FlagsUpdateCalculator flagsUpdateCalculator = new FlagsUpdateCalculator(newState, updateMode);
+        Flags newFlags = flagsUpdateCalculator.buildNewFlags(oldComposedId.getFlags());
+        if (identicalFlags(oldComposedId, newFlags)) {
+            // No-op update: skip the write and the modseq bump.
+            return Mono.just(Pair.of(oldComposedId.getFlags(), oldComposedId));
+        } else {
+            return modSeqProvider.nextModSeqReactive(mailboxId)
+                .flatMap(newModSeq -> updateFlags(mailboxId, flagsUpdateCalculator, newModSeq, oldComposedId.getComposedMessageId().getUid())
+                    .map(flags -> Pair.of(oldComposedId.getFlags(), new ComposedMessageIdWithMetaData(
+                        oldComposedId.getComposedMessageId(),
+                        flags,
+                        newModSeq,
+                        oldComposedId.getThreadId()))));
+        }
+    }
+
+    private Mono<Flags> updateFlags(PostgresMailboxId mailboxId, FlagsUpdateCalculator flagsUpdateCalculator, ModSeq newModSeq, MessageUid uid) {
+        switch (flagsUpdateCalculator.getMode()) {
+            case ADD:
+                return mailboxMessageDAO.addFlags(mailboxId, uid, flagsUpdateCalculator.providedFlags(), newModSeq);
+            case REMOVE:
+                return mailboxMessageDAO.removeFlags(mailboxId, uid, flagsUpdateCalculator.providedFlags(), newModSeq);
+            case REPLACE:
+                return mailboxMessageDAO.replaceFlags(mailboxId, uid, flagsUpdateCalculator.providedFlags(), newModSeq);
+            default:
+                return Mono.error(() -> new RuntimeException("Unknown flags update mode " + flagsUpdateCalculator.getMode()));
+        }
+    }
+
+    private boolean identicalFlags(ComposedMessageIdWithMetaData oldComposedId, Flags newFlags) {
+        return oldComposedId.getFlags().equals(newFlags);
+    }
+
+    private Mono<BlobId> saveBodyContent(MailboxMessage message) {
+        return Mono.fromCallable(() -> MESSAGE_BODY_CONTENT_LOADER.apply(message))
+            .flatMap(bodyByteSource -> Mono.from(blobStore.save(blobStore.getDefaultBucketName(), bodyByteSource, LOW_COST)));
+    }
+}
diff --git a/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMessageMapper.java b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMessageMapper.java
new file mode 100644
index 00000000000..5112324b10f
--- /dev/null
+++ b/mailbox/postgres/src/main/java/org/apache/james/mailbox/postgres/mail/PostgresMessageMapper.java
@@ -0,0 +1,446 @@
+/****************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one *
+ * or more contributor license agreements. See the NOTICE file *
+ * distributed with this work for additional information *
+ * regarding copyright ownership. The ASF licenses this file *
+ * to you under the Apache License, Version 2.0 (the *
+ * "License"); you may not use this file except in compliance *
+ * with the License. You may obtain a copy of the License at *
+ * *
+ * http://www.apache.org/licenses/LICENSE-2.0 *
+ * *
+ * Unless required by applicable law or agreed to in writing, *
+ * software distributed under the License is distributed on an *
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
+ * KIND, either express or implied. See the License for the *
+ * specific language governing permissions and limitations *
+ * under the License. *
+ ****************************************************************/
+
+package org.apache.james.mailbox.postgres.mail;
+
+import static org.apache.james.blob.api.BlobStore.StoragePolicy.LOW_COST;
+import static org.apache.james.util.ReactorUtils.DEFAULT_CONCURRENCY;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.time.Clock;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Function;
+
+import jakarta.mail.Flags;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.james.backends.postgres.utils.PostgresExecutor;
+import org.apache.james.backends.postgres.utils.PostgresUtils;
+import org.apache.james.blob.api.BlobId;
+import org.apache.james.blob.api.BlobStore;
+import org.apache.james.mailbox.ApplicableFlagBuilder;
+import org.apache.james.mailbox.FlagsBuilder;
+import org.apache.james.mailbox.MessageUid;
+import org.apache.james.mailbox.ModSeq;
+import org.apache.james.mailbox.exception.MailboxException;
+import org.apache.james.mailbox.model.ComposedMessageId;
+import org.apache.james.mailbox.model.ComposedMessageIdWithMetaData;
+import org.apache.james.mailbox.model.Mailbox;
+import org.apache.james.mailbox.model.MailboxCounters;
+import org.apache.james.mailbox.model.MessageMetaData;
+import org.apache.james.mailbox.model.MessageRange;
+import org.apache.james.mailbox.model.UpdatedFlags;
+import org.apache.james.mailbox.postgres.PostgresMailboxId;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresAttachmentDAO;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresMailboxDAO;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresMailboxMessageDAO;
+import org.apache.james.mailbox.postgres.mail.dao.PostgresMessageDAO;
+import org.apache.james.mailbox.store.FlagsUpdateCalculator;
+import org.apache.james.mailbox.store.MailboxReactorUtils;
+import org.apache.james.mailbox.store.mail.MessageMapper;
+import org.apache.james.mailbox.store.mail.model.MailboxMessage;
+import org.apache.james.mailbox.store.mail.model.impl.SimpleMailboxMessage;
+import org.apache.james.util.streams.Limit;
+import org.jooq.Record;
+
+import com.google.common.io.ByteSource;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+public class PostgresMessageMapper implements MessageMapper {
+
+ private static final Function MESSAGE_BODY_CONTENT_LOADER = (mailboxMessage) -> new ByteSource() {
+ @Override
+ public InputStream openStream() {
+ try {
+ return mailboxMessage.getBodyContent();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ public long size() {
+ return mailboxMessage.getBodyOctets();
+ }
+ };
+
+
+ private final PostgresMessageDAO messageDAO;
+ private final PostgresMailboxMessageDAO mailboxMessageDAO;
+ private final PostgresMailboxDAO mailboxDAO;
+ private final PostgresModSeqProvider modSeqProvider;
+ private final PostgresUidProvider uidProvider;
+ private final BlobStore blobStore;
+ private final Clock clock;
+ private final PostgresMessageRetriever messageRetriever;
+
+ public PostgresMessageMapper(PostgresExecutor postgresExecutor,
+ PostgresModSeqProvider modSeqProvider,
+ PostgresUidProvider uidProvider,
+ BlobStore blobStore,
+ Clock clock,
+ BlobId.Factory blobIdFactory) {
+ this.messageDAO = new PostgresMessageDAO(postgresExecutor, blobIdFactory);
+ this.mailboxMessageDAO = new PostgresMailboxMessageDAO(postgresExecutor);
+ this.mailboxDAO = new PostgresMailboxDAO(postgresExecutor);
+ this.modSeqProvider = modSeqProvider;
+ this.uidProvider = uidProvider;
+ this.blobStore = blobStore;
+ this.clock = clock;
+ PostgresAttachmentMapper attachmentMapper = new PostgresAttachmentMapper(new PostgresAttachmentDAO(postgresExecutor, blobIdFactory), blobStore);
+ this.messageRetriever = new PostgresMessageRetriever(blobStore, blobIdFactory, attachmentMapper);
+ }
+
+
+ @Override
+ public Iterator