Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Updated RFS to filter out system indices by default #763

Merged
merged 3 commits into from
Jun 25, 2024
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Clock;
import java.util.List;
import java.util.UUID;
import java.util.function.Function;

Expand Down Expand Up @@ -81,6 +82,10 @@ public static class Args {
description = "Optional. The target password; if not provided, will assume no auth on target")
public String targetPass = null;

@Parameter(names = {"--index-allowlist"}, description = ("Optional. List of index names to migrate"
+ " (e.g. 'logs_2024_01, logs_2024_02'). Default: all non-system indices (e.g. those not starting with '.')"), required = false)
public List<String> indexAllowlist = List.of();

@Parameter(names = {"--max-shard-size-bytes"}, description = ("Optional. The maximum shard size, in bytes, to allow when"
+ " performing the document migration. Useful for preventing disk overflow. Default: 50 * 1024 * 1024 * 1024 (50 GB)"), required = false)
public long maxShardSizeBytes = 50 * 1024 * 1024 * 1024L;
Expand Down Expand Up @@ -126,7 +131,7 @@ public static void main(String[] args) throws Exception {
luceneDirPath, ElasticsearchConstants_ES_7_10.BUFFER_SIZE_IN_BYTES);

run(LuceneDocumentsReader::new, reindexer, workCoordinator, processManager, indexMetadataFactory,
arguments.snapshotName, shardMetadataFactory, unpackerFactory, arguments.maxShardSizeBytes);
arguments.snapshotName, arguments.indexAllowlist, shardMetadataFactory, unpackerFactory, arguments.maxShardSizeBytes);
});
}

Expand All @@ -136,12 +141,13 @@ public static DocumentsRunner.CompletionStatus run(Function<Path,LuceneDocuments
LeaseExpireTrigger leaseExpireTrigger,
IndexMetadata.Factory indexMetadataFactory,
String snapshotName,
List<String> indexAllowlist,
ShardMetadata.Factory shardMetadataFactory,
SnapshotShardUnpacker.Factory unpackerFactory,
long maxShardSizeBytes)
throws IOException, InterruptedException, NoWorkLeftException {
var scopedWorkCoordinator = new ScopedWorkCoordinator(workCoordinator, leaseExpireTrigger);
confirmShardPrepIsComplete(indexMetadataFactory, snapshotName, scopedWorkCoordinator);
confirmShardPrepIsComplete(indexMetadataFactory, snapshotName, indexAllowlist, scopedWorkCoordinator);
if (!workCoordinator.workItemsArePending()) {
throw new NoWorkLeftException("No work items are pending/all work items have been processed. Returning.");
}
Expand All @@ -159,6 +165,7 @@ public static DocumentsRunner.CompletionStatus run(Function<Path,LuceneDocuments

private static void confirmShardPrepIsComplete(IndexMetadata.Factory indexMetadataFactory,
String snapshotName,
List<String> indexAllowlist,
ScopedWorkCoordinator scopedWorkCoordinator)
throws IOException, InterruptedException
{
Expand All @@ -168,7 +175,7 @@ private static void confirmShardPrepIsComplete(IndexMetadata.Factory indexMetada
long lockRenegotiationMillis = 1000;
for (int shardSetupAttemptNumber=0; ; ++shardSetupAttemptNumber) {
try {
new ShardWorkPreparer().run(scopedWorkCoordinator, indexMetadataFactory, snapshotName);
new ShardWorkPreparer().run(scopedWorkCoordinator, indexMetadataFactory, snapshotName, indexAllowlist);
return;
} catch (IWorkCoordinator.LeaseLockHeldElsewhereException e) {
long finalLockRenegotiationMillis = lockRenegotiationMillis;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,7 @@ public void test(String sourceImageName, String targetImageName, int numWorkers)
osTargetContainer.start();

final var SNAPSHOT_NAME = "test_snapshot";
final List<String> INDEX_ALLOWLIST = List.of();
CreateSnapshot.run(
c -> new FileSystemSnapshotCreator(SNAPSHOT_NAME, c, ElasticsearchContainer.CLUSTER_SNAPSHOT_DIR),
new OpenSearchClient(esSourceContainer.getUrl(), null));
Expand All @@ -106,13 +107,13 @@ public void test(String sourceImageName, String targetImageName, int numWorkers)

var targetClient = new OpenSearchClient(osTargetContainer.getHttpHostAddress(), null);
var sourceRepo = new FileSystemRepo(tempDir);
migrateMetadata(sourceRepo, targetClient, SNAPSHOT_NAME);
migrateMetadata(sourceRepo, targetClient, SNAPSHOT_NAME, INDEX_ALLOWLIST);

var workerFutures = new ArrayList<CompletableFuture<Void>>();
var runCounter = new AtomicInteger();
for (int i = 0; i < numWorkers; ++i) {
workerFutures.add(CompletableFuture.supplyAsync(() ->
migrateDocumentsSequentially(sourceRepo, SNAPSHOT_NAME,
migrateDocumentsSequentially(sourceRepo, SNAPSHOT_NAME, INDEX_ALLOWLIST,
osTargetContainer.getHttpHostAddress(), runCounter)));
}
var thrownException = Assertions.assertThrows(ExecutionException.class, () ->
Expand Down Expand Up @@ -176,11 +177,12 @@ private void checkClusterMigrationOnFinished(ElasticsearchContainer esSourceCont
@SneakyThrows
private Void migrateDocumentsSequentially(FileSystemRepo sourceRepo,
String snapshotName,
List<String> indexAllowlist,
String targetAddress,
AtomicInteger runCounter) {
for (int runNumber=0; ; ++runNumber) {
try {
var workResult = migrateDocumentsWithOneWorker(sourceRepo, snapshotName, targetAddress);
var workResult = migrateDocumentsWithOneWorker(sourceRepo, snapshotName, indexAllowlist, targetAddress);
if (workResult == DocumentsRunner.CompletionStatus.NOTHING_DONE) {
return null;
} else {
Expand All @@ -197,7 +199,7 @@ private Void migrateDocumentsSequentially(FileSystemRepo sourceRepo,
}
}

private static void migrateMetadata(SourceRepo sourceRepo, OpenSearchClient targetClient, String snapshotName) {
private static void migrateMetadata(SourceRepo sourceRepo, OpenSearchClient targetClient, String snapshotName, List<String> indexAllowlist) {
SnapshotRepo.Provider repoDataProvider = new SnapshotRepoProvider_ES_7_10(sourceRepo);
GlobalMetadata.Factory metadataFactory = new GlobalMetadataFactory_ES_7_10(repoDataProvider);
GlobalMetadataCreator_OS_2_11 metadataCreator = new GlobalMetadataCreator_OS_2_11(targetClient,
Expand All @@ -208,7 +210,7 @@ private static void migrateMetadata(SourceRepo sourceRepo, OpenSearchClient targ

IndexMetadata.Factory indexMetadataFactory = new IndexMetadataFactory_ES_7_10(repoDataProvider);
IndexCreator_OS_2_11 indexCreator = new IndexCreator_OS_2_11(targetClient);
new IndexRunner(snapshotName, indexMetadataFactory, indexCreator, transformer).migrateIndices();
new IndexRunner(snapshotName, indexMetadataFactory, indexCreator, transformer, indexAllowlist).migrateIndices();
}

private static class FilteredLuceneDocumentsReader extends LuceneDocumentsReader {
Expand All @@ -230,6 +232,7 @@ static class LeasePastError extends Error { }
@SneakyThrows
private DocumentsRunner.CompletionStatus migrateDocumentsWithOneWorker(SourceRepo sourceRepo,
String snapshotName,
List<String> indexAllowlist,
String targetAddress)
throws RfsMigrateDocuments.NoWorkLeftException
{
Expand Down Expand Up @@ -265,6 +268,7 @@ private DocumentsRunner.CompletionStatus migrateDocumentsWithOneWorker(SourceRep
processManager,
indexMetadataFactory,
snapshotName,
indexAllowlist,
shardMetadataFactory,
unpackerFactory,
16*1024*1024);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ public static class Args {
public String targetPass = null;

@Parameter(names = {"--index-allowlist"}, description = ("Optional. List of index names to migrate"
+ " (e.g. 'logs_2024_01, logs_2024_02'). Default: all indices"), required = false)
+ " (e.g. 'logs_2024_01, logs_2024_02'). Default: all non-system indices (e.g. those not starting with '.')"), required = false)
public List<String> indexAllowlist = List.of();

@Parameter(names = {"--index-template-allowlist"}, description = ("Optional. List of index template names to migrate"
Expand Down Expand Up @@ -103,6 +103,7 @@ public static void main(String[] args) throws Exception {
final String targetHost = arguments.targetHost;
final String targetUser = arguments.targetUser;
final String targetPass = arguments.targetPass;
final List<String> indexAllowlist = arguments.indexAllowlist;
final List<String> indexTemplateAllowlist = arguments.indexTemplateAllowlist;
final List<String> componentTemplateAllowlist = arguments.componentTemplateAllowlist;
final int awarenessDimensionality = arguments.minNumberOfReplicas + 1;
Expand All @@ -126,7 +127,7 @@ public static void main(String[] args) throws Exception {

final IndexMetadata.Factory indexMetadataFactory = new IndexMetadataFactory_ES_7_10(repoDataProvider);
final IndexCreator_OS_2_11 indexCreator = new IndexCreator_OS_2_11(targetClient);
new IndexRunner(snapshotName, indexMetadataFactory, indexCreator, transformer).migrateIndices();
new IndexRunner(snapshotName, indexMetadataFactory, indexCreator, transformer, indexAllowlist).migrateIndices();
});
}
}
7 changes: 4 additions & 3 deletions RFS/src/main/java/com/rfs/RunRfsWorker.java
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,8 @@
final String targetHost = arguments.targetHost;
final String targetUser = arguments.targetUser;
final String targetPass = arguments.targetPass;
final List<String> indexTemplateAllowlist = arguments.indexAllowlist;
final List<String> indexAllowlist = arguments.indexAllowlist;
final List<String> indexTemplateAllowlist = arguments.indexTemplateAllowlist;

Check warning on line 136 in RFS/src/main/java/com/rfs/RunRfsWorker.java

View check run for this annotation

Codecov / codecov/patch

RFS/src/main/java/com/rfs/RunRfsWorker.java#L135-L136

Added lines #L135 - L136 were not covered by tests
final List<String> componentTemplateAllowlist = arguments.componentTemplateAllowlist;
final long maxShardSizeBytes = arguments.maxShardSizeBytes;
final int awarenessDimensionality = arguments.minNumberOfReplicas + 1;
Expand Down Expand Up @@ -160,7 +161,7 @@

IndexMetadata.Factory indexMetadataFactory = new IndexMetadataFactory_ES_7_10(repoDataProvider);
IndexCreator_OS_2_11 indexCreator = new IndexCreator_OS_2_11(targetClient);
new IndexRunner(snapshotName, indexMetadataFactory, indexCreator, transformer).migrateIndices();
new IndexRunner(snapshotName, indexMetadataFactory, indexCreator, transformer, indexAllowlist).migrateIndices();

Check warning on line 164 in RFS/src/main/java/com/rfs/RunRfsWorker.java

View check run for this annotation

Codecov / codecov/patch

RFS/src/main/java/com/rfs/RunRfsWorker.java#L164

Added line #L164 was not covered by tests

ShardMetadata.Factory shardMetadataFactory = new ShardMetadataFactory_ES_7_10(repoDataProvider);
DefaultSourceRepoAccessor repoAccessor = new DefaultSourceRepoAccessor(sourceRepo);
Expand All @@ -174,7 +175,7 @@
var workCoordinator = new OpenSearchWorkCoordinator(new ApacheHttpClient(new URI(targetHost)),
5, UUID.randomUUID().toString());
var scopedWorkCoordinator = new ScopedWorkCoordinator(workCoordinator, processManager);
new ShardWorkPreparer().run(scopedWorkCoordinator, indexMetadataFactory, snapshotName);
new ShardWorkPreparer().run(scopedWorkCoordinator, indexMetadataFactory, snapshotName, indexAllowlist);

Check warning on line 178 in RFS/src/main/java/com/rfs/RunRfsWorker.java

View check run for this annotation

Codecov / codecov/patch

RFS/src/main/java/com/rfs/RunRfsWorker.java#L178

Added line #L178 was not covered by tests
new DocumentsRunner(scopedWorkCoordinator,
(name,shard) -> shardMetadataFactory.fromRepo(snapshotName,name,shard),
unpackerFactory,
Expand Down
23 changes: 23 additions & 0 deletions RFS/src/main/java/com/rfs/common/FilterScheme.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
package com.rfs.common;

import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Predicate;

/**
 * Central home for the index-filtering policy applied during a migration.
 * Building the predicate in one place keeps IndexRunner and ShardWorkPreparer
 * accepting/rejecting exactly the same set of indices.
 */
public class FilterScheme {

    // Utility class; never instantiated.
    private FilterScheme() {}

    /**
     * Builds a predicate deciding whether a snapshot index should be migrated.
     * <p>
     * Policy: when {@code indexAllowlist} is empty, every index is accepted
     * except system indices (names starting with '.'); otherwise only indices
     * whose exact name appears in the allowlist are accepted.
     * TODO(backlog): support regex patterns in the allowlist (per review).
     *
     * @param indexAllowlist exact index names to migrate; empty means
     *                       "all non-system indices"
     * @param indexNameAcceptanceObserver invoked once per evaluated index with
     *                                    its name and the accept/reject
     *                                    decision (e.g. for logging)
     * @return a predicate suitable for filtering a stream of snapshot indices
     */
    public static Predicate<SnapshotRepo.Index> filterIndicesByAllowList(List<String> indexAllowlist, BiConsumer<String, Boolean> indexNameAcceptanceObserver) {
        return index -> {
            boolean accepted;
            if (indexAllowlist.isEmpty()) {
                // Default behavior: skip system indices (dot-prefixed names).
                accepted = !index.getName().startsWith(".");
            } else {
                accepted = indexAllowlist.contains(index.getName());
            }

            indexNameAcceptanceObserver.accept(index.getName(), accepted);

            return accepted;
        };
    }
}
33 changes: 22 additions & 11 deletions RFS/src/main/java/com/rfs/worker/IndexRunner.java
Original file line number Diff line number Diff line change
@@ -1,12 +1,13 @@
package com.rfs.worker;

import java.util.Optional;
import java.util.List;
import java.util.function.BiConsumer;

import com.fasterxml.jackson.databind.node.ObjectNode;
import com.rfs.common.SnapshotRepo;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;

import com.rfs.common.FilterScheme;
import com.rfs.common.IndexMetadata;
import com.rfs.transformers.Transformer;
import com.rfs.version_os_2_11.IndexCreator_OS_2_11;
Expand All @@ -19,18 +20,28 @@
private final IndexMetadata.Factory metadataFactory;
private final IndexCreator_OS_2_11 indexCreator;
private final Transformer transformer;
private final List<String> indexAllowlist;

public void migrateIndices() {
SnapshotRepo.Provider repoDataProvider = metadataFactory.getRepoDataProvider();
// TODO - parallelize this, maybe ~400-1K requests per thread and do it asynchronously
for (SnapshotRepo.Index index : repoDataProvider.getIndicesInSnapshot(snapshotName)) {
var indexMetadata = metadataFactory.fromRepo(snapshotName, index.getName());
var root = indexMetadata.toObjectNode();
var transformedRoot = transformer.transformIndexMetadata(root);
var resultOp = indexCreator.create(transformedRoot, index.getName(), indexMetadata.getId());
resultOp.ifPresentOrElse(value -> log.info("Index " + index.getName() + " created successfully"),
() -> log.info("Index " + index.getName() + " already existed; no work required")
);
}

BiConsumer<String, Boolean> logger = (indexName, accepted) -> {

Check warning on line 29 in RFS/src/main/java/com/rfs/worker/IndexRunner.java

View check run for this annotation

Codecov / codecov/patch

RFS/src/main/java/com/rfs/worker/IndexRunner.java#L29

Added line #L29 was not covered by tests
if (!accepted) {
log.info("Index " + indexName + " rejected by allowlist");

Check warning on line 31 in RFS/src/main/java/com/rfs/worker/IndexRunner.java

View check run for this annotation

Codecov / codecov/patch

RFS/src/main/java/com/rfs/worker/IndexRunner.java#L31

Added line #L31 was not covered by tests
}
};
repoDataProvider.getIndicesInSnapshot(snapshotName).stream()
.filter(FilterScheme.filterIndicesByAllowList(indexAllowlist, logger))
.peek(index -> {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: we should be able to do a .forEach instead of the peek and count

var indexMetadata = metadataFactory.fromRepo(snapshotName, index.getName());
var root = indexMetadata.toObjectNode();
var transformedRoot = transformer.transformIndexMetadata(root);
var resultOp = indexCreator.create(transformedRoot, index.getName(), indexMetadata.getId());
resultOp.ifPresentOrElse(value -> log.info("Index " + index.getName() + " created successfully"),
() -> log.info("Index " + index.getName() + " already existed; no work required")

Check warning on line 42 in RFS/src/main/java/com/rfs/worker/IndexRunner.java

View check run for this annotation

Codecov / codecov/patch

RFS/src/main/java/com/rfs/worker/IndexRunner.java#L33-L42

Added lines #L33 - L42 were not covered by tests
);
})
.count(); // Force the stream to execute

Check warning on line 45 in RFS/src/main/java/com/rfs/worker/IndexRunner.java

View check run for this annotation

Codecov / codecov/patch

RFS/src/main/java/com/rfs/worker/IndexRunner.java#L44-L45

Added lines #L44 - L45 were not covered by tests
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

foreach() will force the stream to run. I think that this will generate a linting error as it is since the output of count() is discarded.

}
}
40 changes: 29 additions & 11 deletions RFS/src/main/java/com/rfs/worker/ShardWorkPreparer.java
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

import com.rfs.cms.IWorkCoordinator;
import com.rfs.cms.ScopedWorkCoordinator;
import com.rfs.common.FilterScheme;
import com.rfs.common.IndexMetadata;
import com.rfs.common.SnapshotRepo;
import lombok.Lombok;
Expand All @@ -10,6 +11,9 @@

import java.io.IOException;
import java.time.Duration;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.stream.IntStream;

/**
* This class adds workitemes (leasable mutexes) via the WorkCoordinator so that future
Expand All @@ -22,7 +26,7 @@
public static final String SHARD_SETUP_WORK_ITEM_ID = "shard_setup";

public void run(ScopedWorkCoordinator scopedWorkCoordinator, IndexMetadata.Factory metadataFactory,
String snapshotName)
String snapshotName, List<String> indexAllowlist)
throws IOException, InterruptedException {

// ensure that there IS an index to house the shared state that we're going to be manipulating
Expand All @@ -44,7 +48,7 @@

@Override
public Void onAcquiredWork(IWorkCoordinator.WorkItemAndDuration workItem) throws IOException {
prepareShardWorkItems(scopedWorkCoordinator.workCoordinator, metadataFactory, snapshotName);
prepareShardWorkItems(scopedWorkCoordinator.workCoordinator, metadataFactory, snapshotName, indexAllowlist);

Check warning on line 51 in RFS/src/main/java/com/rfs/worker/ShardWorkPreparer.java

View check run for this annotation

Codecov / codecov/patch

RFS/src/main/java/com/rfs/worker/ShardWorkPreparer.java#L51

Added line #L51 was not covered by tests
return null;
}

Expand All @@ -56,18 +60,32 @@
}

@SneakyThrows
private static void prepareShardWorkItems(IWorkCoordinator workCoordinator,
IndexMetadata.Factory metadataFactory, String snapshotName) {
private static void prepareShardWorkItems(IWorkCoordinator workCoordinator, IndexMetadata.Factory metadataFactory,
String snapshotName, List<String> indexAllowlist) {
log.info("Setting up the Documents Work Items...");
SnapshotRepo.Provider repoDataProvider = metadataFactory.getRepoDataProvider();
for (SnapshotRepo.Index index : repoDataProvider.getIndicesInSnapshot(snapshotName)) {
IndexMetadata.Data indexMetadata = metadataFactory.fromRepo(snapshotName, index.getName());
log.info("Index " + indexMetadata.getName() + " has " + indexMetadata.getNumberOfShards() + " shards");
for (int shardId = 0; shardId < indexMetadata.getNumberOfShards(); shardId++) {
log.info("Creating Documents Work Item for index: " + indexMetadata.getName() + ", shard: " + shardId);
workCoordinator.createUnassignedWorkItem(IndexAndShard.formatAsWorkItemString(indexMetadata.getName(), shardId));

BiConsumer<String, Boolean> logger = (indexName, accepted) -> {

Check warning on line 68 in RFS/src/main/java/com/rfs/worker/ShardWorkPreparer.java

View check run for this annotation

Codecov / codecov/patch

RFS/src/main/java/com/rfs/worker/ShardWorkPreparer.java#L68

Added line #L68 was not covered by tests
if (!accepted) {
log.info("Index " + indexName + " rejected by allowlist");

Check warning on line 70 in RFS/src/main/java/com/rfs/worker/ShardWorkPreparer.java

View check run for this annotation

Codecov / codecov/patch

RFS/src/main/java/com/rfs/worker/ShardWorkPreparer.java#L70

Added line #L70 was not covered by tests
}
}
};
repoDataProvider.getIndicesInSnapshot(snapshotName).stream()
.filter(FilterScheme.filterIndicesByAllowList(indexAllowlist, logger))
.peek(index -> {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: same as above with foreach

IndexMetadata.Data indexMetadata = metadataFactory.fromRepo(snapshotName, index.getName());
log.info("Index " + indexMetadata.getName() + " has " + indexMetadata.getNumberOfShards() + " shards");
IntStream.range(0, indexMetadata.getNumberOfShards()).forEach(shardId -> {
log.info("Creating Documents Work Item for index: " + indexMetadata.getName() + ", shard: " + shardId);

Check warning on line 79 in RFS/src/main/java/com/rfs/worker/ShardWorkPreparer.java

View check run for this annotation

Codecov / codecov/patch

RFS/src/main/java/com/rfs/worker/ShardWorkPreparer.java#L72-L79

Added lines #L72 - L79 were not covered by tests
try {
workCoordinator.createUnassignedWorkItem(IndexAndShard.formatAsWorkItemString(indexMetadata.getName(), shardId));
} catch (IOException e) {
throw Lombok.sneakyThrow(e);
}
});
})
.count(); // Force the stream to execute

Check warning on line 87 in RFS/src/main/java/com/rfs/worker/ShardWorkPreparer.java

View check run for this annotation

Codecov / codecov/patch

RFS/src/main/java/com/rfs/worker/ShardWorkPreparer.java#L81-L87

Added lines #L81 - L87 were not covered by tests

log.info("Finished setting up the Documents Work Items.");
}
}
Loading