Extract common code into CompactionJobCompletionUtils
kr565370 committed May 7, 2024
1 parent 588ad7b commit 5912af6
Showing 6 changed files with 84 additions and 82 deletions.
CompactionJobCompletion.java
@@ -20,13 +20,13 @@

import sleeper.compaction.job.CompactionJob;
import sleeper.compaction.job.CompactionJobStatusStore;
import sleeper.core.statestore.FileReference;
import sleeper.core.statestore.StateStore;
import sleeper.core.statestore.StateStoreException;
import sleeper.core.statestore.exception.FileReferenceNotAssignedToJobException;
import sleeper.core.util.ExponentialBackoffWithJitter;
import sleeper.core.util.ExponentialBackoffWithJitter.WaitRange;

import static sleeper.compaction.job.completion.CompactionJobCompletionUtils.updateStateStoreSuccess;

public class CompactionJobCompletion {
public static final Logger LOGGER = LoggerFactory.getLogger(CompactionJobCompletion.class);

@@ -55,39 +55,11 @@ public CompactionJobCompletion(

public void apply(CompactionJobCompletionRequest request) throws StateStoreException, InterruptedException {
CompactionJob job = request.getJob();
updateStateStoreSuccess(job, request.getRecordsWritten());
updateStateStoreSuccess(job, request.getRecordsWritten(), stateStoreProvider.getByTableId(job.getTableId()),
jobAssignmentWaitAttempts, jobAssignmentWaitBackoff);
statusStore.jobFinished(job, request.buildRecordsProcessedSummary(), request.getTaskId());
}

private void updateStateStoreSuccess(CompactionJob job, long recordsWritten) throws StateStoreException, InterruptedException {
StateStore stateStore = stateStoreProvider.getByTableId(job.getTableId());
FileReference fileReference = FileReference.builder()
.filename(job.getOutputFile())
.partitionId(job.getPartitionId())
.numberOfRecords(recordsWritten)
.countApproximate(false)
.onlyContainsDataForThisPartition(true)
.build();

// Compaction jobs are sent for execution before updating the state store to assign the input files to the job.
// Sometimes the compaction can finish before the job assignment is finished. We wait for the job assignment
// rather than immediately failing the job run.
FileReferenceNotAssignedToJobException failure = null;
for (int attempts = 0; attempts < jobAssignmentWaitAttempts; attempts++) {
jobAssignmentWaitBackoff.waitBeforeAttempt(attempts);
try {
stateStore.atomicallyReplaceFileReferencesWithNewOne(job.getId(), job.getPartitionId(), job.getInputFiles(), fileReference);
LOGGER.info("Atomically replaced {} file references in state store with file reference {}.", job.getInputFiles(), fileReference);
return;
} catch (FileReferenceNotAssignedToJobException e) {
LOGGER.warn("Job not yet assigned to input files on attempt {} of {}: {}",
attempts + 1, jobAssignmentWaitAttempts, e.getMessage());
failure = e;
}
}
throw new TimedOutWaitingForFileAssignmentsException(failure);
}

@FunctionalInterface
public interface GetStateStore {
StateStore getByTableId(String tableId);
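The GetStateStore functional interface above is the hook through which CompactionJobCompletion resolves the state store for a job's table. A minimal wiring sketch, assuming a map-backed lookup; the example class, the map, and the package declaration are assumptions for illustration and are not part of this commit:

package sleeper.compaction.job.completion; // assumed package, matching the other classes in this commit

import java.util.HashMap;
import java.util.Map;

import sleeper.core.statestore.StateStore;

// Illustrative only: back the lookup with an in-memory map of state stores keyed by table ID.
public class GetStateStoreExample {

    public static CompactionJobCompletion.GetStateStore mapBacked(Map<String, StateStore> stateStoresByTableId) {
        // getByTableId(String) is the single abstract method, so a method reference to Map::get satisfies it.
        return stateStoresByTableId::get;
    }

    public static void main(String[] args) {
        Map<String, StateStore> stateStoresByTableId = new HashMap<>();
        CompactionJobCompletion.GetStateStore getStateStore = mapBacked(stateStoresByTableId);
        System.out.println(getStateStore.getByTableId("missing-table")); // prints null, nothing registered yet
    }
}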
CompactionJobCompletionUtils.java
@@ -0,0 +1,71 @@
/*
* Copyright 2022-2024 Crown Copyright
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sleeper.compaction.job.completion;

import sleeper.compaction.job.CompactionJob;
import sleeper.core.statestore.FileReference;
import sleeper.core.statestore.StateStore;
import sleeper.core.statestore.StateStoreException;
import sleeper.core.statestore.exception.FileReferenceNotAssignedToJobException;
import sleeper.core.util.ExponentialBackoffWithJitter;
import sleeper.core.util.ExponentialBackoffWithJitter.WaitRange;

public class CompactionJobCompletionUtils {
public static final int JOB_ASSIGNMENT_WAIT_ATTEMPTS = 10;
public static final WaitRange JOB_ASSIGNMENT_WAIT_RANGE = WaitRange.firstAndMaxWaitCeilingSecs(2, 60);

private CompactionJobCompletionUtils() {
}

public static void updateStateStoreSuccess(
CompactionJob job,
long recordsWritten,
StateStore stateStore) throws StateStoreException, InterruptedException {
updateStateStoreSuccess(job, recordsWritten, stateStore,
JOB_ASSIGNMENT_WAIT_ATTEMPTS,
new ExponentialBackoffWithJitter(JOB_ASSIGNMENT_WAIT_RANGE));
}

public static void updateStateStoreSuccess(
CompactionJob job,
long recordsWritten,
StateStore stateStore,
int jobAssignmentWaitAttempts,
ExponentialBackoffWithJitter jobAssignmentWaitBackoff) throws StateStoreException, InterruptedException {
FileReference fileReference = FileReference.builder()
.filename(job.getOutputFile())
.partitionId(job.getPartitionId())
.numberOfRecords(recordsWritten)
.countApproximate(false)
.onlyContainsDataForThisPartition(true)
.build();

// Compaction jobs are sent for execution before updating the state store to assign the input files to the job.
// Sometimes the compaction can finish before the job assignment is finished. We wait for the job assignment
// rather than immediately failing the job run.
FileReferenceNotAssignedToJobException failure = null;
for (int attempts = 0; attempts < jobAssignmentWaitAttempts; attempts++) {
jobAssignmentWaitBackoff.waitBeforeAttempt(attempts);
try {
stateStore.atomicallyReplaceFileReferencesWithNewOne(job.getId(), job.getPartitionId(), job.getInputFiles(), fileReference);
return;
} catch (FileReferenceNotAssignedToJobException e) {
failure = e;
}
}
throw new TimedOutWaitingForFileAssignmentsException(failure);
}
}
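The new class exposes two overloads: the three-argument form applies the default retry behaviour (10 wait attempts with a 2-60 second exponential backoff and jitter), while the five-argument form lets callers such as CompactionJobCompletion and the tests control the retries. A usage sketch follows; the wrapper class, method names, and retry values are illustrative, not from this commit, though the updateStateStoreSuccess signatures match the code above:

package sleeper.compaction.job.completion; // assumed package for the example, matching the new utility class

import sleeper.compaction.job.CompactionJob;
import sleeper.core.statestore.StateStore;
import sleeper.core.statestore.StateStoreException;
import sleeper.core.util.ExponentialBackoffWithJitter;
import sleeper.core.util.ExponentialBackoffWithJitter.WaitRange;

import static sleeper.compaction.job.completion.CompactionJobCompletionUtils.updateStateStoreSuccess;

// Illustrative callers of the extracted utility; the job, record count and state store are assumed to come from elsewhere.
public class CompactionJobCompletionUtilsExample {

    public static void completeWithDefaults(CompactionJob job, long recordsWritten, StateStore stateStore)
            throws StateStoreException, InterruptedException {
        // Default retry behaviour: JOB_ASSIGNMENT_WAIT_ATTEMPTS (10) attempts, 2-60s backoff with jitter.
        updateStateStoreSuccess(job, recordsWritten, stateStore);
    }

    public static void completeWithCustomRetry(CompactionJob job, long recordsWritten, StateStore stateStore)
            throws StateStoreException, InterruptedException {
        // Explicit retry settings; the attempt count and wait range here are illustrative values only.
        updateStateStoreSuccess(job, recordsWritten, stateStore,
                5, new ExponentialBackoffWithJitter(WaitRange.firstAndMaxWaitCeilingSecs(1, 30)));
    }
}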
@@ -43,7 +43,7 @@

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static sleeper.compaction.job.execution.testutils.CompactSortedFilesTestUtils.updateStateStoreSuccess;
import static sleeper.compaction.job.completion.CompactionJobCompletionUtils.updateStateStoreSuccess;
import static sleeper.configuration.properties.InstancePropertiesTestHelper.createTestInstanceProperties;
import static sleeper.configuration.properties.table.TablePropertiesTestHelper.createTestTableProperties;
import static sleeper.core.schema.SchemaTestHelper.schemaWithKey;
CompactSortedFilesTestUtils.java
@@ -16,28 +16,19 @@
package sleeper.compaction.job.execution.testutils;

import sleeper.compaction.job.CompactionJob;
import sleeper.compaction.job.completion.TimedOutWaitingForFileAssignmentsException;
import sleeper.core.schema.Field;
import sleeper.core.schema.Schema;
import sleeper.core.schema.type.LongType;
import sleeper.core.schema.type.PrimitiveType;
import sleeper.core.schema.type.Type;
import sleeper.core.statestore.FileReference;
import sleeper.core.statestore.StateStore;
import sleeper.core.statestore.StateStoreException;
import sleeper.core.statestore.exception.FileReferenceNotAssignedToJobException;
import sleeper.core.util.ExponentialBackoffWithJitter;
import sleeper.core.util.ExponentialBackoffWithJitter.WaitRange;

import java.util.stream.Stream;

import static java.util.stream.Collectors.toUnmodifiableList;
import static sleeper.core.statestore.AssignJobIdRequest.assignJobOnPartitionToFiles;

public class CompactSortedFilesTestUtils {
public static final int JOB_ASSIGNMENT_WAIT_ATTEMPTS = 10;
public static final WaitRange JOB_ASSIGNMENT_WAIT_RANGE = WaitRange.firstAndMaxWaitCeilingSecs(2, 60);

private CompactSortedFilesTestUtils() {
}

@@ -72,43 +63,4 @@ public static void assignJobIdsToInputFiles(StateStore stateStore, CompactionJob
.map(job -> assignJobOnPartitionToFiles(job.getId(), job.getPartitionId(), job.getInputFiles()))
.collect(toUnmodifiableList()));
}

public static void updateStateStoreSuccess(
CompactionJob job,
long recordsWritten,
StateStore stateStore) throws StateStoreException, InterruptedException {
updateStateStoreSuccess(job, recordsWritten, stateStore,
JOB_ASSIGNMENT_WAIT_ATTEMPTS,
new ExponentialBackoffWithJitter(JOB_ASSIGNMENT_WAIT_RANGE));
}

public static void updateStateStoreSuccess(
CompactionJob job,
long recordsWritten,
StateStore stateStore,
int jobAssignmentWaitAttempts,
ExponentialBackoffWithJitter jobAssignmentWaitBackoff) throws StateStoreException, InterruptedException {
FileReference fileReference = FileReference.builder()
.filename(job.getOutputFile())
.partitionId(job.getPartitionId())
.numberOfRecords(recordsWritten)
.countApproximate(false)
.onlyContainsDataForThisPartition(true)
.build();

// Compaction jobs are sent for execution before updating the state store to assign the input files to the job.
// Sometimes the compaction can finish before the job assignment is finished. We wait for the job assignment
// rather than immediately failing the job run.
FileReferenceNotAssignedToJobException failure = null;
for (int attempts = 0; attempts < jobAssignmentWaitAttempts; attempts++) {
jobAssignmentWaitBackoff.waitBeforeAttempt(attempts);
try {
stateStore.atomicallyReplaceFileReferencesWithNewOne(job.getId(), job.getPartitionId(), job.getInputFiles(), fileReference);
return;
} catch (FileReferenceNotAssignedToJobException e) {
failure = e;
}
}
throw new TimedOutWaitingForFileAssignmentsException(failure);
}
}
7 changes: 7 additions & 0 deletions java/system-test/system-test-dsl/pom.xml
@@ -115,6 +115,13 @@
<version>${project.parent.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>sleeper</groupId>
<artifactId>compaction-job-execution</artifactId>
<version>${project.parent.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>sleeper</groupId>
<artifactId>ingest-core</artifactId>
InMemoryCompaction.java
@@ -62,7 +62,7 @@
import java.util.UUID;

import static java.util.stream.Collectors.toUnmodifiableList;
import static sleeper.compaction.job.execution.testutils.CompactSortedFilesTestUtils.updateStateStoreSuccess;
import static sleeper.compaction.job.completion.CompactionJobCompletionUtils.updateStateStoreSuccess;

public class InMemoryCompaction {
private final Map<String, CompactionJob> queuedJobsById = new TreeMap<>();
