http-netty: let RetryingHttpRequesterFilter return responses on failure #3048

Open · wants to merge 7 commits into main

@@ -29,6 +29,7 @@
import io.servicetalk.concurrent.api.RetryStrategies;
import io.servicetalk.concurrent.api.Single;
import io.servicetalk.context.api.ContextMap;
+import io.servicetalk.http.api.DefaultHttpHeadersFactory;
import io.servicetalk.http.api.FilterableReservedStreamingHttpConnection;
import io.servicetalk.http.api.FilterableStreamingHttpClient;
import io.servicetalk.http.api.HttpExecutionStrategies;
@@ -43,6 +44,7 @@
import io.servicetalk.http.api.StreamingHttpRequest;
import io.servicetalk.http.api.StreamingHttpRequester;
import io.servicetalk.http.api.StreamingHttpResponse;
+import io.servicetalk.http.api.StreamingHttpResponses;
import io.servicetalk.transport.api.ExecutionContext;
import io.servicetalk.transport.api.ExecutionStrategyInfluencer;
import io.servicetalk.transport.api.RetryableException;
@@ -55,6 +57,7 @@
import java.util.function.UnaryOperator;
import javax.annotation.Nullable;

+import static io.servicetalk.buffer.netty.BufferAllocators.DEFAULT_ALLOCATOR;
import static io.servicetalk.concurrent.api.Completable.completed;
import static io.servicetalk.concurrent.api.Completable.failed;
import static io.servicetalk.concurrent.api.RetryStrategies.retryWithConstantBackoffDeltaJitter;
@@ -91,15 +94,16 @@ public final class RetryingHttpRequesterFilter
implements StreamingHttpClientFilterFactory, ExecutionStrategyInfluencer<HttpExecutionStrategy> {
static final int DEFAULT_MAX_TOTAL_RETRIES = 4;
private static final RetryingHttpRequesterFilter DISABLE_AUTO_RETRIES =
-            new RetryingHttpRequesterFilter(true, false, false, 1, null,
+            new RetryingHttpRequesterFilter(true, false, false, false, 1, null,
(__, ___) -> NO_RETRIES, null);
private static final RetryingHttpRequesterFilter DISABLE_ALL_RETRIES =
-            new RetryingHttpRequesterFilter(false, true, false, 0, null,
+            new RetryingHttpRequesterFilter(false, true, false, false, 0, null,
(__, ___) -> NO_RETRIES, null);

private final boolean waitForLb;
private final boolean ignoreSdErrors;
private final boolean mayReplayRequestPayload;
+    private final boolean returnFailedResponses;
private final int maxTotalRetries;
@Nullable
private final Function<HttpResponseMetaData, HttpResponseException> responseMapper;
@@ -109,13 +113,14 @@ public final class RetryingHttpRequesterFilter

RetryingHttpRequesterFilter(
final boolean waitForLb, final boolean ignoreSdErrors, final boolean mayReplayRequestPayload,
-            final int maxTotalRetries,
+            final boolean returnFailedResponses, final int maxTotalRetries,
@Nullable final Function<HttpResponseMetaData, HttpResponseException> responseMapper,
final BiFunction<HttpRequestMetaData, Throwable, BackOffPolicy> retryFor,
@Nullable final RetryCallbacks onRequestRetry) {
this.waitForLb = waitForLb;
this.ignoreSdErrors = ignoreSdErrors;
this.mayReplayRequestPayload = mayReplayRequestPayload;
+        this.returnFailedResponses = returnFailedResponses;
this.maxTotalRetries = maxTotalRetries;
this.responseMapper = responseMapper;
this.retryFor = retryFor;
@@ -270,7 +275,15 @@ protected Single<StreamingHttpResponse> request(final StreamingHttpRequester del
// 1. Metadata is shared across retries
// 2. Publisher state is restored to original state for each retry
// duplicatedRequest isn't used below because retryWhen must be applied outside the defer operator for (2).
-                return single.retryWhen(retryStrategy(request, executionContext(), true));
+                single = single.retryWhen(retryStrategy(request, executionContext(), true));
+                if (returnFailedResponses) {
+                    single = single.onErrorResume(HttpResponseException.class, t -> {
+                        HttpResponseMetaData metaData = t.metaData();
+                        return Single.succeeded(StreamingHttpResponses.newResponse(metaData.status(), metaData.version(),
+                                metaData.headers(), DEFAULT_ALLOCATOR, DefaultHttpHeadersFactory.INSTANCE));
+                    });
+                }
+                return single;
}
}
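
A note on the unwrap path added above: `StreamingHttpResponses.newResponse` is given only the exception's meta-data, so the returned response keeps the original status, version, and headers but carries an empty payload body (per the author's comment below, the original payload is discarded when the response is mapped to an `HttpResponseException`). From the caller's side the difference looks roughly like this (a sketch; `client` is assumed to be a `BlockingHttpClient` built with this filter appended):

```java
// Without returnFailedResponses(true): once retries are exhausted, the mapped
// response surfaces as an exception.
try {
    client.request(client.get("/"));
} catch (HttpResponseException e) {
    HttpResponseMetaData meta = e.metaData(); // meta-data of the final failed response
}

// With returnFailedResponses(true): the final failed response is returned directly,
// rebuilt from that meta-data with an empty payload body.
HttpResponse response = client.request(client.get("/"));
```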

@@ -719,6 +732,7 @@ public static final class Builder {

private int maxTotalRetries = DEFAULT_MAX_TOTAL_RETRIES;
private boolean retryExpectationFailed;
+    private boolean returnFailedResponses;

private BiFunction<HttpRequestMetaData, RetryableException, BackOffPolicy>
retryRetryableExceptions = (requestMetaData, e) -> BackOffPolicy.ofImmediateBounded();
@@ -745,6 +759,11 @@ public static final class Builder {
@Nullable
private RetryCallbacks onRequestRetry;

+        public Builder returnFailedResponses(final boolean returnFailedResponses) {
+            this.returnFailedResponses = returnFailedResponses;
+            return this;
+        }
Comment on lines +762 to +765
Contributor Author:
I'm certain this can have a better name and clearly it needs docs before merging. Name suggestions welcome.

Contributor Author:
I also think this API is a bit awkward: first you must turn a response into an HttpResponseException, and then it's going to be discarded. Alternatively, we could just have a different lambda along the lines of Function<HttpResponseMetaData, Boolean> shouldRetry.
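
For illustration, that alternative shape might look something like this (hypothetical API, not part of this PR; `RETRYABLE_HEADER` is borrowed from the test below):

```java
// Hypothetical alternative (sketch only): retry straight off the response
// meta-data, with no intermediate HttpResponseException.
Function<HttpResponseMetaData, Boolean> shouldRetry =
        metaData -> metaData.headers().contains(RETRYABLE_HEADER);
```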

Member:
Right now we don't have RS operators to achieve retries without mapping into exceptions. If we go the route of cleanly retrying on response meta-data without mapping to exceptions, it's possible, but it will take longer.

The current rationale was that some users always want to map responses to exceptions, which is why we have an independent responseMapper. Then some users may want to retry those, so there is a second method for them: retryResponses. We decided to put them next to each other on the same builder instead of offering two different filters because they are often used together.

I agree that having a third method that only works if the other two are also configured is not intuitive. Alternatively, we can consider adding a retryResponses overload that takes a boolean to decide whether it needs to unwrap the original response or not.

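For reference, a sketch of how the three builder methods would compose as proposed (modeled on the test changes below; the header name, message, and back-off policy are placeholders):

```java
RetryingHttpRequesterFilter filter = new RetryingHttpRequesterFilter.Builder()
        // 1. Map selected responses into HttpResponseException so the retry machinery sees them.
        .responseMapper(metaData -> metaData.headers().contains(RETRYABLE_HEADER)
                ? new HttpResponseException("Retryable header", metaData) : null)
        // 2. Choose a back-off policy for the mapped responses.
        .retryResponses((requestMetaData, throwable) -> BackOffPolicy.ofImmediateBounded())
        // 3. When retries are exhausted, surface the final response instead of the exception.
        .returnFailedResponses(true)
        .build();
```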

    /**
     * By default, automatic retries wait for the associated {@link LoadBalancer} to be
     * {@link LoadBalancerReadyEvent ready} before triggering a retry for requests. This behavior may add latency to
@@ -1054,7 +1073,7 @@ public RetryingHttpRequesterFilter build() {
return NO_RETRIES;
};
return new RetryingHttpRequesterFilter(waitForLb, ignoreSdErrors, mayReplayRequestPayload,
-                maxTotalRetries, responseMapper, allPredicate, onRequestRetry);
+                returnFailedResponses, maxTotalRetries, responseMapper, allPredicate, onRequestRetry);
}
}
}
@@ -251,21 +251,31 @@ private void assertRequestRetryingPred(final BlockingHttpClient client) {
assertThat("Unexpected calls to select.", (double) lbSelectInvoked.get(), closeTo(5.0, 1.0));
}

-    @Test
-    void testResponseMapper() {
+    @ParameterizedTest
+    @ValueSource(booleans = {true, false})
+    void testResponseMapper(final boolean returnFailedResponses) throws Exception {
AtomicInteger newConnectionCreated = new AtomicInteger();
AtomicInteger responseDrained = new AtomicInteger();
AtomicInteger onRequestRetryCounter = new AtomicInteger();
final int maxTotalRetries = 4;
+        final String retryMessage = "Retryable header";
normalClient = normalClientBuilder
.appendClientFilter(new Builder()
+                        .returnFailedResponses(returnFailedResponses)
.maxTotalRetries(maxTotalRetries)
.responseMapper(metaData -> metaData.headers().contains(RETRYABLE_HEADER) ?
new HttpResponseException("Retryable header", metaData) : null)
new HttpResponseException(retryMessage, metaData) : null)
// Disable request retrying
.retryRetryableExceptions((requestMetaData, e) -> ofNoRetries())
// Retry only responses marked so
-                        .retryResponses((requestMetaData, throwable) -> ofImmediate(maxTotalRetries - 1))
+                        .retryResponses((requestMetaData, throwable) -> {
+                            if (throwable instanceof HttpResponseException &&
+                                    retryMessage.equals(throwable.getMessage())) {
+                                return ofImmediate(maxTotalRetries - 1);
+                            } else {
+                                throw new RuntimeException("Unexpected exception");
+                            }
+                        })
.onRequestRetry((count, req, t) ->
assertThat(onRequestRetryCounter.incrementAndGet(), is(count)))
.build())
@@ -281,9 +291,14 @@ public Single<StreamingHttpResponse> request(final StreamingHttpRequest request)
};
})
.buildBlocking();
-        HttpResponseException e = assertThrows(HttpResponseException.class,
-                () -> normalClient.request(normalClient.get("/")));
-        assertThat("Unexpected exception.", e, instanceOf(HttpResponseException.class));
+        if (returnFailedResponses) {
+            HttpResponse response = normalClient.request(normalClient.get("/"));
+            assertThat(response.status(), is(HttpResponseStatus.OK));
+        } else {
+            HttpResponseException e = assertThrows(HttpResponseException.class,
+                    () -> normalClient.request(normalClient.get("/")));
+            assertThat("Unexpected exception.", e, instanceOf(HttpResponseException.class));
+        }
// The load balancer is allowed to be not ready one time, which is counted against total retry attempts but not
// against actual requests being issued.
assertThat("Unexpected calls to select.", lbSelectInvoked.get(), allOf(greaterThanOrEqualTo(maxTotalRetries),