[FE] Fix invalid SeekType value for the offset and timestamp fields (#3057)

* FE: fix the SeekType value sent for the offset and timestamp fields

* BE: thread interruption logging added to Emitters

Co-authored-by: iliax <[email protected]>
Mgrdich and iliax authored Dec 19, 2022
1 parent f9906b5 commit c2be45f
Showing 4 changed files with 18 additions and 0 deletions.
@@ -17,6 +17,7 @@
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.InterruptException;
 import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;

@@ -85,6 +86,9 @@ public void accept(FluxSink<TopicMessageEventDTO> sink) {
       }
       sendFinishStatsAndCompleteSink(sink);
       log.debug("Polling finished");
+    } catch (InterruptException kafkaInterruptException) {
+      log.debug("Polling finished due to thread interruption");
+      sink.complete();
     } catch (Exception e) {
       log.error("Error occurred while consuming records", e);
       sink.error(e);
@@ -9,6 +9,7 @@
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.errors.InterruptException;
 import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;

@@ -59,6 +60,9 @@ public void accept(FluxSink<TopicMessageEventDTO> sink) {
       }
       sendFinishStatsAndCompleteSink(sink);
       log.debug("Polling finished");
+    } catch (InterruptException kafkaInterruptException) {
+      log.debug("Polling finished due to thread interruption");
+      sink.complete();
     } catch (Exception e) {
       log.error("Error occurred while consuming records", e);
       sink.error(e);
@@ -41,6 +41,7 @@ public void accept(FluxSink<TopicMessageEventDTO> sink) {
       sink.complete();
       log.debug("Tailing finished");
     } catch (InterruptException kafkaInterruptException) {
+      log.debug("Tailing finished due to thread interruption");
       sink.complete();
     } catch (Exception e) {
       log.error("Error consuming {}", consumerPosition, e);
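The three Java hunks above apply the same pattern: org.apache.kafka.common.errors.InterruptException, the unchecked exception KafkaConsumer throws when its polling thread is interrupted (typically during shutdown), is caught before the generic Exception handler so the Reactor sink is completed quietly instead of being failed and logged as an error. The snippet below is a minimal, self-contained sketch of that pattern outside the kafka-ui codebase; the InterruptAwarePoller class and its poll helper are hypothetical names, and the consumer configuration (bootstrap servers, group id, etc.) is assumed to be supplied by the caller.

import java.time.Duration;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.InterruptException;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.Flux;

public class InterruptAwarePoller {

  // Hypothetical helper (not part of kafka-ui): streams record values from a topic
  // and treats thread interruption as a normal end of the stream.
  static Flux<String> poll(Properties props, String topic) {
    return Flux.create(sink -> {
      try (KafkaConsumer<Bytes, Bytes> consumer =
               new KafkaConsumer<>(props, new BytesDeserializer(), new BytesDeserializer())) {
        consumer.subscribe(List.of(topic));
        while (!sink.isCancelled()) {
          ConsumerRecords<Bytes, Bytes> records = consumer.poll(Duration.ofMillis(500));
          records.forEach(r -> sink.next(String.valueOf(r.value())));
        }
        sink.complete();
      } catch (InterruptException kafkaInterruptException) {
        // KafkaConsumer#poll wraps InterruptedException in this unchecked exception when
        // the thread is interrupted; complete the sink instead of signalling an error,
        // mirroring the catch blocks added in this commit.
        sink.complete();
      } catch (Exception e) {
        sink.error(e);
      }
    });
  }
}

With this arrangement, interrupting the thread that drives the consumer ends the resulting Flux with onComplete rather than onError, so shutdown no longer produces spurious error logs.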
@@ -219,6 +219,15 @@ const Filters: React.FC<FiltersProps> = ({
       default:
         props.seekType = currentSeekType;
     }
+
+    if (offset && currentSeekType === SeekType.OFFSET) {
+      props.seekType = SeekType.OFFSET;
+    }
+
+    if (timestamp && currentSeekType === SeekType.TIMESTAMP) {
+      props.seekType = SeekType.TIMESTAMP;
+    }
+
     props.seekTo = selectedPartitions.map(({ value }) => {
       const offsetProperty =
         seekDirection === SeekDirection.FORWARD ? 'offsetMin' : 'offsetMax';
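The TypeScript hunk above makes the submitted seekType track the field the user actually filled in: an offset only forces SeekType.OFFSET when the offset seek type is selected, and a timestamp only forces SeekType.TIMESTAMP when the timestamp seek type is selected, so a stale value from the other field can no longer produce an invalid request. Below is a small standalone sketch of that rule; the resolveSeekType helper is hypothetical, and only the OFFSET and TIMESTAMP members are taken from the diff — the remaining enum values are assumptions.

enum SeekType {
  OFFSET = 'OFFSET',
  TIMESTAMP = 'TIMESTAMP',
  LATEST = 'LATEST', // assumed member, not shown in the diff
  BEGINNING = 'BEGINNING', // assumed member, not shown in the diff
}

// Hypothetical helper mirroring the selection rule added in the Filters component.
const resolveSeekType = (
  currentSeekType: SeekType,
  offset?: string,
  timestamp?: Date | null
): SeekType => {
  if (offset && currentSeekType === SeekType.OFFSET) return SeekType.OFFSET;
  if (timestamp && currentSeekType === SeekType.TIMESTAMP) return SeekType.TIMESTAMP;
  return currentSeekType;
};

// A filled-in offset keeps OFFSET; an empty offset leaves the selected type untouched.
console.log(resolveSeekType(SeekType.OFFSET, '42', null)); // "OFFSET"
console.log(resolveSeekType(SeekType.LATEST, '', null)); // "LATEST"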
