|
41 | 41 | import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
|
42 | 42 | import org.apache.kafka.clients.admin.TopicDescription;
|
43 | 43 | import org.apache.kafka.common.KafkaFuture;
|
| 44 | +import org.apache.kafka.common.Node; |
| 45 | +import org.apache.kafka.common.TopicCollection; |
44 | 46 | import org.apache.kafka.common.TopicCollection.TopicIdCollection;
|
45 | 47 | import org.apache.kafka.common.TopicPartition;
|
| 48 | +import org.apache.kafka.common.TopicPartitionInfo; |
46 | 49 | import org.apache.kafka.common.Uuid;
|
| 50 | +import org.apache.kafka.common.acl.AclOperation; |
47 | 51 | import org.apache.kafka.common.config.ConfigResource;
|
48 | 52 | import org.apache.kafka.common.errors.ApiException;
|
49 | 53 | import org.apache.kafka.common.internals.KafkaFutureImpl;
|
@@ -125,28 +129,29 @@ class TopicsResourceIT {
|
125 | 129 | TopicHelper topicUtils;
|
126 | 130 | ConsumerUtils groupUtils;
|
127 | 131 | String clusterId1;
|
| 132 | + URI bootstrapServers1; |
128 | 133 | String clusterId2;
|
129 | 134 | ServerSocket randomSocket;
|
130 | 135 |
|
131 | 136 | @BeforeEach
|
132 | 137 | void setup() throws IOException {
|
133 |
| - URI bootstrapServers = URI.create(deployments.getExternalBootstrapServers()); |
| 138 | + bootstrapServers1 = URI.create(deployments.getExternalBootstrapServers()); |
134 | 139 | randomSocket = new ServerSocket(0);
|
135 | 140 | URI randomBootstrapServers = URI.create("dummy://localhost:" + randomSocket.getLocalPort());
|
136 | 141 |
|
137 |
| - topicUtils = new TopicHelper(bootstrapServers, config, null); |
| 142 | + topicUtils = new TopicHelper(bootstrapServers1, config, null); |
138 | 143 | topicUtils.deleteAllTopics();
|
139 | 144 |
|
140 | 145 | groupUtils = new ConsumerUtils(config, null);
|
141 | 146 |
|
142 |
| - utils = new TestHelper(bootstrapServers, config, null); |
| 147 | + utils = new TestHelper(bootstrapServers1, config, null); |
143 | 148 |
|
144 | 149 | clusterId1 = utils.getClusterId();
|
145 | 150 | clusterId2 = UUID.randomUUID().toString();
|
146 | 151 |
|
147 | 152 | client.resources(Kafka.class).delete();
|
148 | 153 | client.resources(Kafka.class)
|
149 |
| - .resource(utils.buildKafkaResource("test-kafka1", clusterId1, bootstrapServers)) |
| 154 | + .resource(utils.buildKafkaResource("test-kafka1", clusterId1, bootstrapServers1)) |
150 | 155 | .create();
|
151 | 156 | // Second cluster is offline/non-existent
|
152 | 157 | client.resources(Kafka.class)
|
@@ -955,6 +960,60 @@ void testDescribeTopicWithNoSuchTopic() {
|
955 | 960 | .body("errors.code", contains("4041"));
|
956 | 961 | }
|
957 | 962 |
|
// Verifies per-partition status reporting when one partition has no leader:
// the topic overall is "PartiallyOffline", the partition whose ISR is a strict
// subset of its replicas is "UnderReplicated", and the leaderless partition
// (empty ISR) is "Offline". The admin client's describeTopics is stubbed via
// AdminClientSpy so the layout is fully controlled by the test.
| 963 | + @Test |
| 964 | + void testDescribeTopicWithOfflinePartition() { |
| 965 | + String topicName = UUID.randomUUID().toString(); |
| 966 | + Map<String, String> topicIds = topicUtils.createTopics(clusterId1, List.of(topicName), 2); |
| 967 | + |
| 968 | + // TopicPartitionInfo constructor args: int partition, Node leader, List<Node> replicas, List<Node> isr |
| 969 | + Node node0 = new Node(0, "node0", bootstrapServers1.getPort()); |
| 970 | + Node node1 = new Node(1, "node1", bootstrapServers1.getPort()); |
| 971 | + |
| 972 | + Answer<DescribeTopicsResult> describeTopicsResult = args -> { |
| 973 | + List<TopicPartitionInfo> partitions = List.of( |
| 974 | + // Online, 2 replicas, 1 ISR |
| 975 | + new TopicPartitionInfo(0, node0, List.of(node0, node1), List.of(node0)), |
| 976 | + // Offline, 2 replicas, no ISRs |
| 977 | + new TopicPartitionInfo(1, null, List.of(node0, node1), List.of())); |
| 978 | + Set<AclOperation> authorizedOperations = Set.of(AclOperation.ALL); |
| 979 | + Uuid topicId = Uuid.fromString(topicIds.get(topicName)); |
| 980 | + |
| 981 | + var description = KafkaFuture.completedFuture( |
| 982 | + new TopicDescription(topicName, false, partitions, authorizedOperations, topicId)); |
| 983 | + |
// DescribeTopicsResult has no public constructor, so a local subclass is used
// to reach the protected constructor and return the canned description.
| 984 | + class Result extends DescribeTopicsResult { |
| 985 | + Result() { |
| 986 | + super(Map.of(topicId, description), null); |
| 987 | + } |
| 988 | + } |
| 989 | + |
| 990 | + return new Result(); |
| 991 | + }; |
| 992 | + |
| 993 | + AdminClientSpy.install(client -> { |
| 994 | + // Mock describeTopics to return the stubbed partition layout above |
| 995 | + doAnswer(describeTopicsResult) |
| 996 | + .when(client) |
| 997 | + .describeTopics(any(TopicCollection.class), any(DescribeTopicsOptions.class)); |
| 998 | + }); |
| 999 | + |
| 1000 | + whenRequesting(req -> req.get("{topicId}", clusterId1, topicIds.get(topicName))) |
| 1001 | + .assertThat() |
| 1002 | + .statusCode(is(Status.OK.getStatusCode())) |
| 1003 | + .body("data.attributes.name", is(topicName)) |
| 1004 | + .body("data.attributes.status", is("PartiallyOffline")) |
| 1005 | + .body("data.attributes.partitions", hasSize(2)) |
| 1006 | + .body("data.attributes.partitions[0].status", is("UnderReplicated")) |
| 1007 | + .body("data.attributes.partitions[0].replicas[0].inSync", is(true)) |
| 1008 | + .body("data.attributes.partitions[0].replicas[0].localStorage", notNullValue()) |
| 1009 | + // storage not fetched for followers |
| 1010 | + .body("data.attributes.partitions[0].replicas[1].inSync", is(false)) |
| 1011 | + .body("data.attributes.partitions[0].replicas[1].localStorage", nullValue()) |
| 1012 | + // Partition index 1: offline (null leader) with an empty ISR |
| 1013 | + .body("data.attributes.partitions[1].status", is("Offline")) |
| 1014 | + .body("data.attributes.partitions[1].replicas.inSync", everyItem(is(false))); |
| 1015 | + } |
| 1016 | + |
958 | 1017 | @Test
|
959 | 1018 | void testCreateTopicSucceeds() {
|
960 | 1019 | String topicName = UUID.randomUUID().toString();
|
|
0 commit comments