diff --git a/config/worker.properties b/config/worker.properties
index 961fd73..8117cc9 100644
--- a/config/worker.properties
+++ b/config/worker.properties
@@ -1,6 +1,6 @@
bootstrap.servers=localhost:9092
-plugin.path=target
+plugin.path=target/plugin
offset.storage.file.filename=/tmp/connect.offsets
key.converter=org.apache.kafka.connect.storage.StringConverter
diff --git a/docker-compose.yml b/docker-compose.yml
index a25d23b..b6f5d81 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -64,16 +64,15 @@ services:
- CONNECT_VALUE_CONVERTER=org.apache.kafka.connect.json.JsonConverter
- CONNECT_INTERNAL_KEY_CONVERTER=org.apache.kafka.connect.json.JsonConverter
- CONNECT_INTERNAL_VALUE_CONVERTER=org.apache.kafka.connect.json.JsonConverter
-
+
- CONNECT_PLUGIN_PATH=/opt/connectors
- KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:/etc/log4j.properties
-
+
- AWS_PROFILE
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
volumes:
- ~/.aws:/root/.aws
- - ./target:/opt/connectors
- - /opt/connectors/.shaded-jar
+ - ./target/plugin:/opt/connectors
- ./config/log4j.properties:/etc/log4j.properties
depends_on: [broker]
diff --git a/pom.xml b/pom.xml
index 6f9d16c..c5412bb 100644
--- a/pom.xml
+++ b/pom.xml
@@ -15,22 +15,11 @@
UTF-8
1.8
- 1.6.0
- 0.8.2
- 3.8.0
- 2.22.0
- 3.0.1
- 3.1.1
- 3.0.1
- 2.22.1
- 3.0.0
-
2.1.0
1.11.592
4.12
2.28.2
19.0
- 1.18.4
@@ -59,11 +48,13 @@
org.mockito
mockito-core
${mockito-core.version}
+ test
com.google.guava
guava
${google.guava.version}
+ test
@@ -79,138 +70,84 @@
-
-
-
-
- org.apache.maven.plugins
- maven-compiler-plugin
- ${maven-compiler-plugin.version}
-
- ${java.version}
- ${java.version}
-
-
-
- org.apache.maven.plugins
- maven-javadoc-plugin
- ${maven-javadoc-plugin.version}
-
-
- org.apache.maven.plugins
- maven-source-plugin
- ${maven-source-plugin.version}
-
-
- attach-sources
- verify
-
- jar-no-fork
-
-
-
-
-
- org.apache.maven.plugins
- maven-shade-plugin
- ${maven-shade-plugin.version}
-
-
- package
-
- shade
-
-
- ${project.build.directory}/.shaded-jar
-
-
- *:*
-
-
- META-INF/*.SF
- META-INF/*.DSA
- META-INF/*.RSA
-
-
-
-
-
-
- junit:*
- jmock:*
- mockito-all:*
-
-
- commons-codec:*
- commons-logging:*
- org.apache.httpcomponents:*
- org.apache.kafka:*
- org.slf4j:*
-
-
-
-
-
-
-
- org.jacoco
- jacoco-maven-plugin
- ${jacoco-maven-plugin.version}
-
-
-
- prepare-agent
-
-
-
- report
- prepare-package
-
- report
-
-
-
-
-
-
-
org.apache.maven.plugins
maven-compiler-plugin
-
-
- org.codehaus.mojo
- exec-maven-plugin
- ${exec-maven-plugin.version}
-
-
- org.apache.maven.plugins
- maven-source-plugin
+ 3.8.0
+
+ ${java.version}
+ ${java.version}
+
org.apache.maven.plugins
- maven-surefire-plugin
- ${maven-surefire-plugin.version}
+ maven-javadoc-plugin
+ 3.0.1
org.apache.maven.plugins
- maven-failsafe-plugin
- ${maven-failsafe-plugin.version}
+ maven-source-plugin
+ 3.0.1
+
+
+ attach-sources
+ verify
+
+ jar-no-fork
+
+
+
org.apache.maven.plugins
- maven-javadoc-plugin
-
-
- org.jacoco
- jacoco-maven-plugin
+ maven-jar-plugin
+ 3.1.2
+
+ target/plugin/
+
org.apache.maven.plugins
maven-shade-plugin
+ 3.1.1
+
+
+ package
+
+ shade
+
+
+
+
+ *:*
+
+
+ META-INF/*.SF
+ META-INF/*.DSA
+ META-INF/*.RSA
+
+
+
+
+
+
+ junit:*
+ jmock:*
+ mockito-all:*
+
+
+ commons-codec:*
+ commons-logging:*
+ org.apache.httpcomponents:*
+ org.apache.kafka:*
+ org.slf4j:*
+
+
+
+
+
org.codehaus.mojo
diff --git a/src/main/java/com/nordstrom/kafka/connect/formatters/PayloadFormatter.java b/src/main/java/com/nordstrom/kafka/connect/formatters/PayloadFormatter.java
new file mode 100644
index 0000000..0872fb5
--- /dev/null
+++ b/src/main/java/com/nordstrom/kafka/connect/formatters/PayloadFormatter.java
@@ -0,0 +1,15 @@
+package com.nordstrom.kafka.connect.formatters;
+
+import java.util.Collection;
+import org.apache.kafka.connect.sink.SinkRecord;
+
+/**
+ * Converts Kafka Connect sink records into the string payload delivered to the target.
+ */
+public interface PayloadFormatter {
+ /** Formats a single record; throws PayloadFormattingException if serialization fails. */
+ String format(final SinkRecord record) throws PayloadFormattingException;
+
+ /** Formats a batch of records as one payload; throws PayloadFormattingException if serialization fails. */
+ String formatBatch(final Collection<SinkRecord> records) throws PayloadFormattingException;
+}
diff --git a/src/main/java/com/nordstrom/kafka/connect/formatters/PayloadFormattingException.java b/src/main/java/com/nordstrom/kafka/connect/formatters/PayloadFormattingException.java
new file mode 100644
index 0000000..c5e8375
--- /dev/null
+++ b/src/main/java/com/nordstrom/kafka/connect/formatters/PayloadFormattingException.java
@@ -0,0 +1,12 @@
+package com.nordstrom.kafka.connect.formatters;
+
+/**
+ * Unchecked exception raised when a sink record cannot be serialized into a payload.
+ */
+public class PayloadFormattingException extends RuntimeException {
+ private static final long serialVersionUID = 1L;
+
+ public PayloadFormattingException(final Throwable e) {
+ super(e);
+ }
+}
diff --git a/src/main/java/com/nordstrom/kafka/connect/formatters/PlainPayload.java b/src/main/java/com/nordstrom/kafka/connect/formatters/PlainPayload.java
new file mode 100644
index 0000000..f0542bd
--- /dev/null
+++ b/src/main/java/com/nordstrom/kafka/connect/formatters/PlainPayload.java
@@ -0,0 +1,71 @@
+package com.nordstrom.kafka.connect.formatters;
+
+import org.apache.kafka.connect.sink.SinkRecord;
+
+/**
+ * Flat, Jackson-serializable view of a SinkRecord: key/value rendered as
+ * strings plus topic/partition/offset/timestamp metadata.
+ */
+public class PlainPayload {
+ private String key;
+ private String keySchemaName;
+ private String value;
+ private String valueSchemaName;
+ private String topic;
+ private int partition;
+ private long offset;
+ private long timestamp;
+ private String timestampTypeName;
+
+ /** No-arg constructor required for Jackson deserialization. */
+ protected PlainPayload() {
+ }
+
+ public PlainPayload(final SinkRecord record) {
+ // Null key/value render as empty strings rather than the literal "null".
+ this.key = record.key() == null ? "" : record.key().toString();
+ if (record.keySchema() != null)
+ this.keySchemaName = record.keySchema().name();
+
+ this.value = record.value() == null ? "" : record.value().toString();
+ if (record.valueSchema() != null)
+ this.valueSchemaName = record.valueSchema().name();
+
+ this.topic = record.topic();
+ this.partition = record.kafkaPartition();
+ this.offset = record.kafkaOffset();
+
+ // Timestamp stays at the default 0L when the record carries none.
+ if (record.timestamp() != null)
+ this.timestamp = record.timestamp();
+ if (record.timestampType() != null)
+ this.timestampTypeName = record.timestampType().name;
+ }
+
+ public String getValue() { return this.value; }
+ public void setValue(final String value) { this.value = value; }
+
+ public long getOffset() { return this.offset; }
+ public void setOffset(final long offset) { this.offset = offset; }
+
+ // Returns primitive long, consistent with the field and setter.
+ public long getTimestamp() { return this.timestamp; }
+ public void setTimestamp(final long timestamp) { this.timestamp = timestamp; }
+
+ public String getTimestampTypeName() { return this.timestampTypeName; }
+ public void setTimestampTypeName(final String timestampTypeName) { this.timestampTypeName = timestampTypeName; }
+
+ public int getPartition() { return this.partition; }
+ public void setPartition(final int partition) { this.partition = partition; }
+
+ public String getKey() { return this.key; }
+ public void setKey(final String key) { this.key = key; }
+
+ public String getKeySchemaName() { return this.keySchemaName; }
+ public void setKeySchemaName(final String keySchemaName) { this.keySchemaName = keySchemaName; }
+
+ public String getValueSchemaName() { return this.valueSchemaName; }
+ public void setValueSchemaName(final String valueSchemaName) { this.valueSchemaName = valueSchemaName; }
+
+ public String getTopic() { return this.topic; }
+ public void setTopic(final String topic) { this.topic = topic; }
+}
diff --git a/src/main/java/com/nordstrom/kafka/connect/formatters/PlainPayloadFormatter.java b/src/main/java/com/nordstrom/kafka/connect/formatters/PlainPayloadFormatter.java
new file mode 100644
index 0000000..982dc32
--- /dev/null
+++ b/src/main/java/com/nordstrom/kafka/connect/formatters/PlainPayloadFormatter.java
@@ -0,0 +1,50 @@
+package com.nordstrom.kafka.connect.formatters;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectWriter;
+
+import org.apache.kafka.connect.sink.SinkRecord;
+
+import java.util.Collection;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * PayloadFormatter that serializes records to JSON via Jackson: one
+ * PlainPayload object per record, a PlainPayload array for batches.
+ */
+public class PlainPayloadFormatter implements PayloadFormatter {
+ private static final Logger LOGGER = LoggerFactory.getLogger(PlainPayloadFormatter.class);
+ // ObjectWriter is thread-safe; build once and reuse.
+ private final ObjectWriter recordWriter = new ObjectMapper().writerFor(PlainPayload.class);
+ private final ObjectWriter recordsWriter = new ObjectMapper().writerFor(PlainPayload[].class);
+
+ @Override
+ public String format(final SinkRecord record) {
+ final PlainPayload payload = new PlainPayload(record);
+
+ try {
+ return this.recordWriter.writeValueAsString(payload);
+ } catch (final JsonProcessingException e) {
+ LOGGER.error(e.getLocalizedMessage(), e);
+ throw new PayloadFormattingException(e);
+ }
+ }
+
+ @Override
+ public String formatBatch(final Collection<SinkRecord> records) {
+ final PlainPayload[] payloads = records
+ .stream()
+ .map(PlainPayload::new)
+ .toArray(PlainPayload[]::new);
+
+ try {
+ return this.recordsWriter.writeValueAsString(payloads);
+ } catch (final JsonProcessingException e) {
+ LOGGER.error(e.getLocalizedMessage(), e);
+ throw new PayloadFormattingException(e);
+ }
+ }
+}
diff --git a/src/main/java/com/nordstrom/kafka/connect/lambda/AwsLambdaUtil.java b/src/main/java/com/nordstrom/kafka/connect/lambda/AwsLambdaUtil.java
index 4dac70b..36db3b7 100644
--- a/src/main/java/com/nordstrom/kafka/connect/lambda/AwsLambdaUtil.java
+++ b/src/main/java/com/nordstrom/kafka/connect/lambda/AwsLambdaUtil.java
@@ -9,22 +9,18 @@
import com.amazonaws.services.lambda.model.InvokeResult;
import com.amazonaws.services.lambda.model.RequestTooLargeException;
import com.nordstrom.kafka.connect.utils.Guard;
-import org.apache.kafka.common.Configurable;
-import org.apache.kafka.connect.errors.ConnectException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.time.Duration;
import java.time.Instant;
-import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
public class AwsLambdaUtil {
-
private static final Logger LOGGER = LoggerFactory.getLogger(AwsLambdaUtil.class);
private static final int MEGABYTE_SIZE = 1024 * 1024;
@@ -36,43 +32,20 @@ public class AwsLambdaUtil {
private final AWSLambdaAsync lambdaClient;
private final InvocationFailure failureMode;
- public AwsLambdaUtil(final Configuration optConfigs, final Map<String, ?> bareAssumeRoleConfigs) {
- LOGGER.debug("AwsLambdaUtil.ctor:bareAssumeRoleConfigs={}", bareAssumeRoleConfigs);
- Guard.verifyNotNull(optConfigs, "optConfigs");
-
- final AWSLambdaAsyncClientBuilder builder = AWSLambdaAsyncClientBuilder.standard();
-
- // Will check if there's proxy configuration in the environment; if
- // there's any will construct the client with it.
- if (optConfigs.getHttpProxyHost().isPresent()) {
- final ClientConfiguration clientConfiguration = new ClientConfiguration()
- .withProxyHost(optConfigs.getHttpProxyHost().get());
- if (optConfigs.getHttpProxyPort().isPresent()) {
- clientConfiguration.setProxyPort(optConfigs.getHttpProxyPort().get());
- }
- builder.setClientConfiguration(clientConfiguration);
- LOGGER.info("Setting proxy configuration for AWS Lambda Async client host: {} port {}",
- optConfigs.getHttpProxyHost().get(), optConfigs.getHttpProxyPort().orElse(-1));
- }
+ public AwsLambdaUtil(ClientConfiguration clientConfiguration,
+ AWSCredentialsProvider credentialsProvider,
+ InvocationFailure failureMode) {
- if (optConfigs.getAwsRegion().isPresent()) {
- builder.setRegion(optConfigs.getAwsRegion().get());
- LOGGER.info("Using aws region: {}", optConfigs.getAwsRegion().toString());
- }
+ Guard.verifyNotNull(clientConfiguration, "clientConfiguration");
+ Guard.verifyNotNull(credentialsProvider, "credentialsProvider");
- failureMode = optConfigs.getFailureMode().orElse(InvocationFailure.STOP);
-
- AWSCredentialsProvider provider = null;
- try {
- provider = getCredentialsProvider(bareAssumeRoleConfigs);
- } catch (Exception e) {
- LOGGER.error("Problem initializing provider", e);
- }
- if (provider != null) {
- builder.setCredentials(provider);
- }
+ final AWSLambdaAsyncClientBuilder builder = AWSLambdaAsyncClientBuilder.standard()
+ .withClientConfiguration(clientConfiguration)
+ .withCredentials(credentialsProvider);
+ this.failureMode = failureMode;
this.lambdaClient = builder.build();
+
LOGGER.info("AWS Lambda client initialized");
}
@@ -155,41 +128,6 @@ InvocationResponse checkPayloadSizeForInvocationType(final byte[] payload, final
return new InvocationResponse(413, e.getLocalizedMessage(), e.getLocalizedMessage(), start, Instant.now());
}
- @SuppressWarnings("unchecked")
- public AWSCredentialsProvider getCredentialsProvider(Map<String, ?> roleConfigs) {
- LOGGER.info(".get-credentials-provider:assumeRoleConfigs={}", roleConfigs);
-
- try {
- Object providerField = roleConfigs.get("class");
- String providerClass = LambdaSinkConnectorConfig.ConfigurationKeys.CREDENTIALS_PROVIDER_CLASS_DEFAULT.getValue();
- if (providerField != null) {
- providerClass = providerField.toString();
- }
- LOGGER.debug(".get-credentials-provider:field={}, class={}", providerField, providerClass);
- AWSCredentialsProvider provider = ((Class<? extends AWSCredentialsProvider>)
- getClass(providerClass)).newInstance();
-
- if (provider instanceof Configurable) {
- ((Configurable) provider).configure(roleConfigs);
- }
-
- LOGGER.debug(".get-credentials-provider:provider={}", provider);
- return provider;
- } catch (IllegalAccessException | InstantiationException e) {
- throw new ConnectException("Invalid class for: " + LambdaSinkConnectorConfig.ConfigurationKeys.CREDENTIALS_PROVIDER_CLASS_CONFIG, e);
- }
- }
-
- public Class<?> getClass(String className) {
- LOGGER.debug(".get-class:class={}",className);
- try {
- return Class.forName(className);
- } catch (ClassNotFoundException e) {
- LOGGER.error("Provider class not found: {}", e);
- }
- return null;
- }
-
private class LambdaInvocationException extends RuntimeException {
public LambdaInvocationException(final Throwable e) {
super(e);
diff --git a/src/main/java/com/nordstrom/kafka/connect/lambda/Configuration.java b/src/main/java/com/nordstrom/kafka/connect/lambda/Configuration.java
deleted file mode 100644
index ba261e2..0000000
--- a/src/main/java/com/nordstrom/kafka/connect/lambda/Configuration.java
+++ /dev/null
@@ -1,85 +0,0 @@
-package com.nordstrom.kafka.connect.lambda;
-
-import com.nordstrom.kafka.connect.utils.Facility;
-
-import java.util.Optional;
-
-import static com.nordstrom.kafka.connect.lambda.InvocationFailure.DROP;
-
-public class Configuration {
-
- private static final int MAX_HTTP_PORT_NUMBER = 65536;
- private final Optional<String> httpProxyHost;
- private final Optional<Integer> httpProxyPort;
- private final Optional<String> awsRegion;
- private final Optional<InvocationFailure> failureMode;
- private final Optional<String> roleArn;
- private final Optional<String> sessionName;
- private final Optional<String> externalId;
-
-
- public Configuration(final String httpProxyHost,
- final Integer httpProxyPort,
- final String awsRegion,
- final InvocationFailure failureMode,
- final String roleArn,
- final String sessionName,
- final String externalId) {
- this.httpProxyHost =
- Facility.isNotNullNorEmpty(httpProxyHost) ? Optional.of(httpProxyHost) : Optional.empty();
- this.httpProxyPort = Facility.isNotNullAndInRange(httpProxyPort, 0, MAX_HTTP_PORT_NUMBER)
- ? Optional.of(httpProxyPort) : Optional.empty();
- this.awsRegion =
- Facility.isNotNullNorEmpty(awsRegion) ? Optional.of(awsRegion) : Optional.empty();
- this.failureMode = Facility.isNotNull(failureMode) ? Optional.of(failureMode): Optional.of(DROP);
- this.roleArn =
- Facility.isNotNullNorEmpty(roleArn) ? Optional.of(roleArn) : Optional.empty();
- this.sessionName =
- Facility.isNotNullNorEmpty(sessionName) ? Optional.of(sessionName) : Optional.empty();
- this.externalId =
- Facility.isNotNullNorEmpty(externalId) ? Optional.of(externalId) : Optional.empty();
-
- }
-
- public Configuration(final Optional<String> httpProxyHost,
- final Optional<Integer> httpProxyPort,
- final Optional<String> awsRegion,
- final Optional<InvocationFailure> failureMode,
- final Optional<String> roleArn,
- final Optional<String> sessionName,
- final Optional<String> externalId) {
-
- this.httpProxyHost = httpProxyHost;
- this.httpProxyPort = httpProxyPort;
- this.awsRegion = awsRegion;
- this.failureMode = failureMode;
- this.roleArn = roleArn;
- this.sessionName = sessionName;
- this.externalId = externalId;
- }
-
- public static Configuration empty() {
- return new Configuration(Optional.empty(),
- Optional.empty(), Optional.empty(),
- Optional.empty(), Optional.empty(),
- Optional.empty(), Optional.empty());
- }
-
- public Optional<String> getHttpProxyHost() {
- return this.httpProxyHost;
- }
-
- public Optional<Integer> getHttpProxyPort() {
- return this.httpProxyPort;
- }
-
- public Optional<String> getAwsRegion() { return this.awsRegion; }
-
- public Optional<InvocationFailure> getFailureMode() { return this.failureMode; }
-
- public Optional<String> getRoleArn() { return this.roleArn; }
-
- public Optional<String> getSessionName() { return this.sessionName; }
-
- public Optional<String> getExternalId() { return this.externalId; }
-}
diff --git a/src/main/java/com/nordstrom/kafka/connect/lambda/KafkaSerializationException.java b/src/main/java/com/nordstrom/kafka/connect/lambda/KafkaSerializationException.java
deleted file mode 100644
index 14b4f3d..0000000
--- a/src/main/java/com/nordstrom/kafka/connect/lambda/KafkaSerializationException.java
+++ /dev/null
@@ -1,8 +0,0 @@
-package com.nordstrom.kafka.connect.lambda;
-
-public class KafkaSerializationException extends RuntimeException {
-
- public KafkaSerializationException(final Throwable e) {
- super(e);
- }
-}
diff --git a/src/main/java/com/nordstrom/kafka/connect/lambda/LambdaSinkConnector.java b/src/main/java/com/nordstrom/kafka/connect/lambda/LambdaSinkConnector.java
index 9876ceb..ad5bc01 100644
--- a/src/main/java/com/nordstrom/kafka/connect/lambda/LambdaSinkConnector.java
+++ b/src/main/java/com/nordstrom/kafka/connect/lambda/LambdaSinkConnector.java
@@ -20,32 +20,27 @@ public class LambdaSinkConnector extends SinkConnector {
@Override
public List