From 644247e4cb49bbf22463c45636897c6cc0bd2ddc Mon Sep 17 00:00:00 2001
From: Simon Ambridge
Date: Tue, 5 Dec 2017 21:59:27 +0000
Subject: [PATCH] More work on ReST UI
---
TransactionHandlers/kafka_start.sh | 29 +++++++++++++++++++++++++++++
creates_and_inserts.cql | 18 ++++++++++--------
restRTFAP/public/.DS_Store | Bin 0 -> 6148 bytes
restRTFAP/public/index.html | 10 +++++-----
4 files changed, 44 insertions(+), 13 deletions(-)
create mode 100644 TransactionHandlers/kafka_start.sh
create mode 100644 restRTFAP/public/.DS_Store
diff --git a/TransactionHandlers/kafka_start.sh b/TransactionHandlers/kafka_start.sh
new file mode 100644
index 0000000..0503710
--- /dev/null
+++ b/TransactionHandlers/kafka_start.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Zookeeper and Kafka demo start script
+# V0.1 1/12/17
+#################
+export KAFKA_HOME="${KAFKA_HOME:-/usr/local/kafka/kafka_2.11-1.0.0}"
+cd $KAFKA_HOME
+echo "Start Zookeeper"
+sudo -b nohup $KAFKA_HOME/bin/zookeeper-server-start.sh $KAFKA_HOME/config/zookeeper.properties > $KAFKA_HOME/logs/nohupz.out 2>&1
+echo ""
+echo "Start Kafka"
+sudo -b nohup $KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties > $KAFKA_HOME/logs/nohupk.out 2>&1
+echo ""
+echo "Create New Topic:"
+$KAFKA_HOME/bin/kafka-topics.sh --zookeeper localhost:2181 --create --replication-factor 1 --partitions 1 --topic NewTransactions
+echo ""
+echo "Confirm that we created it correctly:"
+$KAFKA_HOME/bin/kafka-topics.sh --zookeeper localhost:2181 --list
+echo ""
+echo "Set msg. retention period to 1 hour (360000 milliseconds)"
+$KAFKA_HOME/bin/kafka-configs.sh --zookeeper localhost:2181 --entity-type topics --alter --add-config retention.ms=3600000 --entity-name NewTransactions
+echo ""
+echo "Display topic configuration details:"
+$KAFKA_HOME/bin/kafka-configs.sh --zookeeper localhost:2181 --describe --entity-name NewTransactions --entity-type topics
+echo ""
+echo "Describe the 'NewTransactions' Topic:"
+$KAFKA_HOME/bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic NewTransactions
+echo ""
+echo "List messages from beginning:"
+$KAFKA_HOME/bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic NewTransactions --from-beginning
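
Note: the console consumer on the final line blocks until it is interrupted, and its --zookeeper option uses the old consumer, which Kafka 1.0 flags as deprecated. A non-blocking smoke test along the following lines may be more convenient at the end of a start script - a sketch only, assuming the default ports 2181/9092, not part of this patch:

# Hypothetical smoke test for kafka_start.sh (not part of the patch).
# Check that Zookeeper (2181) and the Kafka broker (9092) are listening.
nc -z localhost 2181 && echo "Zookeeper is up"
nc -z localhost 9092 && echo "Kafka broker is up"
# Drain any messages already on the topic, but give up after 10 seconds
# instead of blocking, using the newer --bootstrap-server consumer.
$KAFKA_HOME/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
    --topic NewTransactions --from-beginning --timeout-ms 10000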
diff --git a/creates_and_inserts.cql b/creates_and_inserts.cql
index e22265a..962285d 100644
--- a/creates_and_inserts.cql
+++ b/creates_and_inserts.cql
@@ -1,6 +1,6 @@
//
// PLEASE NOTE: The Keyspace creation command will need to reflect the topology of your
-// environment. As presented it will only work in a single data center test deployment.
+// environment. SimpleStrategy is only intended to be used in a single data center test deployment.
CREATE KEYSPACE if not exists rtfap
WITH durable_writes = true
@@ -45,7 +45,7 @@ CREATE TABLE if not exists rtfap.hourlyaggregates_bycc(
min_amount double,
total_count bigint,
PRIMARY KEY ((cc_no, hour))
-);
+) WITH default_time_to_live = 86400;
DROP TABLE IF EXISTS rtfap.dailyaggregates_bycc;
CREATE TABLE if not exists rtfap.dailyaggregates_bycc(
@@ -56,7 +56,7 @@ CREATE TABLE if not exists rtfap.dailyaggregates_bycc(
min_amount double,
total_count bigint,
PRIMARY KEY ((cc_no, day))
-);
+) WITH default_time_to_live = 86400;
DROP TABLE IF EXISTS rtfap.monthlyaggregates_bycc;
CREATE TABLE if not exists rtfap.monthlyaggregates_bycc(
@@ -67,7 +67,7 @@ CREATE TABLE if not exists rtfap.monthlyaggregates_bycc(
min_amount double,
total_count bigint,
PRIMARY KEY ((cc_no, month))
-);
+) WITH default_time_to_live = 86400;
DROP TABLE IF EXISTS rtfap.yearlyaggregates_bycc;
CREATE TABLE if not exists rtfap.yearlyaggregates_bycc(
@@ -78,7 +78,7 @@ CREATE TABLE if not exists rtfap.yearlyaggregates_bycc(
min_amount double,
total_count bigint,
PRIMARY KEY ((cc_no, year))
-);
+) WITH default_time_to_live = 86400;
DROP TABLE IF EXISTS rtfap.dailytxns_bymerchant;
CREATE TABLE rtfap.dailytxns_bymerchant (
@@ -100,7 +100,7 @@ CREATE TABLE rtfap.dailytxns_bymerchant (
tags set<text>,
user_id text,
PRIMARY KEY ((merchant, day), cc_no)
-) WITH CLUSTERING ORDER BY (cc_no ASC);
+) WITH CLUSTERING ORDER BY (cc_no ASC) AND default_time_to_live = 86400;
DROP TABLE IF EXISTS rtfap.txn_count_min;
CREATE TABLE if not exists rtfap.txn_count_min (
@@ -117,10 +117,11 @@ CREATE TABLE if not exists rtfap.txn_count_min (
ttl_txn_hr int,
ttl_txn_min int,
PRIMARY KEY (( year, month, day, hour, minute ))
-);
+) WITH default_time_to_live = 86400;
// Cassandra stress table
+DROP TABLE IF EXISTS rtfap.txn_by_cc;
CREATE TABLE if not exists rtfap.txn_by_cc (
cc_no text,
txn_year int,
@@ -149,7 +150,8 @@ AND replication = {
'replication_factor' : 1
};
-CREATE TABLE banana.dashboards (id text PRIMARY KEY,
+DROP TABLE IF EXISTS banana.dashboards;
+CREATE TABLE if not exists banana.dashboards (id text PRIMARY KEY,
"_version_" bigint, dashboard text, group text, solr_query text, title text, user text);
diff --git a/restRTFAP/public/.DS_Store b/restRTFAP/public/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..8e238734b79f571f9a825c0f9ba6c08a0dec71a6
GIT binary patch
literal 6148
zcmeHKOG*SW5UtWd2HecjWv(y__hD!^F5JuolxZ0Sn;y{F=Tct96L=8cOO--va3jbl
zRgk<&sw$t?bdnMgUG4j&$U;QsC`6^w5Oz0BNA5fWY8~TN9v`;L^=2G)je!oai)%lT
zdl}?Lw$fhz#6FB&*YBS;A}^QcuUF5XV?
z`Ke*l!9Xw&416%a^C6)Svtu!=M+d5!0s#4pt^%F81Y?q8b}WXlK-5Bk7OI|NsD-0H
zxnFiHh88aB$%p#P@5zhS*)e{q?xNW+>R=!km@;r|%?0oO3;Z(8B!3zbtzaM+_-730
ztXp?0Y|8J}FWci?n^3M%DD*2-AkZg|05o`xoK>UQ6Lt7y$70A)*k8ke@ertlL=_DD
G0t0W9GAj1~
literal 0
HcmV?d00001
diff --git a/restRTFAP/public/index.html b/restRTFAP/public/index.html
index 8659f75..f722671 100644
--- a/restRTFAP/public/index.html
+++ b/restRTFAP/public/index.html
@@ -266,14 +266,14 @@ Spark & Querying Roll Up Tables
This tight integration between Cassandra and Spark offers huge value in terms of significantly reduced ETL complexity (no data movement to different clusters) and thus reducing time to insight from your data through a much less complex "cohesive lambda architecture"
- Streaming Analytics
+
Streaming Analytics
The streaming analytics element of this application is made up of two parts:
-
A transaction "producer" - a Scala/Akka app that generates random credit card transactions and then places those transactions onto a Kafka queue.
- A transaction "consumer" - also written in Scala, is a Spark streaming job that (a) consumes the messages put on the Kafka queue, and then (b) parses those messages, evalutes the transaction status and then writes them to the Datastax/Cassandra table transactions.
- It also generates rolling summary lines into the txn_count_min table every minute.
- Streaming analytics code can be found under the directory TransactionHandlers/producer (pre-requisite: make sure you have run the CQL schema create script as described above to create the necessary tables).
+ A transaction "producer" - a Scala/Akka app that generates random credit card transactions and then places those transactions onto a Kafka queue.
+ A transaction "consumer" - also written in Scala, is a Spark streaming job that (a) consumes the messages put on the Kafka queue, and then (b) parses those messages, evalutes the transaction status and then writes them to the Datastax/Cassandra table transactions.
+ It also generates rolling summary lines into the txn_count_min table every minute.
+ Streaming analytics code can be found under the directory TransactionHandlers/producer (pre-requisite: make sure you have run the CQL schema create script as described above to create the necessary tables).
Follow the Spark streaming installation and set up instructions here on Github
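
Once the producer and the Spark streaming consumer described above are both running, new transactions should appear in the transactions table and fresh rolling summary rows should land in txn_count_min roughly once a minute. A minimal check, assuming cqlsh is available on the node (illustrative only, not part of the patch):

# Hypothetical check: transactions written by the streaming consumer...
cqlsh -e "SELECT * FROM rtfap.transactions LIMIT 5;"
# ...and the per-minute rolling summary rows.
cqlsh -e "SELECT * FROM rtfap.txn_count_min LIMIT 5;"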