diff --git a/.gitignore b/.gitignore index 31e01adb..2f591386 100644 --- a/.gitignore +++ b/.gitignore @@ -30,3 +30,4 @@ graphql/node_modules rindexer_rust_playground/generated_csv/*/*.csv .env !.env.example +node_modules diff --git a/cli/src/commands/add.rs b/cli/src/commands/add.rs index a300f352..dde901de 100644 --- a/cli/src/commands/add.rs +++ b/cli/src/commands/add.rs @@ -182,6 +182,8 @@ pub async fn handle_add_contract_command( dependency_events: None, reorg_safe_distance: None, generate_csv: None, + streams: None, + chat: None, }); write_manifest(&manifest, &rindexer_yaml_path).map_err(|e| { diff --git a/cli/src/commands/new.rs b/cli/src/commands/new.rs index 5f90d3e5..87d0adf3 100644 --- a/cli/src/commands/new.rs +++ b/cli/src/commands/new.rs @@ -52,6 +52,15 @@ fn write_docker_compose(path: &Path) -> Result<(), WriteFileError> { write_file(&path.join("docker-compose.yml"), generate_docker_file()) } +fn write_gitignore(path: &Path) -> Result<(), WriteFileError> { + write_file( + &path.join(".gitignore"), + r#".rindexer + generated_csv/**/*.txt + "#, + ) +} + pub fn handle_new_command( project_path: PathBuf, project_type: ProjectType, @@ -151,6 +160,8 @@ pub fn handle_new_command( dependency_events: None, reorg_safe_distance: None, generate_csv: None, + streams: None, + chat: None, }], phantom: None, global: None, @@ -211,6 +222,8 @@ POSTGRES_PASSWORD=rindexer"#; generate_rindexer_rust_project(&project_path); } + write_gitignore(&project_path)?; + print_success_message(&success_message); Ok(()) diff --git a/core/Cargo.toml b/core/Cargo.toml index c221486a..40da6ffd 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -43,15 +43,25 @@ tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt", "time"] chrono = "0.4.38" log = "0.4.20" colored = "2.0" +hex = "0.4.3" +uuid = { version = "1.10.0", features = ["v4"] } +# do not change version as have to match ethers at the moment reqwest = { version = "0.11.27", features = ["json"] } thread_local = 
"1.1" native-tls = "0.2" postgres-native-tls = "0.5" +aws-config = "1.5.0" +aws-sdk-sns = "1.37.0" +lapin = "2.5.0" +deadpool = { version = "0.12", features = ["rt_tokio_1"] } +deadpool-lapin = "0.12" +rdkafka = { version = "0.36", features = ["tokio"] } +teloxide = "0.12" +serenity = { version = "0.12", features = ["client", "framework"] } # build jemallocator = { version = "0.5.0", optional = true } jemalloc-ctl = { version = "0.5.0", optional = true } -hex = "0.4.3" [profile.release] lto = "fat" diff --git a/core/src/chat/clients.rs b/core/src/chat/clients.rs new file mode 100644 index 00000000..d0502c1d --- /dev/null +++ b/core/src/chat/clients.rs @@ -0,0 +1,315 @@ +use std::sync::Arc; + +use ethers::types::U64; +use futures::future::join_all; +use serde_json::Value; +use serenity::all::ChannelId; +use teloxide::types::ChatId; +use thiserror::Error; +use tokio::{ + task, + task::{JoinError, JoinHandle}, +}; + +use crate::{ + chat::{ + discord::{DiscordBot, DiscordError}, + slack::{SlackBot, SlackError}, + telegram::{TelegramBot, TelegramError}, + template::Template, + }, + event::{filter_event_data_by_conditions, EventMessage}, + manifest::chat::{ + ChatConfig, DiscordConfig, DiscordEvent, SlackConfig, SlackEvent, TelegramConfig, + TelegramEvent, + }, +}; + +type SendMessage = Vec>>; + +#[derive(Error, Debug)] +pub enum ChatError { + #[error("Telegram error: {0}")] + Telegram(#[from] TelegramError), + + #[error("Discord error: {0}")] + Discord(#[from] DiscordError), + + #[error("Slack error: {0}")] + Slack(#[from] SlackError), + + #[error("Task failed: {0}")] + JoinError(JoinError), +} + +#[derive(Debug, Clone)] +struct TelegramInstance { + config: TelegramConfig, + client: Arc, +} + +#[derive(Debug)] +struct DiscordInstance { + config: DiscordConfig, + client: Arc, +} + +#[derive(Debug)] +struct SlackInstance { + config: SlackConfig, + client: Arc, +} + +pub struct ChatClients { + telegram: Option>, + discord: Option>, + slack: Option>, +} + +impl ChatClients 
{ + pub async fn new(chat_config: ChatConfig) -> Self { + let telegram = chat_config.telegram.map(|config| { + config + .into_iter() + .map(|config| { + let client = Arc::new(TelegramBot::new(&config.bot_token)); + TelegramInstance { config, client } + }) + .collect() + }); + + let discord = chat_config.discord.map(|config| { + config + .into_iter() + .map(|config| { + let client = Arc::new(DiscordBot::new(&config.bot_token)); + DiscordInstance { config, client } + }) + .collect() + }); + + let slack = chat_config.slack.map(|config| { + config + .into_iter() + .map(|config| { + let client = Arc::new(SlackBot::new(config.bot_token.clone())); + SlackInstance { config, client } + }) + .collect() + }); + + Self { telegram, discord, slack } + } + + fn find_accepted_block_range(&self, from_block: &U64, to_block: &U64) -> U64 { + if from_block > to_block { + panic!("Invalid range: from_block must be less than or equal to to_block"); + } + + match from_block.overflowing_add(to_block - from_block) { + (result, false) => result, + (_, true) => U64::max_value(), + } + } + + pub fn is_in_block_range_to_send(&self, from_block: &U64, to_block: &U64) -> bool { + // only 10 blocks at a time else rate limits will kick in + U64::from(10) <= self.find_accepted_block_range(from_block, to_block) + } + + fn has_any_chat(&self) -> bool { + self.telegram.is_some() || self.discord.is_some() || self.slack.is_some() + } + + fn telegram_send_message_tasks( + &self, + instance: &TelegramInstance, + event_for: &TelegramEvent, + events_data: &[Value], + ) -> SendMessage { + let tasks: Vec<_> = events_data + .iter() + .filter(|event_data| { + if let Some(conditions) = &event_for.conditions { + filter_event_data_by_conditions(event_data, conditions) + } else { + true + } + }) + .map(|event_data| { + let client = Arc::clone(&instance.client); + let chat_id = ChatId(instance.config.chat_id); + let message = Template::new(event_for.template_inline.clone()) + .parse_template_inline(event_data); + 
task::spawn(async move { + client.send_message(chat_id, &message).await?; + Ok(()) + }) + }) + .collect(); + tasks + } + + fn discord_send_message_tasks( + &self, + instance: &DiscordInstance, + event_for: &DiscordEvent, + events_data: &[Value], + ) -> SendMessage { + let tasks: Vec<_> = events_data + .iter() + .filter(|event_data| { + if let Some(conditions) = &event_for.conditions { + filter_event_data_by_conditions(event_data, conditions) + } else { + true + } + }) + .map(|event_data| { + let client = Arc::clone(&instance.client); + let channel_id = ChannelId::new(instance.config.channel_id); + let message = Template::new(event_for.template_inline.clone()) + .parse_template_inline(event_data); + task::spawn(async move { + client.send_message(channel_id, &message).await?; + Ok(()) + }) + }) + .collect(); + tasks + } + + fn slack_send_message_tasks( + &self, + instance: &SlackInstance, + event_for: &SlackEvent, + events_data: &[Value], + ) -> SendMessage { + let tasks: Vec<_> = events_data + .iter() + .filter(|event_data| { + if let Some(conditions) = &event_for.conditions { + filter_event_data_by_conditions(event_data, conditions) + } else { + true + } + }) + .map(|event_data| { + let client = Arc::clone(&instance.client); + let channel = instance.config.channel.clone(); + let message = Template::new(event_for.template_inline.clone()) + .parse_template_inline(event_data); + task::spawn(async move { + client.send_message(&channel, &message).await?; + Ok(()) + }) + }) + .collect(); + tasks + } + + pub async fn send_message( + &self, + event_message: &EventMessage, + index_event_in_order: bool, + from_block: &U64, + to_block: &U64, + ) -> Result { + if !self.has_any_chat() || !self.is_in_block_range_to_send(from_block, to_block) { + return Ok(0); + } + + // will always have something even if the event has no parameters due to the tx_information + if let Value::Array(data_array) = &event_message.event_data { + let mut messages: Vec = Vec::new(); + + if let 
Some(telegram) = &self.telegram { + for instance in telegram { + if instance.config.networks.contains(&event_message.network) { + let telegram_event = instance + .config + .messages + .iter() + .find(|e| e.event_name == event_message.event_name); + + if let Some(telegram_event) = telegram_event { + let message = self.telegram_send_message_tasks( + instance, + telegram_event, + data_array, + ); + messages.push(message); + } + } + } + } + + if let Some(discord) = &self.discord { + for instance in discord { + if instance.config.networks.contains(&event_message.network) { + let discord_event = instance + .config + .messages + .iter() + .find(|e| e.event_name == event_message.event_name); + + if let Some(discord_event) = discord_event { + let message = self.discord_send_message_tasks( + instance, + discord_event, + data_array, + ); + messages.push(message); + } + } + } + } + + if let Some(slack) = &self.slack { + for instance in slack { + if instance.config.networks.contains(&event_message.network) { + let slack_event = instance + .config + .messages + .iter() + .find(|e| e.event_name == event_message.event_name); + + if let Some(slack_event) = slack_event { + let message = + self.slack_send_message_tasks(instance, slack_event, data_array); + messages.push(message); + } + } + } + } + + let mut messages_sent = 0; + + if index_event_in_order { + for message in messages { + for publish in message { + match publish.await { + Ok(Ok(_)) => messages_sent += 1, + Ok(Err(e)) => return Err(e), + Err(e) => return Err(ChatError::JoinError(e)), + } + } + } + } else { + let tasks: Vec<_> = messages.into_iter().flatten().collect(); + let results = join_all(tasks).await; + for result in results { + match result { + Ok(Ok(_)) => messages_sent += 1, + Ok(Err(e)) => return Err(e), + Err(e) => return Err(ChatError::JoinError(e)), + } + } + } + + Ok(messages_sent) + } else { + unreachable!("Event data should be an array"); + } + } +} diff --git a/core/src/chat/discord.rs 
b/core/src/chat/discord.rs new file mode 100644 index 00000000..dbf39fb7 --- /dev/null +++ b/core/src/chat/discord.rs @@ -0,0 +1,29 @@ +use serenity::{http::Http, model::id::ChannelId}; +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum DiscordError { + #[error("Discord API error: {0}")] + ApiError(#[from] serenity::Error), +} + +#[derive(Debug)] +pub struct DiscordBot { + http: Http, +} + +impl DiscordBot { + pub fn new(token: &str) -> Self { + let http = Http::new(token); + Self { http } + } + + pub async fn send_message( + &self, + channel_id: ChannelId, + message: &str, + ) -> Result<(), DiscordError> { + channel_id.say(&self.http, message).await?; + Ok(()) + } +} diff --git a/core/src/chat/mod.rs b/core/src/chat/mod.rs new file mode 100644 index 00000000..f2416cee --- /dev/null +++ b/core/src/chat/mod.rs @@ -0,0 +1,7 @@ +mod clients; +pub use clients::ChatClients; + +mod discord; +mod slack; +mod telegram; +mod template; diff --git a/core/src/chat/slack.rs b/core/src/chat/slack.rs new file mode 100644 index 00000000..8210bd35 --- /dev/null +++ b/core/src/chat/slack.rs @@ -0,0 +1,62 @@ +use reqwest::Client; +use serde_json::json; +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum SlackError { + #[error("HTTP request error: {0}")] + ReqwestError(#[from] reqwest::Error), + + #[error("Could not parse response: {0}")] + CouldNotParseResponse(#[from] serde_json::Error), + + #[error("Slack API error: {0}")] + ApiError(String), +} + +#[derive(Debug, Clone)] +pub struct SlackBot { + client: Client, + token: String, +} + +impl SlackBot { + pub fn new(token: String) -> Self { + let client = Client::new(); + Self { client, token } + } + + pub async fn send_message(&self, channel: &str, message: &str) -> Result<(), SlackError> { + let url = "https://slack.com/api/chat.postMessage"; + let response = self + .client + .post(url) + .header("Authorization", format!("Bearer {}", self.token)) + .header("Content-Type", "application/json") + .json(&json!({ + 
"channel": channel, + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": message + } + } + ] + })) + .send() + .await?; + + let response_text = response.text().await?; + let response_json: serde_json::Value = serde_json::from_str(&response_text)?; + + if response_json["ok"].as_bool().unwrap_or(false) { + Ok(()) + } else { + Err(SlackError::ApiError( + response_json["error"].as_str().unwrap_or("Unknown error").to_string(), + )) + } + } +} diff --git a/core/src/chat/telegram.rs b/core/src/chat/telegram.rs new file mode 100644 index 00000000..6f052274 --- /dev/null +++ b/core/src/chat/telegram.rs @@ -0,0 +1,25 @@ +use teloxide::{prelude::*, types::ParseMode, RequestError}; +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum TelegramError { + #[error("Telegram API error: {0}")] + ApiError(#[from] RequestError), +} + +#[derive(Debug, Clone)] +pub struct TelegramBot { + bot: Bot, +} + +impl TelegramBot { + pub fn new(token: &str) -> Self { + let bot = Bot::new(token); + Self { bot } + } + + pub async fn send_message(&self, chat_id: ChatId, message: &str) -> Result<(), TelegramError> { + self.bot.send_message(chat_id, message).parse_mode(ParseMode::MarkdownV2).await?; + Ok(()) + } +} diff --git a/core/src/chat/template.rs b/core/src/chat/template.rs new file mode 100644 index 00000000..b36d66bb --- /dev/null +++ b/core/src/chat/template.rs @@ -0,0 +1,90 @@ +use ethers::types::U64; +use regex::Regex; +use serde_json::Value; + +#[derive(Debug, Clone)] +pub struct Template { + value: String, +} + +impl Template { + pub fn new(value: String) -> Self { + Self { value } + } + + pub fn parse_template_inline(&self, event_data: &Value) -> String { + let mut template = self.value.clone(); + let placeholders = self.extract_placeholders(&template); + + for placeholder in placeholders { + if placeholder.contains('(') { + if let Some(value) = self.evaluate_function(&placeholder, event_data) { + template = template.replace(&format!("{{{{{}}}}}", 
placeholder), &value); + } + } else if let Some(value) = self.get_nested_value(event_data, &placeholder) { + template = template.replace(&format!("{{{{{}}}}}", placeholder), &value); + } + } + template + } + + fn extract_placeholders(&self, template: &str) -> Vec { + let mut placeholders = Vec::new(); + let mut start = 0; + while let Some(start_index) = template[start..].find("{{") { + if let Some(end_index) = template[start + start_index + 2..].find("}}") { + let placeholder = + &template[start + start_index + 2..start + start_index + 2 + end_index]; + placeholders.push(placeholder.to_string()); + start += start_index + 2 + end_index + 2; + } else { + break; + } + } + placeholders + } + + fn get_nested_value(&self, data: &Value, path: &str) -> Option { + let keys: Vec<&str> = path.split('.').collect(); + let mut current = data; + for key in keys { + if let Some(value) = current.get(key) { + current = value; + } else { + return None; + } + } + Some(current.to_string().replace('"', "")) + } + + fn evaluate_function(&self, function_call: &str, event_data: &Value) -> Option { + let re = Regex::new(r"(\w+)\(([^)]+)\)").unwrap(); + if let Some(captures) = re.captures(function_call) { + let function_name = &captures[1]; + let args: Vec<&str> = captures[2].split(',').map(|s| s.trim()).collect(); + if function_name == "format_value" && args.len() == 2 { + if let Some(value_str) = self.get_nested_value(event_data, args[0]) { + if let Ok(decimals) = args[1].parse::() { + return Some(self.format_value(&value_str, decimals)); + } + } + } + } + None + } + + fn format_value(&self, value: &str, decimals: u32) -> String { + match U64::from_dec_str(value) { + Ok(v) => { + let divisor = U64::from(10).pow(decimals.into()); + let integer_part = v / divisor; + let fractional_part = v % divisor; + if fractional_part.is_zero() { + return integer_part.to_string(); + } + format!("{}.{}", integer_part, fractional_part) + } + Err(_) => value.to_string(), + } + } +} diff --git 
a/core/src/database/postgres/sql_type_wrapper.rs b/core/src/database/postgres/sql_type_wrapper.rs index 9f581930..1c618f4e 100644 --- a/core/src/database/postgres/sql_type_wrapper.rs +++ b/core/src/database/postgres/sql_type_wrapper.rs @@ -7,9 +7,10 @@ use ethers::{ prelude::{Bytes, H128, H160, H256, H512, U128, U256, U512, U64}, }; use rust_decimal::Decimal; +use serde_json::{json, Value}; use tokio_postgres::types::{to_sql_checked, IsNull, ToSql, Type as PgType}; -use crate::abi::ABIInput; +use crate::{abi::ABIInput, event::callback_registry::TxInformation}; #[derive(Debug, Clone)] pub enum EthereumSqlTypeWrapper { @@ -656,3 +657,110 @@ fn serialize_vec_decimal( out.extend_from_slice(&buf); Ok(IsNull::No) } + +fn count_components(components: &[ABIInput]) -> usize { + components + .iter() + .map(|component| { + if component.type_ == "tuple" { + let nested_components = + component.components.as_ref().expect("Tuple should have components defined"); + 1 + count_components(nested_components) + } else { + 1 + } + }) + .sum() +} + +pub fn map_ethereum_wrapper_to_json( + abi_inputs: &[ABIInput], + wrappers: &[EthereumSqlTypeWrapper], + transaction_information: &TxInformation, + is_within_tuple: bool, +) -> Value { + let mut result = serde_json::Map::new(); + + let mut current_wrapper_index = 0; + let mut wrappers_index_processed = Vec::new(); + for abi_input in abi_inputs.iter() { + // tuples will take in multiple wrapper indexes, so we need to skip them if processed + if wrappers_index_processed.contains(¤t_wrapper_index) { + continue; + } + if let Some(wrapper) = wrappers.get(current_wrapper_index) { + if abi_input.type_ == "tuple" { + let components = + abi_input.components.as_ref().expect("Tuple should have components defined"); + let total_properties = count_components(components); + let tuple_value = map_ethereum_wrapper_to_json( + components, + &wrappers[current_wrapper_index..total_properties], + transaction_information, + true, + ); + 
result.insert(abi_input.name.clone(), tuple_value); + for i in current_wrapper_index..total_properties { + wrappers_index_processed.push(i); + } + current_wrapper_index = total_properties; + } else { + let value = match wrapper { + EthereumSqlTypeWrapper::U64(u) => json!(u), + EthereumSqlTypeWrapper::VecU64(u64s) => json!(u64s), + EthereumSqlTypeWrapper::U128(u) => json!(u.to_string()), + EthereumSqlTypeWrapper::VecU128(u128s) => { + json!(u128s.iter().map(|u| u.to_string()).collect::>()) + } + EthereumSqlTypeWrapper::U256(u) => json!(u.to_string()), + EthereumSqlTypeWrapper::VecU256(u256s) => { + json!(u256s.iter().map(|u| u.to_string()).collect::>()) + } + EthereumSqlTypeWrapper::U512(u) => json!(u.to_string()), + EthereumSqlTypeWrapper::VecU512(u512s) => { + json!(u512s.iter().map(|u| u.to_string()).collect::>()) + } + EthereumSqlTypeWrapper::H128(h) => json!(h), + EthereumSqlTypeWrapper::VecH128(h128s) => json!(h128s), + EthereumSqlTypeWrapper::H160(h) => json!(h), + EthereumSqlTypeWrapper::VecH160(h160s) => json!(h160s), + EthereumSqlTypeWrapper::H256(h) => json!(h), + EthereumSqlTypeWrapper::VecH256(h256s) => json!(h256s), + EthereumSqlTypeWrapper::H512(h) => json!(h), + EthereumSqlTypeWrapper::VecH512(h512s) => json!(h512s), + EthereumSqlTypeWrapper::Address(address) => json!(address), + EthereumSqlTypeWrapper::VecAddress(addresses) => json!(addresses), + EthereumSqlTypeWrapper::Bool(b) => json!(b), + EthereumSqlTypeWrapper::VecBool(bools) => json!(bools), + EthereumSqlTypeWrapper::U32(u) => json!(u), + EthereumSqlTypeWrapper::VecU32(u32s) => json!(u32s), + EthereumSqlTypeWrapper::U16(u) => json!(u), + EthereumSqlTypeWrapper::VecU16(u16s) => json!(u16s), + EthereumSqlTypeWrapper::U8(u) => json!(u), + EthereumSqlTypeWrapper::VecU8(u8s) => json!(u8s), + EthereumSqlTypeWrapper::String(s) => json!(s), + EthereumSqlTypeWrapper::VecString(strings) => json!(strings), + EthereumSqlTypeWrapper::Bytes(bytes) => json!(hex::encode(bytes)), + 
EthereumSqlTypeWrapper::VecBytes(bytes) => { + json!(bytes.iter().map(hex::encode).collect::>()) + } + }; + result.insert(abi_input.name.clone(), value); + wrappers_index_processed.push(current_wrapper_index); + current_wrapper_index += 1; + } + } else { + panic!( + "No wrapper found for ABI input {:?} and wrapper index {} - wrappers {:?}", + abi_input, current_wrapper_index, wrappers + ); + } + } + + // only do this at the top level + if !is_within_tuple { + result.insert("transaction_information".to_string(), json!(transaction_information)); + } + + Value::Object(result) +} diff --git a/core/src/event/callback_registry.rs b/core/src/event/callback_registry.rs index 644994f0..fa057706 100644 --- a/core/src/event/callback_registry.rs +++ b/core/src/event/callback_registry.rs @@ -7,6 +7,7 @@ use ethers::{ }; use futures::future::BoxFuture; use rand::Rng; +use serde::{Deserialize, Serialize}; use tokio::time::sleep; use tracing::{debug, error}; @@ -23,7 +24,7 @@ pub fn noop_decoder() -> Decoder { }) as Decoder } -#[derive(Debug, Clone)] +#[derive(Debug, Serialize, Deserialize, Clone)] pub struct TxInformation { pub network: String, pub address: Address, diff --git a/core/src/event/conditions.rs b/core/src/event/conditions.rs new file mode 100644 index 00000000..1d385758 --- /dev/null +++ b/core/src/event/conditions.rs @@ -0,0 +1,91 @@ +use ethers::prelude::U64; +use serde_json::{Map, Value}; + +fn get_nested_value(data: &Value, path: &str) -> Option { + let keys: Vec<&str> = path.split('.').collect(); + let mut current = data; + for key in keys { + match current.get(key) { + Some(value) => current = value, + None => return None, + } + } + Some(current.clone()) +} + +#[allow(clippy::manual_strip)] +fn evaluate_condition(value: &Value, condition: &str) -> bool { + if condition.contains("||") || + condition.contains("&&") || + condition.contains('>') || + condition.contains('<') || + condition.contains('=') + { + let parts: Vec<&str> = condition.split("||").collect(); + 
for part in parts { + let subparts: Vec<&str> = part.split("&&").collect(); + let mut and_result = true; + for subpart in subparts { + let (op, comp) = if subpart.starts_with(">=") { + (">=", &subpart[2..]) + } else if subpart.starts_with("<=") { + ("<=", &subpart[2..]) + } else if subpart.starts_with(">") { + (">", &subpart[1..]) + } else if subpart.starts_with("<") { + ("<", &subpart[1..]) + } else if subpart.starts_with("=") { + ("=", &subpart[1..]) + } else { + ("", subpart) + }; + + and_result &= match op { + ">=" => { + U64::from_str_radix(value.as_str().unwrap_or("0"), 10).unwrap_or_default() >= + U64::from_str_radix(comp, 10).unwrap_or_default() + } + "<=" => { + U64::from_str_radix(value.as_str().unwrap_or("0"), 10).unwrap_or_default() <= + U64::from_str_radix(comp, 10).unwrap_or_default() + } + ">" => { + U64::from_str_radix(value.as_str().unwrap_or("0"), 10).unwrap_or_default() > + U64::from_str_radix(comp, 10).unwrap_or_default() + } + "<" => { + U64::from_str_radix(value.as_str().unwrap_or("0"), 10).unwrap_or_default() < + U64::from_str_radix(comp, 10).unwrap_or_default() + } + "=" => value == &Value::String(comp.to_string()), + "" => value == &Value::String(subpart.to_string()), + _ => false, + }; + } + if and_result { + return true; + } + } + false + } else { + value == &Value::String(condition.to_string()) + } +} + +pub fn filter_event_data_by_conditions( + event_data: &Value, + conditions: &Vec>, +) -> bool { + for condition in conditions { + for (key, value) in condition { + if let Some(event_value) = get_nested_value(event_data, key) { + if !evaluate_condition(&event_value, value.as_str().unwrap_or("")) { + return false; + } + } else { + return false; + } + } + } + true +} diff --git a/core/src/event/config.rs b/core/src/event/config.rs index c5ae27a5..bac8b28e 100644 --- a/core/src/event/config.rs +++ b/core/src/event/config.rs @@ -30,6 +30,7 @@ pub struct EventProcessingConfig { pub progress: Arc>, pub database: Option>, pub csv_details: 
Option, + pub stream_last_synced_block_file_path: Option, pub index_event_in_order: bool, pub live_indexing: bool, pub indexing_distance_from_head: U64, diff --git a/core/src/event/message.rs b/core/src/event/message.rs new file mode 100644 index 00000000..95fb6610 --- /dev/null +++ b/core/src/event/message.rs @@ -0,0 +1,9 @@ +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct EventMessage { + pub event_name: String, + pub event_data: Value, + pub network: String, +} diff --git a/core/src/event/mod.rs b/core/src/event/mod.rs index 392ed395..492c5c72 100644 --- a/core/src/event/mod.rs +++ b/core/src/event/mod.rs @@ -1,5 +1,13 @@ pub mod callback_registry; + pub mod config; pub mod contract_setup; + mod rindexer_event_filter; pub use rindexer_event_filter::{BuildRindexerFilterError, RindexerEventFilter}; + +mod message; +pub use message::EventMessage; + +mod conditions; +pub use conditions::filter_event_data_by_conditions; diff --git a/core/src/indexer/last_synced.rs b/core/src/indexer/last_synced.rs index d1a9d021..da5ac8a6 100644 --- a/core/src/indexer/last_synced.rs +++ b/core/src/indexer/last_synced.rs @@ -12,43 +12,19 @@ use tracing::error; use crate::{ event::config::EventProcessingConfig, helpers::{camel_to_snake, get_full_path}, - manifest::storage::CsvDetails, + manifest::{storage::CsvDetails, stream::StreamsConfig}, EthereumSqlTypeWrapper, PostgresClient, }; -fn build_last_synced_block_number_for_csv( - project_path: &Path, - csv_details: &CsvDetails, +async fn get_last_synced_block_number_file( + full_path: &Path, contract_name: &str, network: &str, event_name: &str, -) -> String { - format!( - "{}/{}/last-synced-blocks/{}-{}-{}.txt", - get_full_path(project_path, &csv_details.path) - .unwrap_or_else(|_| panic!("failed to get full path {}", project_path.display())) - .display(), - contract_name, - contract_name.to_lowercase(), - network.to_lowercase(), - event_name.to_lowercase() - ) 
-} +) -> Result, UpdateLastSyncedBlockNumberFile> { + let file_path = + build_last_synced_block_number_file(full_path, contract_name, network, event_name); -async fn get_last_synced_block_number_for_csv( - project_path: &Path, - csv_details: &CsvDetails, - contract_name: &str, - network: &str, - event_name: &str, -) -> Result, UpdateLastSyncedBlockNumberCsv> { - let file_path = build_last_synced_block_number_for_csv( - project_path, - csv_details, - contract_name, - network, - event_name, - ); let path = Path::new(&file_path); if !path.exists() { @@ -65,7 +41,7 @@ async fn get_last_synced_block_number_for_csv( return match parse { Ok(value) => Ok(Some(value)), Err(e) => { - Err(UpdateLastSyncedBlockNumberCsv::ParseError(value.to_string(), e.to_string())) + Err(UpdateLastSyncedBlockNumberFile::ParseError(value.to_string(), e.to_string())) } }; } @@ -73,10 +49,27 @@ async fn get_last_synced_block_number_for_csv( Ok(None) } +fn build_last_synced_block_number_file( + full_path: &Path, + contract_name: &str, + network: &str, + event_name: &str, +) -> String { + format!( + "{}/{}/last-synced-blocks/{}-{}-{}.txt", + full_path.display(), + contract_name, + contract_name.to_lowercase(), + network.to_lowercase(), + event_name.to_lowercase() + ) +} + pub struct SyncConfig<'a> { pub project_path: &'a Path, pub database: &'a Option>, pub csv_details: &'a Option, + pub stream_details: &'a Option<&'a StreamsConfig>, pub contract_csv_enabled: bool, pub indexer_name: &'a str, pub contract_name: &'a str, @@ -85,12 +78,13 @@ pub struct SyncConfig<'a> { } pub async fn get_last_synced_block_number(config: SyncConfig<'_>) -> Option { - // Check CSV file for last seen block + // Check CSV file for last seen block as no database enabled if config.database.is_none() && config.contract_csv_enabled { if let Some(csv_details) = config.csv_details { - return if let Ok(result) = get_last_synced_block_number_for_csv( - config.project_path, - csv_details, + return if let Ok(result) = 
get_last_synced_block_number_file( + &get_full_path(config.project_path, &csv_details.path).unwrap_or_else(|_| { + panic!("failed to get full path {}", config.project_path.display()) + }), config.contract_name, config.network, config.event_name, @@ -111,6 +105,41 @@ pub async fn get_last_synced_block_number(config: SyncConfig<'_>) -> Option } } + // Then check streams if no csv or database to find out last synced block + if config.database.is_none() && !config.contract_csv_enabled && config.stream_details.is_some() + { + let stream_details = config.stream_details.as_ref().unwrap(); + + // create the path if it does not exist + stream_details + .create_full_streams_last_synced_block_path(config.project_path, config.contract_name) + .await; + + return if let Ok(result) = get_last_synced_block_number_file( + &config + .project_path + .join(stream_details.get_streams_last_synced_block_path()) + .canonicalize() + .expect("Failed to canonicalize path"), + config.contract_name, + config.network, + config.event_name, + ) + .await + { + if let Some(value) = result { + if value.is_zero() { + return None; + } + } + + result + } else { + error!("Error fetching last synced block from stream"); + None + } + } + // Query database for last synced block if let Some(database) = config.database { let query = format!( @@ -142,7 +171,7 @@ pub async fn get_last_synced_block_number(config: SyncConfig<'_>) -> Option } #[derive(thiserror::Error, Debug)] -pub enum UpdateLastSyncedBlockNumberCsv { +pub enum UpdateLastSyncedBlockNumberFile { #[error("File IO error: {0}")] FileIo(#[from] std::io::Error), @@ -150,22 +179,20 @@ pub enum UpdateLastSyncedBlockNumberCsv { ParseError(String, String), } -async fn update_last_synced_block_number_for_csv_to_file( +async fn update_last_synced_block_number_for_file( config: &Arc, - csv_details: &CsvDetails, + full_path: &Path, to_block: U64, -) -> Result<(), UpdateLastSyncedBlockNumberCsv> { - let file_path = build_last_synced_block_number_for_csv( - 
&config.project_path, - csv_details, +) -> Result<(), UpdateLastSyncedBlockNumberFile> { + let file_path = build_last_synced_block_number_file( + full_path, &config.contract_name, &config.network_contract.network, &config.event_name, ); - let last_block = get_last_synced_block_number_for_csv( - &config.project_path, - csv_details, + let last_block = get_last_synced_block_number_file( + full_path, &config.contract_name, &config.network_contract.network, &config.event_name, @@ -220,11 +247,38 @@ pub fn update_progress_and_last_synced(config: Arc, to_bl error!("Error updating last synced block: {:?}", e); } } else if let Some(csv_details) = &config.csv_details { - if let Err(e) = - update_last_synced_block_number_for_csv_to_file(&config, csv_details, to_block) - .await + if let Err(e) = update_last_synced_block_number_for_file( + &config, + &get_full_path(&config.project_path, &csv_details.path).unwrap_or_else(|_| { + panic!("failed to get full path {}", config.project_path.display()) + }), + to_block, + ) + .await + { + error!( + "Error updating last synced block to CSV - path - {} error - {:?}", + csv_details.path, e + ); + } + } else if let Some(stream_last_synced_block_file_path) = + &config.stream_last_synced_block_file_path + { + if let Err(e) = update_last_synced_block_number_for_file( + &config, + &config + .project_path + .join(stream_last_synced_block_file_path) + .canonicalize() + .expect("Failed to canonicalize path"), + to_block, + ) + .await { - error!("Error updating last synced block to CSV: {:?}", e); + error!( + "Error updating last synced block to stream - path - {} error - {:?}", + stream_last_synced_block_file_path, e + ); } } }); diff --git a/core/src/indexer/no_code.rs b/core/src/indexer/no_code.rs index 6c6442f3..02d41c47 100644 --- a/core/src/indexer/no_code.rs +++ b/core/src/indexer/no_code.rs @@ -2,25 +2,31 @@ use std::{fs, io, path::Path, sync::Arc}; use colored::Colorize; use ethers::abi::{Abi, Contract as EthersContract, Event}; +use 
serde_json::Value; use tokio_postgres::types::Type as PgType; -use tracing::{debug, error, info}; +use tracing::{debug, error, info, warn}; use crate::{ abi::{ABIItem, CreateCsvFileForEvent, EventInfo, ParamTypeError, ReadAbiError}, + chat::ChatClients, database::postgres::{ client::PostgresClient, generate::{ generate_column_names_only_with_base_properties, generate_event_table_full_name, }, setup::{setup_postgres, SetupPostgresError}, - sql_type_wrapper::{map_log_params_to_ethereum_wrapper, EthereumSqlTypeWrapper}, + sql_type_wrapper::{ + map_ethereum_wrapper_to_json, map_log_params_to_ethereum_wrapper, + EthereumSqlTypeWrapper, + }, }, event::{ callback_registry::{ noop_decoder, EventCallbackRegistry, EventCallbackRegistryInformation, - EventCallbackType, + EventCallbackType, TxInformation, }, contract_setup::{ContractInformation, CreateContractInformationError}, + EventMessage, }, generate_random_id, helpers::get_full_path, @@ -30,8 +36,9 @@ use crate::{ yaml::{read_manifest, ReadManifestError}, }, provider::{CreateNetworkProvider, RetryClientError}, - setup_info_logger, AsyncCsvAppender, FutureExt, IndexingDetails, StartDetails, - StartNoCodeDetails, + setup_info_logger, + streams::StreamsClients, + AsyncCsvAppender, FutureExt, IndexingDetails, StartDetails, StartNoCodeDetails, }; #[derive(thiserror::Error, Debug)] @@ -122,10 +129,13 @@ struct NoCodeCallbackParams { indexer_name: String, contract_name: String, event: Event, + index_event_in_order: bool, csv: Option>, postgres: Option>, postgres_event_table_name: String, postgres_column_names: Vec, + streams_clients: Arc>, + chat_clients: Arc>, } fn no_code_callback(params: Arc) -> EventCallbackType { @@ -154,11 +164,16 @@ fn no_code_callback(params: Arc) -> EventCallbackType { } }; + let network = results.first().unwrap().tx_information.network.clone(); + let mut indexed_count = 0; let mut postgres_bulk_data: Vec> = Vec::new(); let mut postgres_bulk_column_types: Vec = Vec::new(); let mut csv_bulk_data: Vec> = 
Vec::new(); + // stream and chat info + let mut event_message_data: Vec = Vec::new(); + // Collect owned results to avoid lifetime issues let owned_results: Vec<_> = results .iter() @@ -170,7 +185,7 @@ fn no_code_callback(params: Arc) -> EventCallbackType { let block_number = result.tx_information.block_number; let block_hash = result.tx_information.block_hash; let network = result.tx_information.network.to_string(); - let tx_index = result.tx_information.transaction_index; + let transaction_index = result.tx_information.transaction_index; let log_index = result.tx_information.log_index; let event_parameters: Vec = @@ -182,7 +197,7 @@ fn no_code_callback(params: Arc) -> EventCallbackType { EthereumSqlTypeWrapper::U64(block_number), EthereumSqlTypeWrapper::H256(block_hash), EthereumSqlTypeWrapper::String(network.to_string()), - EthereumSqlTypeWrapper::U64(tx_index), + EthereumSqlTypeWrapper::U64(transaction_index), EthereumSqlTypeWrapper::U256(log_index), ]; @@ -190,6 +205,8 @@ fn no_code_callback(params: Arc) -> EventCallbackType { log.params, address, transaction_hash, + log_index, + transaction_index, block_number, block_hash, network, @@ -204,6 +221,8 @@ fn no_code_callback(params: Arc) -> EventCallbackType { log_params, address, transaction_hash, + log_index, + transaction_index, block_number, block_hash, network, @@ -212,6 +231,24 @@ fn no_code_callback(params: Arc) -> EventCallbackType { end_global_parameters, ) in owned_results { + if params.streams_clients.is_some() || params.chat_clients.is_some() { + let event_result = map_ethereum_wrapper_to_json( + ¶ms.event_info.inputs, + &event_parameters, + &TxInformation { + network: network.clone(), + address, + block_hash, + block_number, + transaction_hash, + log_index, + transaction_index, + }, + false, + ); + event_message_data.push(event_result); + } + let mut all_params: Vec = vec![contract_address]; all_params.extend(event_parameters); all_params.extend(end_global_parameters); @@ -289,13 +326,93 @@ fn 
no_code_callback(params: Arc) -> EventCallbackType { } } + let event_message = EventMessage { + event_name: params.event_info.name.clone(), + event_data: Value::Array(event_message_data), + network: network.clone(), + }; + + if let Some(streams_clients) = params.streams_clients.as_ref() { + let stream_id = format!( + "{}-{}-{}-{}-{}", + params.contract_name, params.event_info.name, network, from_block, to_block + ); + + match streams_clients + .stream(stream_id, &event_message, params.index_event_in_order) + .await + { + Ok(streamed) => { + if streamed > 0 { + info!( + "{}::{} - {} - {} events {}", + params.contract_name, + params.event_info.name, + "STREAMED".green(), + streamed, + format!( + "- blocks: {} - {} - network: {}", + from_block, to_block, network + ) + ); + } + } + Err(e) => { + error!("Error streaming event: {}", e); + return Err(e.to_string()); + } + } + } + + if let Some(chat_clients) = params.chat_clients.as_ref() { + if !chat_clients.is_in_block_range_to_send(&from_block, &to_block) { + warn!( + "{}::{} - {} - messages have a max 10 block range due to the rate limits - {}", + params.contract_name, + params.event_info.name, + "CHAT_MESSAGES_DISABLED".yellow(), + format!("- blocks: {} - {} - network: {}", from_block, to_block, network) + ); + } else { + match chat_clients + .send_message( + &event_message, + params.index_event_in_order, + &from_block, + &to_block, + ) + .await + { + Ok(messages_sent) => { + if messages_sent > 0 { + info!( + "{}::{} - {} - {} events {}", + params.contract_name, + params.event_info.name, + "CHAT_MESSAGES_SENT".green(), + messages_sent, + format!( + "- blocks: {} - {} - network: {}", + from_block, to_block, network + ) + ); + } + } + Err(e) => { + error!("Error sending chat messages: {}", e); + return Err(e.to_string()); + } + } + } + } + info!( + "{}::{} - {} - {} events {}", + params.contract_name, + params.event_info.name, + "INDEXED".green(), + indexed_count, - format!("- blocks: {} - {}", from_block, to_block) + format!("- 
blocks: {} - {} - network: {}", from_block, to_block, network) ); Ok(()) @@ -402,14 +519,28 @@ pub async fn process_events( let postgres_event_table_name = generate_event_table_full_name(&manifest.name, &contract.name, &event_info.name); + let streams_client = if let Some(streams) = &contract.streams { + Some(StreamsClients::new(streams.clone()).await) + } else { + None + }; + + let chat_clients = if let Some(chats) = &contract.chat { + Some(ChatClients::new(chats.clone()).await) + } else { + None + }; + + let index_event_in_order = contract + .index_event_in_order + .as_ref() + .map_or(false, |vec| vec.contains(&event_info.name)); + let event = EventCallbackRegistryInformation { id: generate_random_id(10), indexer_name: manifest.name.clone(), event_name: event_info.name.clone(), - index_event_in_order: contract - .index_event_in_order - .as_ref() - .map_or(false, |vec| vec.contains(&event_info.name)), + index_event_in_order, topic_id: event_info.topic_id(), contract: contract_information, callback: no_code_callback(Arc::new(NoCodeCallbackParams { @@ -417,10 +548,13 @@ pub async fn process_events( indexer_name: manifest.name.clone(), contract_name: contract.name.clone(), event: event.clone(), + index_event_in_order, csv, postgres: postgres.clone(), postgres_event_table_name, postgres_column_names, + streams_clients: Arc::new(streams_client), + chat_clients: Arc::new(chat_clients), })), }; diff --git a/core/src/indexer/start.rs b/core/src/indexer/start.rs index 63aba5d2..b1da4cdb 100644 --- a/core/src/indexer/start.rs +++ b/core/src/indexer/start.rs @@ -102,12 +102,18 @@ pub async fn start_indexing( let mut processed_network_contracts: Vec = Vec::new(); for event in registry.events.iter() { + let stream_details = manifest + .contracts + .iter() + .find(|c| c.name == event.contract.name) + .and_then(|c| c.streams.as_ref()); for network_contract in event.contract.details.iter() { let config = SyncConfig { project_path, database: &database, csv_details: 
&manifest.storage.csv, contract_csv_enabled: manifest.contract_csv_enabled(&event.contract.name), + stream_details: &stream_details, indexer_name: &event.indexer_name, contract_name: &event.contract.name, event_name: &event.event_name, @@ -190,6 +196,9 @@ pub async fn start_indexing( progress: Arc::clone(&event_progress_state), database: database.clone(), csv_details: manifest.storage.csv.clone(), + stream_last_synced_block_file_path: stream_details + .as_ref() + .map(|s| s.get_streams_last_synced_block_path()), live_indexing: if no_live_indexing_forced { false } else { diff --git a/core/src/lib.rs b/core/src/lib.rs index bbe3f1aa..481b28ba 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -24,10 +24,12 @@ mod logger; pub use logger::setup_info_logger; mod abi; pub use abi::ABIItem; +mod chat; pub mod event; pub mod phantom; pub mod provider; mod start; +mod streams; mod types; // export 3rd party dependencies diff --git a/core/src/manifest/chat.rs b/core/src/manifest/chat.rs new file mode 100644 index 00000000..ace6b120 --- /dev/null +++ b/core/src/manifest/chat.rs @@ -0,0 +1,68 @@ +use serde::{Deserialize, Serialize}; +use serde_json::{Map, Value}; + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct TelegramConfig { + pub bot_token: String, + pub chat_id: i64, + pub networks: Vec, + pub messages: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct TelegramEvent { + pub event_name: String, + + #[serde(skip_serializing_if = "Option::is_none")] + pub conditions: Option>>, + + pub template_inline: String, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct DiscordConfig { + pub bot_token: String, + pub channel_id: u64, + pub networks: Vec, + pub messages: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct DiscordEvent { + pub event_name: String, + + #[serde(skip_serializing_if = "Option::is_none")] + pub conditions: Option>>, + + pub template_inline: String, +} + +#[derive(Debug, Serialize, 
Deserialize, Clone)] +pub struct SlackConfig { + pub bot_token: String, + pub channel: String, + pub networks: Vec, + pub messages: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct SlackEvent { + pub event_name: String, + + #[serde(skip_serializing_if = "Option::is_none")] + pub conditions: Option>>, + + pub template_inline: String, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct ChatConfig { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub telegram: Option>, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub discord: Option>, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub slack: Option>, +} diff --git a/core/src/manifest/contract.rs b/core/src/manifest/contract.rs index 7d2adf9b..271d8c5c 100644 --- a/core/src/manifest/contract.rs +++ b/core/src/manifest/contract.rs @@ -12,6 +12,7 @@ use crate::{ AddressDetails, ContractEventMapping, FilterDetails, IndexingContractSetup, }, indexer::parse_topic, + manifest::{chat::ChatConfig, stream::StreamsConfig}, }; #[derive(Debug, Serialize, Deserialize, Clone)] @@ -221,6 +222,12 @@ pub struct Contract { #[serde(default, skip_serializing_if = "Option::is_none")] pub generate_csv: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub streams: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub chat: Option, } impl Contract { diff --git a/core/src/manifest/mod.rs b/core/src/manifest/mod.rs index fb4f35bc..6c7e4402 100644 --- a/core/src/manifest/mod.rs +++ b/core/src/manifest/mod.rs @@ -1,3 +1,4 @@ +pub mod chat; pub mod contract; pub mod core; pub mod global; @@ -5,4 +6,5 @@ pub mod graphql; pub mod network; pub mod phantom; pub mod storage; +pub mod stream; pub mod yaml; diff --git a/core/src/manifest/stream.rs b/core/src/manifest/stream.rs new file mode 100644 index 00000000..de6e8576 --- /dev/null +++ b/core/src/manifest/stream.rs @@ -0,0 +1,189 @@ +use std::path::Path; + +use 
lapin::ExchangeKind; +use serde::{Deserialize, Deserializer, Serialize}; +use serde_json::{Map, Value}; +use tokio::fs; + +use crate::types::aws_config::AwsConfig; + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct StreamEvent { + pub event_name: String, + + #[serde(skip_serializing_if = "Option::is_none")] + pub conditions: Option>>, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct SNSStreamTopicConfig { + pub prefix_id: Option, + pub topic_arn: String, + pub networks: Vec, + pub events: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct SNSStreamConfig { + pub aws_config: AwsConfig, + pub topics: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct WebhookStreamConfig { + pub endpoint: String, + pub shared_secret: String, + pub networks: Vec, + pub events: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize)] +pub struct ExchangeKindWrapper(pub ExchangeKind); + +impl<'de> Deserialize<'de> for ExchangeKindWrapper { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s: String = Deserialize::deserialize(deserializer)?; + let kind = match s.to_lowercase().as_str() { + "direct" => ExchangeKind::Direct, + "fanout" => ExchangeKind::Fanout, + "headers" => ExchangeKind::Headers, + "topic" => ExchangeKind::Topic, + _ => ExchangeKind::Custom(s), + }; + Ok(ExchangeKindWrapper(kind)) + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct RabbitMQStreamQueueConfig { + pub exchange: String, + pub exchange_type: ExchangeKindWrapper, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub routing_key: Option, + pub networks: Vec, + pub events: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct RabbitMQStreamConfig { + pub url: String, + pub exchanges: Vec, +} + +impl RabbitMQStreamConfig { + pub fn validate(&self) -> Result<(), String> { + if self.exchanges.is_empty() { + return Err("No exchanges defined in RabbitMQ 
config".to_string()); + } + + for config in &self.exchanges { + if config.exchange_type.0 != ExchangeKind::Direct && + config.exchange_type.0 != ExchangeKind::Fanout && + config.exchange_type.0 != ExchangeKind::Topic + { + return Err("Only direct, topic and fanout exchanges are supported".to_string()); + } + + if config.exchange_type.0 == ExchangeKind::Fanout && config.routing_key.is_some() { + return Err("Fanout exchanges do not support routing keys".to_string()); + } + + if config.exchange_type.0 == ExchangeKind::Topic && config.routing_key.is_none() { + return Err("Topic exchanges require a routing key".to_string()); + } + + if config.exchange_type.0 == ExchangeKind::Direct && config.routing_key.is_none() { + return Err("Direct exchanges require a routing key".to_string()); + } + } + + Ok(()) + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct KafkaStreamQueueConfig { + pub topic: String, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + pub networks: Vec, + pub events: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct KafkaStreamConfig { + pub brokers: Vec, + pub security_protocol: String, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub sasl_mechanisms: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub sasl_username: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub sasl_password: Option, + + pub acks: String, + pub topics: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct StreamsConfig { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub sns: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub webhooks: Option>, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub rabbitmq: Option, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kafka: Option, +} + +impl StreamsConfig { + pub fn validate(&self) -> Result<(), String> 
{ + if let Some(rabbitmq) = &self.rabbitmq { + return rabbitmq.validate(); + } + + Ok(()) + } + + pub fn get_streams_last_synced_block_path(&self) -> String { + let mut path = ".rindexer/".to_string(); + if self.rabbitmq.is_some() { + path.push_str("rabbitmq_"); + } else if self.sns.is_some() { + path.push_str("sns_"); + } else if self.webhooks.is_some() { + path.push_str("webhooks_"); + } else if self.kafka.is_some() { + path.push_str("kafka_"); + } + + path.trim_end_matches('_').to_string() + } + + pub async fn create_full_streams_last_synced_block_path( + &self, + project_path: &Path, + contract_name: &str, + ) { + let path = + self.get_streams_last_synced_block_path() + "/" + contract_name + "/last-synced-blocks"; + + let full_path = project_path.join(path); + + if !Path::new(&full_path).exists() { + fs::create_dir_all(&full_path).await.expect("Failed to create directory for stream"); + } + } +} diff --git a/core/src/manifest/yaml.rs b/core/src/manifest/yaml.rs index 93a04915..ea4ccf71 100644 --- a/core/src/manifest/yaml.rs +++ b/core/src/manifest/yaml.rs @@ -6,6 +6,7 @@ use std::{ }; use regex::{Captures, Regex}; +use tracing::error; use crate::{ abi::ABIItem, @@ -22,6 +23,7 @@ fn substitute_env_variables(contents: &str) -> Result { match env::var(var_name) { Ok(val) => val, Err(_) => { + error!("Environment variable {} not found", var_name); panic!("Environment variable {} not found", var_name) } } @@ -54,6 +56,9 @@ pub enum ValidateManifestError { #[error("Relationship foreign key contract {0} not found")] RelationshipForeignKeyContractNotFound(String), + + #[error("Streams config is invalid: {0}")] + StreamsConfigValidationError(String), } fn validate_manifest( @@ -125,6 +130,12 @@ fn validate_manifest( if let Some(_dependency_events) = &contract.dependency_events { // TODO - validate the events all exist in the contract ABIs } + + if let Some(streams) = &contract.streams { + if let Err(e) = streams.validate() { + return 
Err(ValidateManifestError::StreamsConfigValidationError(e)); + } + } } if let Some(postgres) = &manifest.storage.postgres { diff --git a/core/src/streams/clients.rs b/core/src/streams/clients.rs new file mode 100644 index 00000000..d9f2978b --- /dev/null +++ b/core/src/streams/clients.rs @@ -0,0 +1,474 @@ +use std::sync::Arc; + +use aws_sdk_sns::{config::http::HttpResponse, error::SdkError, operation::publish::PublishError}; +use futures::future::join_all; +use serde_json::Value; +use thiserror::Error; +use tokio::{ + task, + task::{JoinError, JoinHandle}, +}; +use tracing::error; + +use crate::{ + event::{filter_event_data_by_conditions, EventMessage}, + manifest::stream::{ + KafkaStreamConfig, KafkaStreamQueueConfig, RabbitMQStreamConfig, RabbitMQStreamQueueConfig, + SNSStreamTopicConfig, StreamEvent, StreamsConfig, WebhookStreamConfig, + }, + streams::{ + kafka::{Kafka, KafkaError}, + RabbitMQ, RabbitMQError, Webhook, WebhookError, SNS, + }, +}; + +// we should limit the max chunk size we send over when streaming to 70KB - 100KB is most limits +// we can add this to yaml if people need it +const MAX_CHUNK_SIZE: usize = 75 * 1024; // 75 KB + +type StreamPublishes = Vec>>; + +#[derive(Debug, Clone)] +struct SNSStream { + config: Vec, + client: Arc, +} + +#[derive(Error, Debug)] +pub enum StreamError { + #[error("SNS could not publish - {0}")] + SnsCouldNotPublish(#[from] SdkError), + + #[error("Webhook could not publish: {0}")] + WebhookCouldNotPublish(#[from] WebhookError), + + #[error("RabbitMQ could not publish: {0}")] + RabbitMQCouldNotPublish(#[from] RabbitMQError), + + #[error("Kafka could not publish: {0}")] + KafkaCouldNotPublish(#[from] KafkaError), + + #[error("Task failed: {0}")] + JoinError(JoinError), +} + +#[derive(Debug, Clone)] +struct WebhookStream { + config: Vec, + client: Arc, +} + +pub struct RabbitMQStream { + config: RabbitMQStreamConfig, + client: Arc, +} + +pub struct KafkaStream { + config: KafkaStreamConfig, + client: Arc, +} + +pub 
struct StreamsClients { + sns: Option, + webhook: Option, + rabbitmq: Option, + kafka: Option, +} + +impl StreamsClients { + pub async fn new(stream_config: StreamsConfig) -> Self { + let sns = if let Some(config) = &stream_config.sns { + Some(SNSStream { + config: config.topics.clone(), + client: Arc::new(SNS::new(&config.aws_config).await), + }) + } else { + None + }; + + let webhook = stream_config.webhooks.as_ref().map(|config| WebhookStream { + config: config.clone(), + client: Arc::new(Webhook::new()), + }); + + let rabbitmq = if let Some(config) = stream_config.rabbitmq.as_ref() { + Some(RabbitMQStream { + config: config.clone(), + client: Arc::new(RabbitMQ::new(&config.url).await), + }) + } else { + None + }; + + let kafka = if let Some(config) = stream_config.kafka.as_ref() { + Some(KafkaStream { + config: config.clone(), + client: Arc::new( + Kafka::new(config) + .await + .unwrap_or_else(|e| panic!("Failed to create Kafka client: {:?}", e)), + ), + }) + } else { + None + }; + + Self { sns, webhook, rabbitmq, kafka } + } + + fn has_any_streams(&self) -> bool { + self.sns.is_some() || + self.webhook.is_some() || + self.rabbitmq.is_some() || + self.kafka.is_some() + } + + fn chunk_data(&self, data_array: &Vec) -> Vec> { + let mut current_chunk = Vec::new(); + let mut current_size = 0; + + let mut chunks = Vec::new(); + for item in data_array { + let item_str = serde_json::to_string(item).unwrap(); + let item_size = item_str.len(); + + if current_size + item_size > MAX_CHUNK_SIZE { + chunks.push(current_chunk); + current_chunk = Vec::new(); + current_size = 0; + } + + current_chunk.push(item.clone()); + current_size += item_size; + } + + if !current_chunk.is_empty() { + chunks.push(current_chunk); + } + + chunks + } + + fn create_chunk_message_raw(&self, event_message: &EventMessage, chunk: &[Value]) -> String { + let chunk_message = EventMessage { + event_name: event_message.event_name.clone(), + event_data: Value::Array(chunk.to_vec()), + network: 
event_message.network.clone(), + }; + + serde_json::to_string(&chunk_message).unwrap() + } + + fn create_chunk_message_json(&self, event_message: &EventMessage, chunk: &[Value]) -> Value { + let chunk_message = EventMessage { + event_name: event_message.event_name.clone(), + event_data: Value::Array(chunk.to_vec()), + network: event_message.network.clone(), + }; + + serde_json::to_value(&chunk_message).unwrap() + } + + fn generate_publish_message_id( + &self, + id: &str, + index: usize, + prefix: &Option, + ) -> String { + format!( + "rindexer_stream__{}-{}-chunk-{}", + prefix.as_ref().unwrap_or(&"".to_string()), + id.to_lowercase(), + index + ) + } + + fn filter_chunk_event_data_by_conditions( + &self, + events: &[StreamEvent], + event_message: &EventMessage, + chunk: &[Value], + ) -> Vec { + let stream_event = events + .iter() + .find(|e| e.event_name == event_message.event_name) + .expect("Failed to find stream event - should never happen please raise an issue"); + + let filtered_chunk: Vec = chunk + .iter() + .filter(|event_data| { + if let Some(conditions) = &stream_event.conditions { + filter_event_data_by_conditions(event_data, conditions) + } else { + true + } + }) + .cloned() + .collect(); + + filtered_chunk + } + + fn sns_stream_tasks( + &self, + config: &SNSStreamTopicConfig, + client: Arc, + id: &str, + event_message: &EventMessage, + chunks: Arc>>, + ) -> StreamPublishes { + let tasks: Vec<_> = chunks + .iter() + .enumerate() + .map(|(index, chunk)| { + let filtered_chunk: Vec = self.filter_chunk_event_data_by_conditions( + &config.events, + event_message, + chunk, + ); + + let publish_message_id = + self.generate_publish_message_id(id, index, &config.prefix_id); + let client = Arc::clone(&client); + let topic_arn = config.topic_arn.clone(); + let publish_message = self.create_chunk_message_raw(event_message, &filtered_chunk); + task::spawn(async move { + let _ = + client.publish(&publish_message_id, &topic_arn, &publish_message).await?; + + 
Ok(filtered_chunk.len()) + }) + }) + .collect(); + + tasks + } + + fn webhook_stream_tasks( + &self, + config: &WebhookStreamConfig, + client: Arc, + id: &str, + event_message: &EventMessage, + chunks: Arc>>, + ) -> StreamPublishes { + let tasks: Vec<_> = chunks + .iter() + .enumerate() + .map(|(index, chunk)| { + let filtered_chunk: Vec = self.filter_chunk_event_data_by_conditions( + &config.events, + event_message, + chunk, + ); + + let publish_message_id = self.generate_publish_message_id(id, index, &None); + let endpoint = config.endpoint.clone(); + let shared_secret = config.shared_secret.clone(); + let client = Arc::clone(&client); + let publish_message = + self.create_chunk_message_json(event_message, &filtered_chunk); + task::spawn(async move { + client + .publish(&publish_message_id, &endpoint, &shared_secret, &publish_message) + .await?; + + Ok(filtered_chunk.len()) + }) + }) + .collect(); + + tasks + } + + fn rabbitmq_stream_tasks( + &self, + config: &RabbitMQStreamQueueConfig, + client: Arc, + id: &str, + event_message: &EventMessage, + chunks: Arc>>, + ) -> StreamPublishes { + let tasks: Vec<_> = chunks + .iter() + .enumerate() + .map(|(index, chunk)| { + let filtered_chunk: Vec = self.filter_chunk_event_data_by_conditions( + &config.events, + event_message, + chunk, + ); + + let publish_message_id = self.generate_publish_message_id(id, index, &None); + let client = Arc::clone(&client); + let exchange = config.exchange.clone(); + let exchange_type = config.exchange_type.clone(); + let routing_key = config.routing_key.clone(); + let publish_message = + self.create_chunk_message_json(event_message, &filtered_chunk); + + task::spawn(async move { + client + .publish( + &publish_message_id, + &exchange, + &exchange_type, + &routing_key, + &publish_message, + ) + .await?; + Ok(filtered_chunk.len()) + }) + }) + .collect(); + tasks + } + + fn kafka_stream_tasks( + &self, + config: &KafkaStreamQueueConfig, + client: Arc, + id: &str, + event_message: 
&EventMessage, + chunks: Arc>>, + ) -> StreamPublishes { + let tasks: Vec<_> = chunks + .iter() + .enumerate() + .map(|(index, chunk)| { + let filtered_chunk: Vec = self.filter_chunk_event_data_by_conditions( + &config.events, + event_message, + chunk, + ); + + let publish_message_id = self.generate_publish_message_id(id, index, &None); + let client = Arc::clone(&client); + let exchange = config.topic.clone(); + let routing_key = config.key.clone(); + let publish_message = + self.create_chunk_message_json(event_message, &filtered_chunk); + task::spawn(async move { + client + .publish(&publish_message_id, &exchange, &routing_key, &publish_message) + .await?; + Ok(filtered_chunk.len()) + }) + }) + .collect(); + tasks + } + + pub async fn stream( + &self, + id: String, + event_message: &EventMessage, + index_event_in_order: bool, + ) -> Result { + if !self.has_any_streams() { + return Ok(0); + } + + // will always have something even if the event has no parameters due to the tx_information + if let Value::Array(data_array) = &event_message.event_data { + let chunks = Arc::new(self.chunk_data(data_array)); + let mut streams: Vec = Vec::new(); + + if let Some(sns) = &self.sns { + for config in &sns.config { + if config.events.iter().any(|e| e.event_name == event_message.event_name) && + config.networks.contains(&event_message.network) + { + streams.push(self.sns_stream_tasks( + config, + Arc::clone(&sns.client), + &id, + event_message, + Arc::clone(&chunks), + )); + } + } + }; + + if let Some(webhook) = &self.webhook { + for config in &webhook.config { + if config.events.iter().any(|e| e.event_name == event_message.event_name) && + config.networks.contains(&event_message.network) + { + streams.push(self.webhook_stream_tasks( + config, + Arc::clone(&webhook.client), + &id, + event_message, + Arc::clone(&chunks), + )); + } + } + } + + if let Some(rabbitmq) = &self.rabbitmq { + for config in &rabbitmq.config.exchanges { + if config.events.iter().any(|e| e.event_name == 
event_message.event_name) && + config.networks.contains(&event_message.network) + { + streams.push(self.rabbitmq_stream_tasks( + config, + Arc::clone(&rabbitmq.client), + &id, + event_message, + Arc::clone(&chunks), + )); + } + } + } + + if let Some(kafka) = &self.kafka { + for config in &kafka.config.topics { + if config.events.iter().any(|e| e.event_name == event_message.event_name) && + config.networks.contains(&event_message.network) + { + streams.push(self.kafka_stream_tasks( + config, + Arc::clone(&kafka.client), + &id, + event_message, + Arc::clone(&chunks), + )); + } + } + } + + let mut streamed_total = 0; + + if index_event_in_order { + for stream in streams { + for task in stream { + match task.await { + Ok(Ok(streamed)) => { + streamed_total += streamed; + } + Ok(Err(e)) => return Err(e), + Err(e) => return Err(StreamError::JoinError(e)), + } + } + } + } else { + let tasks: Vec<_> = streams.into_iter().flatten().collect(); + let results = join_all(tasks).await; + for result in results { + match result { + Ok(Ok(streamed)) => { + streamed_total += streamed; + } + Ok(Err(e)) => return Err(e), + Err(e) => return Err(StreamError::JoinError(e)), + } + } + } + + Ok(streamed_total) + } else { + unreachable!("Event data should be an array"); + } + } +} diff --git a/core/src/streams/kafka.rs b/core/src/streams/kafka.rs new file mode 100644 index 00000000..a5a553e1 --- /dev/null +++ b/core/src/streams/kafka.rs @@ -0,0 +1,79 @@ +use std::time::Duration; + +use rdkafka::{ + config::ClientConfig, + message::{Header, OwnedHeaders}, + producer::{FutureProducer, FutureRecord}, + util::Timeout, +}; +use serde_json::Value; +use thiserror::Error; + +use crate::{manifest::stream::KafkaStreamConfig, streams::STREAM_MESSAGE_ID_KEY}; + +#[derive(Error, Debug)] +pub enum KafkaError { + #[error("Kafka error: {0}")] + RdkafkaError(#[from] rdkafka::error::KafkaError), + + #[error("Could not parse message: {0}")] + CouldNotParseMessage(#[from] serde_json::Error), +} + 
+#[derive(Clone)] +pub struct Kafka { + producer: FutureProducer, +} + +impl Kafka { + pub async fn new(config: &KafkaStreamConfig) -> Result { + let servers_list = config.brokers.join(","); + let mut client_config = ClientConfig::new(); + + client_config + .set("bootstrap.servers", &servers_list) + .set("security.protocol", &config.security_protocol) + .set("acks", &config.acks); + + if let Some(ref sasl_mechanisms) = config.sasl_mechanisms { + client_config.set("sasl.mechanisms", sasl_mechanisms); + } + if let Some(ref sasl_username) = config.sasl_username { + client_config.set("sasl.username", sasl_username); + } + if let Some(ref sasl_password) = config.sasl_password { + client_config.set("sasl.password", sasl_password); + } + + let producer: FutureProducer = client_config.create().map_err(KafkaError::RdkafkaError)?; + + Ok(Self { producer }) + } + + pub async fn publish( + &self, + id: &str, + topic: &str, + key: &Option, + message: &Value, + ) -> Result<(), KafkaError> { + let message_body = serde_json::to_vec(message)?; + + let record = if key.is_some() { + FutureRecord::to(topic).key(key.as_ref().unwrap()).payload(&message_body).headers( + OwnedHeaders::new().insert(Header { key: STREAM_MESSAGE_ID_KEY, value: Some(id) }), + ) + } else { + FutureRecord::to(topic).payload(&message_body).headers( + OwnedHeaders::new().insert(Header { key: STREAM_MESSAGE_ID_KEY, value: Some(id) }), + ) + }; + + self.producer + .send(record, Timeout::After(Duration::from_secs(0))) + .await + .map_err(|(e, _)| KafkaError::RdkafkaError(e))?; + + Ok(()) + } +} diff --git a/core/src/streams/mod.rs b/core/src/streams/mod.rs new file mode 100644 index 00000000..3d21a663 --- /dev/null +++ b/core/src/streams/mod.rs @@ -0,0 +1,15 @@ +mod sns; +pub use sns::SNS; + +mod webhook; +pub use webhook::{Webhook, WebhookError}; + +mod rabbitmq; +pub use rabbitmq::{RabbitMQ, RabbitMQError}; + +mod kafka; + +mod clients; +pub use clients::StreamsClients; + +pub const STREAM_MESSAGE_ID_KEY: &str = 
"x-rindexer-id"; diff --git a/core/src/streams/rabbitmq.rs b/core/src/streams/rabbitmq.rs new file mode 100644 index 00000000..ae028d2d --- /dev/null +++ b/core/src/streams/rabbitmq.rs @@ -0,0 +1,72 @@ +use deadpool::managed::PoolError; +use deadpool_lapin::{Manager, Pool}; +use lapin::{options::*, types::FieldTable, BasicProperties, ConnectionProperties, ExchangeKind}; +use serde_json::Value; + +use crate::manifest::stream::ExchangeKindWrapper; + +#[derive(thiserror::Error, Debug)] +pub enum RabbitMQError { + #[error("Request error: {0}")] + LapinError(#[from] lapin::Error), + + #[error("Could not parse message: {0}")] + CouldNotParseMessage(#[from] serde_json::Error), + + #[error("Connection pool error")] + PoolError(#[from] PoolError), +} + +#[derive(Debug, Clone)] +pub struct RabbitMQ { + pool: Pool, +} + +impl RabbitMQ { + pub async fn new(uri: &str) -> Self { + let manager = Manager::new(uri, ConnectionProperties::default()); + let pool = Pool::builder(manager).max_size(16).build().expect("Failed to create pool"); + + Self { pool } + } + + pub async fn publish( + &self, + id: &str, + exchange: &str, + exchange_type: &ExchangeKindWrapper, + routing_key: &Option, + message: &Value, + ) -> Result<(), RabbitMQError> { + let message_body = serde_json::to_vec(message)?; + + let conn = self.pool.get().await?; + let channel = conn.create_channel().await?; + + channel + .exchange_declare( + exchange, + exchange_type.0.clone(), + ExchangeDeclareOptions::default(), + FieldTable::default(), + ) + .await?; + + channel + .basic_publish( + exchange, + match exchange_type.0 { + ExchangeKind::Fanout => "", // Fanout exchange ignores the routing key + _ => routing_key.as_ref().expect("Routing key should be defined"), + }, + BasicPublishOptions::default(), + &message_body, + BasicProperties::default() + .with_message_id(id.into()) + .with_content_type("application/json".into()), + ) + .await?; + + Ok(()) + } +} diff --git a/core/src/streams/sns.rs b/core/src/streams/sns.rs new 
file mode 100644 index 00000000..0c33d97d --- /dev/null +++ b/core/src/streams/sns.rs @@ -0,0 +1,74 @@ +use aws_config::{meta::region::RegionProviderChain, BehaviorVersion, Region}; +use aws_sdk_sns::{ + config::{http::HttpResponse, Credentials}, + error::SdkError, + operation::publish::{PublishError, PublishOutput}, + Client, +}; +use tracing::{error, info}; + +use crate::types::aws_config::AwsConfig; + +#[derive(Debug, Clone)] +pub struct SNS { + client: Client, +} + +impl SNS { + pub async fn new(config: &AwsConfig) -> Self { + let region_provider = RegionProviderChain::first_try(Region::new(config.region.clone())); + + let credentials_provider = Credentials::new( + &config.access_key, + &config.secret_key, + config.session_token.clone(), + None, + "manual", + ); + + let config = aws_config::defaults(BehaviorVersion::latest()) + .region(region_provider) + .credentials_provider(credentials_provider) + .load() + .await; + let client = Client::new(&config); + + // Test the connection by listing SNS topics + match client.list_topics().send().await { + Ok(_) => { + info!("Successfully connected to SNS."); + } + Err(error) => { + error!("Error connecting to SNS: {}", error); + panic!("Error connecting to SNS: {}", error); + } + } + + Self { client } + } + + pub async fn publish( + &self, + id: &str, + topic_arn: &str, + message: &str, + ) -> Result> { + if topic_arn.contains(".fifo") { + let result = self + .client + .publish() + .message(message) + .topic_arn(topic_arn) + // fifo needs to have group id and deduplication id + .message_group_id("default") + .message_deduplication_id(id) + .send() + .await?; + + Ok(result) + } else { + let result = self.client.publish().topic_arn(topic_arn).message(message).send().await?; + Ok(result) + } + } +} diff --git a/core/src/streams/webhook.rs b/core/src/streams/webhook.rs new file mode 100644 index 00000000..c6722a27 --- /dev/null +++ b/core/src/streams/webhook.rs @@ -0,0 +1,51 @@ +use reqwest::Client; +use serde_json::Value; 
+ +use crate::streams::STREAM_MESSAGE_ID_KEY; + +#[derive(thiserror::Error, Debug)] +pub enum WebhookError { + #[error("Request error: {0}")] + RequestError(#[from] reqwest::Error), + + #[error("Webhook error: {0}")] + WebhookError(String), +} + +#[derive(Debug, Clone)] +pub struct Webhook { + client: Client, +} + +impl Webhook { + pub fn new() -> Self { + Self { client: Client::new() } + } + + pub async fn publish( + &self, + id: &str, + endpoint: &str, + shared_secret: &str, + message: &Value, + ) -> Result<(), WebhookError> { + let response = self + .client + .post(endpoint) + .header("Content-Type", "application/json") + .header("x-rindexer-shared-secret", shared_secret) + .header(STREAM_MESSAGE_ID_KEY, id) + .json(message) + .send() + .await?; + + if response.status().is_success() { + Ok(()) + } else { + Err(WebhookError::WebhookError(format!( + "Failed to send webhook: {}", + response.status() + ))) + } + } +} diff --git a/core/src/types/aws_config.rs b/core/src/types/aws_config.rs new file mode 100644 index 00000000..964bc16b --- /dev/null +++ b/core/src/types/aws_config.rs @@ -0,0 +1,11 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct AwsConfig { + pub region: String, + pub access_key: String, + pub secret_key: String, + + #[serde(default, skip_serializing_if = "Option::is_none")] + pub session_token: Option, +} diff --git a/core/src/types/mod.rs b/core/src/types/mod.rs index 9de50d43..4b855e10 100644 --- a/core/src/types/mod.rs +++ b/core/src/types/mod.rs @@ -1 +1,2 @@ +pub mod aws_config; pub mod code; diff --git a/documentation/docs/pages/docs/changelog.mdx b/documentation/docs/pages/docs/changelog.mdx index 7420d474..ba5fa5d6 100644 --- a/documentation/docs/pages/docs/changelog.mdx +++ b/documentation/docs/pages/docs/changelog.mdx @@ -6,6 +6,15 @@ ### Features ------------------------------------------------- +- feat: support chatbots on telegram - 
https://rindexer.xyz/docs/start-building/chatbots/telegram +- feat: support chatbots on discord - https://rindexer.xyz/docs/start-building/chatbots/discord +- feat: support chatbots on slack - https://rindexer.xyz/docs/start-building/chatbots/slack +- feat: support streams with kafka - https://rindexer.xyz/docs/start-building/streams/kafka +- feat: support streams with rabbitmq - https://rindexer.xyz/docs/start-building/streams/rabbitmq +- feat: support streams with webhooks - https://rindexer.xyz/docs/start-building/streams/webhooks +- feat: support streams with sns/sqs - https://rindexer.xyz/docs/start-building/streams/sns +- feat: create .gitignore file for new projects + ### Bug fixes ------------------------------------------------- diff --git a/documentation/docs/pages/docs/start-building/chatbots/discord.mdx b/documentation/docs/pages/docs/start-building/chatbots/discord.mdx new file mode 100644 index 00000000..d0ccf664 --- /dev/null +++ b/documentation/docs/pages/docs/start-building/chatbots/discord.mdx @@ -0,0 +1,448 @@ +# Discord + +Discord is one of the most popular chat platforms, and is great to build bots and notifications when things happen on chain. + +## Setup a bot on discord + +1. go to https://discordapp.com/developers/applications/ +2. If you already have a bot created, click it in the list. If you don’t have any discord bots, click the “New Application” button. +3. Give Your Bot a Name (you can then after add a description and icon for it) +4. Your next step is to go over the menu on the left side of the screen and click “Bot”. It’s the icon that looks like a little puzzle piece. +5. Click the “Add Bot” button and press "Yes, do it!" +6. You see a section called “Token” you need to generate your bot token and save it somewhere safe for later. +7. In order to add your bot to your Discord Server, you’ll need to navigate back to the “OAuth2” tab. +8. Once there, scroll down to the “Oauth2 URL Generator” section. 
In the “Scopes” section, you’ll want to select the “bot” checkbox. +9. You’ll notice that a URL appeared as soon as you clicked “bot” — this will be your URL for adding your bot to a server. +10. Scroll down some more to the “Bot Permissions” section. This is where you choose what permissions to give your bot, and what it can and can’t do. +11. You want to do tick "Send messages" as rindexer does not read any messages from the server. +12. After you’ve selected your permissions, scroll up a little bit and look at the URL that was generated and copy and go to that url in your browser. +13. Here you’ll want to select the server you’re adding your bot to and press “Continue” +14. It then confirm permissions make sure you have ticked "Send Messages" and press "Authorize" +14. You are now done you will need the bot token to setup the discord bot with rindexer + + +## Configure rindexer + +`discord` property accepts an array allowing you to split up the channels any way you wish. + +## Example + +```yaml +name: RocketPoolETHIndexer +description: My first rindexer project +repository: https://github.com/joshstevens19/rindexer +project_type: no-code +networks: +- name: ethereum + chain_id: 1 + rpc: https://mainnet.gateway.tenderly.co +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + discord: // [!code focus] + - bot_token: ${DISCORD_BOT_TOKEN} // [!code focus] + channel_id: 123456789012345678 // [!code focus] + networks: // [!code focus] + - ethereum // [!code focus] + messages: + - event_name: Transfer // [!code focus] + # conditions are optional // [!code focus] + conditions: // [!code focus] + - "from": "0x0338ce5020c447f7e668dc2ef778025ce3982662||0x0338ce5020c447f7e668dc2ef778025ce398266u" // [!code focus] + - "value": ">=10||<=2000000000000000000" 
// [!code focus] + template_inline: "*New RETH Transfer Event* // [!code focus] + + from: {{from}} // [!code focus] + + to: {{to}} // [!code focus] + + amount: {{format_value(value, 18)}} // [!code focus] + + RETH contract: {{transaction_information.address}} // [!code focus] + + [etherscan](https://etherscan.io/tx/{{transaction_information.transaction_hash}}) // [!code focus] + " // [!code focus] +``` + +## bot_token + +This is your discord bot token which you generate in the Discord developer portal (see the setup steps above). + +:::info +We advise you to put this in an environment variable. +::: + +```yaml +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + discord: // [!code focus] + - bot_token: ${DISCORD_BOT_TOKEN} // [!code focus] +``` + +## channel_id + +You have to add your bot to a channel to use it, so this is the channel ID you wish the bot to send messages to. + +```yaml +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + discord: // [!code focus] + - bot_token: ${DISCORD_BOT_TOKEN} + channel_id: 123456789012345678 // [!code focus] +``` + +## networks + +This is an array of networks you want to send messages to this discord channel. + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + discord: // [!code focus] + - bot_token: ${DISCORD_BOT_TOKEN} + channel_id: 123456789012345678 + networks: // [!code focus] + - ethereum // [!code focus] +``` + +## messages + +This is an array of messages you want to send to this discord channel. It is an array as you can define many different +messages to send to this channel with different conditions. + +### event_name + +This is the name of the event you want to send a message for, must match the ABI event name. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + discord: // [!code focus] + - bot_token: ${DISCORD_BOT_TOKEN} + channel_id: 123456789012345678 + networks: + - ethereum + messages: + - event_name: Transfer // [!code focus] +``` + +### conditions + +This accepts an array of conditions you want to apply to the event data before sending a message to this discord channel. + +:::info +This is optional, if you do not provide any conditions all the events will be sent to this discord channel. +::: + +You may want to filter on the message based on the event data, if the event data has not got an index on the on the +solidity event you can not filter it over the logs. The `conditions` filter is here to help you with this, +based on your ABI you can filter on the event data. + +rindexer has enabled a special syntax which allows you to define on your ABI fields what you want to filter on. + +1. `>` - higher then (for numbers only) +2. `<` - lower then (for numbers only) +3. `=` - equals +4. 
`>=` - higher then or equals (for numbers only) +5. `<=` - lower then or equals (for numbers only) +6. `||` - or +7. `&&` - and + +So lets look at an example lets say i only want to get transfer events which are higher then `2000000000000000000` RETH wei + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + discord: // [!code focus] + - bot_token: ${DISCORD_BOT_TOKEN} + channel_id: 123456789012345678 + networks: + - ethereum + messages: + - event_name: Transfer // [!code focus] + conditions: // [!code focus] + - "value": ">=2000000000000000000" // [!code focus] +``` + +We use the ABI input name `value` to filter on the value field, you can find these names in the ABI file. + +```json +{ + "anonymous":false, + "inputs":[ + { + "indexed":true, + "internalType":"address", + "name":"from", + "type":"address" + }, + { + "indexed":true, + "internalType":"address", + "name":"to", + "type":"address" + }, + { + "indexed":false, + "internalType":"uint256", + "name":"value", // [!code focus] + "type":"uint256" + } + ], + "name":"Transfer", + "type":"event" +} +``` + +You can use the `||` or `&&` to combine conditions. + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + discord: // [!code focus] + - bot_token: ${DISCORD_BOT_TOKEN} + channel_id: 123456789012345678 + networks: + - ethereum + messages: + - event_name: Transfer + conditions: // [!code focus] + - "value": ">=2000000000000000000 && value <=4000000000000000000" // [!code focus] +``` + +You can use the `=` to filter on other aspects like the `from` or `to` address. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + discord: // [!code focus] + - bot_token: ${DISCORD_BOT_TOKEN} + channel_id: 123456789012345678 + networks: + - ethereum + messages: + - event_name: Transfer + conditions: // [!code focus] + - "from": "0x0338ce5020c447f7e668dc2ef778025ce3982662 || 0x0338ce5020c447f7e668dc2ef778025ce398266u" // [!code focus] + - "value": ">=2000000000000000000 || value <=4000000000000000000" // [!code focus] +``` + +:::info +Note we advise you to filer any `indexed` fields in the contract details in the `rindexer.yaml` file. +As these can be filtered out on the request level and not filtered out in rindexer itself. +You can read more about it [here](/docs/start-building/yaml-config/contracts#indexed_1-indexed_2-indexed_3). +::: + +If you have a tuple and you want to get that value you just use the object notation. 
+ +For example lets say we want to only get the events for `profileId` from the `quoteParams` tuple which equals `1`: + +```json +{ + "anonymous": false, + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "profileId", // [!code focus] + "type": "uint256" + }, + ... + ], + "indexed": false, + "internalType": "struct Types.QuoteParams", + "name": "quoteParams", // [!code focus] + "type": "tuple" + }, + ... + ], + "name": "QuoteCreated", // [!code focus] + "type": "event" +} +``` + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + discord: // [!code focus] + - bot_token: ${DISCORD_BOT_TOKEN} + channel_id: 123456789012345678 + networks: + - ethereum + messages: + - event_name: Transfer + conditions: // [!code focus] + - "quoteParams.profileId": "=1" // [!code focus] +``` + +### template_inline + +You can then write your own template inline, this is the template you want to send to the channel. +You have to use the ABI input names in object notation for example if i wanted to put value in the template +i just have to write `{{value}}` in the template and it will be replaced with the value of the event itself. 
+ +The template supports: +- bold text = \*bold text\* +- italic text = \_italic text\_ +- inline url = \[inline URL\](YOUR_URL) +- inline fixed-width code = \`inline fixed-width code\` +- pre-formatted fixed-width code block = \`\`\`pre-formatted fixed-width code block\`\`\` +- pre-formatted fixed-width known code block = \`\`\`rust pre-formatted fixed-width known code block\`\`\` +- breaks = just line break in the template + +#### transaction_information + +You also can use the `transaction_information` object to get common information about the transaction, this is the +transaction information for the event. + +```rs +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct TxInformation { + pub network: String, + // This will convert to a hex string in the template + pub address: Address, + // This will convert to a hex string in the template + pub block_hash: H256, + // This will convert to a string decimal in the template + pub block_number: U64, + // This will convert to a hex string in the template + pub transaction_hash: H256, + // This will convert to a string decimal in the template + pub log_index: U256, + // This will convert to a string decimal in the template + pub transaction_index: U64, +} +``` + +:::info +To avoid confusion `address` in `transaction_information` is the address of the contract the event was emitted from. +::: + +#### format_value + +You can use the `format_value` function to format the value of the event to a decimal value with the specified decimals. + +Lets put it all together: + +```yaml [rindexer.yaml] +... 
+ +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + discord: // [!code focus] + - bot_token: ${DISCORD_BOT_TOKEN} + channel_id: 123456789012345678 + networks: + - ethereum + messages: + - event_name: Transfer // [!code focus] + template_inline: "*New RETH Transfer Event* // [!code focus] + + from: {{from}} // [!code focus] + + to: {{to}} // [!code focus] + + amount: {{format_value(value, 18)}} // [!code focus] + + RETH contract: {{transaction_information.address}} // [!code focus] + + [etherscan](https://etherscan.io/tx/{{transaction_information.transaction_hash}}) // [!code focus] + " // [!code focus] +``` \ No newline at end of file diff --git a/documentation/docs/pages/docs/start-building/chatbots/index.mdx b/documentation/docs/pages/docs/start-building/chatbots/index.mdx new file mode 100644 index 00000000..f01ada7c --- /dev/null +++ b/documentation/docs/pages/docs/start-building/chatbots/index.mdx @@ -0,0 +1,25 @@ +# Chatbots + +:::info +rindexer Chatbots can be used without any other storage providers. It can also be used with storage providers. +::: + +rindexer has first-class support for Chatbots, which means you can use your favourite chat platform to +send messages to when events happen on chain. + +:::info +Due to rate limits and also what most people require, rindexer Chatbots will only start sending messages with a maximum +block range of 10 blocks. Chatbots are really only meant to run on live data, not historic data. +::: + +:::info +Rust projects do not get exposed to the chat clients yet but it can easily be exposed in the future. 
+::: + +Supported Chatbots providers: + +- [Telegram](/docs/start-building/chatbots/telegram) - Send messages to your Telegram chats +- [Discord](/docs/start-building/chatbots/discord) - Send messages to your Discord chats +- [Slack](/docs/start-building/chatbots/slack) - Send messages to your Slack channels + +Want any other chat provider to be supported? [Create an issue](https://github.com/joshstevens19/rindexer/issues/new) and we will look into it. \ No newline at end of file diff --git a/documentation/docs/pages/docs/start-building/chatbots/slack.mdx b/documentation/docs/pages/docs/start-building/chatbots/slack.mdx new file mode 100644 index 00000000..a5c20323 --- /dev/null +++ b/documentation/docs/pages/docs/start-building/chatbots/slack.mdx @@ -0,0 +1,446 @@ +# Slack + +Slack is one of the most popular chat platforms, and is great to build bots and notifications when things happen on chain. + +## Setup a bot on slack + +1. Go to api.slack.com, log into your workspace and click on Create an app +2. Click on From scratch and then give it a name and select your workspace. We will call ours RethTransferEvents. +3. Click on the Bots box under the Add features and functionality header +4. Click on Review scopes to add +5. Scroll down to the Bot token scopes header and add `chat:write`. These are the permissions the bot needs to write messages +6. Finally, scroll all the way up and click on Install to workspace, and Allow on the following screen. +This should now show a screen with the Bot User OAuth Token visible. Take note of this token, +since it’s the one we will be using to send messages. + +## Configure rindexer + +`slack` property accepts an array allowing you to split up the channels any way you wish. 
+ +## Example + +```yaml +name: RocketPoolETHIndexer +description: My first rindexer project +repository: https://github.com/joshstevens19/rindexer +project_type: no-code +networks: +- name: ethereum + chain_id: 1 + rpc: https://mainnet.gateway.tenderly.co +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + slack: // [!code focus] + - bot_token: ${SLACK_BOT_TOKEN} // [!code focus] + channel: "#RethTransferEvents" // [!code focus] + networks: // [!code focus] + - ethereum // [!code focus] + messages: + - event_name: Transfer // [!code focus] + # conditions are optional // [!code focus] + conditions: // [!code focus] + - "from": "0x0338ce5020c447f7e668dc2ef778025ce3982662||0x0338ce5020c447f7e668dc2ef778025ce398266u" // [!code focus] + - "value": ">=10||<=2000000000000000000" // [!code focus] + template_inline: "*New RETH Transfer Event* // [!code focus] + + from: {{from}} // [!code focus] + + to: {{to}} // [!code focus] + + amount: {{format_value(value, 18)}} // [!code focus] + + RETH contract: {{transaction_information.address}} // [!code focus] + + // [!code focus] + " // [!code focus] +``` + +## bot_token + +This is your slack bot token which you generate using @BotFather. + +:::info +We advise you to put this in a environment variables. +::: + +```yaml +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + slack: // [!code focus] + - bot_token: ${SLACK_BOT_TOKEN} // [!code focus] +``` + +## channel + +This is the channel you want to send messages to. + +:::info +The `#` must be included in the channel name. +::: + +```yaml +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + slack: // [!code focus] + - bot_token: ${SLACK_BOT_TOKEN} + channel: "#RethTransferEvents" // [!code focus] +``` + +## networks + +This is an array of networks you want to send messages to this slack channel. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + slack: // [!code focus] + - bot_token: ${SLACK_BOT_TOKEN} + channel: "#RethTransferEvents" + networks: // [!code focus] + - ethereum // [!code focus] +``` + +## messages + +This is an array of messages you want to send to this slack channel. It is an array as you can define many different +messages to send to this channel with different conditions. + +### event_name + +This is the name of the event you want to send a message for, must match the ABI event name. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + slack: // [!code focus] + - bot_token: ${SLACK_BOT_TOKEN} + channel: "#RethTransferEvents" + networks: + - ethereum + messages: + - event_name: Transfer // [!code focus] +``` + +### conditions + +This accepts an array of conditions you want to apply to the event data before sending a message to this slack channel. + +:::info +This is optional, if you do not provide any conditions all the events will be sent to this slack channel. 
+::: + +You may want to filter on the message based on the event data, if the event data has not got an index on the on the +solidity event you can not filter it over the logs. The `conditions` filter is here to help you with this, +based on your ABI you can filter on the event data. + +rindexer has enabled a special syntax which allows you to define on your ABI fields what you want to filter on. + +1. `>` - higher then (for numbers only) +2. `<` - lower then (for numbers only) +3. `=` - equals +4. `>=` - higher then or equals (for numbers only) +5. `<=` - lower then or equals (for numbers only) +6. `||` - or +7. `&&` - and + +So lets look at an example lets say i only want to get transfer events which are higher then `2000000000000000000` RETH wei + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + slack: // [!code focus] + - bot_token: ${SLACK_BOT_TOKEN} + channel: "#RethTransferEvents" + networks: + - ethereum + messages: + - event_name: Transfer // [!code focus] + conditions: // [!code focus] + - "value": ">=2000000000000000000" // [!code focus] +``` + +We use the ABI input name `value` to filter on the value field, you can find these names in the ABI file. + +```json +{ + "anonymous":false, + "inputs":[ + { + "indexed":true, + "internalType":"address", + "name":"from", + "type":"address" + }, + { + "indexed":true, + "internalType":"address", + "name":"to", + "type":"address" + }, + { + "indexed":false, + "internalType":"uint256", + "name":"value", // [!code focus] + "type":"uint256" + } + ], + "name":"Transfer", + "type":"event" +} +``` + +You can use the `||` or `&&` to combine conditions. + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + slack: // [!code focus] + - bot_token: ${SLACK_BOT_TOKEN} + channel: "#RethTransferEvents" + networks: + - ethereum + messages: + - event_name: Transfer + conditions: // [!code focus] + - "value": ">=2000000000000000000 && value <=4000000000000000000" // [!code focus] +``` + +You can use the `=` to filter on other aspects like the `from` or `to` address. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + slack: // [!code focus] + - bot_token: ${SLACK_BOT_TOKEN} + channel: "#RethTransferEvents" + networks: + - ethereum + messages: + - event_name: Transfer + conditions: // [!code focus] + - "from": "0x0338ce5020c447f7e668dc2ef778025ce3982662 || 0x0338ce5020c447f7e668dc2ef778025ce398266u" // [!code focus] + - "value": ">=2000000000000000000 || value <=4000000000000000000" // [!code focus] +``` + +:::info +Note we advise you to filer any `indexed` fields in the contract details in the `rindexer.yaml` file. +As these can be filtered out on the request level and not filtered out in rindexer itself. +You can read more about it [here](/docs/start-building/yaml-config/contracts#indexed_1-indexed_2-indexed_3). +::: + +If you have a tuple and you want to get that value you just use the object notation. 
+ +For example lets say we want to only get the events for `profileId` from the `quoteParams` tuple which equals `1`: + +```json +{ + "anonymous": false, + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "profileId", // [!code focus] + "type": "uint256" + }, + ... + ], + "indexed": false, + "internalType": "struct Types.QuoteParams", + "name": "quoteParams", // [!code focus] + "type": "tuple" + }, + ... + ], + "name": "QuoteCreated", // [!code focus] + "type": "event" +} +``` + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + slack: // [!code focus] + - bot_token: ${SLACK_BOT_TOKEN} + channel: "#RethTransferEvents" + networks: + - ethereum + messages: + - event_name: Transfer + conditions: // [!code focus] + - "quoteParams.profileId": "=1" // [!code focus] +``` + +### template_inline + +You can then write your own template inline, this is the template you want to send to the channel. +You have to use the ABI input names in object notation for example if i wanted to put value in the template +i just have to write `{{value}}` in the template and it will be replaced with the value of the event itself. 
+ +The template supports: +- bold text = \*bold text\* +- italic text = \_italic text\_ +- strikethrough text = \~strikethrough text\~ +- block quote = \> block quote +- inline url = \<YOUR_URL|inline URL\> +- inline fixed-width code = \`inline fixed-width code\` +- pre-formatted fixed-width code block = \`\`\`pre-formatted fixed-width code block\`\`\` +- pre-formatted fixed-width known code block = \`\`\`rust pre-formatted fixed-width known code block\`\`\` +- breaks = just line break in the template + +#### transaction_information + +You can also use the `transaction_information` object to get common information about the transaction, this is the +transaction information for the event. + +```rs +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct TxInformation { + pub network: String, + // This will convert to a hex string in the template + pub address: Address, + // This will convert to a hex string in the template + pub block_hash: H256, + // This will convert to a string decimal in the template + pub block_number: U64, + // This will convert to a hex string in the template + pub transaction_hash: H256, + // This will convert to a string decimal in the template + pub log_index: U256, + // This will convert to a string decimal in the template + pub transaction_index: U64, +} +``` + +:::info +To avoid confusion `address` in `transaction_information` is the address of the contract the event was emitted from. +::: + +#### format_value + +You can use the `format_value` function to format the value of the event to a decimal value with the specified decimals. + +Let's put it all together: + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + slack: // [!code focus] + - bot_token: ${SLACK_BOT_TOKEN} + channel: "#RethTransferEvents" + networks: + - ethereum + messages: + - event_name: Transfer // [!code focus] + template_inline: "*New RETH Transfer Event* // [!code focus] + + from: {{from}} // [!code focus] + + to: {{to}} // [!code focus] + + amount: {{format_value(value, 18)}} // [!code focus] + + RETH contract: {{transaction_information.address}} // [!code focus] + + // [!code focus] + " // [!code focus] +``` \ No newline at end of file diff --git a/documentation/docs/pages/docs/start-building/chatbots/telegram.mdx b/documentation/docs/pages/docs/start-building/chatbots/telegram.mdx new file mode 100644 index 00000000..10254ac0 --- /dev/null +++ b/documentation/docs/pages/docs/start-building/chatbots/telegram.mdx @@ -0,0 +1,439 @@ +# Telegram + +Telegram is one of the most popular chat platforms, and is great to build bots and notifications when things happen on chain. + +## Setup a bot on telegram + +You have to use telegram itself to setup a bot: + +1. Search BotFather on Telegram. +2. Type /start to get started. +3. Type /newbot to get a bot. +4. Enter your Bot name and unique Username, which should end with the bot. +5. Then, you will get your Bot token. (keep this safe you need it shortly) + +## Configure rindexer + +`telegram` property accepts an array allowing you to split up the chats any way you wish. 
+ +## Example + +```yaml +name: RocketPoolETHIndexer +description: My first rindexer project +repository: https://github.com/joshstevens19/rindexer +project_type: no-code +networks: +- name: ethereum + chain_id: 1 + rpc: https://mainnet.gateway.tenderly.co +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + telegram: // [!code focus] + - bot_token: ${TELEGRAM_BOT_TOKEN} // [!code focus] + chat_id: -4223616270 // [!code focus] + networks: // [!code focus] + - ethereum // [!code focus] + messages: + - event_name: Transfer // [!code focus] + # conditions are optional // [!code focus] + conditions: // [!code focus] + - "from": "0x0338ce5020c447f7e668dc2ef778025ce3982662||0x0338ce5020c447f7e668dc2ef778025ce398266u" // [!code focus] + - "value": ">=10||<=2000000000000000000" // [!code focus] + template_inline: "*New RETH Transfer Event* // [!code focus] + + from: {{from}} // [!code focus] + + to: {{to}} // [!code focus] + + amount: {{format_value(value, 18)}} // [!code focus] + + RETH contract: {{transaction_information.address}} // [!code focus] + + [etherscan](https://etherscan.io/tx/{{transaction_information.transaction_hash}}) // [!code focus] + " // [!code focus] +``` + +## bot_token + +This is your telegram bot token which you generate using @BotFather. + +:::info +We advise you to put this in a environment variables. +::: + +```yaml +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + telegram: // [!code focus] + - bot_token: ${TELEGRAM_BOT_TOKEN} // [!code focus] +``` + +## chat_id + +You have to add your bot to chats to use it, so this is the chat ID you wish the bot to send messages to. + +```yaml +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + telegram: // [!code focus] + - bot_token: ${TELEGRAM_BOT_TOKEN} + chat_id: -4223616270 // [!code focus] +``` + +## networks + +This is an array of networks you want to send messages to this telegram chat. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + telegram: // [!code focus] + - bot_token: ${TELEGRAM_BOT_TOKEN} + chat_id: -4223616270 + networks: // [!code focus] + - ethereum // [!code focus] +``` + +## messages

This is an array of messages you want to send to this telegram chat. It is an array as you can define many different +messages to send to this chat with different conditions. + +### event_name + +This is the name of the event you want to send a message for; it must match the ABI event name. + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + telegram: // [!code focus] + - bot_token: ${TELEGRAM_BOT_TOKEN} + chat_id: -4223616270 + networks: + - ethereum + messages: + - event_name: Transfer // [!code focus] +``` + +### conditions + +This accepts an array of conditions you want to apply to the event data before sending a message to this telegram chat. + +:::info +This is optional, if you do not provide any conditions all the events will be sent to this telegram chat. +::: + +You may want to filter on the message based on the event data, if the event data has not got an index on the on the +solidity event you can not filter it over the logs. The `conditions` filter is here to help you with this, +based on your ABI you can filter on the event data. + +rindexer has enabled a special syntax which allows you to define on your ABI fields what you want to filter on. + +1. `>` - higher then (for numbers only) +2. `<` - lower then (for numbers only) +3. `=` - equals +4. `>=` - higher then or equals (for numbers only) +5. `<=` - lower then or equals (for numbers only) +6. `||` - or +7. `&&` - and + +So lets look at an example lets say i only want to get transfer events which are higher then `2000000000000000000` RETH wei + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + telegram: // [!code focus] + - bot_token: ${TELEGRAM_BOT_TOKEN} + chat_id: -4223616270 + networks: + - ethereum + messages: + - event_name: Transfer // [!code focus] + conditions: // [!code focus] + - "value": ">=2000000000000000000" // [!code focus] +``` + +We use the ABI input name `value` to filter on the value field, you can find these names in the ABI file. + +```json +{ + "anonymous":false, + "inputs":[ + { + "indexed":true, + "internalType":"address", + "name":"from", + "type":"address" + }, + { + "indexed":true, + "internalType":"address", + "name":"to", + "type":"address" + }, + { + "indexed":false, + "internalType":"uint256", + "name":"value", // [!code focus] + "type":"uint256" + } + ], + "name":"Transfer", + "type":"event" +} +``` + +You can use the `||` or `&&` to combine conditions. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + telegram: // [!code focus] + - bot_token: ${TELEGRAM_BOT_TOKEN} + chat_id: -4223616270 + networks: + - ethereum + messages: + - event_name: Transfer + conditions: // [!code focus] + - "value": ">=2000000000000000000 && value <=4000000000000000000" // [!code focus] +``` + +You can use the `=` to filter on other aspects like the `from` or `to` address. + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + telegram: // [!code focus] + - bot_token: ${TELEGRAM_BOT_TOKEN} + chat_id: -4223616270 + networks: + - ethereum + messages: + - event_name: Transfer + conditions: // [!code focus] + - "from": "0x0338ce5020c447f7e668dc2ef778025ce3982662 || 0x0338ce5020c447f7e668dc2ef778025ce398266u" // [!code focus] + - "value": ">=2000000000000000000 || value <=4000000000000000000" // [!code focus] +``` + +:::info +Note we advise you to filer any `indexed` fields in the contract details in the `rindexer.yaml` file. +As these can be filtered out on the request level and not filtered out in rindexer itself. +You can read more about it [here](/docs/start-building/yaml-config/contracts#indexed_1-indexed_2-indexed_3). +::: + +If you have a tuple and you want to get that value you just use the object notation. + +For example lets say we want to only get the events for `profileId` from the `quoteParams` tuple which equals `1`: + +```json +{ + "anonymous": false, + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "profileId", // [!code focus] + "type": "uint256" + }, + ... + ], + "indexed": false, + "internalType": "struct Types.QuoteParams", + "name": "quoteParams", // [!code focus] + "type": "tuple" + }, + ... + ], + "name": "QuoteCreated", // [!code focus] + "type": "event" +} +``` + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + telegram: // [!code focus] + - bot_token: ${TELEGRAM_BOT_TOKEN} + chat_id: -4223616270 + networks: + - ethereum + messages: + - event_name: Transfer + conditions: // [!code focus] + - "quoteParams.profileId": "=1" // [!code focus] +``` + +### template_inline + +You can then write your own template inline, this is the template you want to send to the chat. +You have to use the ABI input names in object notation for example if i wanted to put value in the template +i just have to write `{{value}}` in the template and it will be replaced with the value of the event itself. + +The template supports: +- bold text = \*bold text\* +- italic text = \_italic text\_ +- inline url = \[inline URL\](YOUR_URL) +- inline fixed-width code = \`inline fixed-width code\` +- pre-formatted fixed-width code block = \`\`\`pre-formatted fixed-width code block\`\`\` +- pre-formatted fixed-width known code block = \`\`\`rust pre-formatted fixed-width known code block\`\`\` +- breaks = just line break in the template + +#### transaction_information + +You also can use the `transaction_information` object to get common information about the transaction, this is the +transaction information for the event. 
+ +```rs +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct TxInformation { + pub network: String, + // This will convert to a hex string in the template + pub address: Address, + // This will convert to a hex string in the template + pub block_hash: H256, + // This will convert to a string decimal in the template + pub block_number: U64, + // This will convert to a hex string in the template + pub transaction_hash: H256, + // This will convert to a string decimal in the template + pub log_index: U256, + // This will convert to a string decimal in the template + pub transaction_index: U64, +} +``` + +:::info +To avoid confusion `address` in `transaction_information` is the address of the contract the event was emitted from. +::: + +#### format_value + +You can use the `format_value` function to format the value of the event to a decimal value with the specified decimals. + +Lets put it all together: + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + chat: // [!code focus] + telegram: // [!code focus] + - bot_token: ${TELEGRAM_BOT_TOKEN} + chat_id: -4223616270 + networks: + - ethereum + messages: + - event_name: Transfer // [!code focus] + template_inline: "*New RETH Transfer Event* // [!code focus] + + from: {{from}} // [!code focus] + + to: {{to}} // [!code focus] + + amount: {{format_value(value, 18)}} // [!code focus] + + RETH contract: {{transaction_information.address}} // [!code focus] + + [etherscan](https://etherscan.io/tx/{{transaction_information.transaction_hash}}) // [!code focus] + " // [!code focus] +``` \ No newline at end of file diff --git a/documentation/docs/pages/docs/start-building/streams/index.mdx b/documentation/docs/pages/docs/start-building/streams/index.mdx new file mode 100644 index 00000000..5eb51286 
--- /dev/null +++ b/documentation/docs/pages/docs/start-building/streams/index.mdx @@ -0,0 +1,24 @@ +# Streams + +:::info +rindexer streams can be used without any other storage providers. It can also be used with storage providers. +::: + +rindexer supports streaming data from rindexer to anywhere you want. This allows you to +build your own data indexing solutions in any language you wish, as well as stream the data to any location +you want with tons of use cases. Streams also support advanced filtering and conditions which allow you to filter the data +before it is streamed. This can all be done using no-code and set in the YAML configuration file. + +:::info +Rust projects do not get exposed to the stream clients yet but they can easily be exposed in the future. +::: + +Note you can use all the streams together; they are independent of each other, so if you wanted to use `kafka`, +`webhooks`, `rabbitmq` and `sns` together you can do that. + +Supported stream providers: + +- [Webhooks](/docs/start-building/streams/webhooks) - Fire webhooks to your own APIs +- [Kafka](/docs/start-building/streams/kafka) - Find out more about [Apache Kafka](https://kafka.apache.org/) +- [RabbitMQ](/docs/start-building/streams/rabbitmq) - Find out more about [RabbitMQ](https://www.rabbitmq.com/) +- [SNS/SQS](/docs/start-building/streams/sns) - Find out more about [Simple Notification Service](https://aws.amazon.com/sns/) and [Simple Queue Service](https://aws.amazon.com/sqs/) \ No newline at end of file diff --git a/documentation/docs/pages/docs/start-building/streams/kafka.mdx b/documentation/docs/pages/docs/start-building/streams/kafka.mdx new file mode 100644 index 00000000..af033a8f --- /dev/null +++ b/documentation/docs/pages/docs/start-building/streams/kafka.mdx @@ -0,0 +1,647 @@ +# Kafka + +:::info +rindexer streams can be used without any other storage providers. It can also be used with storage providers. 
+::: + +rindexer allows you to configure [Kafka](https://kafka.apache.org/) to stream any data to. This goes under +the [contracts](/docs/start-building/yaml-config/contracts) section of the YAML configuration file. + +Find out more about [Kafka](https://kafka.apache.org/). + +rindexer kafka integration supports SSL queues and none SSL queues. + +## Configuration with rindexer + +`kafka` property accepts an array of `topics` allowing you to split up the streams any way you wish. + +## Example + +Kafka has to be configured to use SASL_SSL or PLAINTEXT. You can read more about it [here](https://kafka.apache.org/documentation/#security_sasl). + +:::code-group + +```yaml [none-ssl] +name: RocketPoolETHIndexer +description: My first rindexer project +repository: https://github.com/joshstevens19/rindexer +project_type: no-code +networks: +- name: ethereum + chain_id: 1 + rpc: https://mainnet.gateway.tenderly.co +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: // [!code focus] + - ${KAFKA_BROKER_URL_1} // [!code focus] + - ${KAFKA_BROKER_URL_2} // [!code focus] + acks: all // [!code focus] + security_protocol: PLAINTEXT // [!code focus] + topics: // [!code focus] + - topic: test-topic // [!code focus] + # key is optional // [!code focus] + key: my-routing-key // [!code focus] + networks: // [!code focus] + - ethereum // [!code focus] + events: // [!code focus] + - event_name: Transfer // [!code focus] +``` + +```yaml [ssl] +name: RocketPoolETHIndexer +description: My first rindexer project +repository: https://github.com/joshstevens19/rindexer +project_type: no-code +networks: +- name: ethereum + chain_id: 1 + rpc: https://mainnet.gateway.tenderly.co +contracts: +- name: RocketPoolETH + details: + - network: 
ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: // [!code focus] + - ${KAFKA_BROKER_URL_1} // [!code focus] + - ${KAFKA_BROKER_URL_2} // [!code focus] + acks: all // [!code focus] + security_protocol: SASL_SSL // [!code focus] + sasl_mechanisms: PLAIN // [!code focus] + sasl_username: $ // [!code focus] + sasl_password: $ // [!code focus] + topics: + - topic: test-topic + # key is optional // [!code focus] + key: my-routing-key + networks: + - ethereum + events: + - event_name: Transfer +``` + +::: + +## brokers + +You define the kafka brokers you wish to connect to, you can pass in multiple brokers if you wish. A single broker +will of course work as well. + +:::info +We advise brokers should be set in your environment variables. +::: + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: // [!code focus] + - ${KAFKA_BROKER_URL_1} // [!code focus] + - ${KAFKA_BROKER_URL_2} // [!code focus] +``` + +## acks + +- `acks=0` - When acks=0 producers consider messages as "written successfully" the moment the message was sent without waiting for the broker to accept it at all. +- `acks=1` - When acks=1 , producers consider messages as "written successfully" when the message was acknowledged by only the leader. +- `acks=all` - When acks=all, producers consider messages as "written successfully" when the message is accepted by all in-sync replicas (ISR). + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: + - ${KAFKA_BROKER_URL_1} + - ${KAFKA_BROKER_URL_2} + # all or 0 or 1 + acks: all // [!code focus] + security_protocol: SASL_SSL // [!code focus] +``` + + +## security_protocol + +This is either `PLAINTEXT` or `SASL_SSL`. You can read more about it [here](https://kafka.apache.org/documentation/#security_sasl). + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: + - ${KAFKA_BROKER_URL_1} + - ${KAFKA_BROKER_URL_2} + acks: all + security_protocol: SASL_SSL // [!code focus] +``` + +## sasl_mechanisms + +:::info +This is optional, if you are using SASL_SSL you will need to provide this. +::: + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: + - ${KAFKA_BROKER_URL_1} + - ${KAFKA_BROKER_URL_2} + acks: all + security_protocol: SASL_SSL + sasl_mechanisms: PLAIN // [!code focus] +``` + +## sasl_username + +:::info +This is optional, if you are using SASL_SSL you will need to provide this. +
+We advise you to put this in your environment variables. +::: + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: + - ${KAFKA_BROKER_URL_1} + - ${KAFKA_BROKER_URL_2} + acks: all + security_protocol: SASL_SSL + sasl_mechanisms: PLAIN + sasl_username: $ // [!code focus] +``` + +## sasl_password + +:::info +This is optional, if you are using SASL_SSL you will need to provide this. +
+We advise you to put this in your environment variables. +::: + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: + - ${KAFKA_BROKER_URL_1} + - ${KAFKA_BROKER_URL_2} + acks: all + security_protocol: SASL_SSL + sasl_mechanisms: PLAIN + sasl_username: $ + sasl_password: $ // [!code focus] +``` + +## topics + +This is an array of topics you want to stream to this kafka. + +### topic + +This is the topic name. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: + - ${KAFKA_BROKER_URL_1} + - ${KAFKA_BROKER_URL_2} + acks: all + security_protocol: SASL_SSL + sasl_mechanisms: PLAIN + sasl_username: $ + sasl_password: $ + topics: + - topic: test-topic // [!code focus] +``` + +### key + +:::info +This is optional +::: + +You can route your messages to a specific partition in the topic, this is useful if you have multiple consumers +on the same topic. + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: + - ${KAFKA_BROKER_URL_1} + - ${KAFKA_BROKER_URL_2} + acks: all + security_protocol: SASL_SSL + sasl_mechanisms: PLAIN + sasl_username: $ + sasl_password: $ + topics: + - topic: test-topic + key: my-routing-key // [!code focus] + networks: + - ethereum + events: + - event_name: Transfer +``` + +## networks + +This is an array of networks you want to stream to this kafka. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: + - ${KAFKA_BROKER_URL_1} + - ${KAFKA_BROKER_URL_2} + acks: all + security_protocol: SASL_SSL + sasl_mechanisms: PLAIN + sasl_username: $ + sasl_password: $ + topics: + - topic: test-topic + key: my-routing-key + networks: // [!code focus] + - ethereum // [!code focus] + events: + - event_name: Transfer +``` + +## events + +This is an array of events you want to stream to this kafka. + +### event_name + +This is the name of the event you want to stream to this kafka, must match the ABI event name. + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: + - ${KAFKA_BROKER_URL_1} + - ${KAFKA_BROKER_URL_2} + acks: all + security_protocol: SASL_SSL + sasl_mechanisms: PLAIN + sasl_username: $ + sasl_password: $ + topics: + - topic: test-topic + key: my-routing-key + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer // [!code focus] +``` + +### conditions + +This accepts an array of conditions you want to apply to the event data before streaming to this kafka. + +:::info +This is optional, if you do not provide any conditions all data will be streamed. +::: + +You may want to filter on the stream based on the event data, if the event data has not got an index on the on the +solidity event you can not filter it over the logs. The `conditions` filter is here to help you with this, +based on your ABI you can filter on the event data. + +rindexer has enabled a special syntax which allows you to define on your ABI fields what you want to filter on. + +1. `>` - higher then (for numbers only) +2. `<` - lower then (for numbers only) +3. `=` - equals +4. `>=` - higher then or equals (for numbers only) +5. `<=` - lower then or equals (for numbers only) +6. `||` - or +7. `&&` - and + +So lets look at an example lets say i only want to get transfer events which are higher then `2000000000000000000` RETH wei + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: + - ${KAFKA_BROKER_URL_1} + - ${KAFKA_BROKER_URL_2} + acks: all + security_protocol: SASL_SSL + sasl_mechanisms: PLAIN + sasl_username: $ + sasl_password: $ + topics: + - topic: test-topic + key: my-routing-key + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer // [!code focus] + conditions: // [!code focus] + - "value": ">=2000000000000000000" // [!code focus] +``` + +We use the ABI input name `value` to filter on the value field, you can find these names in the ABI file. + +```json +{ + "anonymous":false, + "inputs":[ + { + "indexed":true, + "internalType":"address", + "name":"from", + "type":"address" + }, + { + "indexed":true, + "internalType":"address", + "name":"to", + "type":"address" + }, + { + "indexed":false, + "internalType":"uint256", + "name":"value", // [!code focus] + "type":"uint256" + } + ], + "name":"Transfer", + "type":"event" +} +``` + +You can use the `||` or `&&` to combine conditions. + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: + - ${KAFKA_BROKER_URL_1} + - ${KAFKA_BROKER_URL_2} + acks: all + security_protocol: SASL_SSL + sasl_mechanisms: PLAIN + sasl_username: $ + sasl_password: $ + topics: + - topic: test-topic + key: my-routing-key + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer + conditions: // [!code focus] + - "value": ">=2000000000000000000 && value <=4000000000000000000" // [!code focus] +``` + +You can use the `=` to filter on other aspects like the `from` or `to` address. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: + - ${KAFKA_BROKER_URL_1} + - ${KAFKA_BROKER_URL_2} + acks: all + security_protocol: SASL_SSL + sasl_mechanisms: PLAIN + sasl_username: $ + sasl_password: $ + topics: + - topic: test-topic + key: my-routing-key + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer + conditions: // [!code focus] + - "from": "0x0338ce5020c447f7e668dc2ef778025ce3982662 || 0x0338ce5020c447f7e668dc2ef778025ce398266u" // [!code focus] + - "value": ">=2000000000000000000 || value <=4000000000000000000" // [!code focus] +``` + +:::info +Note we advise you to filer any `indexed` fields in the contract details in the `rindexer.yaml` file. +As these can be filtered out on the request level and not filtered out in rindexer itself. +You can read more about it [here](/docs/start-building/yaml-config/contracts#indexed_1-indexed_2-indexed_3). 
+::: + +If you have a tuple and you want to get that value you just use the object notation. + +For example lets say we want to only get the events for `profileId` from the `quoteParams` tuple which equals `1`: + +```json +{ + "anonymous": false, + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "profileId", // [!code focus] + "type": "uint256" + }, + ... + ], + "indexed": false, + "internalType": "struct Types.QuoteParams", + "name": "quoteParams", // [!code focus] + "type": "tuple" + }, + ... + ], + "name": "QuoteCreated", // [!code focus] + "type": "event" +} +``` + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + kafka: // [!code focus] + brokers: + - ${KAFKA_BROKER_URL_1} + - ${KAFKA_BROKER_URL_2} + acks: all + security_protocol: SASL_SSL + sasl_mechanisms: PLAIN + sasl_username: $ + sasl_password: $ + topics: + - topic: test-topic + key: my-routing-key + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer + conditions: // [!code focus] + - "quoteParams.profileId": "=1" // [!code focus] +``` + diff --git a/documentation/docs/pages/docs/start-building/streams/rabbitmq.mdx b/documentation/docs/pages/docs/start-building/streams/rabbitmq.mdx new file mode 100644 index 00000000..2a1ad532 --- /dev/null +++ b/documentation/docs/pages/docs/start-building/streams/rabbitmq.mdx @@ -0,0 +1,509 @@ +# RabbitMQ + +:::info +rindexer streams can be used without any other storage providers. It can also be used with storage providers. +::: + +rindexer allows you to configure [RabbitMQ](https://www.rabbitmq.com/) to stream any data to. This goes under +the [contracts](/docs/start-building/yaml-config/contracts) section of the YAML configuration file. 
+ +Find out more about [RabbitMQ](https://www.rabbitmq.com/). + +rindexer rabbitmq integration supports `direct`, `topic` and `fanout` exchanges. +You can read more about what they do differently [here](https://medium.com/trendyol-tech/rabbitmq-exchange-types-d7e1f51ec825). + +## Configuration with rindexer + +`rabbitmq` property accepts an array of `exchanges` allowing you to split up the streams any way you wish. + +## Example + +:::code-group + +```yaml [direct] +name: RocketPoolETHIndexer +description: My first rindexer project +repository: https://github.com/joshstevens19/rindexer +project_type: no-code +networks: +- name: ethereum + chain_id: 1 + rpc: https://mainnet.gateway.tenderly.co +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + rabbitmq: // [!code focus] + # we advise to put this in a environment variables // [!code focus] + url: ${RABBITMQ_URL} // [!code focus] + exchanges: // [!code focus] + - exchange: transfer // [!code focus] + # expected one of `direct`, `topic` or `fanout` // [!code focus] + exchange_type: direct // [!code focus] + routing_key: my-routing-key // [!code focus] + networks: // [!code focus] + - ethereum // [!code focus] + events: // [!code focus] + - event_name: Transfer // [!code focus] +``` + +```yaml [topic] +name: RocketPoolETHIndexer +description: My first rindexer project +repository: https://github.com/joshstevens19/rindexer +project_type: no-code +networks: +- name: ethereum + chain_id: 1 + rpc: https://mainnet.gateway.tenderly.co +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + 
rabbitmq: // [!code focus] + # we advise to put this in a environment variables // [!code focus] + url: ${RABBITMQ_URL} // [!code focus] + exchanges: // [!code focus] + - exchange: transfer // [!code focus] + # expected one of `direct`, `topic` or `fanout` // [!code focus] + exchange_type: topic // [!code focus] + routing_key: my-routing-key // [!code focus] + networks: // [!code focus] + - ethereum // [!code focus] + events: // [!code focus] + - event_name: Transfer // [!code focus] +``` + +```yaml [fanout] +name: RocketPoolETHIndexer +description: My first rindexer project +repository: https://github.com/joshstevens19/rindexer +project_type: no-code +networks: +- name: ethereum + chain_id: 1 + rpc: https://mainnet.gateway.tenderly.co +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + rabbitmq: // [!code focus] + # we advise to put this in a environment variables // [!code focus] + url: ${RABBITMQ_URL} // [!code focus] + exchanges: // [!code focus] + - exchange: transfer // [!code focus] + # expected one of `direct`, `topic` or `fanout` // [!code focus] + exchange_type: fanout // [!code focus] + networks: // [!code focus] + - ethereum // [!code focus] + events: // [!code focus] + - event_name: Transfer // [!code focus] +``` + +::: + +## url + +This is the rabbitmq connection url we advise to put this in a environment variable. + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + rabbitmq: // [!code focus] + # we advise to put this in a environment variables // [!code focus] + url: ${RABBITMQ_URL} // [!code focus] +``` + + +## exchanges + +This is an array of exchanges you want to stream to this rabbitmq. + +### exchange + +This is the exchange name. + +```yaml [standard] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + rabbitmq: // [!code focus] + # we advise to put this in a environment variables + url: ${RABBITMQ_URL} + exchanges: // [!code focus] + - exchange: transfer // [!code focus] +``` + +### exchange_type + +This is the exchange type, you can read more about them [here](https://medium.com/trendyol-tech/rabbitmq-exchange-types-d7e1f51ec825). +rindexer supports `direct`, `topic` and `fanout` exchanges. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + rabbitmq: // [!code focus] + # we advise to put this in a environment variables + url: ${RABBITMQ_URL} + exchanges: // [!code focus] + - exchange: transfer + # expected one of `direct`, `topic` or `fanout` + exchange_type: direct // [!code focus] +``` + +### routing_key + +This is the routing key for the exchange. You do not need to provide this if you are using a `fanout` exchange. 
This +is mandatory for `direct` and `topic` exchanges. + +:::info +This is optional for `fanout` exchanges, required for `direct` and `topic` exchanges. +::: + +```yaml [fifo] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + rabbitmq: // [!code focus] + # we advise to put this in a environment variables + url: ${RABBITMQ_URL} + exchanges: // [!code focus] + - exchange: transfer + # expected one of `direct`, `topic` or `fanout` + exchange_type: direct + routing_key: my-routing-key // [!code focus] +``` + +## networks + +This is an array of networks you want to stream to this rabbitmq. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + rabbitmq: // [!code focus] + # we advise to put this in a environment variables // [!code focus] + url: ${RABBITMQ_URL} + exchanges: // [!code focus] + - exchange: transfer + # expected one of `direct`, `topic` or `fanout` + exchange_type: direct + routing_key: my-routing-key + networks: // [!code focus] + - ethereum // [!code focus] +``` + +## events + +This is an array of events you want to stream to this rabbitmq. + +### event_name + +This is the name of the event you want to stream to this rabbitmq, must match the ABI event name. + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + rabbitmq: // [!code focus] + # we advise to put this in a environment variables // [!code focus] + url: ${RABBITMQ_URL} + exchanges: // [!code focus] + - exchange: transfer + # expected one of `direct`, `topic` or `fanout` + exchange_type: direct + routing_key: my-routing-key + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer // [!code focus] +``` + +### conditions + +This accepts an array of conditions you want to apply to the event data before streaming to this rabbitmq. + +:::info +This is optional, if you do not provide any conditions all data will be streamed. +::: + +You may want to filter on the stream based on the event data, if the event data has not got an index on the on the +solidity event you can not filter it over the logs. The `conditions` filter is here to help you with this, +based on your ABI you can filter on the event data. + +rindexer has enabled a special syntax which allows you to define on your ABI fields what you want to filter on. + +1. `>` - higher then (for numbers only) +2. `<` - lower then (for numbers only) +3. `=` - equals +4. `>=` - higher then or equals (for numbers only) +5. `<=` - lower then or equals (for numbers only) +6. `||` - or +7. `&&` - and + +So lets look at an example lets say i only want to get transfer events which are higher then `2000000000000000000` RETH wei + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + rabbitmq: // [!code focus] + # we advise to put this in a environment variables // [!code focus] + url: ${RABBITMQ_URL} + exchanges: // [!code focus] + - exchange: transfer + # expected one of `direct`, `topic` or `fanout` + exchange_type: direct + routing_key: my-routing-key + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer // [!code focus] + conditions: // [!code focus] + - "value": ">=2000000000000000000" // [!code focus] +``` + +We use the ABI input name `value` to filter on the value field, you can find these names in the ABI file. + +```json +{ + "anonymous":false, + "inputs":[ + { + "indexed":true, + "internalType":"address", + "name":"from", + "type":"address" + }, + { + "indexed":true, + "internalType":"address", + "name":"to", + "type":"address" + }, + { + "indexed":false, + "internalType":"uint256", + "name":"value", // [!code focus] + "type":"uint256" + } + ], + "name":"Transfer", + "type":"event" +} +``` + +You can use the `||` or `&&` to combine conditions. + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + rabbitmq: // [!code focus] + # we advise to put this in a environment variables // [!code focus] + url: ${RABBITMQ_URL} + exchanges: // [!code focus] + - exchange: transfer + # expected one of `direct`, `topic` or `fanout` + exchange_type: direct + routing_key: my-routing-key + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer + conditions: // [!code focus] + - "value": ">=2000000000000000000 && value <=4000000000000000000" // [!code focus] +``` + +You can use the `=` to filter on other aspects like the `from` or `to` address. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + rabbitmq: // [!code focus] + # we advise to put this in a environment variables // [!code focus] + url: ${RABBITMQ_URL} + exchanges: // [!code focus] + - exchange: transfer + # expected one of `direct`, `topic` or `fanout` + exchange_type: direct + routing_key: my-routing-key + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer + conditions: // [!code focus] + - "from": "0x0338ce5020c447f7e668dc2ef778025ce3982662 || 0x0338ce5020c447f7e668dc2ef778025ce398266u" // [!code focus] + - "value": ">=2000000000000000000 || value <=4000000000000000000" // [!code focus] +``` + +:::info +Note we advise you to filer any `indexed` fields in the contract details in the `rindexer.yaml` file. +As these can be filtered out on the request level and not filtered out in rindexer itself. 
+You can read more about it [here](/docs/start-building/yaml-config/contracts#indexed_1-indexed_2-indexed_3). +::: + +If you have a tuple and you want to get that value you just use the object notation. + +For example lets say we want to only get the events for `profileId` from the `quoteParams` tuple which equals `1`: + +```json +{ + "anonymous": false, + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "profileId", // [!code focus] + "type": "uint256" + }, + ... + ], + "indexed": false, + "internalType": "struct Types.QuoteParams", + "name": "quoteParams", // [!code focus] + "type": "tuple" + }, + ... + ], + "name": "QuoteCreated", // [!code focus] + "type": "event" +} +``` + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + rabbitmq: // [!code focus] + # we advise to put this in a environment variables // [!code focus] + url: ${RABBITMQ_URL} + exchanges: // [!code focus] + - exchange: transfer + # expected one of `direct`, `topic` or `fanout` + exchange_type: direct + routing_key: my-routing-key + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer + conditions: // [!code focus] + - "quoteParams.profileId": "=1" // [!code focus] +``` + diff --git a/documentation/docs/pages/docs/start-building/streams/sns.mdx b/documentation/docs/pages/docs/start-building/streams/sns.mdx new file mode 100644 index 00000000..4902eb3c --- /dev/null +++ b/documentation/docs/pages/docs/start-building/streams/sns.mdx @@ -0,0 +1,507 @@ +# SNS / SQS + +:::info +rindexer streams can be used without any other storage providers. It can also be used with storage providers. +::: + +rindexer allows you to configure AWS SNS and AWS SQS to stream any data to. 
This goes under +the [contracts](/docs/start-building/yaml-config/contracts) section of the YAML configuration file. + +Find out more about [Simple Notification Service](https://aws.amazon.com/sns/) and [Simple Queue Service](https://aws.amazon.com/sqs/) + +## Configuration with rindexer + +`sns` `topics` property accepts an array allowing you to split up the streams any way you wish. + +## Example + +```yaml [rindexer.yaml] +name: RocketPoolETHIndexer +description: My first rindexer project +repository: https://github.com/joshstevens19/rindexer +project_type: no-code +networks: +- name: ethereum + chain_id: 1 + rpc: https://mainnet.gateway.tenderly.co +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + sns: // [!code focus] + aws_config: // [!code focus] + region: us-east-1 // [!code focus] + access_key: ${AWS_ACCESS_KEY_ID} // [!code focus] + secret_key: ${AWS_SECRET_ACCESS_KEY} // [!code focus] + # session_token is optional // [!code focus] + session_token: ${AWS_SESSION_TOKEN} // [!code focus] + topics: // [!code focus] + - topic_arn: "arn:aws:sns:us-east-1:664643779377:test" // [!code focus] + networks: // [!code focus] + - ethereum // [!code focus] + events: // [!code focus] + - event_name: Transfer // [!code focus] +``` + +## aws_config + +This is the AWS configuration for the SNS client. + +### region + +The AWS region to connect to. + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + sns: // [!code focus] + aws_config: // [!code focus] + region: us-east-1 // [!code focus] +``` + +### access_key + +:::info +We advise you to put this in a environment variables. +::: + +The AWS access key to connect to. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + sns: // [!code focus] + aws_config: // [!code focus] + region: us-east-1 + access_key: ${AWS_ACCESS_KEY_ID} // [!code focus] +``` + +### secret_key + +:::info +We advise you to put this in a environment variables. +::: + +The AWS secret key to connect to. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + sns: // [!code focus] + aws_config: // [!code focus] + region: us-east-1 + access_key: ${AWS_ACCESS_KEY_ID} + secret_key: ${AWS_SECRET_ACCESS_KEY} // [!code focus] +``` + +### session_token + +:::info +This is optional +::: + +:::info +We advise you to put this in a environment variables. +::: + + +The AWS session token to connect to. + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + sns: // [!code focus] + aws_config: // [!code focus] + region: us-east-1 + access_key: ${AWS_ACCESS_KEY_ID} + secret_key: ${AWS_SECRET_ACCESS_KEY} + session_token: ${AWS_SESSION_TOKEN} // [!code focus] +``` + +## topics + +This is an array of topics you want to stream to this sns. + +### topic_arn + +This is your SNS topic arn. It supports first-in-first-out and standard topics. +You can read about the different here [here](https://aws.amazon.com/sns/features/). + +:::code-group + +```yaml [standard] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + sns: // [!code focus] + aws_config: + region: us-east-1 + access_key: ${AWS_ACCESS_KEY_ID} + secret_key: ${AWS_SECRET_ACCESS_KEY} + # session_token is optional + session_token: ${AWS_SESSION_TOKEN} + topics: // [!code focus] + - topic_arn: "arn:aws:sns:us-east-1:664643779377:test" // [!code focus] +``` + +```yaml [fifo] +... 
+contracts:
+- name: RocketPoolETH
+  details:
+  - network: ethereum
+    address: "0xae78736cd615f374d3085123a210448e74fc6393"
+    start_block: "18600000"
+    end_block: "18600181"
+  abi: "./abis/RocketTokenRETH.abi.json"
+  include_events:
+  - Transfer
+  streams: // [!code focus]
+    sns: // [!code focus]
+      aws_config:
+        region: us-east-1
+        access_key: ${AWS_ACCESS_KEY_ID}
+        secret_key: ${AWS_SECRET_ACCESS_KEY}
+        # session_token is optional
+        session_token: ${AWS_SESSION_TOKEN}
+      topics: // [!code focus]
+      - topic_arn: "arn:aws:sns:us-east-1:664643779377:test.fifo" // [!code focus]
+```
+
+:::
+
+### networks
+
+This is an array of networks you want to stream to this SNS topic.
+
+```yaml [rindexer.yaml]
+...
+contracts:
+- name: RocketPoolETH
+  details:
+  - network: ethereum
+    address: "0xae78736cd615f374d3085123a210448e74fc6393"
+    start_block: "18600000"
+    end_block: "18600181"
+  abi: "./abis/RocketTokenRETH.abi.json"
+  include_events:
+  - Transfer
+  streams: // [!code focus]
+    sns: // [!code focus]
+      aws_config:
+        region: us-east-1
+        access_key: ${AWS_ACCESS_KEY_ID}
+        secret_key: ${AWS_SECRET_ACCESS_KEY}
+        # session_token is optional
+        session_token: ${AWS_SESSION_TOKEN}
+      topics: // [!code focus]
+      - topic_arn: "arn:aws:sns:us-east-1:664643779377:test"
+        networks: // [!code focus]
+        - ethereum // [!code focus]
+```
+
+### events
+
+This is an array of events you want to stream to this SNS topic.
+
+#### event_name
+
+This is the name of the event you want to stream to this SNS topic, must match the ABI event name.
+
+```yaml [rindexer.yaml]
+...
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + sns: // [!code focus] + aws_config: + region: us-east-1 + access_key: ${AWS_ACCESS_KEY_ID} + secret_key: ${AWS_SECRET_ACCESS_KEY} + # session_token is optional + session_token: ${AWS_SESSION_TOKEN} + topics: // [!code focus] + - topic_arn: "arn:aws:sns:us-east-1:664643779377:test" + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer // [!code focus] +``` + +#### conditions + +This accepts an array of conditions you want to apply to the event data before streaming to this SNS topic. + +:::info +This is optional, if you do not provide any conditions all data will be streamed. +::: + +You may want to filter on the stream based on the event data, if the event data has not got an index on the on the +solidity event you can not filter it over the logs. The `conditions` filter is here to help you with this, +based on your ABI you can filter on the event data. + +rindexer has enabled a special syntax which allows you to define on your ABI fields what you want to filter on. + +1. `>` - higher then (for numbers only) +2. `<` - lower then (for numbers only) +3. `=` - equals +4. `>=` - higher then or equals (for numbers only) +5. `<=` - lower then or equals (for numbers only) +6. `||` - or +7. `&&` - and + +So lets look at an example lets say i only want to get transfer events which are higher then `2000000000000000000` RETH wei + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + sns: // [!code focus] + aws_config: + region: us-east-1 + access_key: ${AWS_ACCESS_KEY_ID} + secret_key: ${AWS_SECRET_ACCESS_KEY} + # session_token is optional + session_token: ${AWS_SESSION_TOKEN} + topics: // [!code focus] + - topic_arn: "arn:aws:sns:us-east-1:664643779377:test" + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer + conditions: // [!code focus] + - "value": ">=2000000000000000000" // [!code focus] +``` + +We use the ABI input name `value` to filter on the value field, you can find these names in the ABI file. + +```json +{ + "anonymous":false, + "inputs":[ + { + "indexed":true, + "internalType":"address", + "name":"from", + "type":"address" + }, + { + "indexed":true, + "internalType":"address", + "name":"to", + "type":"address" + }, + { + "indexed":false, + "internalType":"uint256", + "name":"value", // [!code focus] + "type":"uint256" + } + ], + "name":"Transfer", + "type":"event" +} +``` + +You can use the `||` or `&&` to combine conditions. + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + sns: // [!code focus] + aws_config: + region: us-east-1 + access_key: ${AWS_ACCESS_KEY_ID} + secret_key: ${AWS_SECRET_ACCESS_KEY} + # session_token is optional + session_token: ${AWS_SESSION_TOKEN} + topics: // [!code focus] + - topic_arn: "arn:aws:sns:us-east-1:664643779377:test" + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer + conditions: // [!code focus] + - "value": ">=2000000000000000000 && value <=4000000000000000000" // [!code focus] +``` + +You can use the `=` to filter on other aspects like the `from` or `to` address. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + sns: // [!code focus] + aws_config: + region: us-east-1 + access_key: ${AWS_ACCESS_KEY_ID} + secret_key: ${AWS_SECRET_ACCESS_KEY} + # session_token is optional + session_token: ${AWS_SESSION_TOKEN} + topics: // [!code focus] + - topic_arn: "arn:aws:sns:us-east-1:664643779377:test" + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer + conditions: // [!code focus] + - "from": "0x0338ce5020c447f7e668dc2ef778025ce3982662 || 0x0338ce5020c447f7e668dc2ef778025ce398266u" // [!code focus] + - "value": ">=2000000000000000000 || value <=4000000000000000000" // [!code focus] +``` + +:::info +Note we advise you to filer any `indexed` fields in the contract details in the `rindexer.yaml` file. +As these can be filtered out on the request level and not filtered out in rindexer itself. 
+You can read more about it [here](/docs/start-building/yaml-config/contracts#indexed_1-indexed_2-indexed_3). +::: + +If you have a tuple and you want to get that value you just use the object notation. + +For example lets say we want to only get the events for `profileId` from the `quoteParams` tuple which equals `1`: + +```json +{ + "anonymous": false, + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "profileId", // [!code focus] + "type": "uint256" + }, + ... + ], + "indexed": false, + "internalType": "struct Types.QuoteParams", + "name": "quoteParams", // [!code focus] + "type": "tuple" + }, + ... + ], + "name": "QuoteCreated", // [!code focus] + "type": "event" +} +``` + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + sns: // [!code focus] + aws_config: + region: us-east-1 + access_key: ${AWS_ACCESS_KEY_ID} + secret_key: ${AWS_SECRET_ACCESS_KEY} + # session_token is optional + session_token: ${AWS_SESSION_TOKEN} + topics: // [!code focus] + - topic_arn: "arn:aws:sns:us-east-1:664643779377:test" + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer + conditions: // [!code focus] + - "quoteParams.profileId": "=1" // [!code focus] +``` diff --git a/documentation/docs/pages/docs/start-building/streams/webhooks.mdx b/documentation/docs/pages/docs/start-building/streams/webhooks.mdx new file mode 100644 index 00000000..d6d4f524 --- /dev/null +++ b/documentation/docs/pages/docs/start-building/streams/webhooks.mdx @@ -0,0 +1,336 @@ +# Webhooks + +:::info +rindexer streams can be used without any other storage providers. It can also be used with storage providers. +::: + +rindexer allows you to configure webhooks to fire based on your conditions to another API. 
This goes under
+the [contracts](/docs/start-building/yaml-config/contracts) section of the YAML configuration file.
+
+## Configuration with rindexer
+
+`webhooks` property accepts an array allowing you to split up the webhooks any way you wish.
+
+## Example
+
+```yaml [rindexer.yaml]
+name: RocketPoolETHIndexer
+description: My first rindexer project
+repository: https://github.com/joshstevens19/rindexer
+project_type: no-code
+networks:
+- name: ethereum
+  chain_id: 1
+  rpc: https://mainnet.gateway.tenderly.co
+contracts:
+- name: RocketPoolETH
+  details:
+  - network: ethereum
+    address: "0xae78736cd615f374d3085123a210448e74fc6393"
+    start_block: "18600000"
+    end_block: "18600181"
+  abi: "./abis/RocketTokenRETH.abi.json"
+  include_events:
+  - Transfer
+  streams: // [!code focus]
+    webhooks: // [!code focus]
+    - endpoint: YOUR_WEBHOOK_URL // [!code focus]
+      shared_secret: ${RINDEXER_WEBHOOK_SHARED_SECRET} // [!code focus]
+      networks: // [!code focus]
+      - ethereum // [!code focus]
+      events: // [!code focus]
+      - event_name: Transfer // [!code focus]
+```
+
+## endpoint
+
+This is the URL of the webhook endpoint you want rindexer to call.
+
+```yaml [rindexer.yaml]
+...
+contracts:
+- name: RocketPoolETH
+  details:
+  - network: ethereum
+    address: "0xae78736cd615f374d3085123a210448e74fc6393"
+    start_block: "18600000"
+    end_block: "18600181"
+  abi: "./abis/RocketTokenRETH.abi.json"
+  include_events:
+  - Transfer
+  streams: // [!code focus]
+    webhooks: // [!code focus]
+    - endpoint: YOUR_WEBHOOK_URL // [!code focus]
+```
+
+## shared_secret
+
+This is the shared secret you want to use to authenticate the webhook so you know it has come from rindexer.
+This is always injected in the header as `x-rindexer-shared-secret`.
+
+:::info
+We advise you to put this in an environment variable.
+:::
+
+```yaml [rindexer.yaml]
+...
+contracts:
+- name: RocketPoolETH
+  details:
+  - network: ethereum
+    address: "0xae78736cd615f374d3085123a210448e74fc6393"
+    start_block: "18600000"
+    end_block: "18600181"
+  abi: "./abis/RocketTokenRETH.abi.json"
+  include_events:
+  - Transfer
+  streams: // [!code focus]
+    webhooks: // [!code focus]
+    - endpoint: YOUR_WEBHOOK_URL
+      shared_secret: ${RINDEXER_WEBHOOK_SHARED_SECRET} // [!code focus]
+```
+
+## networks
+
+This is an array of networks you want to stream to this webhook.
+
+```yaml [rindexer.yaml]
+...
+contracts:
+- name: RocketPoolETH
+  details:
+  - network: ethereum
+    address: "0xae78736cd615f374d3085123a210448e74fc6393"
+    start_block: "18600000"
+    end_block: "18600181"
+  abi: "./abis/RocketTokenRETH.abi.json"
+  include_events:
+  - Transfer
+  streams: // [!code focus]
+    webhooks: // [!code focus]
+    - endpoint: YOUR_WEBHOOK_URL
+      shared_secret: ${RINDEXER_WEBHOOK_SHARED_SECRET}
+      networks: // [!code focus]
+      - ethereum // [!code focus]
+```
+
+## events
+
+This is an array of events you want to stream to this webhook.
+
+### event_name
+
+This is the name of the event you want to stream to this webhook, must match the ABI event name.
+
+```yaml [rindexer.yaml]
+...
+contracts:
+- name: RocketPoolETH
+  details:
+  - network: ethereum
+    address: "0xae78736cd615f374d3085123a210448e74fc6393"
+    start_block: "18600000"
+    end_block: "18600181"
+  abi: "./abis/RocketTokenRETH.abi.json"
+  include_events:
+  - Transfer
+  streams: // [!code focus]
+    webhooks: // [!code focus]
+    - endpoint: YOUR_WEBHOOK_URL
+      shared_secret: ${RINDEXER_WEBHOOK_SHARED_SECRET}
+      networks:
+      - ethereum
+      events: // [!code focus]
+      - event_name: Transfer // [!code focus]
+```
+
+### conditions
+
+This accepts an array of conditions you want to apply to the event data before calling the webhook.
+
+:::info
+This is optional, if you do not provide any conditions all data will be streamed.
+::: + +You may want to filter on the stream based on the event data, if the event data has not got an index on the on the +solidity event you can not filter it over the logs. The `conditions` filter is here to help you with this, +based on your ABI you can filter on the event data. + +rindexer has enabled a special syntax which allows you to define on your ABI fields what you want to filter on. + +1. `>` - higher then (for numbers only) +2. `<` - lower then (for numbers only) +3. `=` - equals +4. `>=` - higher then or equals (for numbers only) +5. `<=` - lower then or equals (for numbers only) +6. `||` - or +7. `&&` - and + +So lets look at an example lets say i only want to get transfer events which are higher then `2000000000000000000` RETH wei + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + webhooks: // [!code focus] + - endpoint: YOUR_WEBHOOK_URL + shared_secret: ${RINDEXER_WEBHOOK_SHARED_SECRET} + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer // [!code focus] + conditions: // [!code focus] + - "value": ">=2000000000000000000" // [!code focus] +``` + +We use the ABI input name `value` to filter on the value field, you can find these names in the ABI file. + +```json +{ + "anonymous":false, + "inputs":[ + { + "indexed":true, + "internalType":"address", + "name":"from", + "type":"address" + }, + { + "indexed":true, + "internalType":"address", + "name":"to", + "type":"address" + }, + { + "indexed":false, + "internalType":"uint256", + "name":"value", // [!code focus] + "type":"uint256" + } + ], + "name":"Transfer", + "type":"event" +} +``` + +You can use the `||` or `&&` to combine conditions. + +```yaml [rindexer.yaml] +... 
+contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + webhooks: // [!code focus] + - endpoint: YOUR_WEBHOOK_URL + shared_secret: ${RINDEXER_WEBHOOK_SHARED_SECRET} + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer + conditions: // [!code focus] + - "value": ">=2000000000000000000 && value <=4000000000000000000" // [!code focus] +``` + +You can use the `=` to filter on other aspects like the `from` or `to` address. + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + webhooks: // [!code focus] + - endpoint: YOUR_WEBHOOK_URL + shared_secret: ${RINDEXER_WEBHOOK_SHARED_SECRET} + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer + conditions: // [!code focus] + - "from": "0x0338ce5020c447f7e668dc2ef778025ce3982662 || 0x0338ce5020c447f7e668dc2ef778025ce398266u" // [!code focus] + - "value": ">=2000000000000000000 || value <=4000000000000000000" // [!code focus] +``` + +:::info +Note we advise you to filer any `indexed` fields in the contract details in the `rindexer.yaml` file. +As these can be filtered out on the request level and not filtered out in rindexer itself. +You can read more about it [here](/docs/start-building/yaml-config/contracts#indexed_1-indexed_2-indexed_3). +::: + +If you have a tuple and you want to get that value you just use the object notation. 
+ +For example lets say we want to only get the events for `profileId` from the `quoteParams` tuple which equals `1`: + +```json +{ + "anonymous": false, + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "profileId", // [!code focus] + "type": "uint256" + }, + ... + ], + "indexed": false, + "internalType": "struct Types.QuoteParams", + "name": "quoteParams", // [!code focus] + "type": "tuple" + }, + ... + ], + "name": "QuoteCreated", // [!code focus] + "type": "event" +} +``` + +```yaml [rindexer.yaml] +... +contracts: +- name: RocketPoolETH + details: + - network: ethereum + address: "0xae78736cd615f374d3085123a210448e74fc6393" + start_block: "18600000" + end_block: "18600181" + abi: "./abis/RocketTokenRETH.abi.json" + include_events: + - Transfer + streams: // [!code focus] + webhooks: // [!code focus] + - endpoint: YOUR_WEBHOOK_URL + shared_secret: ${RINDEXER_WEBHOOK_SHARED_SECRET} + networks: + - ethereum + events: // [!code focus] + - event_name: Transfer + conditions: // [!code focus] + - "quoteParams.profileId": "=1" // [!code focus] +``` + + + diff --git a/documentation/docs/pages/docs/start-building/yaml-config/contracts.mdx b/documentation/docs/pages/docs/start-building/yaml-config/contracts.mdx index fb45083d..243bbb8c 100644 --- a/documentation/docs/pages/docs/start-building/yaml-config/contracts.mdx +++ b/documentation/docs/pages/docs/start-building/yaml-config/contracts.mdx @@ -629,4 +629,13 @@ contracts: // [!code focus] - Transfer - Approval generate_csv: true // [!code focus] -``` \ No newline at end of file +``` + +## streams + +You can configure streams to stream the data to other services, this is useful if you want to use other services +to index the data. You can read more about it [here](/docs/start-building/streams). + +## chat + +You can configure chat to send messages You can read more about it [here](/docs/start-building/chatbots). 
\ No newline at end of file diff --git a/documentation/vocs.config.ts b/documentation/vocs.config.ts index 475c7af6..17d6ddac 100644 --- a/documentation/vocs.config.ts +++ b/documentation/vocs.config.ts @@ -70,6 +70,25 @@ export default defineConfig({ text: 'Delete', link: '/docs/start-building/delete', }, + { + text: 'Chatbots', + link: '/docs/start-building/chatbots', + items: [ + { text: 'Telegram', link: '/docs/start-building/chatbots/telegram' }, + { text: 'Discord', link: '/docs/start-building/chatbots/discord' }, + { text: 'Slack', link: '/docs/start-building/chatbots/slack' }, + ], + }, + { + text: 'Streams', + link: '/docs/start-building/streams', + items: [ + { text: 'Webhooks', link: '/docs/start-building/streams/webhooks' }, + { text: 'Kafka', link: '/docs/start-building/streams/kafka' }, + { text: 'Rabbitmq', link: '/docs/start-building/streams/rabbitmq' }, + { text: 'SNS/SQS', link: '/docs/start-building/streams/sns' }, + ], + }, { text: 'Phantom Events', link: '/docs/start-building/phantom', diff --git a/examples/rindexer_demo_cli/.gitignore b/examples/rindexer_demo_cli/.gitignore new file mode 100644 index 00000000..b0f01755 --- /dev/null +++ b/examples/rindexer_demo_cli/.gitignore @@ -0,0 +1,2 @@ +.rindexer +generated_csv/**/*.txt \ No newline at end of file diff --git a/examples/rindexer_demo_cli/rindexer.yaml b/examples/rindexer_demo_cli/rindexer.yaml index 5518b75c..da2cacb3 100644 --- a/examples/rindexer_demo_cli/rindexer.yaml +++ b/examples/rindexer_demo_cli/rindexer.yaml @@ -19,4 +19,4 @@ contracts: end_block: '18718056' abi: ./abis/RocketTokenRETH.abi.json include_events: - - Transfer + - Transfer \ No newline at end of file diff --git a/rindexer_rust_playground/src/rindexer_lib/typings/rindexer_playground/events/erc_20_filter_abi_gen.rs b/rindexer_rust_playground/src/rindexer_lib/typings/rindexer_playground/events/erc_20_filter_abi_gen.rs index 6184f3b0..ee8958f7 100644 --- 
a/rindexer_rust_playground/src/rindexer_lib/typings/rindexer_playground/events/erc_20_filter_abi_gen.rs +++ b/rindexer_rust_playground/src/rindexer_lib/typings/rindexer_playground/events/erc_20_filter_abi_gen.rs @@ -11,9 +11,7 @@ pub use rindexer_erc20_filter_gen::*; )] pub mod rindexer_erc20_filter_gen { const _: () = { - ::core::include_bytes!( - "../../../../../abis/erc20-abi.json", - ); + ::core::include_bytes!("../../../../../abis/erc20-abi.json",); }; #[allow(deprecated)] fn __abi() -> ::ethers::core::abi::Abi { diff --git a/rindexer_rust_playground/src/rindexer_lib/typings/rindexer_playground/events/rocket_pool_eth_abi_gen.rs b/rindexer_rust_playground/src/rindexer_lib/typings/rindexer_playground/events/rocket_pool_eth_abi_gen.rs index d1d181e2..4ea7351b 100644 --- a/rindexer_rust_playground/src/rindexer_lib/typings/rindexer_playground/events/rocket_pool_eth_abi_gen.rs +++ b/rindexer_rust_playground/src/rindexer_lib/typings/rindexer_playground/events/rocket_pool_eth_abi_gen.rs @@ -11,9 +11,7 @@ pub use rindexer_rocket_pool_eth_gen::*; )] pub mod rindexer_rocket_pool_eth_gen { const _: () = { - ::core::include_bytes!( - "../../../../../abis/erc20-abi.json", - ); + ::core::include_bytes!("../../../../../abis/erc20-abi.json",); }; #[allow(deprecated)] fn __abi() -> ::ethers::core::abi::Abi { diff --git a/rindexer_rust_playground/src/rindexer_lib/typings/rindexer_playground/events/world_abi_gen.rs b/rindexer_rust_playground/src/rindexer_lib/typings/rindexer_playground/events/world_abi_gen.rs index b104b7de..6206e629 100644 --- a/rindexer_rust_playground/src/rindexer_lib/typings/rindexer_playground/events/world_abi_gen.rs +++ b/rindexer_rust_playground/src/rindexer_lib/typings/rindexer_playground/events/world_abi_gen.rs @@ -11,9 +11,7 @@ pub use rindexer_world_gen::*; )] pub mod rindexer_world_gen { const _: () = { - ::core::include_bytes!( - "../../../../../abis/world.abi.json", - ); + ::core::include_bytes!("../../../../../abis/world.abi.json",); }; 
#[allow(deprecated)] fn __abi() -> ::ethers::core::abi::Abi { diff --git a/streams_playground/kafka/consumer.js b/streams_playground/kafka/consumer.js new file mode 100644 index 00000000..c6406336 --- /dev/null +++ b/streams_playground/kafka/consumer.js @@ -0,0 +1,39 @@ +const kafka = require('kafka-node'); + +// kafka: +// brokers: +// - broker1:9092 +// security_protocol: PLAINTEXT # Change to SASL_SSL if using SASL +// sasl_mechanisms: PLAIN # Remove if not using SASL +// sasl_username: # Omit if not using SASL +// sasl_password: # Omit if not using SASL +// acks: all +// dr_msg_cb: true +// topics: +// - topic: my-topic +// key: my-key +// networks: +// - ethereum +// events: +// - Transfer + +const client = new kafka.KafkaClient({ kafkaHost: 'localhost:9092' }); +const consumer = new kafka.Consumer( + client, + [{ topic: 'test-topic', partition: 0 }], + { + autoCommit: true, + fromOffset: 'earliest' + } +); + +consumer.on('message', function (message) { + console.log('Message consumed:', message); +}); + +consumer.on('error', function (err) { + console.error('Consumer error:', err); +}); + +console.log('Consumer started'); + diff --git a/streams_playground/kafka/create-topic.js b/streams_playground/kafka/create-topic.js new file mode 100644 index 00000000..86468c0f --- /dev/null +++ b/streams_playground/kafka/create-topic.js @@ -0,0 +1,22 @@ +const kafka = require("kafka-node"); +const { KafkaClient, Admin } = kafka; + +const client = new KafkaClient({ kafkaHost: 'localhost:9092' }); +const admin = new Admin(client); + +const topicToCreate = [ + { + topic: 'test-topic', + partitions: 1, + replicationFactor: 1 + } +]; + +admin.createTopics(topicToCreate, (error, result) => { + if (error) { + console.error('Failed to create topic:', error); + } else { + console.log('Topic created successfully:', result); + } + client.close(); +}); \ No newline at end of file diff --git a/streams_playground/kafka/docker-compose.yml b/streams_playground/kafka/docker-compose.yml 
new file mode 100644 index 00000000..96934e0a --- /dev/null +++ b/streams_playground/kafka/docker-compose.yml @@ -0,0 +1,21 @@ +version: '3' +services: + zookeeper: + image: bitnami/zookeeper:latest + environment: + - ALLOW_ANONYMOUS_LOGIN=yes + ports: + - "2181:2181" + + kafka: + image: bitnami/kafka:latest + ports: + - "9092:9092" + environment: + - KAFKA_BROKER_ID=1 + - KAFKA_LISTENERS=PLAINTEXT://:9092 + - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:9092 + - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 + - ALLOW_PLAINTEXT_LISTENER=yes + depends_on: + - zookeeper diff --git a/streams_playground/kafka/package-lock.json b/streams_playground/kafka/package-lock.json new file mode 100644 index 00000000..c97153f4 --- /dev/null +++ b/streams_playground/kafka/package-lock.json @@ -0,0 +1,836 @@ +{ + "name": "kafka", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "kafka", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "kafka-node": "^5.0.0" + } + }, + "node_modules/ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha512-TIGnTpdo+E3+pCyAluZvtED5p5wCqLdezCyhPZzKPcxvFplEt4i+W7OONCKgeZFT3+y5NZZfOOS/Bdcanm1MYA==", + "optional": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/aproba": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz", + "integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==", + "optional": true + }, + "node_modules/are-we-there-yet": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-1.1.7.tgz", + "integrity": "sha512-nxwy40TuMiUGqMyRHgCSWZ9FM4VAoRP4xUYSTv5ImRog+h9yISPbVH7H8fASCIzYn9wlEv4zvFL7uKDMCFQm3g==", + "deprecated": "This package is no longer supported.", + "optional": true, + "dependencies": { + "delegates": "^1.0.0", + "readable-stream": 
"^2.0.6" + } + }, + "node_modules/async": { + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz", + "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==", + "dependencies": { + "lodash": "^4.17.14" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "node_modules/binary": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/binary/-/binary-0.3.0.tgz", + "integrity": "sha512-D4H1y5KYwpJgK8wk1Cue5LLPgmwHKYSChkbspQg5JtVuR5ulGckxfR62H3AE9UDkdMC8yyXlqYihuz3Aqg2XZg==", + "dependencies": { + "buffers": "~0.1.1", + "chainsaw": "~0.1.0" + }, + "engines": { + "node": "*" + } + }, + "node_modules/bindings": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", + "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", + "optional": true, + "dependencies": { + "file-uri-to-path": "1.0.0" + } + }, + "node_modules/bl": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/bl/-/bl-2.2.1.tgz", + "integrity": "sha512-6Pesp1w0DEX1N550i/uGV/TqucVL4AM/pgThFSN/Qq9si1/DF9aIHs1BxD8V/QU0HoeHO6cQRTAuYnLPKq1e4g==", + "dependencies": { + "readable-stream": "^2.3.5", + "safe-buffer": "^5.1.1" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/buffer-alloc": { + "version": "1.2.0", + "resolved": 
"https://registry.npmjs.org/buffer-alloc/-/buffer-alloc-1.2.0.tgz", + "integrity": "sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==", + "optional": true, + "dependencies": { + "buffer-alloc-unsafe": "^1.1.0", + "buffer-fill": "^1.0.0" + } + }, + "node_modules/buffer-alloc-unsafe": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", + "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==", + "optional": true + }, + "node_modules/buffer-crc32": { + "version": "0.2.13", + "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", + "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==", + "engines": { + "node": "*" + } + }, + "node_modules/buffer-fill": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz", + "integrity": "sha512-T7zexNBwiiaCOGDg9xNX9PBmjrubblRkENuptryuI64URkXDFum9il/JGL8Lm8wYfAXpredVXXZz7eMHilimiQ==", + "optional": true + }, + "node_modules/buffermaker": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/buffermaker/-/buffermaker-1.2.1.tgz", + "integrity": "sha512-IdnyU2jDHU65U63JuVQNTHiWjPRH0CS3aYd/WPaEwyX84rFdukhOduAVb1jwUScmb5X0JWPw8NZOrhoLMiyAHQ==", + "dependencies": { + "long": "1.1.2" + } + }, + "node_modules/buffers": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/buffers/-/buffers-0.1.1.tgz", + "integrity": "sha512-9q/rDEGSb/Qsvv2qvzIzdluL5k7AaJOTrw23z9reQthrbF7is4CtlT0DXyO1oei2DCp4uojjzQ7igaSHp1kAEQ==", + "engines": { + "node": ">=0.2.0" + } + }, + "node_modules/chainsaw": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/chainsaw/-/chainsaw-0.1.0.tgz", + "integrity": "sha512-75kWfWt6MEKNC8xYXIdRpDehRYY/tNSgwKaJq+dbbDcxORuVrrQ+SEHoWsniVn9XPYfP4gmdWIeDk/4YNp1rNQ==", + "dependencies": { + "traverse": 
">=0.3.0 <0.4" + }, + "engines": { + "node": "*" + } + }, + "node_modules/chownr": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", + "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", + "optional": true + }, + "node_modules/code-point-at": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", + "integrity": "sha512-RpAVKQA5T63xEj6/giIbUEtZwJ4UFIc3ZtvEkiaUERylqe8xb5IvqcgOurZLahv93CLKfxcw5YI+DZcUBRyLXA==", + "optional": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + }, + "node_modules/console-control-strings": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", + "integrity": "sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ==", + "optional": true + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, + "node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/decompress-response": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", + "integrity": "sha512-BzRPQuY1ip+qDonAOz42gRm/pg9F768C+npV/4JOsxRC2sq+Rlk+Q4ZCAsOhnIaMrgarILY+RMUIvMmmX1qAEA==", + 
"optional": true, + "dependencies": { + "mimic-response": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "optional": true, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/delegates": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", + "integrity": "sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==", + "optional": true + }, + "node_modules/denque": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/denque/-/denque-1.5.1.tgz", + "integrity": "sha512-XwE+iZ4D6ZUB7mfYRMb5wByE8L74HCn30FBN7sWnXksWc1LO1bPDl67pBR9o/kC4z/xSNAwkMYcGgqDV3BE3Hw==", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/detect-libc": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz", + "integrity": "sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg==", + "optional": true, + "bin": { + "detect-libc": "bin/detect-libc.js" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "optional": true, + "dependencies": { + "once": "^1.4.0" + } + }, + "node_modules/expand-template": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz", + "integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==", + "optional": true, + "engines": { + "node": ">=6" + } + }, + 
"node_modules/file-uri-to-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", + "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", + "optional": true + }, + "node_modules/fs-constants": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", + "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", + "optional": true + }, + "node_modules/gauge": { + "version": "2.7.4", + "resolved": "https://registry.npmjs.org/gauge/-/gauge-2.7.4.tgz", + "integrity": "sha512-14x4kjc6lkD3ltw589k0NrPD6cCNTD6CWoVUNpB85+DrtONoZn+Rug6xZU5RvSC4+TZPxA5AnBibQYAvZn41Hg==", + "deprecated": "This package is no longer supported.", + "optional": true, + "dependencies": { + "aproba": "^1.0.3", + "console-control-strings": "^1.0.0", + "has-unicode": "^2.0.0", + "object-assign": "^4.1.0", + "signal-exit": "^3.0.0", + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1", + "wide-align": "^1.1.0" + } + }, + "node_modules/github-from-package": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz", + "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==", + "optional": true + }, + "node_modules/has-unicode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", + "integrity": "sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==", + "optional": true + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": 
"https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "optional": true + }, + "node_modules/is-fullwidth-code-point": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", + "integrity": "sha512-1pqUqRjkhPJ9miNq9SwMfdvi6lBJcd6eFxvfaivQhaH3SgisfiuudvFntdKOmxuee/77l+FPjKrQjWvmPjWrRw==", + "optional": true, + "dependencies": { + "number-is-nan": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + }, + "node_modules/kafka-node": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/kafka-node/-/kafka-node-5.0.0.tgz", + "integrity": "sha512-dD2ga5gLcQhsq1yNoQdy1MU4x4z7YnXM5bcG9SdQuiNr5KKuAmXixH1Mggwdah5o7EfholFbcNDPSVA6BIfaug==", + "dependencies": { + "async": "^2.6.2", + "binary": "~0.3.0", + "bl": "^2.2.0", + "buffer-crc32": "~0.2.5", + "buffermaker": "~1.2.0", + "debug": "^2.1.3", + "denque": "^1.3.0", + "lodash": "^4.17.4", + "minimatch": "^3.0.2", + "nested-error-stacks": "^2.0.0", + "optional": "^0.1.3", + "retry": "^0.10.1", + "uuid": "^3.0.0" + }, + "engines": { + "node": ">=8.5.1" + }, + "optionalDependencies": { + "snappy": "^6.0.1" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/long": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/long/-/long-1.1.2.tgz", + "integrity": "sha512-pjR3OP1X2VVQhCQlrq3s8UxugQsuoucwMOn9Yj/kN/61HMc+lDFJS5bvpNEHneZ9NVaSm8gNWxZvtGS7lqHb3Q==", + "engines": { + "node": 
">=0.6" + } + }, + "node_modules/mimic-response": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", + "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==", + "optional": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "optional": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mkdirp": { + "version": "0.5.6", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz", + "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==", + "optional": true, + "dependencies": { + "minimist": "^1.2.6" + }, + "bin": { + "mkdirp": "bin/cmd.js" + } + }, + "node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/nan": { + "version": "2.20.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.20.0.tgz", + "integrity": "sha512-bk3gXBZDGILuuo/6sKtr0DQmSThYHLtNCdSdXk9YkxD/jK6X2vmCyyXBBxyqZ4XcnzTyYEAThfX3DCEnLf6igw==", + "optional": true + }, + "node_modules/napi-build-utils": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-1.0.2.tgz", + "integrity": 
"sha512-ONmRUqK7zj7DWX0D9ADe03wbwOBZxNAfF20PlGfCWQcD3+/MakShIHrMqx9YwPTfxDdF1zLeL+RGZiR9kGMLdg==", + "optional": true + }, + "node_modules/nested-error-stacks": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nested-error-stacks/-/nested-error-stacks-2.1.1.tgz", + "integrity": "sha512-9iN1ka/9zmX1ZvLV9ewJYEk9h7RyRRtqdK0woXcqohu8EWIerfPUjYJPg0ULy0UqP7cslmdGc8xKDJcojlKiaw==" + }, + "node_modules/node-abi": { + "version": "2.30.1", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-2.30.1.tgz", + "integrity": "sha512-/2D0wOQPgaUWzVSVgRMx+trKJRC2UG4SUc4oCJoXx9Uxjtp0Vy3/kt7zcbxHF8+Z/pK3UloLWzBISg72brfy1w==", + "optional": true, + "dependencies": { + "semver": "^5.4.1" + } + }, + "node_modules/noop-logger": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/noop-logger/-/noop-logger-0.1.1.tgz", + "integrity": "sha512-6kM8CLXvuW5crTxsAtva2YLrRrDaiTIkIePWs9moLHqbFWT94WpNFjwS/5dfLfECg5i/lkmw3aoqVidxt23TEQ==", + "optional": true + }, + "node_modules/npmlog": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-4.1.2.tgz", + "integrity": "sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg==", + "deprecated": "This package is no longer supported.", + "optional": true, + "dependencies": { + "are-we-there-yet": "~1.1.2", + "console-control-strings": "~1.1.0", + "gauge": "~2.7.3", + "set-blocking": "~2.0.0" + } + }, + "node_modules/number-is-nan": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", + "integrity": "sha512-4jbtZXNAsfZbAHiiqjLPBiCl16dES1zI4Hpzzxw61Tk+loF+sBDBKx1ICKKKwIqQ7M0mFn1TmkN7euSncWgHiQ==", + "optional": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": 
"sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "optional": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "optional": true, + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/optional": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/optional/-/optional-0.1.4.tgz", + "integrity": "sha512-gtvrrCfkE08wKcgXaVwQVgwEQ8vel2dc5DDBn9RLQZ3YtmtkBss6A2HY6BnJH4N/4Ku97Ri/SF8sNWE2225WJw==" + }, + "node_modules/os-homedir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz", + "integrity": "sha512-B5JU3cabzk8c67mRRd3ECmROafjYMXbuzlwtqdM8IbS8ktlTix8aFGb2bAGKrSRIlnfKwovGUUr72JUPyOb6kQ==", + "optional": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/prebuild-install": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-5.3.0.tgz", + "integrity": "sha512-aaLVANlj4HgZweKttFNUVNRxDukytuIuxeK2boIMHjagNJCiVKWFsKF4tCE3ql3GbrD2tExPQ7/pwtEJcHNZeg==", + "optional": true, + "dependencies": { + "detect-libc": "^1.0.3", + "expand-template": "^2.0.3", + "github-from-package": "0.0.0", + "minimist": "^1.2.0", + "mkdirp": "^0.5.1", + "napi-build-utils": "^1.0.1", + "node-abi": "^2.7.0", + "noop-logger": "^0.1.1", + "npmlog": "^4.0.1", + "os-homedir": "^1.0.1", + "pump": "^2.0.1", + "rc": "^1.2.7", + "simple-get": "^2.7.0", + "tar-fs": "^1.13.0", + "tunnel-agent": "^0.6.0", + "which-pm-runs": "^1.0.0" + }, + "bin": { + "prebuild-install": "bin.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": 
"sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, + "node_modules/pump": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz", + "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==", + "optional": true, + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "optional": true, + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/readable-stream/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/retry": { + "version": "0.10.1", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.10.1.tgz", + "integrity": "sha512-ZXUSQYTHdl3uS7IuCehYfMzKyIDBNoAuUblvy5oGO5UJSUTmStUUVPXbA9Qxd173Bgre53yCQczQuHgRWAdvJQ==", + "engines": { + "node": "*" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": 
"https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/semver": { + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "optional": true, + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==", + "optional": true + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "optional": true + }, + "node_modules/simple-concat": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", + "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "optional": true + }, + "node_modules/simple-get": { + "version": "2.8.2", + "resolved": "https://registry.npmjs.org/simple-get/-/simple-get-2.8.2.tgz", + "integrity": 
"sha512-Ijd/rV5o+mSBBs4F/x9oDPtTx9Zb6X9brmnXvMW4J7IR15ngi9q5xxqWBKU744jTZiaXtxaPL7uHG6vtN8kUkw==", + "optional": true, + "dependencies": { + "decompress-response": "^3.3.0", + "once": "^1.3.1", + "simple-concat": "^1.0.0" + } + }, + "node_modules/snappy": { + "version": "6.3.5", + "resolved": "https://registry.npmjs.org/snappy/-/snappy-6.3.5.tgz", + "integrity": "sha512-lonrUtdp1b1uDn1dbwgQbBsb5BbaiLeKq+AGwOk2No+en+VvJThwmtztwulEQsLinRF681pBqib0NUZaizKLIA==", + "hasInstallScript": true, + "optional": true, + "dependencies": { + "bindings": "^1.3.1", + "nan": "^2.14.1", + "prebuild-install": "5.3.0" + } + }, + "node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/string_decoder/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/string-width": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", + "integrity": "sha512-0XsVpQLnVCXHJfyEs8tC0zpTVIr5PKKsQtkT29IwupnPTjtPmQ3xT/4yCREF9hYkV/3M3kzcUTSAZT6a6h81tw==", + "optional": true, + "dependencies": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": "sha512-VhumSSbBqDTP8p2ZLKj40UjBCV4+v8bUSEpUb4KjRgWk9pbqGF4REFj6KEagidb2f/M6AzC0EmFyDNGaw9OCzg==", + "optional": true, + "dependencies": { + "ansi-regex": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + 
}, + "node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "optional": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/tar-fs": { + "version": "1.16.3", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-1.16.3.tgz", + "integrity": "sha512-NvCeXpYx7OsmOh8zIOP/ebG55zZmxLE0etfWRbWok+q2Qo8x/vOR/IJT1taADXPe+jsiu9axDb3X4B+iIgNlKw==", + "optional": true, + "dependencies": { + "chownr": "^1.0.1", + "mkdirp": "^0.5.1", + "pump": "^1.0.0", + "tar-stream": "^1.1.2" + } + }, + "node_modules/tar-fs/node_modules/pump": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/pump/-/pump-1.0.3.tgz", + "integrity": "sha512-8k0JupWme55+9tCVE+FS5ULT3K6AbgqrGa58lTT49RpyfwwcGedHqaC5LlQNdEAumn/wFsu6aPwkuPMioy8kqw==", + "optional": true, + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/tar-stream": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-1.6.2.tgz", + "integrity": "sha512-rzS0heiNf8Xn7/mpdSVVSMAWAoy9bfb1WOTYC78Z0UQKeKa/CWS8FOq0lKGNa8DWKAn9gxjCvMLYc5PGXYlK2A==", + "optional": true, + "dependencies": { + "bl": "^1.0.0", + "buffer-alloc": "^1.2.0", + "end-of-stream": "^1.0.0", + "fs-constants": "^1.0.0", + "readable-stream": "^2.3.0", + "to-buffer": "^1.1.1", + "xtend": "^4.0.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/tar-stream/node_modules/bl": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/bl/-/bl-1.2.3.tgz", + "integrity": "sha512-pvcNpa0UU69UT341rO6AYy4FVAIkUHuZXRIWbq+zHnsVcRzDDjIAhGuuYoi0d//cwIwtt4pkpKycWEfjdV+vww==", + "optional": true, + "dependencies": { + "readable-stream": "^2.3.5", + "safe-buffer": "^5.1.1" + } + }, + "node_modules/to-buffer": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/to-buffer/-/to-buffer-1.1.1.tgz", + "integrity": "sha512-lx9B5iv7msuFYE3dytT+KE5tap+rNYw+K4jVkb9R/asAb+pbBSM17jtunHplhBe6RRJdZx3Pn2Jph24O32mOVg==", + "optional": true + }, + "node_modules/traverse": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/traverse/-/traverse-0.3.9.tgz", + "integrity": "sha512-iawgk0hLP3SxGKDfnDJf8wTz4p2qImnyihM5Hh/sGvQ3K37dPi/w8sRhdNIxYA1TwFwc5mDhIJq+O0RsvXBKdQ==", + "engines": { + "node": "*" + } + }, + "node_modules/tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", + "optional": true, + "dependencies": { + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": "*" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "node_modules/uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", + "deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. 
See https://v8.dev/blog/math-random for details.", + "bin": { + "uuid": "bin/uuid" + } + }, + "node_modules/which-pm-runs": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/which-pm-runs/-/which-pm-runs-1.1.0.tgz", + "integrity": "sha512-n1brCuqClxfFfq/Rb0ICg9giSZqCS+pLtccdag6C2HyufBrh3fBOiy9nb6ggRMvWOVH5GrdJskj5iGTZNxd7SA==", + "optional": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/wide-align": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.5.tgz", + "integrity": "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==", + "optional": true, + "dependencies": { + "string-width": "^1.0.2 || 2 || 3 || 4" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "optional": true + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "optional": true, + "engines": { + "node": ">=0.4" + } + } + } +} diff --git a/streams_playground/kafka/package.json b/streams_playground/kafka/package.json new file mode 100644 index 00000000..c109c91e --- /dev/null +++ b/streams_playground/kafka/package.json @@ -0,0 +1,16 @@ +{ + "name": "kafka", + "version": "1.0.0", + "main": "consumer.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1", + "start": "node consumer.js" + }, + "keywords": [], + "author": "", + "license": "ISC", + "description": "", + "dependencies": { + "kafka-node": "^5.0.0" + } +} diff --git a/streams_playground/rabbitmq/docker-compose.yml b/streams_playground/rabbitmq/docker-compose.yml new file mode 100644 index 00000000..bd8dc1c5 --- /dev/null +++ 
b/streams_playground/rabbitmq/docker-compose.yml @@ -0,0 +1,14 @@ +services: + rabbitmq: + image: rabbitmq:3-management + ports: + - "5672:5672" + - "15672:15672" + environment: + RABBITMQ_DEFAULT_USER: guest + RABBITMQ_DEFAULT_PASS: guest + networks: + - rabbitmq_net +networks: + rabbitmq_net: + driver: bridge \ No newline at end of file diff --git a/streams_playground/rabbitmq/index.js b/streams_playground/rabbitmq/index.js new file mode 100644 index 00000000..5fb97596 --- /dev/null +++ b/streams_playground/rabbitmq/index.js @@ -0,0 +1,27 @@ +const amqp = require('amqplib'); + +async function setup() { + const connection = await amqp.connect('amqp://guest:guest@localhost:5672'); + const channel = await connection.createChannel(); + + const exchange = 'logs'; + const queue = 'joshes_logs'; + const routingKey = 'info'; + + await channel.assertExchange(exchange, 'direct', { durable: false }); + await channel.assertQueue(queue, { durable: false }); + await channel.bindQueue(queue, exchange, routingKey); + + console.log(`Queue ${queue} is bound to exchange ${exchange} with routing key ${routingKey}`); + + channel.consume(queue, (msg) => { + if (msg !== null) { + console.log(`[x] Received ${msg.content.toString()}`); + channel.ack(msg); + } + }); + + console.log(`[*] Waiting for messages in ${queue}. 
To exit press CTRL+C`); +} + +setup().catch(console.error); diff --git a/streams_playground/rabbitmq/package-lock.json b/streams_playground/rabbitmq/package-lock.json new file mode 100644 index 00000000..f6990499 --- /dev/null +++ b/streams_playground/rabbitmq/package-lock.json @@ -0,0 +1,124 @@ +{ + "name": "rabbitmq", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "rabbitmq", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "amqplib": "^0.10.4" + } + }, + "node_modules/@acuminous/bitsyntax": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/@acuminous/bitsyntax/-/bitsyntax-0.1.2.tgz", + "integrity": "sha512-29lUK80d1muEQqiUsSo+3A0yP6CdspgC95EnKBMi22Xlwt79i/En4Vr67+cXhU+cZjbti3TgGGC5wy1stIywVQ==", + "dependencies": { + "buffer-more-ints": "~1.0.0", + "debug": "^4.3.4", + "safe-buffer": "~5.1.2" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/amqplib": { + "version": "0.10.4", + "resolved": "https://registry.npmjs.org/amqplib/-/amqplib-0.10.4.tgz", + "integrity": "sha512-DMZ4eCEjAVdX1II2TfIUpJhfKAuoCeDIo/YyETbfAqehHTXxxs7WOOd+N1Xxr4cKhx12y23zk8/os98FxlZHrw==", + "dependencies": { + "@acuminous/bitsyntax": "^0.1.2", + "buffer-more-ints": "~1.0.0", + "readable-stream": "1.x >=1.1.9", + "url-parse": "~1.5.10" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/buffer-more-ints": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/buffer-more-ints/-/buffer-more-ints-1.0.0.tgz", + "integrity": "sha512-EMetuGFz5SLsT0QTnXzINh4Ksr+oo4i+UGTXEshiGCQWnsgSs7ZhJ8fzlwQ+OzEMs0MpDAMr1hxnblp5a4vcHg==" + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, + "node_modules/debug": { + "version": "4.3.6", + "resolved": 
"https://registry.npmjs.org/debug/-/debug-4.3.6.tgz", + "integrity": "sha512-O/09Bd4Z1fBrU4VzkhFqVgpPzaGbw6Sm9FEkBT1A/YBXQFGuuSxa1dN2nxgxS34JmKXqYx8CZAwEVoJFImUXIg==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/querystringify": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==" + }, + "node_modules/readable-stream": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", + "integrity": "sha512-+MeVjFf4L44XUkhM1eYbD8fyEsxcV81pqMSR5gblfcLCHfZvbrqy4/qYHE+/R5HoBUT11WV5O08Cr1n3YXkWVQ==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "0.0.1", + "string_decoder": "~0.10.x" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" + }, + "node_modules/safe-buffer": { + "version": 
"5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ==" + }, + "node_modules/url-parse": { + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", + "dependencies": { + "querystringify": "^2.1.1", + "requires-port": "^1.0.0" + } + } + } +} diff --git a/streams_playground/rabbitmq/package.json b/streams_playground/rabbitmq/package.json new file mode 100644 index 00000000..9275204f --- /dev/null +++ b/streams_playground/rabbitmq/package.json @@ -0,0 +1,16 @@ +{ + "name": "rabbitmq", + "version": "1.0.0", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1", + "start": "node index.js" + }, + "keywords": [], + "author": "", + "license": "ISC", + "description": "", + "dependencies": { + "amqplib": "^0.10.4" + } +} diff --git a/streams_playground/webhook/package-lock.json b/streams_playground/webhook/package-lock.json new file mode 100644 index 00000000..3cfd0c26 --- /dev/null +++ b/streams_playground/webhook/package-lock.json @@ -0,0 +1,698 @@ +{ + "name": "webhook", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "webhook", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "body-parser": "^1.20.2", + "express": "^4.19.2" + } + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": 
"sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" + }, + "node_modules/body-parser": { + "version": "1.20.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", + "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.5", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.11.0", + "raw-body": "2.5.2", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/call-bind": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/content-disposition": { + "version": "0.5.4", + "resolved": 
"https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" + }, + "node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + }, + "node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "node_modules/etag": { + "version": "1.8.1", 
+ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express": { + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", + "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.2", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.6.0", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.2.0", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.1", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.7", + "proxy-addr": "~2.0.7", + "qs": "6.11.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.18.0", + "serve-static": "1.15.0", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/finalhandler": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", + "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": 
"sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + 
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", + "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": 
"sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/object-inspect": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz", + "integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-to-regexp": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", + "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + }, + "node_modules/proxy-addr": { + "version": 
"2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/qs": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "dependencies": { + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": 
"https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/send": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", + "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/serve-static": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", + "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "dependencies": { + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.18.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/setprototypeof": { + 
"version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + }, + "node_modules/side-channel": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "engines": { + "node": ">= 0.8" + } + } + } +} diff --git a/streams_playground/webhook/package.json b/streams_playground/webhook/package.json new file mode 100644 index 00000000..4c2ace30 --- /dev/null +++ b/streams_playground/webhook/package.json @@ -0,0 +1,17 @@ +{ + "name": "webhook", + "version": "1.0.0", + "main": "server.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1", + "start": "node server.js" + }, + "keywords": [], + "author": "", + "license": "ISC", + "description": "", + "dependencies": { + "body-parser": "^1.20.2", + "express": "^4.19.2" + } +} diff --git a/streams_playground/webhook/server.js b/streams_playground/webhook/server.js new file mode 100644 index 00000000..19492772 --- /dev/null +++ b/streams_playground/webhook/server.js @@ -0,0 +1,28 @@ +const express = require('express'); +const bodyParser = require('body-parser'); + +const app = express(); +const port = 5003; + +app.use(bodyParser.json()); + +app.use(bodyParser.urlencoded({ extended: true })); + +app.post('/webhook', (req, res) => { + const shared_secret = req.headers['x-rindexer-shared-secret']; + if (shared_secret !== "123") { + console.log('Shared secret does not match'); + res.status(401).send('Unauthorized'); + return; + } + + const receivedData = req.body; + + console.log(`${new Date().toISOString()} - Received webhook data:`, JSON.stringify(receivedData, null, 2)); + + res.status(200).send('OK'); +}); + +app.listen(port, () => { + console.log(`Listening for webhooks on port ${port}...`); +});