diff --git a/.env.example b/.env.example index 6577d37..4f2f325 100644 --- a/.env.example +++ b/.env.example @@ -1,4 +1 @@ -ALLORA_ACCOUNT_NAME=secret -ALLORA_ACCOUNT_MNEMONIC=secret -ALLORA_ACCOUNT_PASSPHRASE=secret -ALLORA_NODE_RPC=https://localhost:26657 +ALLORA_OFFCHAIN_NODE_CONFIG_JSON='{"wallet":{"addressKeyName":"test-offchain","addressRestoreMnemonic":"surge verify input...","alloraHomeDir":"","gas":"1000000","gasAdjustment":1,"nodeRpc":"http://localhost:26657","maxRetries":3,"delay":1,"submitTx":false},"worker":[{"topicId":1,"inferenceEntrypointName":"api-worker-reputer","loopSeconds":5,"parameters":{"InferenceEndpoint":"http://localhost:8000/inference/{Token}","Token":"ETH"}}]}' diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..bd768aa --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,45 @@ + + +Closes: #XXX + +## What is the purpose of the change + +> Add a description of the overall background and high level changes that this PR introduces + +*(E.g.: This pull request improves documentation of area A by adding ....)* + +## Testing and Verifying + +*(Please pick one of the following options)* + +This change is a trivial rework / code cleanup without any test coverage. + +*(or)* + +This change is already covered by existing tests, such as *(please describe tests)*. + +*(or)* + +This change added tests and can be verified as follows: + +*(example:)* + - *Added unit test that validates ...* + - *Added integration tests for end-to-end deployment with ...* + - *Extended integration test for ...* + - *Manually verified the change by ...* + +## Documentation and Release Note + + - [ ] Does this pull request introduce a new feature or user-facing behavior changes? + + +Where is the change documented? + - [ ] Specification (`x/{module}/README.md`) + - [ ] Allora documentation site `docs.allora.network` source code at: `https://github.com/allora-network/docs` + - [ ] Code comments? 
+ - [ ] N/A diff --git a/.github/workflows/build_push_docker_hub.yml b/.github/workflows/build_push_docker_hub.yml new file mode 100644 index 0000000..381dc68 --- /dev/null +++ b/.github/workflows/build_push_docker_hub.yml @@ -0,0 +1,50 @@ +# This workflow will build and push a new container image to Docker hub based on the tag +name: Build and Push docker image to Docker Hub + +on: + push: + tags: + - 'v*' + - dev-latest + +concurrency: + group: ${{ github.ref }} + cancel-in-progress: true + +jobs: + build-push: + name: Build and Push docker image + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Build, tag, and push image to Docker Hub + id: build-push-image + env: + DOCKERHUB_USERNAME: alloranetwork + DOCKERHUB_REPOSITORY: ${{github.event.repository.name}} + run: | + GIT_TAG="$(echo $GITHUB_REF| sed 's#refs/tags/##')" + + IMAGE_TAG="${GITHUB_SHA:0:8}" + EXTRA_IMAGE_TAGS=$GIT_TAG + + if [[ ${GIT_TAG} == v* ]]; then + EXTRA_IMAGE_TAGS="${EXTRA_IMAGE_TAGS};latest" + fi + + docker build --pull -t $DOCKERHUB_USERNAME/$DOCKERHUB_REPOSITORY:$IMAGE_TAG . 
+ docker push $DOCKERHUB_USERNAME/$DOCKERHUB_REPOSITORY:$IMAGE_TAG + + for tag in $(echo $EXTRA_IMAGE_TAGS| tr ";" "\n"); do + docker tag $DOCKERHUB_USERNAME/$DOCKERHUB_REPOSITORY:$IMAGE_TAG $DOCKERHUB_USERNAME/$DOCKERHUB_REPOSITORY:$tag + docker push $DOCKERHUB_USERNAME/$DOCKERHUB_REPOSITORY:$tag + done diff --git a/.github/workflows/build_push_ecr.yml b/.github/workflows/build_push_ecr.yml index f802f7f..4027990 100644 --- a/.github/workflows/build_push_ecr.yml +++ b/.github/workflows/build_push_ecr.yml @@ -1,5 +1,4 @@ -# This workflow will build and push a new container image to Amazon ECR, -# and then will deploy a new task definition to Amazon ECS which will be run by Fargate when a release is created +# This workflow will build and push a new container image to private AWS ECR repo based on the tag name: Build and Push docker image to ECR on: @@ -35,11 +34,12 @@ jobs: id: login-ecr uses: aws-actions/amazon-ecr-login@v1 - - name: offchain node build, tag, and push image to Amazon ECR - id: build-push-image-node + - name: Source Build, tag, and push image to Amazon ECR + id: build-push-image-source + working-directory: ./adapter/api/source env: ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }} - ECR_REPOSITORY: ${{github.event.repository.name}} # Naming convention: ECR registry name == GITHUB repo name + ECR_REPOSITORY: ${{github.event.repository.name}}-source run: | #! Due to we trigger on push.tags GITHUB_REF - is the tag name GIT_TAG="$(echo $GITHUB_REF| sed 's#refs/tags/##')" @@ -53,8 +53,7 @@ jobs: fi # Build a docker container and push it to ECR so that it can be deployed to ECS. - docker build --pull -f Dockerfile_b7s \ - -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG . + docker build --pull -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG . 
docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG # Build and PUSH additional tags diff --git a/.gitignore b/.gitignore index da6a2b8..c92eeea 100644 --- a/.gitignore +++ b/.gitignore @@ -25,3 +25,5 @@ go.work.sum # Personal data .env +.allorad +config.json diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..22651db --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Any contribution that you make to this repository will +be under the Apache 2 License, as dictated by that +[license](http://www.apache.org/licenses/LICENSE-2.0.html): + +~~~ +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. +~~~ + +Contributors must sign-off each commit by adding a `Signed-off-by: ...` +line to commit messages to certify that they have the right to submit +the code they are contributing to the project according to the +[Developer Certificate of Origin (DCO)](https://developercertificate.org/). + +Contributors are encouraged to enable our pre-commit hooks to facilitate +the passage of CI/CD-related checks in our repositories. To enable them, +please run the following command in the root of the repository: + +```bash +chmod +x .hooks/pre-commit +git config core.hooksPath .hooks/pre-commit +``` + +This will enable the pre-commit hooks for the repository and ensure that +all commits are checked for compliance with our formatter and other +checks before they are accepted into the repository. 
diff --git a/Dockerfile b/Dockerfile index d245927..1a39b3e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,13 @@ FROM golang:alpine -WORKDIR /app +WORKDIR /node -COPY . /app +COPY go.mod go.sum ./ + +RUN go mod download + +COPY . . RUN go build -o allora_offchain_node -CMD ["go", "run", "allora_offchain_node"] +CMD ["./allora_offchain_node"] diff --git a/README.md b/README.md index 6406a90..2ed768e 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,31 @@ Allora off-chain nodes publish inferences, forecasts, and losses informed by a configurable ground truth to the Allora chain. +## How to run with docker +1. Clone the repository +2. Make sure to remove any .env file so it doesn't clash with the automated environment variables +3. Copy config.example.json and populate with your variables + +```shell +cp config.example.json config.json +``` +4. Run + +```shell +chmod +x init.docker +./init.docker +``` + +from the root directory. This will: + - Automatically create allora keys for you. You will have to request some tokens from the faucet to be able to register your worker and stake your reputer. You can find your address in ./data/env_file + - Automatically export the needed variables from the account created to be used by the offchain node and bundles it with your provided config.json and then passes them to the node as environment variables + +5. Run `docker compose up --build`. This will: + - Run both the offchain node and the source services, communicating through endpoints attached to the internal dns + +Please note that the environment variable will be created as a bundle of your config.json and allora account secrets, please make sure to remove all secrets before committing to a remote git repository + + ## How to run without docker 1. Clone the repository @@ -27,10 +52,20 @@ cp .env.example .env 9. 
Run the following commands: ```shell -chmod +x run -./run +chmod +x start.local +./start.local ``` + +## How to configure + +There are several ways to configure the node. In order of preference, you can do any of these: +* Set the `ALLORA_OFFCHAIN_NODE_CONFIG_JSON` env var with a configuration as a JSON string. +* Set the `ALLORA_OFFCHAIN_NODE_CONFIG_FILE_PATH` env var pointing to a file, which contains configuration as JSON. An example is provided in `config.example.json`. + +Each option completely overwrites the other options. + + This is the entrypoint for the application that simply builds and runs the Go program. It spins off a distinct processes per role worker, reputer per topic configered in `config.json`. @@ -46,7 +81,7 @@ It spins off a distinct processes per role worker, reputer per topic configered 1. Get and set latest_open_worker_nonce_from_chain from the chain 2. If latest_open_worker_nonce_from_chain does not exist or nil then continue to next loop 1. i.e. wait another config.loop_seconds - 3. Retry request_retries times with backoff: + 3. Retry request_retries times with uniform backoff: 1. Invoke configured `inferenceEntrypoint`, `forecastEntrypoint` for topic and get results 1. Else, break this inner retry loop 2. Attempt to commit inference and forecast bundle to the chain @@ -68,7 +103,7 @@ It spins off a distinct processes per role worker, reputer per topic configered 1. Get and set latest_open_reputer_nonce_from_chain from the chain 2. If latest_open_reputer_nonce_from_chain does not exist or nil then continue to next loop 1. i.e. wait another config.loop_seconds - 3. Retry request_retries times with backoff: + 3. Retry request_retries times with uniform backoff: 1. Invoke configured `truthEntrypoint, lossEntrypoint` for topic and get results 1. Else, break this inner retry loop 2. 
Attempt to commit loss bundle to the chain diff --git a/adapter/README.md b/adapter/README.md new file mode 100644 index 0000000..a412d3b --- /dev/null +++ b/adapter/README.md @@ -0,0 +1,12 @@ +# Adapters + +This directory contains adapters for configuring different options in the node. + +## Adding an adapter + +To add an adapter: +* Add a directory that corresponds to the type eg API, Postgres, etc +* `cd` into the directory and add another directory that corresponds to the package name. +* You can also add your source (eg API server, Postgres db, etc) into this directory +* Create a main.go file inside the package implementing the interface `lib.AlloraAdapter`. +* add a case in the switch in `adapter_factory.go`. diff --git a/adapter/api/source/Dockerfile b/adapter/api/source/Dockerfile new file mode 100644 index 0000000..6908a0a --- /dev/null +++ b/adapter/api/source/Dockerfile @@ -0,0 +1,15 @@ +FROM python:3.9-slim + +RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/* + +WORKDIR /app + +COPY requirements.txt . + +RUN pip install --no-cache-dir -r requirements.txt + +COPY . . + +EXPOSE 8000 + +CMD ["python", "main.py"] diff --git a/adapter/api/source/README.md b/adapter/api/source/README.md new file mode 100644 index 0000000..5143a0c --- /dev/null +++ b/adapter/api/source/README.md @@ -0,0 +1,32 @@ +# Offchain API Source (Server) +This is a small Python Flask API that can be run locally or within a Docker container. It's meant to demonstrate what your server will look like generating model inference and forecast to be called by the adapter + +### Prerequisites +* Python 3.x +* Flask +* Docker (if you want to run the API in a Docker container) + +### Installing Dependencies +Before running the API, you need to install the required dependencies. 
+ +```bash +pip install -r requirements.txt +``` + +### Running the API Locally +To run the Flask API locally, use the following command: +```bash +python main.py +``` +This will start the Flask development server on http://127.0.0.1:8000/ + +### Running the API with Docker +Alternatively, you can run the API using Docker. +```bash +docker build -t offchain-api-source . +``` +Run the Docker container +```bash +docker run -p 8000:8000 offchain-api-source +``` +This will start the Flask API server on http://localhost:8000/ diff --git a/adapter/api/source/main.py b/adapter/api/source/main.py new file mode 100644 index 0000000..19fe1c7 --- /dev/null +++ b/adapter/api/source/main.py @@ -0,0 +1,35 @@ +from flask import Flask, jsonify +import random + +app = Flask(__name__) + +class NodeValue: + def __init__(self, worker, value): + self.worker = worker + self.value = value + +@app.route('/', methods=['GET']) +def health(): + return "Hello, World, I'm alive!" + +@app.route('/inference/', methods=['GET']) +def get_inference(token): + random_float = str(random.uniform(0.0, 100.0)) + return random_float + +@app.route('/forecast', methods=['GET']) +def get_forecast(): + node_values = [ + NodeValue("Worker1", str(random.uniform(0.0, 100.0))), + NodeValue("Worker2", str(random.uniform(0.0, 100.0))), + NodeValue("Worker3", str(random.uniform(0.0, 100.0))), + ] + return jsonify([nv.__dict__ for nv in node_values]) + +@app.route('/truth//', methods=['GET']) +def get_truth(token, blockheight): + random_float = str(random.uniform(0.0, 100.0)) + return random_float + +if __name__ == '__main__': + app.run(debug=True, host='0.0.0.0', port=8000) diff --git a/adapter/api/source/requirements.txt b/adapter/api/source/requirements.txt new file mode 100644 index 0000000..e3e9a71 --- /dev/null +++ b/adapter/api/source/requirements.txt @@ -0,0 +1 @@ +Flask diff --git a/adapter/api/worker-reputer/README.md b/adapter/api/worker-reputer/README.md new file mode 100644 index 0000000..bbc4732 --- 
/dev/null +++ b/adapter/api/worker-reputer/README.md @@ -0,0 +1,85 @@ +# Allora Offchain API Adapter + +This repository contains the adapter module for the Allora Offchain Node. The adapter is responsible for connecting the offchain node to external systems via hitting an URL, for inferences, forecasts or source of truth. + +It is intended to be used by configuration. + +This Adapter is intended to be used to send inferences and/or forecasts and/or source_truth from external services which provide an API endpoint. + +## Config + +To use and configure this adapter, please use the WorkerConfig object. +Example as Worker: + +``` +Worker: []lib.WorkerConfig{ + TopicId: 1, + InferenceEntrypoint: apiAdapter.NewAlloraAdapter(), + ForecastEntrypoint: apiAdapter.NewAlloraAdapter(), + LoopSeconds: 5, + Parameters: map[string]string{ + "Token": "ETH", + "InferenceEndpoint": "http://localhost:8000/inference/{Token}", + "ForecastEndpoint": "http://localhost:8000/forecast/{TopicId}/{BlockHeight}", + }, +}, +``` + +Example as Reputer: +``` +Reputer: []lib.ReputerConfig{ + { + TopicId: 1, + ReputerEntrypoint: apiAdapter.NewAlloraAdapter(), + LoopSeconds: 30, + MinStake: 100000, + Parameters: map[string]string{ + "SourceOfTruthEndpoint": "http://localhost:8000/groundtruth/{Token}/{BlockHeight}", + "Token": "ethereum", + }, + }, +}, +``` + +## Parameters +The parameters section contains additional properties the user wants to use to configure their URLs to hit. + +### Worker + +`InferenceEndpoint` is required if `InferenceEntrypoint` is defined. +`ForecastEndpoint` is required if `ForecastEntrypoint` is defined. + +`InferenceEndpoint`: provides the inference endpoint to hit. It supports URL template variables. +`ForecastEndpoint`: provides the forecast endpoint to hit. It supports URL template variables. + +If it is not desired to send inferences or forecasts, it can be configured by setting that specific entrypoint to nil. 
Example, for not sending inferences: +``` +InferenceEntrypoint: nil +``` + +### Reputer + +`SourceOfTruthEndpoint`is required if `ReputerEntrypoint` is defined. + +### Additional Parameters + +Any additional parameter can be defined freely, like `Token` in the example, and be used in the endpoint templates. +Additional parameters do not support template variables. + + +## Template variables + +The URLs support template variables as defined from the Parameters section. + +In addition, it supports two special variables: +* TopicId: as defined in WorkerConfig object +* BlockHeight: the blockheight at which the operation happens + + +## Usage + +* Set up your inference and/or forecast models and serve results via an API. +* Add a Worker configuration like the above in your config.go, configuring your endpoints appropriately. +* Configure the rest of the Allora Offchain Node (e.g. wallet) +* Run the Allora Offchain Node + diff --git a/adapter/api/worker-reputer/main.go b/adapter/api/worker-reputer/main.go new file mode 100644 index 0000000..ade7c0b --- /dev/null +++ b/adapter/api/worker-reputer/main.go @@ -0,0 +1,129 @@ +package api_worker_reputer + +import ( + "allora_offchain_node/lib" + "encoding/json" + "fmt" + "io" + "math" + "net/http" + "strconv" + "strings" + + "github.com/rs/zerolog/log" +) + +type AlloraAdapter struct { + name string +} + +func (a *AlloraAdapter) Name() string { + return a.name +} + +func replacePlaceholders(urlTemplate string, params map[string]string) string { + for key, value := range params { + placeholder := fmt.Sprintf("{%s}", key) + urlTemplate = strings.ReplaceAll(urlTemplate, placeholder, value) + } + return urlTemplate +} + +// Replace placeholders and also the blockheheight +func replaceExtendedPlaceholders(urlTemplate string, params map[string]string, blockHeight int64, topicId uint64) string { + // Create a map of default parameters + blockHeightAsString := strconv.FormatInt(blockHeight, 10) + topicIdAsString := 
strconv.FormatUint(topicId, 10) + defaultParams := map[string]string{ + "BlockHeight": blockHeightAsString, + "TopicId": topicIdAsString, + } + urlTemplate = replacePlaceholders(urlTemplate, defaultParams) + urlTemplate = replacePlaceholders(urlTemplate, params) + return urlTemplate +} + +func requestEndpoint(url string) (string, error) { + // make request to url + resp, err := http.Get(url) + if err != nil { + return "", fmt.Errorf("failed to make request to %s: %w", url, err) + } + defer resp.Body.Close() + + // Check if the response status is OK + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("received non-OK HTTP status %d", resp.StatusCode) + } + + // Read the response body + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("failed to read response body: %w", err) + } + + log.Debug().Bytes("body", body).Msg("Inference") + // convert bytes to string + return string(body), nil +} + +// Expects an inference as a string scalar value +func (a *AlloraAdapter) CalcInference(node lib.WorkerConfig, blockHeight int64) (string, error) { + urlTemplate := node.Parameters["InferenceEndpoint"] + url := replaceExtendedPlaceholders(urlTemplate, node.Parameters, blockHeight, node.TopicId) + log.Debug().Str("url", url).Msg("Inference") + return requestEndpoint(url) +} + +// Expects forecast as a json array of NodeValue +func (a *AlloraAdapter) CalcForecast(node lib.WorkerConfig, blockHeight int64) ([]lib.NodeValue, error) { + urlTemplate := node.Parameters["InferenceEndpoint"] + url := replaceExtendedPlaceholders(urlTemplate, node.Parameters, blockHeight, node.TopicId) + log.Debug().Str("url", url).Msg("Inference") + forecastsAsString, err := requestEndpoint(url) + if err != nil { + log.Error().Err(err).Msg("Failed to get forecasts") + return []lib.NodeValue{}, err + } + // parse json forecasts into a slice of NodeValue + var nodeValues []lib.NodeValue + err = json.Unmarshal([]byte(forecastsAsString), &nodeValues) + if err != nil { + 
log.Error().Err(err).Msg("Error unmarshalling JSON forecasts") + } + return nodeValues, nil +} + +func (a *AlloraAdapter) SourceTruth(node lib.ReputerConfig, blockHeight int64) (lib.Truth, error) { + urlTemplate := node.Parameters["SourceOfTruthEndpoint"] + url := replaceExtendedPlaceholders(urlTemplate, node.Parameters, blockHeight, node.TopicId) + log.Debug().Str("url", url).Msg("Source of truth") + return requestEndpoint(url) +} + +func (a *AlloraAdapter) LossFunction(sourceTruth string, inferenceValue string) string { + sourceTruthFloat, _ := strconv.ParseFloat(sourceTruth, 64) + inferenceValueFloat, _ := strconv.ParseFloat(inferenceValue, 64) + loss := math.Abs(sourceTruthFloat - inferenceValueFloat) + str := fmt.Sprintf("%f", loss) + log.Debug().Str("str", str).Msg("Returned loss value") + return str +} + +func (a *AlloraAdapter) CanInfer() bool { + return true +} + +func (a *AlloraAdapter) CanForecast() bool { + return true +} + +func (a *AlloraAdapter) CanSourceTruthAndComputeLoss() bool { + return true +} + +func NewAlloraAdapter() *AlloraAdapter { + return &AlloraAdapter{ + name: "api-worker-reputer", + } +} diff --git a/adapter/worker_reputer_rest_api_l1_loss/main.go b/adapter/worker_reputer_rest_api_l1_loss/main.go deleted file mode 100644 index 3c10261..0000000 --- a/adapter/worker_reputer_rest_api_l1_loss/main.go +++ /dev/null @@ -1,92 +0,0 @@ -package worker_reputer_rest_api_l1_loss - -import ( - "allora_offchain_node/lib" - "fmt" - "io" - "math" - "net/http" - "strconv" - - "github.com/rs/zerolog/log" -) - -type AlloraAdapter struct { - name string -} - -func (a *AlloraAdapter) Name() string { - return a.name -} - -func requestLocalEndpoint(url string) (string, error) { - // make request to url - resp, err := http.Get(url) - if err != nil { - return "", fmt.Errorf("failed to make request to %s: %w", url, err) - } - defer resp.Body.Close() - - // Check if the response status is OK - if resp.StatusCode != http.StatusOK { - return "", 
fmt.Errorf("received non-OK HTTP status %d", resp.StatusCode) - } - - // Read the response body - body, err := io.ReadAll(resp.Body) - if err != nil { - return "", fmt.Errorf("failed to read response body: %w", err) - } - - log.Debug().Bytes("body", body).Msg("Inference") - // convert bytes to string - return string(body), nil -} - -func (a *AlloraAdapter) CalcInference(node lib.WorkerConfig, blockHeight int64) (string, error) { - urlBase := node.Parameters["inferenceEndpoint"] - token := node.Parameters["token"] - url := fmt.Sprintf("%s/%s", urlBase, token) - return requestLocalEndpoint(url) -} - -func (a *AlloraAdapter) CalcForecast(node lib.WorkerConfig, blockHeight int64) ([]lib.NodeValue, error) { - log.Debug().Str("name", a.name).Msg("Forecast") - return []lib.NodeValue{}, nil -} - -func (a *AlloraAdapter) SourceTruth(node lib.ReputerConfig, blockHeight int64) (lib.Truth, error) { - log.Debug().Str("name", a.name).Msg("truth") - urlBase := node.Parameters["truthEndpoint"] - token := node.Parameters["token"] - url := fmt.Sprintf("%s/%s", urlBase, token) - return requestLocalEndpoint(url) -} - -func (a *AlloraAdapter) LossFunction(sourceTruth string, inferenceValue string) string { - log.Debug().Str("name", a.name).Msg("Loss function processing") - sourceTruthFloat, _ := strconv.ParseFloat(sourceTruth, 64) - inferenceValueFloat, _ := strconv.ParseFloat(inferenceValue, 64) - loss := math.Abs(sourceTruthFloat - inferenceValueFloat) - str := fmt.Sprintf("%f", loss) - log.Debug().Str("str", str).Msg("Returned loss value") - return str -} - -func (a *AlloraAdapter) CanInfer() bool { - return true -} - -func (a *AlloraAdapter) CanForecast() bool { - return false -} - -func (a *AlloraAdapter) CanSourceTruthAndComputeLoss() bool { - return true -} - -func NewAlloraAdapter() *AlloraAdapter { - return &AlloraAdapter{ - name: "worker_reputer_rest_api_l1_loss", - } -} diff --git a/adapter_factory.go b/adapter_factory.go new file mode 100644 index 0000000..18c9b8c --- 
/dev/null +++ b/adapter_factory.go @@ -0,0 +1,17 @@ +package main + +import ( + api_worker_reputer "allora_offchain_node/adapter/api/worker-reputer" + lib "allora_offchain_node/lib" + "fmt" +) + +func NewAlloraAdapter(name string) (lib.AlloraAdapter, error) { + switch name { + case "api-worker-reputer": + return api_worker_reputer.NewAlloraAdapter(), nil + // Add other cases for different adapters here + default: + return nil, fmt.Errorf("unknown adapter name: %s", name) + } +} diff --git a/config.example.json b/config.example.json new file mode 100644 index 0000000..24188ae --- /dev/null +++ b/config.example.json @@ -0,0 +1,36 @@ +{ + "wallet": { + "addressKeyName": "test", + "addressRestoreMnemonic": "", + "alloraHomeDir": "", + "gas": "1000000", + "gasAdjustment": 1.0, + "nodeRpc": "http://localhost:26657", + "maxRetries": 1, + "delay": 1, + "submitTx": false + }, + "worker": [ + { + "topicId": 1, + "inferenceEntrypointName": "api-worker-reputer", + "loopSeconds": 5, + "parameters": { + "InferenceEndpoint": "http://source:8000/inference/{Token}", + "Token": "ETH" + } + } + ], + "reputer": [ + { + "topicId": 1, + "reputerEntrypointName": "api-worker-reputer", + "loopSeconds": 30, + "minStake": 100000, + "parameters": { + "SourceOfTruthEndpoint": "http://source:8888/truth/{Token}/{BlockHeight}", + "Token": "ethereum" + } + } + ] + } diff --git a/config.go b/config.go deleted file mode 100644 index 99c312f..0000000 --- a/config.go +++ /dev/null @@ -1,66 +0,0 @@ -package main - -import ( - apiAdapter "allora_offchain_node/adapter/worker_reputer_rest_api_l1_loss" - "allora_offchain_node/lib" - "os" -) - -//// If in the event inferences, forecasts, truth, or loss functions are -//// not derived from the Python Flask server, new adapters can be made -//// and added to the UserConfig struct below for the same or different topics. 
- -var UserConfig = lib.UserConfig{ - Wallet: lib.WalletConfig{ - AddressKeyName: os.Getenv("ALLORA_ACCOUNT_NAME"), // load a address by key from the keystore testing = allo1wmfp4xuurjsvh3qzjhkdxqgepmshpy7ny88pc7 - AddressRestoreMnemonic: os.Getenv("ALLORA_ACCOUNT_MNEMONIC"), // mnemonic for the allora account - AddressAccountPassphrase: os.Getenv("ALLORA_ACCOUNT_PASSPHRASE"), // passphrase for the allora account - AlloraHomeDir: "", // home directory for the allora keystore, if "", it will automatically create in "$HOME/.allorad" - Gas: "1000000", // gas to use for the allora client in uallo - GasAdjustment: 1.0, // gas adjustment to use for the allora client - SubmitTx: false, // set to false to run in dry-run processes without committing to the chain. useful for dev/testing - NodeRpc: os.Getenv("ALLORA_NODE_RPC"), - MaxRetries: 3, - MinDelay: 1, - MaxDelay: 6, - }, - Worker: []lib.WorkerConfig{ - { - TopicId: 1, - InferenceEntrypoint: apiAdapter.NewAlloraAdapter(), - ForecastEntrypoint: nil, - LoopSeconds: 5, - Parameters: map[string]string{ - //// These communicate with local Python Flask server - "inferenceEndpoint": "http://localhost:8000/inference", - "token": "ETH", - "forecastEndpoint": "http://localhost:8000/forecast", - }, - }, - }, - Reputer: []lib.ReputerConfig{ - { - TopicId: 1, - ReputerEntrypoint: apiAdapter.NewAlloraAdapter(), - LoopSeconds: 30, - MinStake: 100000, - Parameters: map[string]string{ - "truthEndpoint": "http://localhost:8000/truth", - "token": "ethereum", - //// Could put this in Python Flask server as well - // "cgSimpleEndpoint": "https://api.coingecko.com/api/v3/simple/price?vs_currencies=usd&ids=", - // "apiKey": os.Getenv("CG_API_KEY"), - }, - }, - }, -} - -//// The config above implies that I have a local server running on port 8000 -//// that can handle the following endpoints: /inference, /forecast, /truth -//// The server should be able to handle GET requests to these endpoints -//// and return the appropriate data for the 
worker and reputer processes. -//// The server's endpoints are all assigned for topic 1 - -//// It is up to the user to add more topics and endpoints as necessary, -//// and to ensure that models and sources of truth relayed or generated by -//// adapters are compatible with their assigned topic(s). diff --git a/data/env_file b/data/env_file new file mode 100644 index 0000000..f389db5 --- /dev/null +++ b/data/env_file @@ -0,0 +1,2 @@ +NAME=offchain_node +ENV_LOADED=false diff --git a/data/scripts/init.sh b/data/scripts/init.sh new file mode 100755 index 0000000..333d6fa --- /dev/null +++ b/data/scripts/init.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -e + +if allorad keys --home=/data/.allorad --keyring-backend test show $NAME > /dev/null 2>&1 ; then + echo "allora account: $NAME already imported" +else + echo "creating allora account: $NAME" + output=$(allorad keys add $NAME --home=/data/.allorad --keyring-backend test 2>&1) + address=$(echo "$output" | grep 'address:' | sed 's/.*address: //') + mnemonic=$(echo "$output" | tail -n 1) + + # Parse and update the JSON string + updated_json=$(echo "$ALLORA_OFFCHAIN_NODE_CONFIG_JSON" | jq --arg name "$NAME" --arg mnemonic "$mnemonic" ' + .wallet.addressKeyName = $name | + .wallet.addressRestoreMnemonic = $mnemonic + ') + + stringified_json=$(echo "$updated_json" | jq -c .) 
+ + echo "ALLORA_OFFCHAIN_NODE_CONFIG_JSON='$stringified_json'" > /data/env_file + echo ALLORA_OFFCHAIN_ACCOUNT_ADDRESS=$address >> /data/env_file + echo "NAME=$NAME" >> /data/env_file + + echo "Updated ALLORA_OFFCHAIN_NODE_CONFIG_JSON saved to /data/env_file" +fi + + +if grep -q "ENV_LOADED=false" /data/env_file; then + sed -i 's/ENV_LOADED=false/ENV_LOADED=true/' /data/env_file +else + echo "ENV_LOADED=true" >> /data/env_file +fi diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 0000000..0abc3e5 --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,22 @@ +services: + source: + container_name: offchain_source + build: ./adapter/api/source + ports: + - "8000:8000" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000"] + interval: 10s + timeout: 5s + retries: 5 + + node: + container_name: offchain_node + build: . + volumes: + - ./data:/data + depends_on: + source: + condition: service_healthy + env_file: + - ./data/env_file diff --git a/go.mod b/go.mod index 764169d..3abd07e 100644 --- a/go.mod +++ b/go.mod @@ -8,9 +8,10 @@ go 1.22.5 require ( cosmossdk.io/math v1.3.0 - github.com/allora-network/allora-chain v0.2.15-0.20240729135109-60bad8f079d6 + github.com/allora-network/allora-chain v0.2.15-0.20240807021625-402006fe2b77 github.com/cosmos/cosmos-sdk v0.50.8 github.com/ignite/cli/v28 v28.5.0 + github.com/joho/godotenv v1.5.1 github.com/rs/zerolog v1.33.0 ) @@ -43,7 +44,7 @@ require ( github.com/cockroachdb/pebble v1.1.0 // indirect github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect - github.com/cometbft/cometbft v0.38.9 // indirect + github.com/cometbft/cometbft v0.38.10 // indirect github.com/cometbft/cometbft-db v0.9.1 // indirect github.com/cosmos/btcutil v1.0.5 // indirect github.com/cosmos/cosmos-db v1.0.2 // indirect @@ -101,7 +102,6 @@ require ( github.com/klauspost/compress v1.17.9 // indirect github.com/kr/pretty v0.3.1 // indirect 
github.com/kr/text v0.2.0 // indirect - github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/linxGnu/grocksdb v1.8.14 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect diff --git a/go.sum b/go.sum index e23918e..8e09474 100644 --- a/go.sum +++ b/go.sum @@ -48,8 +48,22 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/allora-network/allora-chain v0.2.15-0.20240729135109-60bad8f079d6 h1:EAOQ/ynqXT+FR8x2Cl7fm0cTktwfrb0tp/WyI2Bqz5Y= -github.com/allora-network/allora-chain v0.2.15-0.20240729135109-60bad8f079d6/go.mod h1:nYJd641Y0iieiL+t+P5VOWnYdiAq2kiazdbL1Bi4jrw= +github.com/allora-network/allora-chain v0.2.15-0.20240803224711-cc85ac89877a h1:KSIfJZemC1xNC6EYezAKrWcE8WEDs6WBFQ738SA6Jo0= +github.com/allora-network/allora-chain v0.2.15-0.20240803224711-cc85ac89877a/go.mod h1:jt6mz2EdJgvHenWxhSMAesYTp2Ku68r9nzn3D+8UpPg= +github.com/allora-network/allora-chain v0.2.15-0.20240805202614-778f47de0f0c h1:Ld4q6XScZ9MUEoLV1nvKohq4UYVm0lh078fR0S7Ty10= +github.com/allora-network/allora-chain v0.2.15-0.20240805202614-778f47de0f0c/go.mod h1:jt6mz2EdJgvHenWxhSMAesYTp2Ku68r9nzn3D+8UpPg= +github.com/allora-network/allora-chain v0.2.15-0.20240806144904-cb5ee4981d9a h1:Zku/4IDi6vt7CikJdRL6Qp3rZqH8BFOrkndKfBXWQZc= +github.com/allora-network/allora-chain v0.2.15-0.20240806144904-cb5ee4981d9a/go.mod h1:jt6mz2EdJgvHenWxhSMAesYTp2Ku68r9nzn3D+8UpPg= +github.com/allora-network/allora-chain v0.2.15-0.20240806173712-f3a07a5ea3c2 h1:NDNKu8jA0Khflkud/wxyhXy/iLrK7LWqCDANIvRzoZU= +github.com/allora-network/allora-chain 
v0.2.15-0.20240806173712-f3a07a5ea3c2/go.mod h1:jt6mz2EdJgvHenWxhSMAesYTp2Ku68r9nzn3D+8UpPg= +github.com/allora-network/allora-chain v0.2.15-0.20240806234302-975a0c6c8510 h1:BR+9Fho2bk7w7qphqzaYRNaq2Gw6Kvx6LpHlXOMuldo= +github.com/allora-network/allora-chain v0.2.15-0.20240806234302-975a0c6c8510/go.mod h1:W5dCoxxZltVTmiir93VAy+CFxRKmHW776OrqrPdJCfk= +github.com/allora-network/allora-chain v0.2.15-0.20240807005342-513699dc3d08 h1:vljk7B7bxhzcgg8N7VdCvzyGdl117ZO6Bj3lrYzgHwM= +github.com/allora-network/allora-chain v0.2.15-0.20240807005342-513699dc3d08/go.mod h1:W5dCoxxZltVTmiir93VAy+CFxRKmHW776OrqrPdJCfk= +github.com/allora-network/allora-chain v0.2.15-0.20240807020405-e86664a4ad03 h1:YIRZM1PWqqy4Pwb+1HtoaUHNihfIXgVDNdj89AEBrHg= +github.com/allora-network/allora-chain v0.2.15-0.20240807020405-e86664a4ad03/go.mod h1:W5dCoxxZltVTmiir93VAy+CFxRKmHW776OrqrPdJCfk= +github.com/allora-network/allora-chain v0.2.15-0.20240807021625-402006fe2b77 h1:DARZm4skhtgDIfik/Iq0ejJk6D8G2sh1n1CeLZjtsgQ= +github.com/allora-network/allora-chain v0.2.15-0.20240807021625-402006fe2b77/go.mod h1:W5dCoxxZltVTmiir93VAy+CFxRKmHW776OrqrPdJCfk= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -125,8 +139,8 @@ github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/cometbft/cometbft v0.38.9 h1:cJBJBG0mPKz+sqelCi/hlfZjadZQGdDNnu6YQ1ZsUHQ= -github.com/cometbft/cometbft v0.38.9/go.mod 
h1:xOoGZrtUT+A5izWfHSJgl0gYZUE7lu7Z2XIS1vWG/QQ= +github.com/cometbft/cometbft v0.38.10 h1:2ePuglchT+j0Iao+cfmt/nw5U7K2lnGDzXSUPGVdXaU= +github.com/cometbft/cometbft v0.38.10/go.mod h1:jHPx9vQpWzPHEAiYI/7EDKaB1NXhK6o3SArrrY8ExKc= github.com/cometbft/cometbft-db v0.9.1 h1:MIhVX5ja5bXNHF8EYrThkG9F7r9kSfv8BX4LWaxWJ4M= github.com/cometbft/cometbft-db v0.9.1/go.mod h1:iliyWaoV0mRwBJoizElCwwRA9Tf7jZJOURcRZF9m60U= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -417,6 +431,8 @@ github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4E github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -453,8 +469,6 @@ github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= -github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod 
h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/linxGnu/grocksdb v1.8.14 h1:HTgyYalNwBSG/1qCQUIott44wU5b2Y9Kr3z7SK5OfGQ= diff --git a/init.docker b/init.docker new file mode 100755 index 0000000..e7a5af3 --- /dev/null +++ b/init.docker @@ -0,0 +1,26 @@ +#!/bin/bash + +set -e + +if [ ! -f config.json ]; then + echo "Error: config.json file not found, please provide one" + exit 1 +fi + +nodeName=$(jq -r '.wallet.addressKeyName' config.json) +if [ -z "$nodeName" ]; then + echo "No name was provided for the node, please provide value for wallet.addressKeyName in the config.json" + exit 1 +fi + +ENV_LOADED=$(grep '^ENV_LOADED=' ./data/env_file | cut -d '=' -f 2) +if [ "$ENV_LOADED" = "false" ]; then + json_content=$(cat ./config.json) + stringified_json=$(echo "$json_content" | jq -c .) + + docker run -it --entrypoint=bash -v $(pwd)/data:/data -e NAME="${nodeName}" -e ALLORA_OFFCHAIN_NODE_CONFIG_JSON="${stringified_json}" alloranetwork/allora-chain:latest -c "bash /data/scripts/init.sh" + echo "config.json saved to ./data/env_file" +else + echo "config.json is already loaded, skipping the operation. You can set ENV_LOADED variable to false in ./data/env_file to reload the config.json" +fi + diff --git a/lib/domain_config.go b/lib/domain_config.go index 8293354..d1a0d17 100644 --- a/lib/domain_config.go +++ b/lib/domain_config.go @@ -13,14 +13,12 @@ type WalletConfig struct { Address string // will be overwritten by the keystore. 
This is the 1 value that is auto-generated in this struct AddressKeyName string // load a address by key from the keystore AddressRestoreMnemonic string - AddressAccountPassphrase string AlloraHomeDir string // home directory for the allora keystore Gas string // gas to use for the allora client GasAdjustment float64 // gas adjustment to use for the allora client NodeRpc string // rpc node for allora chain MaxRetries int64 // retry to get data from chain up to this many times per query or tx - MinDelay int64 // minimum of uniform distribution that is sampled then used to calcluate exponential backoff for txs (in seconds) - MaxDelay int64 // maximum of uniform distribution that is sampled then used to calcluate exponential backoff for txs (in seconds) + Delay int64 // minimum of uniform distribution that is sampled then used to calcluate exponential backoff for txs (in seconds) SubmitTx bool // useful for dev/testing. set to false to run in dry-run processes without committing to the chain } @@ -36,17 +34,20 @@ type ChainConfig struct { } type WorkerConfig struct { - TopicId emissions.TopicId - InferenceEntrypoint AlloraAdapter - ForecastEntrypoint AlloraAdapter - LoopSeconds int64 // seconds to wait between attempts to get next worker nonce - AllowsNegativeValue bool - Parameters map[string]string // Map for variable configuration values + TopicId emissions.TopicId + InferenceEntrypointName string + InferenceEntrypoint AlloraAdapter + ForecastEntrypointName string + ForecastEntrypoint AlloraAdapter + LoopSeconds int64 // seconds to wait between attempts to get next worker nonce + AllowsNegativeValue bool + Parameters map[string]string // Map for variable configuration values } type ReputerConfig struct { - TopicId emissions.TopicId - ReputerEntrypoint AlloraAdapter + TopicId emissions.TopicId + ReputerEntrypointName string + ReputerEntrypoint AlloraAdapter // Minimum stake to repute. will try to add stake from wallet if current stake is less than this. 
// Will not repute if current stake is less than this, after trying to add any necessary stake. // This is idempotent in that it will not add more stake than specified here. diff --git a/lib/factory_config.go b/lib/factory_config.go index 895c58a..01c592a 100644 --- a/lib/factory_config.go +++ b/lib/factory_config.go @@ -69,7 +69,7 @@ func (config *UserConfig) GenerateNodeConfig() (*NodeConfig, error) { } } else if config.Wallet.AddressRestoreMnemonic != "" && config.Wallet.AddressKeyName != "" { // restore from mnemonic - account, err = client.AccountRegistry.Import(config.Wallet.AddressKeyName, config.Wallet.AddressRestoreMnemonic, config.Wallet.AddressAccountPassphrase) + account, err = client.AccountRegistry.Import(config.Wallet.AddressKeyName, config.Wallet.AddressRestoreMnemonic, "") if err != nil { if err.Error() == "account already exists" { account, err = client.Account(config.Wallet.AddressKeyName) @@ -82,7 +82,7 @@ func (config *UserConfig) GenerateNodeConfig() (*NodeConfig, error) { } } else { log.Debug().Msg("no allora account was loaded") - return nil, nil + return nil, errors.New("no allora account was loaded") } address, err := account.Address(ADDRESS_PREFIX) diff --git a/lib/repo_query_nonce.go b/lib/repo_query_nonce.go index 095818c..5b1741b 100644 --- a/lib/repo_query_nonce.go +++ b/lib/repo_query_nonce.go @@ -24,7 +24,7 @@ func (node *NodeConfig) GetLatestOpenWorkerNonceByTopicId(topicId emissionstypes return res.Nonces.Nonces[0], nil } -func (node *NodeConfig) GetLatestOpenReputerNonceByTopicId(topicId emissionstypes.TopicId) (BlockHeight, error) { +func (node *NodeConfig) GetOldestReputerNonceByTopicId(topicId emissionstypes.TopicId) (BlockHeight, error) { ctx := context.Background() res, err := node.Chain.EmissionsQueryClient.GetUnfulfilledReputerNonces( @@ -38,6 +38,6 @@ func (node *NodeConfig) GetLatestOpenReputerNonceByTopicId(topicId emissionstype if len(res.Nonces.Nonces) == 0 { return 0, nil } - // Per `AddWorkerNonce()` in 
`allora-chain/x/emissions/keeper.go`, the latest nonce is first - return res.Nonces.Nonces[0].ReputerNonce.BlockHeight, nil + // Per `AddWorkerNonce()` in `allora-chain/x/emissions/keeper.go`, the oldest nonce is last + return res.Nonces.Nonces[len(res.Nonces.Nonces)-1].ReputerNonce.BlockHeight, nil } diff --git a/lib/repo_tx_utils.go b/lib/repo_tx_utils.go index 2a9d6f5..9e3a01c 100644 --- a/lib/repo_tx_utils.go +++ b/lib/repo_tx_utils.go @@ -2,7 +2,6 @@ package lib import ( "context" - "math/rand" "time" "github.com/rs/zerolog/log" @@ -11,6 +10,8 @@ import ( "github.com/ignite/cli/v28/ignite/pkg/cosmosclient" ) +// SendDataWithRetry attempts to send data with a uniform backoff strategy for retries. +// uniform backoff is preferred to avoid exiting the open submission windows func (node *NodeConfig) SendDataWithRetry(ctx context.Context, req sdktypes.Msg, successMsg string) (*cosmosclient.Response, error) { var txResp *cosmosclient.Response var err error @@ -23,12 +24,8 @@ func (node *NodeConfig) SendDataWithRetry(ctx context.Context, req sdktypes.Msg, } // Log the error for each retry. log.Error().Err(err).Str("msg", successMsg).Msgf("Failed, retrying... 
(Retry %d/%d)", retryCount, node.Wallet.MaxRetries) - // Generate a random number between MinDelay and MaxDelay - randomDelay := rand.Intn(int(node.Wallet.MaxDelay-node.Wallet.MinDelay+1)) + int(node.Wallet.MinDelay) - // Apply exponential backoff to the random delay - backoffDelay := randomDelay << retryCount - // Wait for the calculated delay before retrying - time.Sleep(time.Duration(backoffDelay) * time.Second) + // Wait for the uniform delay before retrying + time.Sleep(time.Duration(node.Wallet.Delay) * time.Second) } return txResp, err } diff --git a/main.go b/main.go index 5f6a1d3..f12818e 100644 --- a/main.go +++ b/main.go @@ -1,17 +1,105 @@ package main import ( + "allora_offchain_node/lib" usecase "allora_offchain_node/usecase" + "encoding/json" + "fmt" + "os" "github.com/rs/zerolog" "github.com/rs/zerolog/log" + "github.com/joho/godotenv" ) +const ALLORA_OFFCHAIN_NODE_CONFIG_JSON = "ALLORA_OFFCHAIN_NODE_CONFIG_JSON" +const ALLORA_OFFCHAIN_NODE_CONFIG_FILE_PATH = "ALLORA_OFFCHAIN_NODE_CONFIG_FILE_PATH" + +func ConvertEntrypointsToInstances(userConfig lib.UserConfig) error { + /// Initialize adapters using the factory function + for i, worker := range userConfig.Worker { + if worker.InferenceEntrypointName != "" { + adapter, err := NewAlloraAdapter(worker.InferenceEntrypointName) + if err != nil { + fmt.Println("Error creating inference adapter:", err) + return err + } + userConfig.Worker[i].InferenceEntrypoint = adapter + } + + if worker.ForecastEntrypointName != "" { + adapter, err := NewAlloraAdapter(worker.ForecastEntrypointName) + if err != nil { + fmt.Println("Error creating forecast adapter:", err) + return err + } + userConfig.Worker[i].ForecastEntrypoint = adapter + } + } + + for i, reputer := range userConfig.Reputer { + if reputer.ReputerEntrypointName != "" { + adapter, err := NewAlloraAdapter(reputer.ReputerEntrypointName) + if err != nil { + fmt.Println("Error creating reputer adapter:", err) + return err + } + 
userConfig.Reputer[i].ReputerEntrypoint = adapter + } + } + return nil +} + func main() { + if dotErr := godotenv.Load(); dotErr != nil { + log.Error().Err(dotErr).Msg("Error loading .env file") + } + // UNIX Time is faster and smaller than most timestamps zerolog.TimeFieldFormat = zerolog.TimeFormatUnix log.Info().Msg("Starting allora offchain node...") - spawner := usecase.NewUseCaseSuite(UserConfig) + finalUserConfig := lib.UserConfig{} + alloraJsonConfig := os.Getenv(ALLORA_OFFCHAIN_NODE_CONFIG_JSON) + if alloraJsonConfig != "" { + log.Info().Msg("Config using JSON env var") + // completely reset UserConfig + err := json.Unmarshal([]byte(alloraJsonConfig), &finalUserConfig) + if err != nil { + log.Fatal().Err(err).Msg("Failed to parse JSON config file from Config") + return + } + } else if os.Getenv(ALLORA_OFFCHAIN_NODE_CONFIG_FILE_PATH) != "" { + log.Info().Msg("Config using JSON config file") + // parse file defined in CONFIG_FILE_PATH into UserConfig + file, err := os.Open(os.Getenv(ALLORA_OFFCHAIN_NODE_CONFIG_FILE_PATH)) + if err != nil { + log.Fatal().Err(err).Msg("Failed to open JSON config file") + return + } + defer file.Close() + decoder := json.NewDecoder(file) + // completely reset UserConfig + err = decoder.Decode(&finalUserConfig) + if err != nil { + log.Fatal().Err(err).Msg("Failed to parse JSON config file") + return + } + } else { + log.Fatal().Msg("Could not find config file. 
Please create a config.json file and pass as environment variable.") + return + } + + // Convert entrypoints to instances of adapters + err := ConvertEntrypointsToInstances(finalUserConfig) + if err != nil { + log.Fatal().Err(err).Msg("Failed to convert Entrypoints to instances of adapters") + return + } + spawner, err := usecase.NewUseCaseSuite(finalUserConfig) + if err != nil { + log.Fatal().Err(err).Msg("Failed to initialize use case, exiting") + return + } spawner.Spawn() } diff --git a/run b/start.local similarity index 100% rename from run rename to start.local diff --git a/usecase/build_commit_worker_payload.go b/usecase/build_commit_worker_payload.go index 29bb5fb..c9d0391 100644 --- a/usecase/build_commit_worker_payload.go +++ b/usecase/build_commit_worker_payload.go @@ -16,25 +16,32 @@ import ( func (suite *UseCaseSuite) BuildCommitWorkerPayload(worker lib.WorkerConfig, nonce *emissionstypes.Nonce) (bool, error) { ctx := context.Background() - inference, err := worker.InferenceEntrypoint.CalcInference(worker, nonce.BlockHeight) - if err != nil { - log.Error().Err(err).Str("worker", worker.InferenceEntrypoint.Name()).Msg("Error computing inference for worker") - return false, err + if worker.InferenceEntrypoint == nil && worker.ForecastEntrypoint == nil { + log.Error().Msg("Worker has no valid Inference or Forecast entrypoints") + return false, nil } - forecasts := []lib.NodeValue{} - if worker.ForecastEntrypoint != nil { - forecasts, err = worker.ForecastEntrypoint.CalcForecast(worker, nonce.BlockHeight) + var workerResponse = lib.WorkerResponse{ + WorkerConfig: worker, + } + + if worker.InferenceEntrypoint != nil { + inference, err := worker.InferenceEntrypoint.CalcInference(worker, nonce.BlockHeight) if err != nil { - log.Error().Err(err).Str("worker", worker.InferenceEntrypoint.Name()).Msg("Error computing forecast for worker") + log.Error().Err(err).Str("worker", worker.InferenceEntrypoint.Name()).Msg("Error computing inference for worker") return 
false, err } + workerResponse.InfererValue = inference } - var workerResponse = lib.WorkerResponse{ - InfererValue: inference, - ForecasterValues: forecasts, - WorkerConfig: worker, + if worker.ForecastEntrypoint != nil { + forecasts := []lib.NodeValue{} + forecasts, err := worker.ForecastEntrypoint.CalcForecast(worker, nonce.BlockHeight) + if err != nil { + log.Error().Err(err).Str("worker", worker.InferenceEntrypoint.Name()).Msg("Error computing forecast for worker") + return false, err + } + workerResponse.ForecasterValues = forecasts } workerPayload, err := suite.BuildWorkerPayload(workerResponse, nonce.BlockHeight) diff --git a/usecase/spawn_actor_processes.go b/usecase/spawn_actor_processes.go index 17548e3..f60cbe6 100644 --- a/usecase/spawn_actor_processes.go +++ b/usecase/spawn_actor_processes.go @@ -59,7 +59,7 @@ func (suite *UseCaseSuite) runWorkerProcess(worker lib.WorkerConfig) { latestNonceHeightActedUpon := int64(0) for { - log.Debug().Uint64("topicId", worker.TopicId).Msg("Checking for latest open worker nonce on topic") + log.Debug().Uint64("topicId", worker.TopicId).Msg("Checking for latest open worker nonce") latestOpenWorkerNonce, err := suite.Node.GetLatestOpenWorkerNonceByTopicId(worker.TopicId) if err != nil { @@ -91,9 +91,9 @@ func (suite *UseCaseSuite) runReputerProcess(reputer lib.ReputerConfig) { latestNonceHeightActedUpon := int64(0) for { - log.Debug().Uint64("topicId", reputer.TopicId).Msg("Checking for latest open reputer nonce on topic") + log.Debug().Uint64("topicId", reputer.TopicId).Msg("Checking for latest open reputer nonce") - latestOpenReputerNonce, err := suite.Node.GetLatestOpenReputerNonceByTopicId(reputer.TopicId) + latestOpenReputerNonce, err := suite.Node.GetOldestReputerNonceByTopicId(reputer.TopicId) if err != nil { log.Error().Err(err).Uint64("topicId", reputer.TopicId).Msg("Error getting latest open reputer nonce on topic") } diff --git a/usecase/usecase_suite.go b/usecase/usecase_suite.go index 1263f0e..6e58798 
100644 --- a/usecase/usecase_suite.go +++ b/usecase/usecase_suite.go @@ -11,11 +11,12 @@ type UseCaseSuite struct { } // Static method to create a new UseCaseSuite -func NewUseCaseSuite(userConfig lib.UserConfig) UseCaseSuite { +func NewUseCaseSuite(userConfig lib.UserConfig) (*UseCaseSuite, error) { userConfig.ValidateConfigAdapters() nodeConfig, err := userConfig.GenerateNodeConfig() if err != nil { log.Error().Err(err).Msg("Failed to initialize allora client") + return nil, err } - return UseCaseSuite{Node: *nodeConfig} + return &UseCaseSuite{Node: *nodeConfig}, nil }