diff --git a/.dockerignore b/.dockerignore
index bd055df2..c3127dc1 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -9,6 +9,18 @@
# CI/CD and GitHub-related files
ci/*
+
+# macOS system files
+.DS_Store
+**/.DS_Store
+.AppleDouble
+.LSOverride
+
+# Editor and IDE files
+.idea/
+.vscode/
+*.swp
+*.swo
.github/*
# Documentation files
@@ -35,3 +47,4 @@ src/*
.vscode/
*.iml
.prettierignore
+.DS_Store
diff --git a/Dockerfile b/Dockerfile
index dd21a372..ca11516e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -4,13 +4,27 @@ FROM ubuntu:25.04
# Set the maintainer of the image
LABEL maintainer="UDX CAG Team"
-# Set environment variables to avoid interactive prompts and set a fixed timezone
+# Set base environment variables
ENV DEBIAN_FRONTEND=noninteractive \
TZ=Etc/UTC \
USER=udx \
UID=500 \
GID=500 \
- HOME=/home/udx
+ HOME=/home/udx \
+ # Worker specific paths
+ WORKER_BASE_DIR=/opt/worker \
+ WORKER_CONFIG_DIR=/etc/worker \
+ WORKER_APP_DIR=/opt/worker/apps \
+ WORKER_DATA_DIR=/opt/worker/data \
+ WORKER_LIB_DIR=/usr/local/worker/lib \
+ WORKER_BIN_DIR=/usr/local/worker/bin \
+ WORKER_ETC_DIR=/usr/local/worker/etc \
+ # Add worker bin to PATH
+ PATH=/usr/local/worker/bin:${PATH} \
+ # Cloud SDK configurations
+ CLOUDSDK_CONFIG=/usr/local/configs/gcloud \
+ AWS_CONFIG_FILE=/usr/local/configs/aws \
+ AZURE_CONFIG_DIR=/usr/local/configs/azure
# Set the shell with pipefail option
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
@@ -22,10 +36,10 @@ USER root
# hadolint ignore=DL3015
RUN apt-get update && \
apt-get install -y \
- tzdata=2024b-6ubuntu1 \
+ tzdata=2025a-2ubuntu1 \
curl=8.12.0+git20250209.89ed161+ds-1ubuntu1 \
bash=5.2.37-1ubuntu1 \
- apt-utils=2.9.28 \
+ apt-utils=2.9.30ubuntu1 \
gettext=0.23.1-1 \
gnupg=2.4.4-2ubuntu22 \
ca-certificates=20241223 \
@@ -110,34 +124,72 @@ RUN groupadd -g ${GID} ${USER} && \
RUN mkdir -p /var/log/supervisor /var/run/supervisor && \
chown -R ${USER}:${USER} /var/log/supervisor /var/run/supervisor
-# Copy the CLI tool into the image
-COPY lib/cli.sh /usr/local/bin/worker_mgmt
-RUN chmod +x /usr/local/bin/worker_mgmt && \
- ln -s /usr/local/bin/worker_mgmt /usr/local/bin/worker
-
-# Copy the bin, etc, and lib directories
-COPY etc/configs /usr/local/configs
-COPY lib /usr/local/lib
-COPY bin/entrypoint.sh /usr/local/bin/entrypoint.sh
-
-# Set permissions during build
-# Set ownership
-RUN chown -R ${UID}:${GID} /usr/local/configs /usr/local/bin /usr/local/lib && \
- # Make specific scripts executable
- chmod 755 /usr/local/bin/entrypoint.sh /usr/local/lib/process_manager.sh && \
- # Set read-only permissions for config files
- find /usr/local/configs -type f -exec chmod 644 {} + && \
- # Set read-only permissions for library files
- find /usr/local/lib -type f ! -name process_manager.sh -exec chmod 644 {} + && \
- # Ensure directories are accessible
- find /usr/local/configs /usr/local/bin /usr/local/lib -type d -exec chmod 755 {} +
-
-# Create a symbolic link for the supervisord configuration file
-RUN ln -sf /usr/local/configs/supervisor/supervisord.conf /etc/supervisord.conf
-
-# Prepare directories for the user and worker configuration
-RUN mkdir -p ${HOME} && \
- chown -R ${USER}:${USER} ${HOME}
+# Create directory structure
+RUN mkdir -p \
+ # Worker directories
+ ${WORKER_CONFIG_DIR} \
+ ${WORKER_APP_DIR} \
+ ${WORKER_DATA_DIR} \
+ ${WORKER_LIB_DIR} \
+ ${WORKER_BIN_DIR} \
+ ${WORKER_ETC_DIR} \
+ # Environment files directory
+ ${WORKER_CONFIG_DIR}/environment.d \
+ # User and config directories
+ ${HOME}/.config/worker \
+ # Cloud SDK config directories
+ ${CLOUDSDK_CONFIG} \
+ ${AWS_CONFIG_FILE%/*} \
+ ${AZURE_CONFIG_DIR} && \
+ # Create and set permissions for environment files
+ touch ${WORKER_CONFIG_DIR}/environment && \
+ chown ${USER}:${USER} ${WORKER_CONFIG_DIR}/environment && \
+ chmod 644 ${WORKER_CONFIG_DIR}/environment
+
+# Copy worker files
+COPY bin/entrypoint.sh ${WORKER_BIN_DIR}/
+COPY lib ${WORKER_LIB_DIR}/
+COPY etc/configs/worker/default.yaml ${WORKER_CONFIG_DIR}/worker.yaml
+COPY etc/configs/supervisor ${WORKER_CONFIG_DIR}/supervisor/
+
+# Make scripts executable and initialize environment
+RUN chmod +x ${WORKER_LIB_DIR}/*.sh && \
+ ${WORKER_LIB_DIR}/env_handler.sh init_environment
+
+# Set up CLI tool and create symlink
+COPY lib/cli.sh ${WORKER_BIN_DIR}/worker_mgmt
+RUN chmod 755 ${WORKER_BIN_DIR}/worker_mgmt && \
+ ln -sf ${WORKER_BIN_DIR}/worker_mgmt ${WORKER_BIN_DIR}/worker
+
+# Set permissions
+RUN \
+ # Set base ownership
+ chown -R ${UID}:${GID} \
+ ${WORKER_BASE_DIR} \
+ ${WORKER_CONFIG_DIR} \
+ ${WORKER_LIB_DIR} \
+ ${WORKER_BIN_DIR} \
+ ${HOME} \
+ ${CLOUDSDK_CONFIG} \
+ ${AWS_CONFIG_FILE%/*} \
+ ${AZURE_CONFIG_DIR} && \
+ # Set directory permissions
+ find ${WORKER_BASE_DIR} ${WORKER_CONFIG_DIR} ${WORKER_LIB_DIR} ${WORKER_BIN_DIR} -type d -exec chmod 755 {} + && \
+ # Set base file permissions
+ find ${WORKER_CONFIG_DIR} -type f -exec chmod 644 {} + && \
+ find ${WORKER_LIB_DIR} -type f ! -name process_manager.sh -exec chmod 644 {} + && \
+ # Make specific files executable
+ chmod 755 \
+ ${WORKER_BIN_DIR}/entrypoint.sh \
+ ${WORKER_BIN_DIR}/worker_mgmt \
+ ${WORKER_LIB_DIR}/process_manager.sh && \
+ # Set runtime directories permissions
+ chmod 775 ${WORKER_APP_DIR} ${WORKER_DATA_DIR} && \
+ # Set home directory executable
+ chmod 755 ${HOME}
+
+# Set up supervisor configuration
+RUN ln -sf ${WORKER_CONFIG_DIR}/supervisor/supervisord.conf /etc/supervisord.conf
# Switch to the user directory
WORKDIR ${HOME}
@@ -146,7 +198,7 @@ WORKDIR ${HOME}
USER ${USER}
# Set the entrypoint to run the entrypoint script using shell form
-ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
+ENTRYPOINT ["/usr/local/worker/bin/entrypoint.sh"]
# Set the default command
CMD ["tail", "-f", "/dev/null"]
\ No newline at end of file
diff --git a/Makefile b/Makefile
index ddfe6d1c..d2c61930 100644
--- a/Makefile
+++ b/Makefile
@@ -28,15 +28,28 @@ build:
printf "$(COLOR_BLUE)$(SYM_ARROW) Starting Docker build...$(COLOR_RESET)\n"; \
if [ "$(MULTIPLATFORM)" = "true" ]; then \
printf "$(COLOR_BLUE)$(SYM_ARROW) Building for multiple platforms: [linux/amd64, linux/arm64]$(COLOR_RESET)\n"; \
- docker buildx build --progress=plain \
- --platform linux/amd64,linux/arm64 \
- -t $(DOCKER_IMAGE) \
- --load . 2>&1 | grep -E "$$filter" || exit 1; \
+ if [ "$(DEBUG)" = "true" ]; then \
+ docker buildx build --progress=plain \
+ --platform linux/amd64,linux/arm64 \
+ -t $(DOCKER_IMAGE) \
+ --load .; \
+ else \
+ docker buildx build --progress=plain \
+ --platform linux/amd64,linux/arm64 \
+ -t $(DOCKER_IMAGE) \
+ --load . 2>&1 | grep -E "$$filter" || exit 1; \
+ fi; \
else \
printf "$(COLOR_BLUE)$(SYM_ARROW) Building for local platform$(COLOR_RESET)\n"; \
- DOCKER_BUILDKIT=1 docker build \
- --progress=plain \
- -t $(DOCKER_IMAGE) . 2>&1 | grep -E "$$filter" || exit 1; \
+ if [ "$(DEBUG)" = "true" ]; then \
+ DOCKER_BUILDKIT=1 docker build \
+ --progress=plain \
+ -t $(DOCKER_IMAGE) .; \
+ else \
+ DOCKER_BUILDKIT=1 docker build \
+ --progress=plain \
+ -t $(DOCKER_IMAGE) . 2>&1 | grep -E "$$filter" || exit 1; \
+ fi; \
fi && \
printf "$(COLOR_GREEN)$(SYM_SUCCESS) Docker image build completed$(COLOR_RESET)\n" || \
{ printf "$(COLOR_RED)$(SYM_ERROR) Docker build failed$(COLOR_RESET)\n"; exit 1; }'
@@ -89,9 +102,13 @@ clean:
test: clean
@printf "$(COLOR_BLUE)$(SYM_ARROW) Running tests...$(COLOR_RESET)\n"
@$(MAKE) run \
- VOLUMES="$(TEST_WORKER_CONFIG):/home/$(USER)/worker.yaml:ro $(TEST_SERVICES_CONFIG):/home/$(USER)/services.yaml:ro $(TESTS_TASKS_DIR):/home/$(USER)/tasks:ro $(TESTS_MAIN_SCRIPT):/home/$(USER)/main.sh:ro" \
- COMMAND="/home/$(USER)/main.sh" || exit 1
- @$(MAKE) log FOLLOW_LOGS=true || exit 1
+ VOLUMES="$(PWD)/src/tests:/home/udx/tests $(PWD)/src/examples/simple-config/.config/worker/worker.yaml:/home/udx/.config/worker/worker.yaml $(PWD)/src/examples/simple-service/.config/worker/services.yaml:/home/udx/.config/worker/services.yaml $(PWD)/src/examples/simple-service/index.sh:/home/udx/index.sh" \
+ COMMAND="/home/udx/tests/main.sh"
+ @printf "$(COLOR_BLUE)$(SYM_ARROW) Following test output...$(COLOR_RESET)\n"
+ @docker logs -f $(CONTAINER_NAME) & LOGS_PID=$$!; \
+	EXIT_CODE=$$(docker wait $(CONTAINER_NAME)); \
+ kill $$LOGS_PID 2>/dev/null || true; \
+ exit $$EXIT_CODE
@$(MAKE) clean || exit 1
@printf "$(COLOR_GREEN)$(SYM_SUCCESS) Tests completed successfully$(COLOR_RESET)\n"
diff --git a/Makefile.variables b/Makefile.variables
index ca79ceea..c4853a5f 100644
--- a/Makefile.variables
+++ b/Makefile.variables
@@ -9,7 +9,7 @@ TESTS_TASKS_DIR ?= ./src/tests/tasks
TEST_WORKER_CONFIG ?= ./src/tests/configs/worker.yaml
TEST_SERVICES_CONFIG ?= ./src/tests/configs/services.yaml
USER = udx
-VOLUMES ?= ./src/scripts:/home/$(USER)
+VOLUMES ?=
DEBUG ?= false
COMMAND ?=
MULTIPLATFORM ?= false
diff --git a/README.md b/README.md
index 5494649c..5ee2357c 100644
--- a/README.md
+++ b/README.md
@@ -1,86 +1,187 @@
-## UDX Worker
+# UDX Worker
-The UDX Worker simplifies DevSecOps by providing a secure, containerized environment for handling secrets and running automation tasks. This repository contains the UDX Worker Docker image, designed for secure and reliable automation tasks based on 12-factor methodology. UDX Worker environments are ephemeral and adhere to zero-trust principles and methodology, ensuring maximum security and reliability.
+[](https://hub.docker.com/r/usabilitydynamics/udx-worker) [](LICENSE) [](https://udx.dev/worker)
-### Deployment
+**Secure, containerized environment for DevSecOps automation**
-1. Make sure Docker installed.
+[Quick Start](#quick-start) • [Documentation](#documentation) • [Development](#development) • [Contributing](#contributing)
-2. Pull the Docker image:
+## Overview
-```shell
-docker pull usabilitydynamics/udx-worker:latest
-```
+UDX Worker is a containerized solution that simplifies DevSecOps by providing:
+
+- **Secure Environment**: Built on zero-trust principles
+- **Automation Support**: Streamlined task execution
+- **Secret Management**: Secure handling of sensitive data
+- **12-Factor Compliance**: Modern application practices
+- **CI/CD Ready**: Seamless pipeline integration
+
+## Quick Start
+
+### Prerequisites
+
+- Docker 20.10 or later
+- Make (for development)
+
+### Example 1: Simple Service
+
+```bash
+# Create project structure
+mkdir -p my-worker/.config/worker
+cd my-worker
+
+# Create a simple service script
+cat > index.sh <<'EOF'
+#!/bin/bash
+echo "Starting service..."
+trap 'echo "Shutting down..."; exit 0' SIGTERM
+while true; do
+ echo "[$(date)] Service running..."
+ sleep 5
+done
+EOF
+chmod +x index.sh
-3. Run the Docker container:
+# Create service configuration
+cat > .config/worker/services.yaml <<'EOF'
+kind: workerService
+version: udx.io/worker-v1/service
+services:
+ - name: "index"
+ command: "/home/udx/index.sh"
+ autostart: true
+ autorestart: true
+EOF
-```shell
+# Run the worker
docker run -d \
- --name my-app \
- -v $(pwd):/home/udx \
+ --name my-service \
+ -v "$(pwd):/home/udx" \
usabilitydynamics/udx-worker:latest
+
+# View service logs
+docker logs -f my-service
```
-_Make sure to mount the current directory to `/home/udx` in the container._
+### Example 2: Secrets Management with Authorization
+
+```bash
+# Define secrets configuration
+cat > .config/worker/worker.yaml <<'EOF'
+kind: workerConfig
+version: udx.io/worker-v1/config
+config:
+ secrets:
+ API_KEY: "azure/key-vault/api-key"
+ DB_PASS: "aws/secrets/database"
+EOF
+
+# Create base64-encoded Azure credentials
+AZURE_CREDS=$(echo '{
+ "client_id": "your-client-id",
+ "client_secret": "your-client-secret",
+ "tenant_id": "your-tenant-id"
+}' | base64)
+
+# Run with cloud provider credentials
+docker run -d \
+ --name my-secrets \
+ -v "$(pwd)/.config/worker:/home/udx/.config/worker" \
+ -e AZURE_CREDS="${AZURE_CREDS}" \
+ usabilitydynamics/udx-worker:latest
-### Development
+# Verify authorization and secrets
+docker exec my-secrets worker auth verify
+docker exec my-secrets worker env get API_KEY
+```
-1. Clone the Repository
+See [Authorization Guide](docs/authorization.md) for supported providers and credential formats (JSON, Base64, File Path).
-```shell
+### Development Setup
+
+```bash
+# Clone and build
git clone https://github.com/udx/worker.git
cd worker
-```
+make build
-2. Build Image
+# Run example service
+make run VOLUMES="$(pwd)/src/examples/simple-service:/home/udx"
-```shell
-make build
+# View logs
+make log FOLLOW_LOGS=true
+
+# Run tests
+make test
```
-3. Start the container
+More examples available in [src/examples](src/examples).
-```shell
-make run
-```
+## Documentation
-_Interactively_
+### Core Concepts
+- [Authorization](docs/authorization.md) - Credential management
+- [Configuration](docs/config.md) - Worker setup
+- [Services](docs/services.md) - Service management
+- [CLI Reference](docs/cli.md) - Command line usage
-```shell
-make run-it
-```
+### Additional Resources
+- [Container Structure](docs/container-structure.md) - Directory layout
+- [Development Notes](docs/notes.md) - Best practices
+- [Git Tips](docs/git-help.md) - Version control helpers
+
+## Development
+
+```bash
+# Clone repository
+git clone https://github.com/udx/worker.git
+cd worker
+
+# Build image
+make build
-4. Run tests
+# Run container
+make run # Detached mode
+make run-it # Interactive mode
-```shell
+# Run tests
make test
+
+# View all commands
+make help
```
-_For more details on available commands_
+## Contributing
-```shell
-make
-```
+We welcome contributions! Here's how you can help:
-### Docs
+1. Fork the repository
+2. Create a feature branch
+3. Commit your changes
+4. Push to your branch
+5. Open a Pull Request
-- [Authorization](/docs/authorization.md)
-- [CLI](/docs/cli.md)
-- [Config](/docs/config.md)
-- [Git Help](/docs/git.md)
-- [Notes](/docs/notes.md)
-- [Services](/docs/services.md)
+Please ensure your PR:
+- Follows our coding standards
+- Includes appropriate tests
+- Updates relevant documentation
-### Resources
+## Resources
- [Docker Hub](https://hub.docker.com/r/usabilitydynamics/udx-worker)
- [Documentation](https://udx.dev/worker)
-- [Marketing Page](https://udx.io/products/udx-worker)
+- [Product Page](https://udx.io/products/udx-worker)
+
+## Custom Development
-### Contributing
+Need specific features or customizations?
+[Contact our team](https://udx.io/) for professional development services.
-Contributions are welcome! If you find any issues or have suggestions for improvements, please fork the repository and submit a pull request.
+## License
-### Custom Development
+This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
-Looking for a unique feature for your next project? [Hire us!](https://udx.io/)
+---
+
+Built with ❤️ by
UDX
+
diff --git a/bin/entrypoint.sh b/bin/entrypoint.sh
index 16611f82..64a94e64 100644
--- a/bin/entrypoint.sh
+++ b/bin/entrypoint.sh
@@ -1,16 +1,27 @@
#!/bin/bash
# shellcheck disable=SC1091
-source /usr/local/lib/utils.sh
+source "${WORKER_LIB_DIR}/utils.sh"
log_info "Welcome to UDX Worker Container. Initializing environment..."
# shellcheck disable=SC1091
-source /usr/local/lib/environment.sh
+source "${WORKER_LIB_DIR}/environment.sh"
-# Start the process manager in the background
+# Start the process manager
log_info "Starting process manager..."
-/usr/local/lib/process_manager.sh &
+"${WORKER_LIB_DIR}/process_manager.sh" &
+
+# Wait for supervisor to be ready
+max_attempts=10
+attempt=1
+while [ $attempt -le $max_attempts ]; do
+ if supervisorctl status >/dev/null 2>&1; then
+ break
+ fi
+ sleep 1
+ attempt=$((attempt + 1))
+done
# Main execution logic
if [ "$#" -gt 0 ]; then
diff --git a/docs/CLI.md b/docs/CLI.md
index 10f07d0b..b2d16ebb 100644
--- a/docs/CLI.md
+++ b/docs/CLI.md
@@ -1,91 +1,28 @@
# UDX Worker CLI Documentation
-The UDX Worker provides a command-line interface for managing services, environment variables, and generating Software Bill of Materials (SBOM).
+The UDX Worker provides a command-line interface for managing services, environment variables, generating Software Bill of Materials (SBOM), and more.
-## Commands
+## Available Commands
-### Service Management
+| Command | Description |
+|----------|-------------|
+| `auth` | Manage authentication operations |
+| `config` | Manage configuration operations |
+| `env` | Manage environment variables |
+| `health` | Monitor system health status |
+| `help` | Show help for any command |
+| `sbom` | Manage Software Bill of Materials |
+| `service` | Manage service operations |
+| `version` | Show version information |
-* `worker service list`
- - Lists of configured services.
- - Shows service name, status, PID, and uptime
+## Usage
-* `worker service status `
- - Displays detailed status of a service
- - Shows service state, uptime, and process information
-
-* `worker service logs [options]`
- - Views service output logs
- - Options:
- - `--lines N`: Show last N lines (default: 20)
- - `--nostream`: Show logs without following
-
-* `worker service errors [options]`
- - Views service error logs
- - Options:
- - `--lines N`: Show last N lines (default: 20)
- - `--nostream`: Show logs without following
-
-* `worker service config`
- - Shows the services configuration settings
- - Displays supervisor configuration for all services
-
-* `worker service start `
- - Starts a specified service
- - Creates necessary log files and directories
-
-* `worker service stop `
- - Stops a specified service
- - Service can be restarted later
-
-* `worker service restart `
- - Restarts a specified service
- - Equivalent to stop followed by start
-
-### Environment Variables
-
-* `worker env set `
- - Sets an environment variable
- - Variable will be available to all services
-
-* `worker env get [key]`
- - Retrieves environment variable(s)
- - If key is provided, shows specific variable
- - If no key, shows all environment variables
-
-### Software Bill of Materials
-
-* `worker sbom generate`
- - Generates container Software Bill of Materials
- - Lists all installed packages with:
- - Package name
- - Version
- - Architecture
-
-## Examples
+To see detailed help and usage information for any command, run it without arguments:
```bash
-# View service status
-worker service status my_service
-
-# Get last 100 lines of logs without following
-worker service logs my_service --lines=100 --nostream
-
-# Set and verify environment variable
-worker env set MY_VAR "my value"
-worker env get MY_VAR
-
-# Generate SBOM
-worker sbom generate
-```
-
-## Exit Codes
-
-- 0: Command completed successfully
-- 1: Command failed or invalid usage
-
-## Notes
+# Show auth command help
+worker auth
-- All commands use logging levels: INFO, DEBUG, WARN, ERROR
-- Service logs are stored in `/var/log/supervisor/`
-- Configuration files are in `/etc/supervisord.conf`
\ No newline at end of file
+# Show service command help
+worker service
+```
\ No newline at end of file
diff --git a/docs/authorization.md b/docs/authorization.md
index 588a70d7..5d569cf1 100644
--- a/docs/authorization.md
+++ b/docs/authorization.md
@@ -1,46 +1,74 @@
-## Worker Authorization
+# Worker Authorization
-### Supported Environment Variables
-- Azure: `AZURE_CREDS`
-- AWS: `AWS_CREDS`
-- GCP: `GCP_CREDS`
-- Bitwarden: `BITWARDEN_CREDS`
+## Overview
-### Credential Formats
+The UDX Worker supports multiple cloud providers and services through environment-based credential management.
-Credentials can be provided in three ways:
+## Supported Providers
-- **JSON**: Stringified JSON.
+| Provider | Environment Variable | Description |
+|------------|---------------------|-------------|
+| Azure | `AZURE_CREDS` | Azure cloud credentials |
+| AWS | `AWS_CREDS` | Amazon Web Services credentials |
+| GCP | `GCP_CREDS` | Google Cloud Platform credentials |
+| Bitwarden | `BITWARDEN_CREDS` | Bitwarden secrets management credentials |
-- **Base64 Encoded JSON**: Base64 encoded JSON strings.
+## Credential Formats
-- **File Path**: File path to JSON files with the credentials.
+Credentials can be provided in three formats:
+| Format | Description | Use Case |
+|--------|-------------|----------|
+| JSON | Plain JSON string | Direct configuration |
+| Base64 | Base64-encoded JSON | Secure environment variables |
+| File Path | Path to JSON file | Local development |
-### Examples
+## Format Examples
-1. JSON
+### 1. JSON Format
```json
-{"client_id":"CLIENT_ID","client_secret":"CLIENT_SECRET","tenant_id":"TENANT_ID","subscription_id":"SUBSCRIPTION_ID"}
+{
+ "client_id": "CLIENT_ID",
+ "client_secret": "CLIENT_SECRET",
+ "tenant_id": "TENANT_ID",
+ "subscription_id": "SUBSCRIPTION_ID"
+}
```
-2. Base64 Encoded JSON
+### 2. Base64 Encoded Format
-```base64
+```bash
+# Original JSON
+{
+ "client_id": "CLIENT_ID",
+ "client_secret": "CLIENT_SECRET",
+ "tenant_id": "TENANT_ID",
+ "subscription_id": "SUBSCRIPTION_ID"
+}
+
+# Base64 encoded value
ewogICAgImNsaWVudF9pZCI6ICJDTElFTlRfSUQiLAogICAgImNsaWVudF9zZWNyZXQiOiAiQ0xJRU5UX1NFQ1JFVCIsCiAgICAidGVuYW50X2lkIjogIlRFTkFOVF9JRCIsCiAgICAic3Vic2NyaXB0aW9uX2lkIjogIlNVQlNDUklQVElPTl9JRCIKfQ==
```
-Here is how you can base64 encode a JSON string:
-
+**Generate Base64 Format:**
```bash
-echo -n '{"client_id": "CLIENT_ID", "client_secret": "CLIENT_SECRET", "tenant_id": "TENANT_ID", "subscription_id": "SUBSCRIPTION_ID"}' | base64
+echo -n '{"client_id":"CLIENT_ID","client_secret":"CLIENT_SECRET","tenant_id":"TENANT_ID","subscription_id":"SUBSCRIPTION_ID"}' | base64
```
-3. File Path
+### 3. File Path Format
-```txt
-./creds/azure_creds.json
+```bash
+# Environment variable value
+AZURE_CREDS="/path/to/azure_credentials.json"
+
+# Credential file content (azure_credentials.json)
+{
+ "client_id": "CLIENT_ID",
+ "client_secret": "CLIENT_SECRET",
+ "tenant_id": "TENANT_ID",
+ "subscription_id": "SUBSCRIPTION_ID"
+}
```
-_Make sure to replace `./creds/azure_creds.json` with the actual file path to your credentials file_
\ No newline at end of file
+> **Note**: Always use absolute paths in production environments to avoid path resolution issues.
\ No newline at end of file
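+
+For the file path format, the credentials file must also be mounted into the container. A minimal sketch (the mount destination and file name are illustrative, not required by the worker):
+
+```bash
+# Mount a local credentials file and point AZURE_CREDS at its in-container path
+docker run -d \
+  --name my-worker \
+  -v "$(pwd)/azure_credentials.json:/home/udx/azure_credentials.json:ro" \
+  -e AZURE_CREDS="/home/udx/azure_credentials.json" \
+  usabilitydynamics/udx-worker:latest
+```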
diff --git a/docs/config.md b/docs/config.md
index 063d0e18..2b29e1e1 100644
--- a/docs/config.md
+++ b/docs/config.md
@@ -1,69 +1,104 @@
-## Worker Configuration
+# Worker Configuration
-The `worker.yaml` configuration file is a crucial component for customizing the environment of the UDX Worker. It allows users to specify both environment variables and secrets that are essential for the worker's operations.
+## Overview
-### Structure
+The UDX Worker uses `worker.yaml` as its primary configuration file, allowing you to:
+- Define environment variables
+- Reference secrets from various providers
+- Configure worker behavior
-- **env**: This section define various environment variables that your worker needs to function.
+## File Location
-```yaml
-env:
- AZURE_CLIENT_ID: "your-azure-client-id"
- AZURE_TENANT_ID: "your-azure-tenant-id"
- AZURE_SUBSCRIPTION_ID: "your-azure-subscription-id"
- ...
+```bash
+/home/udx/.config/worker/worker.yaml
```
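+
+For example, a local configuration file can be mounted to this location when starting the container (a minimal sketch; the host-side file name is illustrative):
+
+```bash
+docker run -d \
+  --name my-worker \
+  -v "$(pwd)/worker.yaml:/home/udx/.config/worker/worker.yaml:ro" \
+  usabilitydynamics/udx-worker:latest
+```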
-- **secrets**: This section allows to reference secrets stored in secure locations.
+## Configuration Structure
+
+| Section | Purpose | Required |
+|---------|----------|----------|
+| `kind` | Configuration type identifier | Yes |
+| `version` | Schema version | Yes |
+| `config.env` | Environment variables | No |
+| `config.secrets` | Secret references | No |
+
+## Basic Example
```yaml
-secrets:
- DB_PASSWORD: "aws/secrets-manager/db_password"
- API_KEY: "gcp/my-project/api_key"
- ...
+kind: workerConfig
+version: udx.io/worker-v1/config
+config:
+ env:
+ AZURE_CLIENT_ID: "12345678-1234-1234-1234-1234567890ab"
+ AWS_REGION: "us-west-2"
+ secrets:
+ DB_PASSWORD: "aws/prod/db_password"
+ API_KEY: "azure/kv-prod/api-key"
```
-Supported Providers
+## Secret Provider References
-1. Azure Key Vault
+### Azure Key Vault
```yaml
- AZURE_CLIENT_ID: "azure/{key_vault_name}/{secret_name}"
+secrets:
+ CLIENT_SECRET: "azure/{vault_name}/{secret_name}"
+ API_KEY: "azure/kv-prod/api-key"
```
-- AWS Secrets Manager
+### AWS Secrets Manager
```yaml
- AWS_ACCESS_KEY_ID: "aws/{directory}/{secret_name}"
+secrets:
+ DB_PASSWORD: "aws/{path}/{secret_name}"
+ ACCESS_KEY: "aws/prod/access-key"
```
-- GCP Secret Manager
+### Google Cloud Secret Manager
```yaml
- GCP_CREDS: "gcp/{project_id}/{secret_name}"
+secrets:
+ SERVICE_KEY: "gcp/{project_id}/{secret_name}"
+ AUTH_TOKEN: "gcp/my-project/auth-token"
```
-- Bitwarden Vault
+### Bitwarden Vault
```yaml
- BITWARDEN_TOKEN: "bitwarden/{vault_name}/{secret_name}"
+secrets:
+ MASTER_KEY: "bitwarden/{vault_name}/{secret_name}"
+ LICENSE_KEY: "bitwarden/prod/license-key"
```
-
-### Config Example
+## Environment Variables
```yaml
----
-kind: workerConfig
-version: udx.io/worker-v1/config
config:
env:
- AZURE_CLIENT_ID: "12345678-1234-1234-1234-1234567890ab"
-
- secrets:
- APP_CLIENT_SECRET: "azure/kv-example/clientSecret"
+ # Cloud Provider Settings
+ AZURE_TENANT_ID: "tenant-id"
+ AWS_REGION: "us-west-2"
+ GCP_PROJECT: "my-project"
+
+ # Application Settings
+ LOG_LEVEL: "info"
+ MAX_WORKERS: "5"
+ ENABLE_METRICS: "true"
```
-### Usage
+## Best Practices
+
+1. **Secret Management**
+ - Never store sensitive values directly in `env`
+ - Use `secrets` section for sensitive data
+ - Reference secrets from appropriate providers
+
+2. **Environment Variables**
+ - Use `env` for non-sensitive configuration
+ - Keep values consistent across environments
+ - Document any required variables
-To use this configuration file, make sure to mount it with your application under `/home/udx/`. It doesn't matter where you mount it, it could be autodetected in any subdirectory if it's mounted correctly and named `worker.yaml`.
\ No newline at end of file
+3. **File Handling**
+ - Keep configuration in version control (without sensitive data)
+ - Use different files for different environments
+ - Validate configuration before deployment
\ No newline at end of file
diff --git a/docs/container-structure.md b/docs/container-structure.md
new file mode 100644
index 00000000..34c0cf79
--- /dev/null
+++ b/docs/container-structure.md
@@ -0,0 +1,116 @@
+# Container Directory Structure
+
+This document outlines the directory structure of the UDX Worker container and provides guidance for creating child images.
+
+## Base Directory Structure
+
+The worker container uses the following directory structure:
+
+```
+/
+├── opt/worker/              # Base directory for worker-specific files
+│   ├── apps/                # Worker applications and plugins
+│   └── data/                # Worker data storage and processing
+├── etc/worker/              # Worker configuration files
+├── usr/local/worker/
+│   ├── bin/                 # Worker executable files
+│   ├── lib/                 # Worker library files
+│   └── etc/                 # Additional worker configuration
+└── usr/local/configs/       # Cloud provider configurations
+    ├── gcloud/              # Google Cloud SDK config
+    ├── aws/                 # AWS CLI config
+    └── azure/               # Azure CLI config
+```
+
+### Directory Purposes
+
+#### Worker-Specific Directories
+
+- `WORKER_BASE_DIR=/opt/worker`
+ - Main directory for worker-specific files
+
+- `WORKER_APP_DIR=/opt/worker/apps`
+ - Contains worker-specific applications and plugins
+ - Used for extending worker functionality
+ - **Not intended** for application code in child images
+
+- `WORKER_DATA_DIR=/opt/worker/data`
+ - Used for worker data storage and processing
+ - Temporary and persistent data used by the worker
+
+- `WORKER_CONFIG_DIR=/etc/worker`
+ - Contains worker configuration files
+
+- `WORKER_LIB_DIR=/usr/local/worker/lib`
+ - Worker library files and shared code
+
+- `WORKER_BIN_DIR=/usr/local/worker/bin`
+ - Worker executable files
+ - Added to system PATH
+
+- `WORKER_ETC_DIR=/usr/local/worker/etc`
+ - Additional worker configuration files
+
+#### Cloud Configuration Directories
+
+- `CLOUDSDK_CONFIG=/usr/local/configs/gcloud`
+ - Google Cloud SDK configuration
+
+- `AWS_CONFIG_FILE=/usr/local/configs/aws`
+ - AWS CLI configuration
+
+- `AZURE_CONFIG_DIR=/usr/local/configs/azure`
+ - Azure CLI configuration
+
+## Child Image Development
+
+When developing child images (e.g., PHP, Node.js), follow these guidelines:
+
+### Directory Structure Guidelines
+
+1. **Preserve Worker Directories**
+ - Maintain all worker directories as they are
+ - Do not modify or remove any worker-specific paths
+ - These directories are essential for worker functionality
+
+2. **Application Code Placement**
+ - Use framework/language-specific conventional directories for your application code
+ - Do not place application code in worker directories
+
+### Examples by Language
+
+#### PHP Applications
+```
+/
+├── var/www/                 # PHP application code (standard location)
+└── opt/worker/              # Worker directories (preserved)
+```
+
+#### Node.js Applications
+```
+/
+├── usr/src/app/             # Node.js application code (standard location)
+└── opt/worker/              # Worker directories (preserved)
+```
+
+#### Python Applications
+```
+/
+├── usr/src/app/             # Python application code (standard location)
+└── opt/worker/              # Worker directories (preserved)
+```
+
+### Best Practices
+
+1. **Separation of Concerns**
+ - Keep application code separate from worker functionality
+ - Use standard language/framework conventions for your application
+ - Don't mix application data with worker data
+
+2. **Configuration**
+ - Use appropriate configuration directories for your application
+ - Don't modify worker configurations unless specifically required
+
+3. **Data Storage**
+ - Use appropriate data directories for your application
+ - Don't use `WORKER_DATA_DIR` for application data storage
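+
+### Example Child Dockerfile
+
+A minimal sketch of these guidelines for a hypothetical Node.js child image (the runtime, application path, and start command are illustrative, not required by the worker):
+
+```dockerfile
+FROM usabilitydynamics/udx-worker:latest
+
+# Switch to root only for package installation
+USER root
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends nodejs npm && \
+    rm -rf /var/lib/apt/lists/*
+
+# Application code goes in its conventional location, not under /opt/worker
+WORKDIR /usr/src/app
+COPY --chown=udx:udx . .
+
+# Drop back to the non-root worker user
+USER udx
+CMD ["node", "index.js"]
+```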
diff --git a/docs/git-help.md b/docs/git-help.md
index d207a592..6f34af09 100644
--- a/docs/git-help.md
+++ b/docs/git-help.md
@@ -1,73 +1,159 @@
-## Git Commands
+# Git Quick Reference
-### 1. Clean Ignored Files from Git
+## Cleanup Operations
-Sometimes you may need to remove all files that are listed in your .gitignore from the repository's index but keep them in your working directory. This command helps you do that:
+### Clean Ignored Files
+Remove files from the Git index while keeping them in your working directory:
-```shell
+```bash
+# Remove from index
git rm -r --cached .
+
+# Commit the cleanup
+git add .
+git commit -m "chore: clean ignored files"
```
-- `rm -r --cached .`: This removes all files from the Git index, including ignored files.
-- `.`: Refers to the current directory, so it applies the command recursively to all files and directories.
+> Tip: Useful when `.gitignore` is updated but files are still tracked
+
+## Commit Management
-After running this command, you need to commit the changes to update the repository:
+### Amend Last Commit
-```shell
+```bash
+# Change commit message only
+git commit --amend -m "new message"
+
+# Add staged changes to last commit
git add .
-git commit -m "Cleaned up ignored files"
+git commit --amend --no-edit
```
-### 2. Override the Last Commit
+**Warning**: Don't amend pushed commits unless working alone
+
+### Force Push Changes
-If you want to modify the last commit (e.g., change the commit message or add new changes), you can amend it:
+```bash
+# Force push with lease (safer than -f)
+git push --force-with-lease
-```shell
-git commit --amend
+# Force push (use with extreme caution)
+git push -f
```
-- `--amend`: This option allows you to modify the most recent commit.
-- You will be prompted to edit the commit message in your default text editor. You can either update the message or keep it as is.
+> Always prefer `--force-with-lease` over `-f` to prevent overwriting others' work
-> Note: Use --amend with caution, especially if the commit has already been pushed to a shared repository, as it rewrites history.
+## Branch Operations
-### 3. Force Push Amended Commit
+### Move Commits Between Branches
-After amending a commit, if the changes have already been pushed to a remote repository, you'll need to force push the updated commit:
+```bash
+# 1. Find unpushed commits
+git log branch-name --not --remotes --oneline
-```shell
-git push -f
+# 2. Save current work
+git stash
+
+# 3. Switch and apply
+git checkout target-branch
+git cherry-pick <commit-hash>
+
+# 4. Restore work
+git stash pop
```
-- `-f` or `--force`: This option forces Git to push the amended commit to the remote repository, rewriting history.
+## Common Workflows
-> Note: Force pushing can overwrite changes in the remote repository, so use it carefully, especially when working in a shared environment.
+### Feature Branch Workflow
-### 4. Moving Unpushed Commits Between Branches
+```bash
+# 1. Create feature branch
+git checkout -b feature/name
-If you need to move an unpushed commit from one branch to another, you can use git cherry-pick. Here's an example of moving a commit from branch `UAT-69` to branch `1629`:
+# 2. Make changes and commit
+git add .
+git commit -m "feat: add new feature"
-1. First, identify the unpushed commit on the source branch:
-```shell
-git log UAT-69 --not --remotes --oneline
+# 3. Update with main
+git fetch origin
+git rebase origin/main
+
+# 4. Push changes
+git push -u origin feature/name
```
-2. Note the commit hash from the output (e.g., `4cff367`)
+### Commit Message Format
+
+```bash
+# Format
+<type>(<scope>): <description>
-3. Switch to the target branch and stash any current changes:
-```shell
-git checkout 1629
-git stash # if you have uncommitted changes
+# Types
+feat: New feature
+fix: Bug fix
+docs: Documentation
+style: Formatting
+refactor: Code restructure
+test: Tests
+chore: Maintenance
```
-4. Cherry-pick the commit:
-```shell
-git cherry-pick 4cff367
+## Inspection Commands
+
+### View Changes
+
+```bash
+# Show staged changes
+git diff --staged
+
+# Show changes in last commit
+git show HEAD
+
+# Show file history
+git log -p filename
```
-5. Restore your stashed changes if any:
-```shell
-git stash pop
+### Branch Information
+
+```bash
+# List all branches
+git branch -vv
+
+# Show merged branches
+git branch --merged
+
+# Show unmerged branches
+git branch --no-merged
```
-> Note: The cherry-pick command creates a new commit on the target branch with the same changes but a different commit hash.
+## Tips and Tricks
+
+1. **Stash Management**:
+ ```bash
+ # Named stash
+ git stash save "feature work in progress"
+
+ # List stashes
+ git stash list
+
+ # Apply specific stash
+ git stash apply stash@{n}
+ ```
+
+2. **Quick Fixes**:
+ ```bash
+ # Undo last commit but keep changes
+ git reset --soft HEAD^
+
+ # Discard all local changes
+ git reset --hard HEAD
+ ```
+
+3. **Search History**:
+ ```bash
+ # Search commit messages
+ git log --grep="keyword"
+
+ # Search code changes
+ git log -S"code string"
+ ```
diff --git a/docs/notes.md b/docs/notes.md
index 323c2b13..f5c061dc 100644
--- a/docs/notes.md
+++ b/docs/notes.md
@@ -1,11 +1,91 @@
-Creating a Non-Root User: Creating a non-root user inside the container helps to improve security by avoiding running processes as the root user. This can limit the potential damage in case of a security breach.
+# Container Development Best Practices
-Consistency: Using ARG for these values allows you to parameterize the Dockerfile, making it easier to build images with different users or permissions as needed. You can override these defaults during the build process if necessary.
+## User Management
-Customization: By defining these as arguments, you provide flexibility. For example, if you want to build the image in different environments where different user IDs are required, you can pass different values for UID and GID when building the Docker image.
+### Non-Root User Benefits
+- **Enhanced Security**: Limits potential damage from security breaches
+- **Reduced Privileges**: Prevents unauthorized system modifications
+- **Best Practice**: Follows container security principles
-Security: Running containers as a non-root user enhances security by limiting the potential damage that a compromised application can do. This is a good practice for reducing risks.
+### User Configuration
-Permissions: The UID and GID are important for managing file permissions. If files created or modified by the container need to be accessed by the host or other containers, consistent UIDs and GIDs help avoid permission issues.
+```dockerfile
+ARG USER=udx
+ARG UID=500
+ARG GID=500
-Container Environment: Within a container, the application user is not a system user but rather a user that the containerized application runs as. This user doesn't perform system administration tasks but operates with limited privileges within the container.
\ No newline at end of file
+RUN groupadd -g $GID $USER && \
+ useradd -u $UID -g $GID -m $USER
+```
+
+## Dockerfile Arguments
+
+### Benefits of Using ARGs
+- **Parameterization**: Easy to customize builds
+- **Flexibility**: Adapt to different environments
+- **Reusability**: Same Dockerfile, different configurations
+
+### Common ARGs
+```dockerfile
+ARG USER=udx
+ARG UID=500
+ARG GID=500
+ARG HOME=/home/udx
+```
+
+## Permission Management
+
+### UID/GID Importance
+- **File Access**: Consistent access across host and containers
+- **Shared Resources**: Proper permissions for mounted volumes
+- **Security**: Controlled access to resources
+
+### Best Practices
+1. Use consistent UID/GID across environments
+2. Document required permissions
+3. Verify file ownership after operations
+
+## Container Security
+
+### Running as Non-Root
+- Prevents privileged access
+- Limits system modification capabilities
+- Follows principle of least privilege
+
+### Security Checklist
+- [ ] Use non-root user
+- [ ] Set appropriate file permissions
+- [ ] Limit mounted volumes
+- [ ] Use read-only filesystems where possible
+
+## Environment Setup
+
+### Container User Context
+- Application-specific user
+- Limited privileges
+- No system administration capabilities
+
+### Directory Permissions
+```bash
+chown -R $USER:$USER /app
+chmod -R 755 /app
+```
+
+## Tips and Tricks
+
+1. **Testing User Setup**:
+ ```bash
+ docker run --rm -it myimage whoami
+ docker run --rm -it myimage id
+ ```
+
+2. **Debugging Permissions**:
+ ```bash
+ docker run --rm -it myimage ls -la /app
+ docker run --rm -it myimage stat /app
+ ```
+
+3. **Volume Mounting**:
+ ```bash
+ docker run -v $(pwd):/app:ro myimage # Read-only mount
+ ```
\ No newline at end of file
diff --git a/docs/services.md b/docs/services.md
index 1cf96f80..42a6265b 100644
--- a/docs/services.md
+++ b/docs/services.md
@@ -1,35 +1,125 @@
-## Services Configuration
+# Service Configuration
-The `services.yaml` file contains a list of services to be managed by the worker. Each service is defined using the following structure:
+## Overview
+
+The UDX Worker uses `services.yaml` to define and manage multiple services. Each service can be configured with its own runtime settings, environment variables, and behavior policies.
+
+## File Location
+
+```bash
+/home/udx/.config/worker/services.yaml
+```
+
+## Configuration Structure
+
+| Field | Type | Required | Default | Description |
+|-------|------|----------|---------|-------------|
+| `kind` | string | Yes | - | Must be `workerService` |
+| `version` | string | Yes | - | Must be `udx.io/worker-v1/service` |
+| `services` | array | Yes | - | List of service definitions |
+
+### Service Definition Fields
+
+| Field | Type | Required | Default | Description |
+|-------|------|----------|---------|-------------|
+| `name` | string | Yes | - | Unique service identifier |
+| `command` | string | Yes | - | Command to execute |
+| `ignore` | boolean | No | `false` | Skip service management |
+| `autostart` | boolean | No | `true` | Start on worker launch |
+| `autorestart` | boolean | No | `false` | Restart on failure |
+| `envs` | array | No | `[]` | Environment variables |
+
+## Basic Example
```yaml
----
kind: workerService
version: udx.io/worker-v1/service
services:
- - name: ""
- ignore: ""
- command: ""
- autostart: ""
- autorestart: ""
+ - name: "web-server"
+ command: "python app.py"
+ autostart: true
+ autorestart: true
envs:
- - "="
+ - "PORT=8080"
+ - "DEBUG=true"
```
-### Service Fields Explanation
+## Advanced Examples
+
+### Multiple Services
+
+```yaml
+kind: workerService
+version: udx.io/worker-v1/service
+services:
+ - name: "api-server"
+ command: "node api/server.js"
+ autostart: true
+ autorestart: true
+ envs:
+ - "PORT=3000"
+ - "NODE_ENV=production"
+
+ - name: "worker-queue"
+ command: "python worker.py"
+ autostart: true
+ envs:
+ - "QUEUE_URL=redis://localhost:6379"
+
+ - name: "monitoring"
+ command: "./monitor.sh"
+ ignore: true # Temporarily disabled
+```
+
+### Service with Complex Command
+
+```yaml
+services:
+ - name: "data-processor"
+ command: "bash -c 'source .env && python -m processor.main --config=prod.json'"
+ autostart: true
+ autorestart: true
+ envs:
+ - "PYTHONPATH=/app"
+ - "LOG_LEVEL=info"
+```
+
+## Best Practices
+
+1. **Service Naming**
+ - Use descriptive, lowercase names
+ - Separate words with hyphens
+ - Keep names concise but meaningful
+
+2. **Command Definition**
+ - Use absolute paths when possible
+ - Quote commands with spaces or special characters
+ - Consider using shell scripts for complex commands
-* `name`: Unique identifier for the service. This is used to reference and manage the service within the system.
+3. **Environment Variables**
+ - Use uppercase for variable names
+ - Group related variables together
+ - Document required variables
-* `ignore`: Determines whether the service should be ignored. If set to "true", the service will not be managed by the worker. The default value is "false", which means the service is considered for management.
+4. **Restart Policies**
+ - Enable `autorestart` for critical services
+ - Use `ignore` for maintenance or debugging
+ - Consider dependencies between services
-* `command`: The command that the service will execute. This could be a shell script or any executable along with its arguments.
+## Monitoring and Management
-* `autostart`: Indicates whether the service should start automatically when the worker starts. The default is "true", meaning the service will start automatically.
+Use the following CLI commands to manage services:
-* `autorestart`: Specifies whether the service should automatically restart if it stops. If "true", the service will restart according to the policy defined by the worker management system. The default value is "false", indicating the service will not restart automatically.
+```bash
+# List all services
+worker service list
-* `envs`: An array of environment variables passed to the service in the format `KEY=value`. These variables are made available to the service at runtime.
+# Check specific service status
+worker service status web-server
-## Usage
+# View service logs
+worker service logs web-server
-To use this configuration file, make sure to mount it with your application under `/home/udx/`. It doesn't matter where you mount it, it could be autodetected in any subdirectory if it's mounted correctly and named `services.yaml`.
\ No newline at end of file
+# Restart a service
+worker service restart web-server
+```
\ No newline at end of file
diff --git a/etc/configs/worker/services.yaml b/etc/configs/worker/services.yaml
deleted file mode 100644
index 82f0fa44..00000000
--- a/etc/configs/worker/services.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-kind: workerService
-version: udx.io/worker-v1/service
-services:
- - name: "app_service"
- ignore: "true"
- command: "sh ./main.sh"
- autostart: "true"
- autorestart: "false"
- envs:
- - "KEY1=value1"
- - "KEY2=value2"
- - name: "another_service"
- ignore: "true"
- command: "sh ./main.sh"
- autostart: "true"
- autorestart: "true"
- envs: []
diff --git a/lib/auth.sh b/lib/auth.sh
index 1ebc851c..e49e71ce 100644
--- a/lib/auth.sh
+++ b/lib/auth.sh
@@ -1,11 +1,69 @@
#!/bin/bash
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
# Array to track configured providers
declare -a configured_providers=()
+# Function to get env var names for a provider from actors JSON
+get_provider_env_vars() {
+ local provider=$1
+ local actors_json=$2
+
+ # Get all env var names from actor creds that match ${VAR} pattern
+ # shellcheck disable=SC2016
+ echo "$actors_json" | jq -r ".[].creds" 2>/dev/null | \
+ grep -o '\${[^}]*}' | sed 's/[\${}]//g' || true
+}
+
+# Function to check if a provider is configured
+is_provider_configured() {
+ local provider=$1
+ local actors_json=$2
+
+ # Get provider's actors
+ local provider_actors
+ provider_actors=$(echo "$actors_json" | jq -r "[.[] | select(.type | startswith(\"$provider\"))]" 2>/dev/null)
+
+ if [ -z "$provider_actors" ] || [ "$provider_actors" = "[]" ]; then
+ return 1
+ fi
+
+ # Get all possible env var names from actors
+ local env_vars
+ mapfile -t env_vars < <(get_provider_env_vars "$provider" "$provider_actors")
+
+ # Check all possible env vars
+ for env_var in "${env_vars[@]}"; do
+ if [ -n "${!env_var}" ]; then
+ return 0
+ fi
+ done
+
+ # Check each actor's credentials
+ while IFS= read -r actor; do
+ [ -z "$actor" ] && continue
+
+ local creds
+ creds=$(echo "$actor" | jq -r '.creds' 2>/dev/null)
+ [ "$creds" = "null" ] && continue
+
+ # Evaluate creds as a reference to an environment variable
+ if [[ "$creds" =~ ^\$\{(.+)\}$ ]]; then
+ local env_var_name="${BASH_REMATCH[1]}"
+ creds="${!env_var_name}"
+ fi
+
+ # If we find any valid credentials, return success
+ if [ -n "$creds" ]; then
+ return 0
+ fi
+ done <<< "$(echo "$provider_actors" | jq -r '.[]')"
+
+ return 1
+}
+
# Function to authenticate actors
authenticate_actors() {
local actors_json="$1"
@@ -61,7 +119,7 @@ authenticate_actors() {
# Proceed only if creds are valid JSON
if echo "$creds" | jq empty &>/dev/null; then
log_info "Processing credentials for $provider"
- auth_script="/usr/local/lib/auth/${provider}.sh"
+ auth_script="${WORKER_LIB_DIR}/auth/${provider}.sh"
auth_function="${provider}_authenticate"
if [[ -f "$auth_script" ]]; then
diff --git a/lib/auth/aws.sh b/lib/auth/aws.sh
index 05b41592..e11c3271 100644
--- a/lib/auth/aws.sh
+++ b/lib/auth/aws.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
# Example usage of the function
# aws_authenticate "/path/to/your/aws_creds.json"
diff --git a/lib/auth/azure.sh b/lib/auth/azure.sh
index 7286fd7b..e913180d 100644
--- a/lib/auth/azure.sh
+++ b/lib/auth/azure.sh
@@ -5,8 +5,8 @@
# Example usage of the function
# azure_authenticate "/path/to/your/azure_creds.json"
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
# Function to authenticate Azure accounts
azure_authenticate() {
diff --git a/lib/auth/bitwarden.sh b/lib/auth/bitwarden.sh
index 57dbd7d6..cd221b05 100644
--- a/lib/auth/bitwarden.sh
+++ b/lib/auth/bitwarden.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
# Function to authenticate Bitwarden using API key or master password
#
diff --git a/lib/auth/gcp.sh b/lib/auth/gcp.sh
index 0299d3df..27b74d7c 100644
--- a/lib/auth/gcp.sh
+++ b/lib/auth/gcp.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
# Function to authenticate GCP service accounts
#
diff --git a/lib/cleanup.sh b/lib/cleanup.sh
index 9a716936..6994ec09 100644
--- a/lib/cleanup.sh
+++ b/lib/cleanup.sh
@@ -2,10 +2,13 @@
# Include worker config utilities first
# shellcheck source=/dev/null
-source /usr/local/lib/worker_config.sh
+source "${WORKER_LIB_DIR}/worker_config.sh"
# shellcheck source=/dev/null
-source /usr/local/lib/utils.sh
+source "${WORKER_LIB_DIR}/utils.sh"
+
+# Enable actors cleanup by default
+ACTORS_CLEANUP=${ACTORS_CLEANUP:-true}
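+# Cleanup can be disabled at runtime, e.g. by starting the container with
+# `-e ACTORS_CLEANUP=false`; any value other than "true" makes cleanup_actors a no-op.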
# Generic function to clean up authentication for any provider
cleanup_provider() {
@@ -63,34 +66,82 @@ cleanup_provider() {
fi
}
+# Function to clean up credential files
+cleanup_cred_files() {
+ local provider=$1
+ local env_var_name="${provider^^}_CREDS" # Convert to uppercase
+ local creds_value="${!env_var_name}"
+
+ # Skip if no credentials value found
+ if [[ -z "$creds_value" ]]; then
+ return 0
+ fi
+
+ # If the value is a file path and exists, remove it
+ if [[ -f "$creds_value" ]]; then
+ log_info "Removing credential file for $provider: $creds_value"
+ if rm -f "$creds_value"; then
+ log_success "Cleanup" "Removed credential file for $provider"
+ return 0
+ else
+ log_error "Cleanup" "Failed to remove credential file for $provider"
+ return 1
+ fi
+ fi
+
+ return 0
+}
+
# Function to clean up actors based on the providers configured during authentication
cleanup_actors() {
- log_info "Starting cleanup of actors"
+ # Check if cleanup is enabled
+ if [[ "${ACTORS_CLEANUP,,}" != "true" ]]; then
+ log_info "Actors cleanup is disabled via ACTORS_CLEANUP environment variable"
+ return 0
+ fi
+
+ # Skip cleanup if no providers were configured during authentication
+ if [[ ${#configured_providers[@]} -eq 0 ]]; then
+ return 0
+ fi
- # Accept configured providers as arguments
- local configured_providers=('azure' 'gcp' 'aws' 'bitwarden')
+ log_info "Starting cleanup of actors"
# Track if any actual cleanup was performed
local any_cleanup=false
- # Loop through each configured provider only
+ # Only clean up providers that were actually configured
for provider in "${configured_providers[@]}"; do
+ # First cleanup any credential files
+ if cleanup_cred_files "$provider"; then
+ any_cleanup=true
+ fi
+
+ # Then cleanup provider sessions
case "$provider" in
azure)
- cleanup_provider "az" "az logout" "az account show" "Azure" && any_cleanup=true
- ;;
+ if cleanup_provider "az" "az logout" "az account show" "Azure"; then
+ any_cleanup=true
+ fi
+ ;;
gcp)
- cleanup_provider "gcloud" "gcloud auth revoke --all" "gcloud auth list" "GCP" && any_cleanup=true
- ;;
+ if cleanup_provider "gcloud" "gcloud auth revoke --all" "gcloud auth list" "GCP"; then
+ any_cleanup=true
+ fi
+ ;;
aws)
- cleanup_provider "aws" "aws sso logout" "aws sso list-accounts" "AWS" && any_cleanup=true
- ;;
+ if cleanup_provider "aws" "aws sso logout" "aws sso list-accounts" "AWS"; then
+ any_cleanup=true
+ fi
+ ;;
bitwarden)
- cleanup_provider "bw" "bw logout --force" "bw status" "Bitwarden" && any_cleanup=true
- ;;
+ if cleanup_provider "bw" "bw logout --force" "bw status" "Bitwarden"; then
+ any_cleanup=true
+ fi
+ ;;
*)
log_warn "Unsupported or unavailable actor type for cleanup: $provider"
- ;;
+ ;;
esac
done
@@ -98,6 +149,11 @@ cleanup_actors() {
if [[ "$any_cleanup" == false ]]; then
log_info "No active sessions found for any configured providers."
fi
+
+ # Clear the configured providers array
+ configured_providers=()
+
+ return 0
}
# Example usage
diff --git a/lib/cli.sh b/lib/cli.sh
index c9152153..4ebaf35a 100644
--- a/lib/cli.sh
+++ b/lib/cli.sh
@@ -1,30 +1,135 @@
#!/bin/bash
+# Version information
+VERSION="1.0.0"
+
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
+
# Dynamically source all command modules
-for module in /usr/local/lib/cli/*.sh; do
- # shellcheck disable=SC1090
- source "$module"
+for module in "${WORKER_LIB_DIR}/cli/"*.sh; do
+ # shellcheck disable=SC1090
+ source "$module"
done
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
-
-# CLI Interface
-case $1 in
- env)
- shift
- env_handler "$@"
- ;;
- sbom)
- shift
- sbom_handler "$@"
- ;;
- service)
- shift
- service_handler "$@"
- ;;
- *)
- log_error "CLI" "Unknown command: $1"
- exit 1
- ;;
-esac
\ No newline at end of file
+# Print version information
+show_version() {
+ log_info "Worker CLI version $VERSION"
+}
+
+# Get available commands and their descriptions
+get_available_commands() {
+    # Associative array mapping command name to description
+    # (declare -A inside a function creates a local variable)
+    declare -A commands
+
+ # Add built-in commands
+ commands["help"]="Show help for any command"
+ commands["version"]="Show version information"
+
+ # Scan through CLI modules to find commands and their descriptions
+ for module in "${WORKER_LIB_DIR}/cli/"*.sh; do
+ if [ -f "$module" ]; then
+ local name
+ name=$(basename "$module" .sh)
+ local description=""
+
+ # Extract description from help function
+ if grep -q "${name}_help()" "$module"; then
+ # Try to find description from comment before help function
+ description=$(grep -B 2 "${name}_help()" "$module" |
+ grep "^#" |
+ grep -v "Show help" |
+ head -n 1 |
+ sed 's/^#[[:space:]]*//;s/[[:space:]]*$//')
+
+ # If no description found, use a generic one
+ if [ -z "$description" ]; then
+ description="Manage ${name} operations"
+ fi
+ commands[$name]="$description"
+ fi
+ fi
+ done
+
+ declare -p commands
+}
+
+# Print help information
+show_help() {
+ local -A commands
+ eval "$(get_available_commands)"
+
+ # Find the longest command name for proper padding
+ local max_length=0
+ for cmd in "${!commands[@]}"; do
+ local len=${#cmd}
+ if ((len > max_length)); then
+ max_length=$len
+ fi
+ done
+
+ # Add padding for alignment
+ max_length=$((max_length + 2))
+
+ cat << EOF
+Welcome to UDX Worker Container!
+
+Available Commands:
+EOF
+
+ # Sort commands alphabetically and display
+ local sorted_commands
+ mapfile -t sorted_commands < <(printf '%s\n' "${!commands[@]}" | sort)
+ for cmd in "${sorted_commands[@]}"; do
+ printf " %-${max_length}s %s\n" "$cmd" "${commands[$cmd]}"
+ done
+
+ cat << EOF
+
+Run any command without arguments to see its detailed help and usage information.
+For example: 'worker auth' will show auth command help.
+EOF
+}
+
+# Main CLI interface
+if [ -z "$1" ] || [ "$1" = "help" ]; then
+ show_help
+ log_info "Container is ready. Run a command to start services."
+ exit 0
+fi
+
+if [ "$1" = "version" ]; then
+ show_version
+ exit 0
+fi
+
+# Handle app commands
+if [ "$1" = "app" ]; then
+ # Source and load configuration for app commands
+ # shellcheck disable=SC1091
+ source "${WORKER_LIB_DIR}/worker_config.sh"
+ config=$(load_and_parse_config)
+ export_variables_from_config "$config"
+
+ log_info "Starting process manager..."
+ "${WORKER_LIB_DIR}/process_manager.sh"
+ pm_status=$?
+ if [ $pm_status -ne 0 ]; then
+ exit $pm_status
+ fi
+ shift
+ exec "$@"
+fi
+
+# Check if the command exists by looking for its handler
+command=$1
+handler_function="${command}_handler"
+
+if [[ $(type -t "$handler_function") == function ]]; then
+ shift
+ "$handler_function" "$@"
+else
+ log_error "CLI" "Unknown command: $command"
+ echo "Run 'worker help' to see available commands."
+ exit 1
+fi
\ No newline at end of file
diff --git a/lib/cli/auth.sh b/lib/cli/auth.sh
new file mode 100644
index 00000000..d1c0f981
--- /dev/null
+++ b/lib/cli/auth.sh
@@ -0,0 +1,277 @@
+#!/bin/bash
+
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
+source "${WORKER_LIB_DIR}/auth.sh"
+
+# Show help for auth command
+auth_help() {
+ cat << EOF
+Manage authentication and credentials
+
+Usage: worker auth [command] [provider] [options]
+
+Available Commands:
+ status Show authentication status for all or specific provider
+ login Re-authenticate with provider(s) using available credentials
+ logout Log out from provider(s)
+
+Options:
+ --format Output format for status command (e.g. json)
+
+Examples:
+ worker auth status # Show status of all providers
+ worker auth status azure # Show status of Azure only
+ worker auth status --format json # Show status in JSON format
+ worker auth login # Re-auth all providers with available creds
+ worker auth login azure # Re-auth Azure only
+ worker auth logout # Log out from all providers
+EOF
+}
+
+# Description: Display authentication status for all cloud providers
+# Example: worker auth status [--format json]
+show_auth_status() {
+ local target_provider=$1
+ local format=$2
+
+ if [ "$format" != "json" ]; then
+ log_info "Auth" "Checking authentication status..."
+ fi
+
+ # Initialize JSON array if json format
+ local json_output="["
+
+ # Load config once and extract actors section
+ local config actors
+ config=$(load_and_parse_config)
+ actors=$(get_config_section "$config" "actors")
+
+ # Function to check a specific provider
+ check_provider_status() {
+ local provider=$1
+ local status="Not configured"
+ local types=""
+
+ # Get provider's actors
+ local provider_actors
+ provider_actors=$(echo "$actors" | jq -r "[.[] | select(.type | startswith(\"$provider\"))]" 2>/dev/null)
+
+ if [ -n "$provider_actors" ] && [ "$provider_actors" != "[]" ]; then
+ types=$(echo "$provider_actors" | jq -r '.[].type' 2>/dev/null | tr '\n' ' ')
+
+ # First check if provider has credentials
+ if is_provider_configured "$provider" "$provider_actors"; then
+ # Then check if it's authenticated
+ if check_provider_auth "$provider"; then
+ status="Authenticated"
+ state="active"
+ else
+ status="Needs re-auth"
+ state="needs_reauth"
+ fi
+ else
+ status="Not configured"
+ state="missing_creds"
+ fi
+ else
+ status="Not configured"
+ state="not_configured"
+ fi
+
+ # Output status based on format
+ if [ "$format" = "json" ]; then
+ [ -n "$json_output" ] && [ "$json_output" != "[" ] && json_output+=","
+ json_output+=$(jq -n \
+ --arg provider "$provider" \
+ --arg state "$state" \
+ --arg status "$status" \
+ --arg types "$types" \
+ '{provider: $provider, state: $state, status: $status, types: $types}')
+ else
+ case "$state" in
+ "active") log_success "Auth" "$provider: $status" ;;
+ "needs_reauth") log_info "Auth" "$provider: $status" ;;
+ "missing_creds") log_warn "Auth" "$provider: $status" ;;
+ "not_configured") log_info "Auth" "$provider: $status" ;;
+ esac
+ fi
+ }
+
+ # Check status for specific provider or all providers
+ if [ -n "$target_provider" ]; then
+ check_provider_status "$target_provider"
+ else
+ check_provider_status "aws"
+ check_provider_status "gcp"
+ check_provider_status "azure"
+ check_provider_status "bitwarden"
+ fi
+
+ # Close JSON array if json format
+ if [ "$format" = "json" ]; then
+ json_output+="]"
+ echo "$json_output"
+ fi
+}
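+
+# Illustrative shape of the array emitted by 'worker auth status --format json'
+# (field names match the jq call above; the values are placeholders only):
+# [{"provider":"aws","state":"active","status":"Authenticated","types":"aws "}]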
+
+# Login to provider(s)
+login_provider() {
+ local target_provider=$1
+ log_info "Auth" "Authenticating providers..."
+
+ # Load config once and extract actors section
+ local config actors
+ config=$(load_and_parse_config)
+ actors=$(get_config_section "$config" "actors")
+
+ if [[ -z "$actors" || "$actors" == "null" ]]; then
+ log_warn "Auth" "No providers found in configuration"
+ return 1
+ fi
+
+ # Filter actors by provider if specified
+ if [[ -n "$target_provider" ]]; then
+ actors=$(echo "$actors" | jq -r "[.[] | select(.type | startswith(\"$target_provider\"))]")
+ if [[ "$actors" == "[]" ]]; then
+ log_warn "Auth" "$target_provider: Not configured"
+ return 1
+ fi
+ fi
+
+ # Use the same authentication flow as entrypoint
+ if authenticate_actors "$actors"; then
+ log_success "Auth" "Authentication complete"
+ return 0
+ else
+ log_warn "Auth" "No providers were authenticated"
+ return 1
+ fi
+}
+
+# Logout from provider(s)
+logout_provider() {
+ local target_provider=$1
+ log_info "Auth" "Logging out providers..."
+
+ # Source cleanup utilities
+ source "${WORKER_LIB_DIR}/cleanup.sh"
+
+ # Function to logout from a specific provider
+ do_provider_logout() {
+ local provider=$1
+
+ case "$provider" in
+ aws)
+ cleanup_provider "aws" "aws sso logout" "aws sso list-accounts" "AWS"
+ ;;
+ azure)
+ cleanup_provider "az" "az logout" "az account show" "Azure"
+ ;;
+ gcp)
+ cleanup_provider "gcloud" "gcloud auth revoke --all" "gcloud auth list" "GCP"
+ ;;
+ bitwarden)
+ cleanup_provider "bw" "bw logout --force" "bw status" "Bitwarden"
+ ;;
+ esac
+ }
+
+ if [ -n "$target_provider" ]; then
+ do_provider_logout "$target_provider"
+ else
+ for provider in aws gcp azure bitwarden; do
+ do_provider_logout "$provider"
+ done
+ fi
+}
+
+
+
+# Check if a provider is currently authenticated
+check_provider_auth() {
+ local provider=$1
+
+ case "$provider" in
+ aws)
+ if aws sts get-caller-identity &>/dev/null; then
+ return 0
+ fi
+ ;;
+ azure)
+ # Azure CLI can return non-zero exit code even when it succeeds
+ # so we check if the output contains valid JSON
+ if output=$(az account show 2>/dev/null) && echo "$output" | jq empty &>/dev/null; then
+ return 0
+ fi
+ ;;
+ gcp)
+ if gcloud auth list --format="value(account)" 2>/dev/null | grep -q .; then
+ return 0
+ fi
+ ;;
+ bitwarden)
+ if bw status | grep -q "unlocked"; then
+ return 0
+ fi
+ ;;
+ esac
+
+ return 1
+}
+
+# Handle auth commands
+auth_handler() {
+ local cmd=$1
+ shift
+
+ case $cmd in
+ status)
+ local provider=""
+ local format=""
+
+ # Parse arguments
+ while [ $# -gt 0 ]; do
+ case "$1" in
+ --format)
+ format="$2"
+ shift 2
+ ;;
+ --*)
+ log_error "CLI" "Unknown option: $1"
+ return 1
+ ;;
+ *)
+ if [ -z "$provider" ]; then
+ provider="$1"
+ else
+ log_error "CLI" "Unexpected argument: $1"
+ return 1
+ fi
+ shift
+ ;;
+ esac
+ done
+
+ show_auth_status "$provider" "$format"
+ return $?
+ ;;
+ login)
+ login_provider "$1"
+ return $?
+ ;;
+ logout)
+ logout_provider "$1"
+ return $?
+ ;;
+ "" | help)
+ auth_help
+ return 0
+ ;;
+ *)
+ log_error "CLI" "Unknown command: auth"
+ auth_help
+ return 1
+ ;;
+ esac
+}
diff --git a/lib/cli/config.sh b/lib/cli/config.sh
new file mode 100644
index 00000000..13a31b41
--- /dev/null
+++ b/lib/cli/config.sh
@@ -0,0 +1,283 @@
+#!/bin/bash
+
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
+source "${WORKER_LIB_DIR}/worker_config.sh"
+source "${WORKER_LIB_DIR}/secrets.sh"
+
+# Show help for config command
+config_help() {
+ cat << EOF
+Manage worker configuration
+
+Usage: worker config [command]
+
+Available Commands:
+ show Show current configuration
+ edit Edit configuration in default editor
+ locations Show configuration file locations
+ init Initialize a new configuration file
+ diff Show differences between default and current config
+ apply Parse and apply the configuration
+ resolve Resolve a secret value by name
+
+Examples:
+ worker config show
+ worker config show --format json
+ worker config edit
+ worker config locations
+ worker config init
+EOF
+}
+
+# Description: Display current worker configuration
+# Options: --format yaml|json
+# Example: worker config show --format json
+config_show() {
+ local format="yaml"
+ local args=("$@")
+ local i=0
+
+ # Parse arguments
+ while [ $i -lt ${#args[@]} ]; do
+ case "${args[$i]}" in
+ --format)
+ i=$((i + 1))
+ format="${args[$i]}"
+ ;;
+ *)
+ log_error "Config" "Unknown argument: ${args[$i]}"
+ return 1
+ ;;
+ esac
+ i=$((i + 1))
+ done
+
+ log_info "Config" "Current configuration:"
+
+ # Check if user config exists
+ if [ ! -f "$USER_CONFIG" ] || [ ! -s "$USER_CONFIG" ]; then
+ log_info "Config" "No user configuration found at $USER_CONFIG"
+ log_info "Config" "Use 'worker config init' to create one"
+ return 0
+ fi
+
+ # Parse user config
+ local config
+ if ! config=$(yq eval -o=json "$USER_CONFIG" 2>/dev/null); then
+ log_error "Config" "Failed to parse user configuration"
+ return 1
+ fi
+
+ # Show config based on format
+ case $format in
+ json)
+ echo "$config"
+ ;;
+ yaml)
+ yq eval "$USER_CONFIG"
+ ;;
+ *)
+ log_error "Config" "Unknown format: $format"
+ return 1
+ ;;
+ esac
+}
+
+# Description: Edit configuration in default editor
+# Example: worker config edit
+edit_config() {
+ local config_file="$USER_CONFIG"
+
+ # Create directory if it doesn't exist
+ mkdir -p "$(dirname "$config_file")"
+
+ # Create file if it doesn't exist
+ if [ ! -f "$config_file" ]; then
+ # Copy built-in config first to ensure actors section is preserved
+ if [ -f "$BUILT_IN_CONFIG" ]; then
+ cp "$BUILT_IN_CONFIG" "$config_file" || {
+ log_error "Config" "Failed to copy built-in configuration"
+ return 1
+ }
+ else
+ # If built-in config doesn't exist, create minimal structure
+ cat > "$config_file" << EOF
+kind: workerConfig
+version: udx.io/worker-v1/config
+config: {}
+EOF
+ fi
+ fi
+
+ # Use default editor or fallback to nano
+ ${EDITOR:-nano} "$config_file"
+
+ # Basic YAML validation
+ if ! yq eval '.' "$config_file" >/dev/null 2>&1; then
+ log_error "Config" "Configuration is invalid YAML"
+ return 1
+ fi
+
+ log_success "Config" "Configuration saved successfully"
+ return 0
+}
+
+
+
+# Description: Display paths of all configuration files
+# Example: worker config locations
+show_locations() {
+ cat << EOF
+Configuration Locations:
+ Built-in config: $BUILT_IN_CONFIG
+ User config: $USER_CONFIG
+ Merged config: $MERGED_CONFIG
+EOF
+}
+
+# Description: Initialize a new configuration file with defaults
+# Example: worker config init
+init_config() {
+ # Skip if config already exists
+ if [ -f "$USER_CONFIG" ]; then
+ log_info "Config" "Configuration already exists at $USER_CONFIG"
+ return 0
+ fi
+
+ # Create directory if it doesn't exist
+ mkdir -p "$(dirname "$USER_CONFIG")"
+
+ # Create minimal config with timestamp
+ cat > "$USER_CONFIG" << EOF
+kind: workerConfig
+version: udx.io/worker-v1/config
+config:
+ env:
+ CREATED: "$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
+EOF
+
+ if [ -f "$USER_CONFIG" ]; then
+ log_success "Config" "Configuration initialized at $USER_CONFIG"
+ merge_worker_configs # Merge with built-in config
+ return 0
+ else
+ log_error "Config" "Failed to create configuration file"
+ return 1
+ fi
+}
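+
+# Illustrative user configuration (a sketch, not a shipped default). The keys
+# mirror the sections read elsewhere in this changeset (.config.env and
+# .config.secrets); the values below are placeholders only:
+#
+# kind: workerConfig
+# version: udx.io/worker-v1/config
+# config:
+#   env:
+#     APP_ENV: "production"
+#   secrets:
+#     DB_PASSWORD: "azure/my-vault/db-password"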
+
+# Description: Show differences between default and current configuration
+# Options: --format unified|context|git
+# Example: worker config diff --format git
+show_diff() {
+ # Check if configs exist
+ if ! ensure_config_exists "$BUILT_IN_CONFIG"; then
+ return 1
+ fi
+
+ if [ ! -f "$USER_CONFIG" ]; then
+ log_error "Config" "User configuration does not exist at $USER_CONFIG"
+ return 1
+ fi
+
+ log_info "Config" "Differences between built-in and user configuration:"
+ diff -u "$BUILT_IN_CONFIG" "$USER_CONFIG" || true
+}
+
+# Description: Parse and apply the configuration
+# Example: worker config apply
+apply_config() {
+ log_info "Config" "Parsing and applying configuration..."
+
+ # Load and parse the configuration
+ local config_json
+ if ! config_json=$(load_and_parse_config); then
+ log_error "Config" "Failed to load and parse configuration"
+ return 1
+ fi
+
+ # Export variables from the configuration
+ if ! export_variables_from_config "$config_json"; then
+ log_error "Config" "Failed to export variables from configuration"
+ return 1
+ fi
+
+ # Extract secrets section from config
+ local secrets_json
+ secrets_json=$(echo "$config_json" | jq -r '.config.secrets // {}')
+
+ # Fetch and set secrets if any are defined
+ if [[ "$secrets_json" != "{}" ]]; then
+ if ! fetch_secrets "$secrets_json"; then
+ log_error "Config" "Failed to fetch and set secrets"
+ return 1
+ fi
+ fi
+
+ log_success "Config" "Configuration successfully parsed and applied"
+ return 0
+}
+
+# Description: Resolve a secret value by name
+# Example: worker config resolve SECRET_NAME
+resolve_secret() {
+ local secret_name="$1"
+
+ if [[ -z "$secret_name" ]]; then
+ log_error "Config" "Secret name is required"
+ return 1
+ fi
+
+ # Load and parse the configuration
+ local config_json
+ config_json=$(load_and_parse_config)
+ if [[ -z "$config_json" ]]; then
+ log_error "Config" "Failed to load configuration"
+ return 1
+ fi
+
+ # Use resolve_secret_by_name from secrets.sh
+ resolve_secret_by_name "$secret_name" "$config_json"
+}
+
+# Handle config commands
+config_handler() {
+ local cmd=$1
+ shift
+
+ case $cmd in
+ show)
+ config_show "$@"
+ ;;
+ edit)
+ edit_config
+ ;;
+ locations)
+ show_locations
+ ;;
+ apply)
+ apply_config
+ ;;
+ init)
+ init_config
+ ;;
+ diff)
+ show_diff
+ ;;
+ resolve)
+ resolve_secret "$@"
+ ;;
+ help)
+ config_help
+ ;;
+ "")
+ config_help
+ ;;
+ *)
+ log_error "Config" "Unknown command: $cmd"
+ config_help
+ exit 1
+ ;;
+ esac
+}
diff --git a/lib/cli/env.sh b/lib/cli/env.sh
index b3b68bc5..5df0d67a 100644
--- a/lib/cli/env.sh
+++ b/lib/cli/env.sh
@@ -1,45 +1,405 @@
#!/bin/bash
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
+source "${WORKER_LIB_DIR}/worker_config.sh"
-# Function to display current environment settings or a specific variable
-get_environment() {
- if [ $# -eq 0 ]; then
- log_debug "Current Environment Settings:" "$(env)"
- else
- log_debug "$1" "${!1}"
+# Show help for env command
+env_help() {
+ cat << EOF
+Manage environment variables and secrets
+
+Usage: worker env [command]
+
+Available Commands:
+ show Show environment variables
+ set Set an environment variable
+ unset Unset an environment variable
+ reload Reload environment and secrets from configuration
+ status Show environment status
+
+Options:
+ --format Output format (text/json)
+ --filter Filter variables by prefix
+ --include-secrets Include secrets in output (masked)
+
+Examples:
+ worker env show # Show all environment variables
+ worker env show --format json # Show variables in JSON format
+ worker env show --filter AWS_* # Show only AWS variables
+ worker env set MY_VAR "my value" # Set a new variable
+ worker env unset MY_VAR # Remove a variable
+ worker env reload # Reload environment and secrets from config
+EOF
+}
+
+# Description: Display environment variables with optional filtering
+# Options: --format text|json, --filter PATTERN, --include-secrets
+# Example: worker env show --format json --filter AWS_* --include-secrets
+show_environment() {
+ local format=${1:-text}
+ local filter=$2
+ local include_secrets=${3:-false}
+
+
+ # Check if environment file exists
+ if [ ! -f "$WORKER_ENV_FILE" ]; then
+ log_error "Env" "Environment file not found"
+ return 1
fi
+
+ case $format in
+ json)
+ # Get variables and convert to JSON
+ local vars
+ if [ -n "$filter" ]; then
+ vars=$(grep "^export $filter" "$WORKER_ENV_FILE")
+ else
+ vars=$(grep "^export" "$WORKER_ENV_FILE")
+ fi
+
+ # Convert to JSON
+ local json="{"
+ local first=true
+ while IFS= read -r line; do
+ if [[ $line =~ ^export[[:space:]]+([^=]+)=\"([^\"]*)\" ]]; then
+ if [ "$first" = true ]; then
+ first=false
+ else
+ json="$json,"
+ fi
+ key=${BASH_REMATCH[1]}
+ value=${BASH_REMATCH[2]}
+ json="$json\"$key\":\"$value\""
+ fi
+ done <<< "$vars"
+ json="$json}"
+ if command -v jq >/dev/null 2>&1; then
+ echo "$json" | jq .
+ else
+ echo "$json"
+ fi
+ ;;
+ text)
+ if [ -n "$filter" ]; then
+ grep "^export $filter" "$WORKER_ENV_FILE" | sed 's/export \([^=]*\)="\([^"]*\)"/\1=\2/'
+ else
+ grep "^export" "$WORKER_ENV_FILE" | sed 's/export \([^=]*\)="\([^"]*\)"/\1=\2/'
+ fi
+ ;;
+ *)
+ log_error "Env" "Unknown format: $format"
+ return 1
+ ;;
+ esac
}
-# Function to set a new environment variable
+# Description: Set a new environment variable or update existing one
+# Example: worker env set MY_VAR "my value"
set_environment() {
- if [ $# -ne 2 ]; then
- log_warn "CLI" "Usage: $0 env set "
+ local name=$1
+ local value=$2
+
+ if [ -z "$name" ]; then
+ log_error "Env" "Variable name is required"
+ return 1
+ fi
+
+ if [ -z "$value" ] && [ "$#" -lt 2 ]; then
+ log_error "Env" "Variable value is required"
+ return 1
+ fi
+
+ # Validate variable name
+ if ! [[ $name =~ ^[a-zA-Z_][a-zA-Z0-9_]*$ ]]; then
+ log_error "Env" "Invalid variable name: $name"
+ return 1
+ fi
+
+ # Add to environment file
+ if [ -f "$WORKER_ENV_FILE" ]; then
+ # Remove existing declaration if any
+ sed -i "/^export $name=/d" "$WORKER_ENV_FILE"
+ # Add new declaration
+ echo "export $name=\"$value\"" >> "$WORKER_ENV_FILE"
+ # Export in current session
+ export "$name=$value"
+ log_success "Env" "Set $name to '$value'"
+ else
+ log_error "Env" "Environment file not found"
+ return 1
+ fi
+}
+
+# Description: Remove an environment variable
+# Example: worker env unset MY_VAR
+unset_environment() {
+ local name=$1
+
+ if [ -z "$name" ]; then
+ log_error "Env" "Variable name is required"
+ return 1
+ fi
+
+ # Check if variable exists in environment file
+ if grep -q "^export $name=" "$WORKER_ENV_FILE"; then
+ # Create a temporary file
+ local tmpfile
+ tmpfile=$(mktemp)
+
+ # Remove the variable from environment file
+ grep -v "^export $name=" "$WORKER_ENV_FILE" > "$tmpfile"
+ cat "$tmpfile" > "$WORKER_ENV_FILE"
+ rm -f "$tmpfile"
+
+ # Also remove from current environment
+ unset "$name"
+
+ log_success "Env" "Unset $name"
+
+ # Reload environment to ensure consistency
+ source "$WORKER_ENV_FILE"
+ else
+ log_warn "Env" "Variable $name is not set"
+ fi
+}
+
+
+
+# Description: Validate environment variables against schema
+# Options: --format text|json
+# Example: worker env validate --format json
+validate_environment() {
+ local config
+ config=$(load_and_parse_config)
+
+ if [ -z "$config" ]; then
+ log_error "Env" "Failed to load configuration"
+ return 1
+ fi
+
+ # Extract required variables from config
+ local required_vars
+ required_vars=$(echo "$config" | yq eval '.config.env | keys | .[]' -)
+
+ local failed=0
+ while IFS= read -r var; do
+ if [ -z "${!var}" ]; then
+ log_error "Env" "Required variable $var is not set"
+ failed=1
+ fi
+ done <<< "$required_vars"
+
+ if [ $failed -eq 0 ]; then
+ log_success "Env" "All required environment variables are set"
+ else
return 1
fi
- export "$1=$2"
- log_debug "CLI" "Set $1 to '$2'."
+}
+
+# Description: Generate a template environment file
+# Options: --file PATH
+# Example: worker env template --file .env.template
+generate_template() {
+ local config
+ config=$(load_and_parse_config)
+
+ if [ -z "$config" ]; then
+ log_error "Env" "Failed to load configuration"
+ return 1
+ fi
+
+ echo "# Worker Environment Variables"
+ echo "# Generated on $(date)"
+ echo
+
+ # Extract variables from config
+ echo "$config" | yq eval '.config.env | to_entries | .[] | "# " + .key + "\n" + .key + "=\"" + .value + "\""' -
+}
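+
+# For a config containing '.config.env.MY_VAR: "hello"', the yq expression
+# above would emit roughly (illustrative):
+#   # MY_VAR
+#   MY_VAR="hello"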
+
+# Description: Reset environment to default values
+# Example: worker env reset
+reset_environment() {
+ # Clear all non-system variables
+ local system_vars="HOME|USER|PATH|SHELL|TERM|LANG|PWD"
+
+ # Get all variables except system ones
+ local vars_to_unset
+ vars_to_unset=$(env | grep -vE "^($system_vars)=")
+
+ while IFS='=' read -r key _; do
+ if [ -n "$key" ]; then
+ unset "$key"
+ fi
+ done <<< "$vars_to_unset"
+
+ # Reconfigure environment
+ if configure_environment; then
+ log_success "Env" "Environment reset to default state"
+ else
+ log_error "Env" "Failed to reset environment"
+ return 1
+ fi
+}
+
+# Parse command line arguments
+
+
+# Description: Show environment status and validation results
+# Options: --format text|json
+# Example: worker env status --format json
+show_status() {
+ local format=${1:-text}
+
+ local env_file_exists=false
+ local secrets_file_exists=false
+ local env_count=0
+ local secrets_count=0
+
+ [ -f "$WORKER_ENV_FILE" ] && env_file_exists=true
+ [ -f "$WORKER_SECRETS_FILE" ] && secrets_file_exists=true
+
+ if [ "$env_file_exists" = true ]; then
+ env_count=$(grep -c "^export" "$WORKER_ENV_FILE") || env_count=0
+ fi
+
+ if [ "$secrets_file_exists" = true ]; then
+ secrets_count=$(grep -c "^export" "$WORKER_SECRETS_FILE") || secrets_count=0
+ fi
+
+ case $format in
+ json)
+ {
+ echo "{"
+ echo " \"environment\": {"
+ echo " \"file\": \"$WORKER_ENV_FILE\","
+ echo " \"exists\": $env_file_exists,"
+ echo " \"variables\": $env_count"
+ echo " },"
+ echo " \"secrets\": {"
+ echo " \"file\": \"$WORKER_SECRETS_FILE\","
+ echo " \"exists\": $secrets_file_exists,"
+ echo " \"variables\": $secrets_count"
+ echo " }"
+ echo "}"
+ } | jq '.'
+ ;;
+ text)
+ echo "Environment Status:"
+ echo "------------------"
+ echo "Environment File: $WORKER_ENV_FILE"
+ echo " - Exists: $env_file_exists"
+ echo " - Variables: $env_count"
+ echo
+ echo "Secrets File: $WORKER_SECRETS_FILE"
+ echo " - Exists: $secrets_file_exists"
+ echo " - Variables: $secrets_count"
+ ;;
+ *)
+ log_error "Env" "Unknown format: $format"
+ return 1
+ ;;
+ esac
}
# Handle environment commands
env_handler() {
- case $1 in
- get)
- shift
- get_environment "$@"
+ local cmd=$1
+ shift
+
+ # Store original arguments for set/unset commands
+ local orig_args=("$@")
+
+ # Default values
+ local format="text"
+ local filter=""
+ local include_secrets="false"
+
+ # Parse arguments for show/status commands
+ if [[ "$cmd" == "show" || "$cmd" == "status" ]]; then
+ while [[ $# -gt 0 ]]; do
+ case $1 in
+ --format=*|--type=*)
+ format="${1#*=}"
+ shift
+ ;;
+ --format|--type)
+ if [[ -n "$2" && ! "$2" =~ ^-- ]]; then
+ format="$2"
+ shift 2
+ else
+ shift
+ fi
+ ;;
+ --filter=*)
+ filter="${1#*=}"
+ shift
+ ;;
+ --filter)
+ if [[ -n "$2" && ! "$2" =~ ^-- ]]; then
+ filter="$2"
+ shift 2
+ else
+ shift
+ fi
+ ;;
+ --include-secrets)
+ include_secrets="true"
+ shift
+ ;;
+ *)
+ shift
+ ;;
+ esac
+ done
+ fi
+
+ case $cmd in
+ show)
+ show_environment "$format" "$filter" "$include_secrets"
;;
set)
- shift
- if [ $# -eq 2 ]; then
- set_environment "$@"
- else
- log_warn "CLI" "Usage: $0 env set "
- exit 1
+ set_environment "${orig_args[0]}" "${orig_args[1]}"
+ ;;
+ unset)
+ unset_environment "$1"
+ ;;
+ reload)
+ log_info "Env" "Reloading environment from configuration..."
+ local config_json
+ if ! config_json=$(load_and_parse_config); then
+ log_error "Env" "Failed to load and parse configuration"
+ return 1
+ fi
+
+ if ! export_variables_from_config "$config_json"; then
+ log_error "Env" "Failed to export variables from configuration"
+ return 1
fi
+
+ # Extract secrets section from config
+ local secrets_json
+ secrets_json=$(echo "$config_json" | jq -r '.config.secrets // {}')
+
+ # Fetch and set secrets if any are defined
+ if [[ "$secrets_json" != "{}" ]]; then
+ if ! fetch_secrets "$secrets_json"; then
+ log_error "Env" "Failed to fetch and set secrets"
+ return 1
+ fi
+ fi
+
+ log_success "Env" "Environment successfully reloaded from configuration"
+ ;;
+ status)
+ show_status "$format"
+ ;;
+ help)
+ env_help
;;
*)
- log_warn "CLI" "Usage: $0 env {show [VARIABLE_NAME]|set }"
+ log_error "Env" "Unknown command: $cmd"
+ env_help
exit 1
;;
esac
diff --git a/lib/cli/health.sh b/lib/cli/health.sh
new file mode 100644
index 00000000..335171c0
--- /dev/null
+++ b/lib/cli/health.sh
@@ -0,0 +1,150 @@
+#!/bin/bash
+
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
+
+# Show help for health command
+health_help() {
+ cat << EOF
+Check system health and run diagnostics
+
+Usage: worker health [command]
+
+Available Commands:
+ status Show current health status
+
+Options:
+ --format Output format (text|json)
+
+Examples:
+ worker health status
+ worker health status --format json
+EOF
+}
+
+# Description: Check system health metrics
+# Example: worker health status [--format json]
+check_health() {
+ local format="text"
+ local failed=0
+
+ while [ $# -gt 0 ]; do
+ case $1 in
+ --format)
+ format=$2
+ shift 2
+ ;;
+ *)
+ log_error "Health" "Unknown option: $1"
+ return 1
+ ;;
+ esac
+ done
+
+ if [ "$format" != "json" ]; then
+ log_info "Health" "Running health check..."
+ fi
+
+ # Check system resources
+ check_system_resources || failed=1
+
+ # Gather all data
+ local timestamp
+ local disk_usage
+ local mem_total
+ local mem_used
+ local mem_usage
+ local load_avg
+ timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
+ disk_usage=$(df -h / | awk 'NR==2 {print $5}' | tr -d '%')
+ mem_total=$(free -b | awk '/Mem:/ {printf "%.2f", $2/1024/1024/1024}')
+ mem_used=$(free -b | awk '/Mem:/ {printf "%.2f", $3/1024/1024/1024}')
+ mem_usage=$(free | awk '/Mem:/ {printf("%.0f", $3/$2 * 100)}')
+ load_avg=$(uptime | awk -F'load average:' '{print $2}' | cut -d, -f1 | tr -d ' ')
+
+ if [ "$format" = "json" ]; then
+ {
+ echo "{"
+ echo " \"timestamp\": \"$timestamp\","
+ echo " \"status\": \"$([ $failed -eq 0 ] && echo "healthy" || echo "unhealthy")\","
+ echo " \"disk\": {"
+ echo " \"usage_percent\": $disk_usage"
+ echo " },"
+ echo " \"memory\": {"
+ echo " \"total_gb\": $mem_total,"
+ echo " \"used_gb\": $mem_used,"
+ echo " \"usage_percent\": $mem_usage"
+ echo " },"
+ echo " \"load_average\": $load_avg"
+ echo "}"
+ }
+ else
+ if [ $failed -eq 0 ]; then
+ log_success "Health" "All health checks passed"
+ else
+ log_error "Health" "Some health checks failed"
+ fi
+ fi
+
+ return $failed
+}
+
+# Description: Check system resource usage (disk, memory, CPU)
+# Example: worker health status
+check_system_resources() {
+ local failed=0
+
+ # Check disk space
+ local disk_usage
+ disk_usage=$(df -h / | awk 'NR==2 {print $5}' | tr -d '%')
+ if [ "$disk_usage" -gt 90 ]; then
+ log_error "Health" "Disk usage is critical: ${disk_usage}%"
+ failed=1
+ else
+ log_success "Health" "Disk usage is normal: ${disk_usage}%"
+ fi
+
+ # Check memory
+ local mem_usage
+ mem_usage=$(free | awk '/Mem:/ {printf("%.0f", $3/$2 * 100)}')
+ if [ "$mem_usage" -gt 90 ]; then
+ log_error "Health" "Memory usage is critical: ${mem_usage}%"
+ failed=1
+ else
+ log_success "Health" "Memory usage is normal: ${mem_usage}%"
+ fi
+
+ # Check load average
+ local load_avg
+ load_avg=$(uptime | awk -F'load average:' '{print $2}' | cut -d, -f1)
+ if [ "${load_avg%.*}" -gt 4 ]; then
+ log_error "Health" "Load average is high: $load_avg"
+ failed=1
+ else
+ log_success "Health" "Load average is normal: $load_avg"
+ fi
+
+ return $failed
+}
+
+
+
+# Handle health commands
+health_handler() {
+ local command=$1
+ shift
+
+ case $command in
+ status)
+ check_health "$@"
+ ;;
+ ""|-h|--help)
+ health_help
+ ;;
+ *)
+ log_error "Health" "Unknown command: $command"
+ health_help
+ return 1
+ ;;
+ esac
+}
diff --git a/lib/cli/sbom.sh b/lib/cli/sbom.sh
index 3ba15d70..d1be0e37 100644
--- a/lib/cli/sbom.sh
+++ b/lib/cli/sbom.sh
@@ -1,28 +1,247 @@
#!/bin/bash
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
-# Function to display dpkg packages in a table format using awk
+# Show help for sbom command
+sbom_help() {
+ cat << EOF
+Manage Software Bill of Materials (SBOM)
+
+Usage: worker sbom [command]
+
+Available Commands:
+ generate Generate SBOM in various formats
+ verify Verify package integrity
+
+Options:
+ --format Output format (text/json)
+ --type Package type (system/python/all)
+ --filter Filter packages by name pattern
+
+Examples:
+ worker sbom generate
+ worker sbom generate --format json
+ worker sbom verify
+EOF
+}
+
+# Description: Generate Software Bill of Materials in various formats
+# Options: --format text|json, --type system|python|all, --filter PATTERN
+# Example: worker sbom generate --format json --type python --filter requests
generate_sbom() {
- log_debug "Package Name | Version | Architecture"
- log_debug "----------------------------------------------------------"
- dpkg-query -W -f='${binary:Package} | ${Version} | ${Architecture}\n' | awk -F'|' '{
- printf("%-30s | %-20s | %-10s\n", $1, $2, $3)
- }'
- log_debug "----------------------------------------------------------"
+ local format=${1:-text}
+ local type=${2:-all}
+ local filter=$3
+
+ log_info "SBOM" "Generating software bill of materials..." >&2
+
+ # Get system packages
+ local system_packages
+ if [[ $type == "all" || $type == "system" ]]; then
+ if [ -n "$filter" ]; then
+ system_packages=$(dpkg-query -W -f='${binary:Package}\t${Version}\t${Architecture}\t${Status}\n' | grep "$filter" 2>/dev/null)
+ else
+ system_packages=$(dpkg-query -W -f='${binary:Package}\t${Version}\t${Architecture}\t${Status}\n' 2>/dev/null)
+ fi
+ fi
+
+ # Get Python packages
+ local python_packages
+ if [[ $type == "all" || $type == "python" ]]; then
+ if [ -n "$filter" ]; then
+ python_packages=$(pip list --format=json 2>/dev/null | jq -r '.[] | select(.name | contains("'"$filter"'")) | [.name, .version] | @tsv')
+ else
+ python_packages=$(pip list --format=json 2>/dev/null | jq -r '.[] | [.name, .version] | @tsv')
+ fi
+ fi
+
+ case $format in
+ json)
+ {
+ # Start JSON object
+ echo "{"
+
+ # Track if we need a comma between sections
+ local need_comma=false
+
+ # System packages section
+ if [ -n "$system_packages" ]; then
+ echo ' "system_packages": {'
+ # Convert to array for processing
+ mapfile -t packages <<< "$system_packages"
+ local total=${#packages[@]}
+ local count=0
+
+ for pkg in "${packages[@]}"; do
+ count=$((count + 1))
+ IFS=$'\t' read -r name version arch _ <<< "$pkg"
+ printf ' "%s": {
+ "version": "%s",
+ "architecture": "%s"
+ }' "$name" "$version" "$arch"
+ if [ $count -lt "$total" ]; then
+ echo ","
+ else
+ echo ""
+ fi
+ done
+ echo " }"
+ need_comma=true
+ fi
+
+ # Python packages section
+ if [ -n "$python_packages" ]; then
+ if $need_comma; then
+ echo ","
+ fi
+ echo ' "python_packages": {'
+ # Convert to array for processing
+ mapfile -t packages <<< "$python_packages"
+ local total=${#packages[@]}
+ local count=0
+
+ for pkg in "${packages[@]}"; do
+ count=$((count + 1))
+ IFS=$'\t' read -r name version <<< "$pkg"
+ printf ' "%s": {
+ "version": "%s"
+ }' "$name" "$version"
+ if [ $count -lt "$total" ]; then
+ echo ","
+ else
+ echo ""
+ fi
+ done
+ echo " }"
+ fi
+
+ # Close JSON object
+ echo "}"
+ } | jq '.'
+ ;;
+ text)
+ {
+ echo "Software Bill of Materials"
+ echo "Generated on $(date)"
+ echo
+ if [ -n "$system_packages" ]; then
+ echo "System Packages:"
+ echo "---------------"
+ echo "Package Name | Version | Architecture"
+ echo "----------------------------------------------------------"
+ echo "$system_packages" | awk -F'\t' '{
+ printf("%-20s | %-16s | %-12s\n", $1, $2, $3)
+ }'
+ echo
+ fi
+ if [ -n "$python_packages" ]; then
+ echo "Python Packages:"
+ echo "---------------"
+ echo "Package Name | Version"
+ echo "----------------------------------"
+ echo "$python_packages" | awk -F'\t' '{
+ printf("%-20s | %-16s\n", $1, $2)
+ }'
+ fi
+ }
+ ;;
+ *)
+ log_error "SBOM" "Unknown format: $format" >&2
+ return 1
+ ;;
+ esac
}
-# Handler for the sbom command
+
+# Description: Verify integrity of installed packages
+# Options: --type system|python|all
+# Example: worker sbom verify --type all
+verify_packages() {
+ log_info "SBOM" "Verifying package integrity..."
+
+ # Verify system packages
+ local failed=0
+ while IFS= read -r pkg; do
+ if ! dpkg -V "$pkg" >/dev/null 2>&1; then
+ log_error "SBOM" "Package integrity check failed: $pkg" >&2
+ failed=1
+ fi
+ done < <(dpkg-query -f '${Package}\n' -W)
+
+ if [ $failed -eq 0 ]; then
+ log_success "SBOM" "All packages verified successfully" >&2
+ fi
+}
+
+# Parse command line arguments
+parse_args() {
+ local command=$1
+ shift
+
+ local format="text"
+ local type="all"
+ local filter=""
+
+ while [[ $# -gt 0 ]]; do
+ case $1 in
+ --format)
+ format="$2"
+ shift 2
+ ;;
+ --type)
+ type="$2"
+ shift 2
+ ;;
+ --filter)
+ filter="$2"
+ shift 2
+ ;;
+ *)
+ log_error "SBOM" "Unknown option: $1" >&2
+ return 1
+ ;;
+ esac
+ done
+
+ echo "$command" "$format" "$type" "$filter"
+}
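+
+# Example (illustrative): 'parse_args generate --format json --type python'
+# echoes 'generate json python ', which sbom_handler reads back into
+# command/format/type/filter below.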
+
+# Handle sbom commands
sbom_handler() {
- case $1 in
+ if [ $# -eq 0 ]; then
+ sbom_help
+ return 0
+ fi
+
+ read -r command format type filter <<< "$(parse_args "$@")"
+
+ case $command in
generate)
- shift
- generate_sbom "$@"
+ if [ "$format" = "json" ]; then
+ # Capture all output
+ output=$(generate_sbom "$format" "$type" "$filter" 2>&1)
+ # Extract only the JSON part (everything between first { and last })
+ echo "$output" | awk '/^{/,/^}$/ {print}'
+ else
+ generate_sbom "$format" "$type" "$filter"
+ fi
+ ;;
+ verify)
+ verify_packages
+ ;;
+ help|"")
+ sbom_help
;;
*)
- log_warn "CLI" "Usage: $0 sbom {generate}"
- exit 1
+ log_error "SBOM" "Unknown command: $command" >&2
+ sbom_help
+ return 1
;;
esac
-}
\ No newline at end of file
+}
+
+# If script is being executed directly
+if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
+ sbom_handler "$@"
+fi
diff --git a/lib/cli/service.sh b/lib/cli/service.sh
index ed764f1e..8aeb9190 100644
--- a/lib/cli/service.sh
+++ b/lib/cli/service.sh
@@ -1,165 +1,462 @@
#!/bin/bash
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
+
+# Constants
+SERVICES_CONFIG_DIR="${HOME}/.config/worker"
+SERVICES_CONFIG_FILE="${SERVICES_CONFIG_DIR}/services.yaml"
+
+# Show help for service command
+service_help() {
+ cat << 'EOF'
+Usage: worker service <command> [options]
+
+Commands:
+ list List all configured and running services
+ status Show detailed status of a service
+ start Start a service
+ stop Stop a service
+ restart Restart a service
+ logs View service logs
+ errors View service error logs
+ config Show current service configuration
+ init Initialize a new service configuration
+ help Show this help message
+
+Options:
+ --format json Output in JSON format (for list, status, config)
+ --tail N Show last N lines of logs (default: 100)
+ --follow Follow log output in real time
+ --error-only Show only error logs
+
+Examples:
+ worker service list
+ worker service start my-app
+ worker service logs my-app --tail 50 --follow
+ worker service status my-app --format json
+EOF
+}
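+
+# Illustrative services.yaml layout (a sketch): the checks in this file only
+# query .services[].name; any other fields shown here (such as command) are
+# assumptions about the schema, not a documented contract.
+#
+# services:
+#   - name: my-app
+#     command: "node /opt/worker/apps/my-app/server.js"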
service_handler() {
local cmd=$1
shift # Remove the command from args
+ # Parse format option if present
+ local format=""
+ while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --format)
+ format="$2"
+ shift 2
+ ;;
+ --format=*)
+ format="${1#*=}"
+ shift
+ ;;
+ *)
+ break
+ ;;
+ esac
+ done
+
+ # Show help if no command or help requested
+ if [ -z "$cmd" ] || [ "$cmd" = "help" ]; then
+ service_help
+ return 0
+ fi
+
+ # Check if services config exists before most commands
+ if [ "$cmd" != "init" ]; then
+ if [ ! -f "$SERVICES_CONFIG_FILE" ]; then
+ log_warn "Service" "No services configuration found"
+ log_info "Service" "Run 'worker service' for information about service configuration"
+ return 1
+ fi
+ fi
+
case $cmd in
list)
- list_services
- ;;
+ list_services "$format"
+ ;;
status)
- check_status "$1"
- ;;
+ check_status "$1" "$format"
+ ;;
logs)
follow_logs "$@"
- ;;
+ ;;
errors)
follow_logs "$1" "err"
- ;;
+ ;;
config)
- show_config
- ;;
+ service_show_config "$format"
+ ;;
+ init)
+ init_service_config
+ ;;
start|stop|restart)
manage_service "$cmd" "$1"
- ;;
+ ;;
*)
- log_warn "CLI" "Usage: $0 {list|status|logs|config|start|stop|restart}"
- exit 1
- ;;
+ log_error "Service" "Unknown command: $cmd"
+ service_help
+ return 1
+ ;;
esac
}
-# Function to list all services
+# Description: List all configured services and their status
+# Example: worker service list [--format json]
list_services() {
- # Capture the output of supervisorctl status
- local services_status
- services_status=$(supervisorctl status 2>&1) # Also capture stderr to handle error messages
+ local format="$1"
- # Check if Supervisor is not running, not accessible, or if there are no managed services
- if [[ -z "$services_status" ]] || echo "$services_status" | grep -Eq 'no such|ERROR'; then
- log_warn "Service" "No services are currently managed."
- exit 1
+ # Check if services config exists
+ if [ ! -f "$SERVICES_CONFIG_FILE" ]; then
+ if [ "$format" = "json" ]; then
+ echo '{"error":"No services configuration found","services":[]}'
+ else
+ log_warn "Service" "No services configuration found"
+ log_info "Service" "Run 'worker service init' to create a new configuration"
+ fi
+ return 1
+ fi
+
+ # Get supervisor status and running services
+ local supervisor_status
+ local supervisor_running=false
+ if supervisor_status=$(supervisorctl status 2>&1); then
+ supervisor_running=true
+ fi
+
+ # Format and display services
+ if [ "$format" = "json" ]; then
+ # Build JSON output
+ local json_services="[]"
+
+ # Add running services if supervisor is running
+ if [ "$supervisor_running" = true ] && [ -n "$supervisor_status" ]; then
+ json_services="["
+ local first=true
+ while IFS= read -r line; do
+ if [ -n "$line" ]; then
+ if [ "$first" = true ]; then
+ first=false
+ else
+ json_services+=","
+ fi
+ # Parse supervisor status line
+ local name status pid uptime
+ read -r name status pid uptime <<< "$line"
+ json_services+="{\"name\":\"$name\",\"status\":\"$status\",\"pid\":\"$pid\",\"uptime\":\"$uptime\"}"
+ fi
+ done <<< "$supervisor_status"
+ json_services+="]"
+ fi
+ echo "{\"services\":$json_services}"
+ else
+ # Show running services if supervisor is running
+ if [ "$supervisor_running" = true ]; then
+ if [ -n "$supervisor_status" ]; then
+ log_info "Service" "Running services:"
+ # Print table header
+ printf "%-2s %-15s %-8s %-6s %-12s\n" "" "NAME" "STATUS" "PID" "UPTIME"
+ printf "%-2s %-15s %-8s %-6s %-12s\n" "" "----" "------" "---" "------"
+ while IFS= read -r line; do
+ if [ -n "$line" ]; then
+ # Parse supervisor status line
+ local name status pid uptime
+ read -r name status pid uptime <<< "$line"
+
+ # Use different symbols based on status
+ local symbol="β οΈ"
+ case "$status" in
+ RUNNING) symbol="β
";;
+ STOPPED) symbol="β";;
+ FATAL) symbol="π";;
+ *) symbol="β οΈ";;
+ esac
+ printf "%-2s %-15s %-8s %-6s %-12s\n" "$symbol" "$name" "$status" "$pid" "$uptime"
+ fi
+ done <<< "$supervisor_status"
+ else
+ log_info "Service" "No running services"
+ fi
+ else
+ log_info "Service" "Supervisor is not running"
+ log_info "Service" "Use 'worker service start ' to start a service"
+ fi
+ log_info "Service" "Use 'worker service config' to view service configuration"
fi
-
- log_debug "Service" "Listing all managed services:"
- local i=1
- echo "$services_status" | while read -r line; do
- log_debug "Service" "$i. $line"
- ((i++))
- done
}
-# Function to check the status of one or all services
+# Description: Show detailed status of a specific service
+# Example: worker service status my-app [--format json]
check_status() {
- # Require a service name for this function
- if [ -z "$1" ]; then
- log_warn "Service" "Error: No service name provided."
- log_warn "Service" "Usage: $0 status "
- exit 1
+ local service=$1
+ local format=$2
+
+ # Check if service name is provided
+ if [ -z "$service" ]; then
+ log_error "Service" "Service name required"
+ log_info "Service" "Usage: worker service status "
+ return 1
fi
-
- # Attempt to capture the status of the specific service, including errors
- local service_status
- service_status=$(supervisorctl status "$1" 2>&1)
-
- # Check if Supervisor is not running, not accessible, or if the service does not exist
- if [[ -z "$service_status" ]] || echo "$service_status" | grep -Eq 'no such|ERROR'; then
- log_warn "Service" "The service '$1' does not exist."
- exit 1
+
+ # Check if service exists in config
+ if ! yq e ".services[] | select(.name == \"$service\") | .name" "$SERVICES_CONFIG_FILE" 2>/dev/null | grep -q "$service"; then
+ log_error "Service" "Service '$service' not found in configuration"
+ return 1
fi
+
+ # Get service status from supervisor
+ local status_output
+ status_output=$(supervisorctl status "$service" 2>&1)
- # Directly output the captured service status
- echo "$service_status"
+ # Check if supervisor is running
+ if echo "$status_output" | grep -q "unix:///var/run/supervisor.sock no such file"; then
+ log_error "Service" "Supervisor is not running"
+ log_info "Service" "Use 'worker service start $service' to start the service"
+ return 1
+ fi
+
+ # Parse status output
+ local name status pid uptime
+ read -r name status pid uptime <<< "$status_output"
+
+ if [ "$format" = "json" ]; then
+ echo "{\"name\":\"$name\",\"status\":\"$status\",\"pid\":\"$pid\",\"uptime\":\"$uptime\"}"
+ else
+ # Use different symbols based on status
+ local symbol="β οΈ"
+ case "$status" in
+ RUNNING) symbol="β
";;
+ STOPPED) symbol="β";;
+ FATAL) symbol="π";;
+ *) symbol="β οΈ";;
+ esac
+ log_info "Service" "$symbol $name ($status) - PID: $pid, Uptime: $uptime"
+ fi
}
-# Function to follow logs for a specific service
+# Description: View and follow logs for a service
+# Options: --tail N, --follow, --error-only
+# Example: worker service logs my-app --tail 100 --follow
follow_logs() {
- local service_name=""
- local type="out"
- local lines=20
- local nostream=false
+ local service=$1
+ shift
+
+ # Check if service name is provided
+ if [ -z "$service" ]; then
+ log_error "Service" "Service name required"
+ log_info "Service" "Usage: worker service logs [options]"
+ return 1
+ fi
+
+ # Check if service exists in config
+ if ! yq e ".services[] | select(.name == \"$service\") | .name" "$SERVICES_CONFIG_FILE" 2>/dev/null | grep -q "$service"; then
+ log_error "Service" "Service '$service' not found in configuration"
+ return 1
+ fi
+
+ # Parse options
+ local tail_lines=100
+ local follow=false
+ local error_only=false
- # Parse arguments
while [[ $# -gt 0 ]]; do
case "$1" in
- --lines=*)
- lines="${1#*=}"
+ --tail)
+ tail_lines=$2
+ shift 2
;;
- --lines)
+ --follow)
+ follow=true
shift
- if [[ -n "$1" && "$1" =~ ^[0-9]+$ ]]; then
- lines="$1"
- fi
- ;;
- --nostream)
- nostream=true
;;
- err)
- type="err"
+ --error-only)
+ error_only=true
+ shift
;;
*)
- if [[ -z "$service_name" ]]; then
- service_name="$1"
- fi
+ log_error "Service" "Unknown option: $1"
+ return 1
;;
esac
- shift
done
- if [[ -z "$service_name" ]]; then
- log_error "Service" "Error: No service name provided."
- log_error "Service" "Usage: $0 logs [--lines N] [--nostream]"
- exit 1
- fi
-
- local logfile="/var/log/supervisor/$service_name"
- logfile="$logfile.$type.log"
-
- if [[ ! -f "$logfile" ]]; then
- log_error "Service" "Log file does not exist: $logfile"
- exit 1
+ # Build the tail command
+ local cmd="tail"
+ if [ "$follow" = true ]; then
+ cmd+=" -f"
fi
+ cmd+=" -n $tail_lines"
+
+ # Get log file paths
+ local log_dir="/var/log/supervisor"
+ local out_log="$log_dir/$service.out.log"
+ local err_log="$log_dir/$service.err.log"
- # Ensure lines is a valid number
- if ! [[ "$lines" =~ ^[0-9]+$ ]]; then
- log_error "Service" "Invalid line count: $lines"
- exit 1
+ # Check if log files exist
+ if [ ! -f "$out_log" ] && [ ! -f "$err_log" ]; then
+ log_error "Service" "No log files found for service '$service'"
+ return 1
fi
-
- if [ "$nostream" = true ]; then
- # Just show the last N lines without following
- tail -n "$lines" "$logfile"
+
+ # Show logs based on options
+ if [ "$error_only" = true ]; then
+ if [ -f "$err_log" ]; then
+ $cmd "$err_log"
+ else
+ log_warn "Service" "No error log file found for service '$service'"
+ fi
else
- # Show the last N lines and follow
- exec tail -n "$lines" -f "$logfile"
+ if [ -f "$out_log" ]; then
+ $cmd "$out_log"
+ fi
+ if [ -f "$err_log" ]; then
+ $cmd "$err_log"
+ fi
fi
}
-# Function to show supervisor configuration
-show_config() {
- if [ ! -f "/etc/supervisord.conf" ]; then
- log_error "Service" "Configuration file is not generated since no services are managed."
- exit 1
+# Description: Display current service configuration
+# Example: worker service config [--format json]
+service_show_config() {
+ local format="$1"
+
+ if [ ! -f "$SERVICES_CONFIG_FILE" ]; then
+ log_warn "Service" "No services configuration found"
+ log_info "Service" "Run 'worker service init' to create a new configuration"
+ return 1
+ fi
+
+ if [ "$format" = "json" ]; then
+ # Build JSON output with config file info and content
+ echo "{"
+ echo " \"config_file\": \"$SERVICES_CONFIG_FILE\","
+ echo -n " \"content\": "
+ yq e -o=json '.' "$SERVICES_CONFIG_FILE"
+ echo "}"
+ else
+ log_info "Service" "Current service configuration:"
+ log_info "Service" "Location: $SERVICES_CONFIG_FILE"
+ echo ""
+ cat "$SERVICES_CONFIG_FILE"
fi
- cat /etc/supervisord.conf
}
-# Function to start, stop, or restart a service
+# Description: Start, stop, or restart a service
+# Example: worker service restart my-app
manage_service() {
- if [ -z "$2" ]; then
- log_error "Service" "Error: No service name provided."
- log_warn "Service" "Usage: $0 $1 "
- exit 1
+ local action=$1
+ local service=$2
+
+ # Check if service name is provided
+ if [ -z "$service" ]; then
+ log_error "Service" "Service name required for $action command"
+ log_info "Service" "Usage: worker service $action "
+ return 1
+ fi
+
+ # Check if service exists in config
+ if ! yq e ".services[] | select(.name == \"$service\") | .name" "$SERVICES_CONFIG_FILE" 2>/dev/null | grep -q "$service"; then
+ log_error "Service" "Service '$service' not found in configuration"
+ return 1
+ fi
+
+ # Source process manager functions but prevent main from running
+ WORKER_SERVICE_MODE=1 source "${WORKER_LIB_DIR}/process_manager.sh" || return 1
+
+ # Check if supervisor is running and config is up to date
+ local supervisor_status
+ supervisor_status=$(supervisorctl status 2>&1)
+ local need_reload=false
+
+ if echo "$supervisor_status" | grep -q "unix:///var/run/supervisor.sock no such file"; then
+ log_warn "Service" "Supervisor is not running"
+ log_info "Service" "Starting supervisor..."
+ need_reload=true
fi
- if [ ! -e "/var/run/supervisor/supervisord.sock" ]; then
- log_error "Service" "Error: Service doesn't exist."
- exit 1
+ # Configure services
+ if ! configure_services; then
+ log_error "Service" "Failed to configure services"
+ return 1
fi
- supervisorctl "$1" "$2"
-}
\ No newline at end of file
+ if [ "$need_reload" = true ]; then
+ # Start supervisord directly
+ supervisord
+
+ # Wait for supervisor to be ready
+ local max_attempts=10
+ local attempt=1
+ while [ $attempt -le $max_attempts ]; do
+ if supervisorctl status >/dev/null 2>&1; then
+ break
+ fi
+ sleep 1
+ attempt=$((attempt + 1))
+ done
+
+ if [ $attempt -gt $max_attempts ]; then
+ log_error "Service" "Failed to start supervisor"
+ return 1
+ fi
+ else
+ # Reload config if supervisor is already running
+ log_info "Service" "Reloading supervisor configuration..."
+ supervisorctl reread
+ supervisorctl update
+ fi
+
+ # Show action being taken
+ case "$action" in
+ start)
+ log_info "Service" "π Starting service: $service"
+ ;;
+ stop)
+ log_info "Service" "π Stopping service: $service"
+ ;;
+ restart)
+ log_info "Service" "π Restarting service: $service"
+ ;;
+ esac
+
+ # Execute supervisorctl command and wait for result
+ local result
+ result=$(supervisorctl "$action" "$service" 2>&1)
+
+ # Check for common errors
+ if echo "$result" | grep -q "ERROR (no such process)"; then
+ log_error "Service" "Service '$service' not found in supervisor"
+ log_info "Service" "Run 'worker service config' to check your service configuration"
+ return 1
+ elif echo "$result" | grep -q "ERROR (already started)"; then
+ log_warn "Service" "Service '$service' is already running"
+ return 0
+ elif echo "$result" | grep -q "ERROR (not running)"; then
+ log_warn "Service" "Service '$service' is not running"
+ return 0
+ fi
+
+ # Show the result
+ echo "$result"
+
+ # For restart/start, wait for service to be running
+ if [ "$action" = "restart" ] || [ "$action" = "start" ]; then
+ local max_attempts=10
+ local attempt=1
+ while [ $attempt -le $max_attempts ]; do
+ if supervisorctl status "$service" | grep -q "RUNNING"; then
+ break
+ fi
+ sleep 1
+ attempt=$((attempt + 1))
+ done
+ fi
+
+ # Show current status after action
+ echo ""
+ check_status "$service"
+}
diff --git a/lib/env_handler.sh b/lib/env_handler.sh
new file mode 100644
index 00000000..28ef70db
--- /dev/null
+++ b/lib/env_handler.sh
@@ -0,0 +1,185 @@
+#!/bin/bash
+
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
+
+# Environment file location
+WORKER_ENV_FILE="/etc/worker/environment"
+
+# Generate environment file with regular variables
+generate_env_file() {
+ local config
+ config=$(load_and_parse_config)
+
+ if [ -z "$config" ]; then
+ log_error "Environment" "Failed to load configuration"
+ return 1
+ fi
+
+ # Extract and evaluate environment variables
+ {
+ # Extract environment variables
+ echo "$config" | yq eval '.config.env | to_entries | .[] | "export " + .key + "=\"" + .value + "\""' -
+ } > "$WORKER_ENV_FILE"
+
+}
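+
+# The generated file is a plain list of export statements, e.g. (illustrative):
+#   export APP_ENV="production"
+#   export REGION="us-east-1"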
+
+# Append resolved secrets to environment file
+append_resolved_secrets() {
+ local secrets_json="$1"
+ local has_failures=false
+
+ if [ -z "$secrets_json" ]; then
+ log_error "Environment" "No secrets provided"
+ return 1
+ fi
+
+ # Create a temporary file for resolved secrets
+ local temp_file
+ temp_file=$(mktemp)
+
+ {
+ echo ""
+ echo "# Resolved Secrets"
+ echo "# Generated on $(date)"
+ echo "# DO NOT EDIT THIS FILE DIRECTLY"
+ echo
+ } > "$temp_file"
+
+ # Process each secret and resolve it
+ while IFS= read -r secret; do
+ local name value
+ name=$(echo "$secret" | jq -r '.key')
+
+ # Create config JSON for resolve_secret_by_name
+ local config_json
+ config_json="{ \"config\": { \"secrets\": { \"$name\": $(echo "$secret" | jq '.value') } } }"
+
+ # Resolve the secret
+ if ! value=$(resolve_secret_by_name "$name" "$config_json") || [[ -z "$value" ]]; then
+ log_error "Environment" "Failed to resolve secret for $name"
+ has_failures=true
+ else
+ echo "export $name=\"$value\"" >> "$temp_file"
+ log_success "Environment" "Resolved secret for $name"
+ fi
+ done < <(echo "$secrets_json" | jq -c 'to_entries[]')
+
+ # If any secret failed to resolve, error out
+ if [[ "$has_failures" == "true" ]]; then
+ log_error "Environment" "Failed to resolve one or more secrets"
+ rm -f "$temp_file"
+ return 1
+ fi
+
+ # If we get here, all secrets resolved successfully
+ cat "$temp_file" >> "$WORKER_ENV_FILE"
+ log_success "Environment" "Added all resolved secrets to environment file"
+ rm -f "$temp_file"
+}
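+
+# Expected shape of the secrets_json argument (illustrative): a flat map of
+# variable name to provider URL, e.g. {"DB_PASSWORD":"azure/my-vault/db-password"},
+# with each value resolved through resolve_secret_by_name.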
+
+# Load environment variables and secrets
+load_environment() {
+ if [ -f "$WORKER_ENV_FILE" ]; then
+ # shellcheck source=/dev/null
+ source "$WORKER_ENV_FILE"
+ else
+ log_warn "Environment" "Environment file not found, generating..."
+ generate_env_file
+ # shellcheck source=/dev/null
+ source "$WORKER_ENV_FILE"
+ fi
+}
+
+# Format environment variables as JSON or text
+format_env_vars() {
+ local vars=$1
+ local format=${2:-text}
+
+ case $format in
+ json)
+ # Convert to JSON
+ local json="{"
+ while IFS= read -r line; do
+ if [[ $line =~ ^export[[:space:]]+([^=]+)=\"([^\"]*)\" ]]; then
+ if [ -n "$json" ] && [ "$json" != "{" ]; then
+ json="$json,"
+ fi
+ key=${BASH_REMATCH[1]}
+ value=${BASH_REMATCH[2]}
+ json="$json\"$key\":\"$value\""
+ fi
+ done <<< "$vars"
+ json="$json}"
+ echo "$json" | jq .
+ ;;
+ text)
+ echo "${vars#export }" | tr -d '\"'
+ ;;
+ *)
+ log_error "Environment" "Unknown format: $format"
+ return 1
+ ;;
+ esac
+}
+
+# Initialize environment
+init_environment() {
+ generate_env_file
+ generate_secrets_file
+ load_environment
+ load_secrets
+}
+
+# Update environment when config changes
+update_environment() {
+ generate_env_file
+ generate_secrets_file
+ load_environment
+ load_secrets
+}
+
+# Get environment variable value
+get_env_value() {
+ local var_name="$1"
+
+ if [ -z "$var_name" ]; then
+ log_error "Environment" "Variable name not provided"
+ return 1
+ fi
+
+ if [ -f "$WORKER_ENV_FILE" ]; then
+ grep "^export $var_name=" "$WORKER_ENV_FILE" | cut -d'=' -f2- | tr -d '"'
+ elif [ -f "$WORKER_SECRETS_FILE" ]; then
+ grep "^export $var_name=" "$WORKER_SECRETS_FILE" | cut -d'=' -f2- | tr -d '"'
+ else
+ log_error "Environment" "Neither environment nor secrets file exists"
+ return 1
+ fi
+}
+
+# List all environment variables
+list_env_vars() {
+ local show_secrets="$1"
+ local env_vars=""
+ local secret_vars=""
+
+ if [ -f "$WORKER_ENV_FILE" ]; then
+ env_vars=$(grep "^export" "$WORKER_ENV_FILE" | cut -d'=' -f1 | cut -d' ' -f2)
+ fi
+
+ if [ "$show_secrets" = "true" ] && [ -f "$WORKER_SECRETS_FILE" ]; then
+ secret_vars=$(grep "^export" "$WORKER_SECRETS_FILE" | cut -d'=' -f1 | cut -d' ' -f2)
+ fi
+
+ if [ -n "$env_vars" ]; then
+ echo "Environment Variables:"
+ echo "$env_vars"
+ fi
+
+ if [ -n "$secret_vars" ]; then
+ echo
+ echo "Secret Variables:"
+ echo "$secret_vars"
+ fi
+}
\ No newline at end of file
diff --git a/lib/environment.sh b/lib/environment.sh
index 2c436af3..02595dac 100644
--- a/lib/environment.sh
+++ b/lib/environment.sh
@@ -2,16 +2,16 @@
# Include necessary modules
# shellcheck disable=SC1091
-source /usr/local/lib/auth.sh
+source "${WORKER_LIB_DIR}/auth.sh"
# shellcheck disable=SC1091
-source /usr/local/lib/secrets.sh
+source "${WORKER_LIB_DIR}/secrets.sh"
# shellcheck disable=SC1091
-source /usr/local/lib/cleanup.sh
+source "${WORKER_LIB_DIR}/cleanup.sh"
# shellcheck disable=SC1091
-source /usr/local/lib/worker_config.sh
+source "${WORKER_LIB_DIR}/worker_config.sh"
# shellcheck disable=SC1091
-source /usr/local/lib/utils.sh
+source "${WORKER_LIB_DIR}/utils.sh"
# Main function to coordinate environment setup
configure_environment() {
diff --git a/lib/process_manager.sh b/lib/process_manager.sh
index 4c13fa28..96c17c33 100644
--- a/lib/process_manager.sh
+++ b/lib/process_manager.sh
@@ -1,19 +1,16 @@
#!/bin/bash
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
# Define paths
-DEFAULT_CONFIG_FILE="/usr/local/configs/worker/services.yaml"
-# Define the user-specific configuration path search
-# shellcheck disable=SC2227
-USER_CONFIG_PATH=$(find "$HOME" -name 'services.yaml' 2>/dev/null -print | head -n 1)
+USER_CONFIG_PATH="${HOME}/.config/worker/services.yaml"
+CONFIG_FILE="${USER_CONFIG_PATH}"
-# Use the first user-specific config found; if none, use the default
-CONFIG_FILE="${USER_CONFIG_PATH:-$DEFAULT_CONFIG_FILE}"
-COMMON_TEMPLATE_FILE="/usr/local/configs/supervisor/common.conf"
-PROGRAM_TEMPLATE_FILE="/usr/local/configs/supervisor/program.conf"
-FINAL_CONFIG="/usr/local/configs/supervisor/supervisord.conf"
+# Supervisor configuration paths
+COMMON_TEMPLATE_FILE="${WORKER_CONFIG_DIR}/supervisor/common.conf"
+PROGRAM_TEMPLATE_FILE="${WORKER_CONFIG_DIR}/supervisor/program.conf"
+FINAL_CONFIG="${WORKER_CONFIG_DIR}/supervisor/supervisord.conf"
# Set up signal handling
trap 'handle_supervisor_signals SIGTERM' SIGTERM
@@ -21,10 +18,18 @@ trap 'handle_supervisor_signals SIGINT' SIGINT
# Main execution
main() {
+ # Check if user config exists
+ if [[ ! -f "${USER_CONFIG_PATH}" ]]; then
+ log_info "No services configuration found at ${USER_CONFIG_PATH}."
+ log_info "Run 'worker service' for information about service configuration"
+ exit 0
+ fi
+
log_info "Process Manager" "Starting process manager..."
if ! configure_and_execute_services; then
log_error "Process Manager" "Failed to configure and start services"
+ log_info "Run 'worker service' for information about service configuration"
exit 1
fi
@@ -157,8 +162,34 @@ handle_supervisor_signals() {
start_supervisor() {
log_info "Starting supervisord..."
- # Start supervisord in non-daemon mode
- exec supervisord -n
+ # Check if supervisor is already running
+ if pgrep -f "supervisord" >/dev/null; then
+ # Reload configuration
+ if supervisorctl reread && supervisorctl update; then
+ log_info "Supervisor configuration reloaded"
+ return 0
+ fi
+ log_error "Failed to reload supervisor configuration"
+ return 1
+ fi
+
+ # Start supervisord in daemon mode
+ supervisord
+
+ # Wait for supervisor to be ready
+ local max_attempts=10
+ local attempt=1
+ while [ $attempt -le $max_attempts ]; do
+ if supervisorctl status >/dev/null 2>&1; then
+ log_info "Supervisor is ready"
+ return 0
+ fi
+ sleep 1
+ attempt=$((attempt + 1))
+ done
+
+ log_error "Failed to start supervisord"
+ return 1
}
# Function to check for service configurations
@@ -215,5 +246,7 @@ configure_and_execute_services() {
start_supervisor
}
-# Execute main function
-main
\ No newline at end of file
+# Only run main if not in service mode
+if [ -z "$WORKER_SERVICE_MODE" ]; then
+ main
+fi
\ No newline at end of file
diff --git a/lib/secrets.sh b/lib/secrets.sh
index 04153cb5..7b96aed7 100644
--- a/lib/secrets.sh
+++ b/lib/secrets.sh
@@ -1,15 +1,17 @@
#!/bin/bash
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
+# shellcheck source=${WORKER_LIB_DIR}/env_handler.sh disable=SC1091
+source "${WORKER_LIB_DIR}/env_handler.sh"
# Dynamically source the required provider-specific modules
source_provider_module() {
local provider="$1"
- local module_path="/usr/local/lib/secrets/${provider}.sh"
+ local module_path="${WORKER_LIB_DIR}/secrets/${provider}.sh"
if [[ -f "$module_path" ]]; then
- # shellcheck source=/usr/local/lib/secrets/${provider}.sh disable=SC1091
+ # shellcheck source=${WORKER_LIB_DIR}/secrets/${provider}.sh disable=SC1091
source "$module_path"
log_info "Loaded module for provider: $provider"
else
@@ -35,84 +37,14 @@ fetch_secrets() {
return 1
fi
- # Create a temporary file to store environment variables
- local secrets_env_file
- secrets_env_file=$(mktemp /tmp/secret_vars.XXXXXX)
- echo "# Secrets environment variables" > "$secrets_env_file"
-
- # Process each secret in the JSON object
- echo "$secrets_json" | jq -c 'to_entries[]' | while IFS= read -r secret; do
- local name url value provider secret_name key_vault_name
- name=$(echo "$secret" | jq -r '.key')
- url=$(resolve_env_vars "$(echo "$secret" | jq -r '.value')")
-
- # Check if the secret has a valid name and URL
- if [[ -z "$name" || -z "$url" ]]; then
- log_error "Secrets" "Secret name or URL is missing or empty."
- continue
- fi
-
- # Extract provider from the URL (first part before '/')
- provider=$(echo "$url" | cut -d '/' -f 1)
-
- # Handle secrets based on the provider
- case "$provider" in
- gcp)
- key_vault_name=$(echo "$url" | cut -d '/' -f 2)
- secret_name=$(echo "$url" | cut -d '/' -f 3)
- if [[ -z "$secret_name" ]]; then
- log_error "Secrets" "Invalid GCP secret name format: $url"
- continue
- fi
- ;;
- azure|bitwarden)
- key_vault_name=$(echo "$url" | cut -d '/' -f 2)
- secret_name=$(echo "$url" | cut -d '/' -f 3)
- if [[ -z "$key_vault_name" || -z "$secret_name" ]]; then
- log_error "Secrets" "Invalid secret format for $provider: $url"
- continue
- fi
- ;;
- *)
- log_warn "Unsupported provider: $provider"
- continue
- ;;
- esac
-
- # Source the provider module dynamically
- source_provider_module "$provider"
-
- # Determine the resolve function for the provider
- local resolve_function="resolve_${provider}_secret"
- if command -v "$resolve_function" > /dev/null; then
- value=$("$resolve_function" "$key_vault_name" "$secret_name")
- else
- log_warn "No resolve function found for provider: $provider"
- continue
- fi
-
- # Export the secret as an environment variable
- if [[ -n "$value" ]]; then
- echo "export $name=\"$value\"" >> "$secrets_env_file"
- log_success "Secrets" "Resolved secret for $name from $provider."
- else
- log_error "Secrets" "Failed to resolve secret for $name from $provider."
- fi
- done
-
- # Source the environment file if it exists
- if [[ -s "$secrets_env_file" ]]; then
- set -a
- # shellcheck disable=SC1090
- source "$secrets_env_file"
- set +a
- log_info "Secrets environment variables sourced successfully."
- else
- log_error "Secrets" "No secrets were written to the environment file."
+ # Resolve secrets and append them to environment file
+ if ! append_resolved_secrets "$secrets_json"; then
+ log_error "Secrets" "Failed to resolve and append secrets"
return 1
fi
- clean_up_files "$secrets_env_file"
+ # Source the environment file to update current session
+ load_environment
}
# Clean up temporary files
@@ -127,5 +59,70 @@ clean_up_files() {
done
}
+# Resolve a secret value by name from the configuration
+resolve_secret_by_name() {
+ local secret_name="$1"
+ local config_json="$2"
+
+ if [[ -z "$secret_name" ]]; then
+ log_error "Secrets" "Secret name is required"
+ return 1
+ fi
+
+ if [[ -z "$config_json" ]]; then
+ log_error "Secrets" "Configuration is required"
+ return 1
+ fi
+
+ # Extract secrets section
+ local secrets
+ secrets=$(echo "$config_json" | jq -r '.config.secrets // empty')
+ if [[ -z "$secrets" || "$secrets" == "null" ]]; then
+ log_error "Secrets" "No secrets found in configuration"
+ return 1
+ fi
+
+ # Find the secret URL
+ local secret_url
+ secret_url=$(echo "$secrets" | jq -r ".[\"$secret_name\"] // empty")
+ if [[ -z "$secret_url" ]]; then
+ log_error "Secrets" "Secret '$secret_name' not found in configuration"
+ return 1
+ fi
+
+ # Resolve any environment variables in the URL
+ secret_url=$(resolve_env_vars "$secret_url")
+ if [[ -z "$secret_url" ]]; then
+ log_error "Secrets" "Failed to resolve environment variables in URL"
+ return 1
+ fi
+
+ # Extract provider and parts from URL
+ local provider key_vault_name secret_value
+ provider=$(echo "$secret_url" | cut -d '/' -f 1)
+ key_vault_name=$(echo "$secret_url" | cut -d '/' -f 2)
+ secret_value=$(echo "$secret_url" | cut -d '/' -f 3)
+
+ # Source the provider module
+ source_provider_module "$provider"
+
+ # Resolve the secret
+ local resolve_function="resolve_${provider}_secret"
+ if command -v "$resolve_function" > /dev/null; then
+ local value
+ value=$("$resolve_function" "$key_vault_name" "$secret_value")
+ if [[ -n "$value" ]]; then
+ echo "$value"
+ return 0
+ else
+ log_error "Secrets" "Failed to resolve secret value"
+ return 1
+ fi
+ else
+ log_error "Secrets" "No resolver found for provider: $provider"
+ return 1
+ fi
+}
+
# Example usage:
# fetch_secrets '{"TEST": "gcp/new_relic_api_key"}'
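+#
+# Illustrative usage of resolve_secret_by_name (a sketch, not part of the CLI):
+# the second argument is assumed to be the merged worker configuration as JSON,
+# e.g. the output of load_and_parse_config:
+#   resolve_secret_by_name "TEST_ENV" "$config_json"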
diff --git a/lib/secrets/aws.sh b/lib/secrets/aws.sh
index 3a34d1d7..e8899621 100644
--- a/lib/secrets/aws.sh
+++ b/lib/secrets/aws.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
# Function to resolve AWS secret
resolve_aws_secret() {
diff --git a/lib/secrets/azure.sh b/lib/secrets/azure.sh
index 4289d357..6359c13e 100644
--- a/lib/secrets/azure.sh
+++ b/lib/secrets/azure.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
# Function to resolve Azure secret
resolve_azure_secret() {
diff --git a/lib/secrets/bitwarden.sh b/lib/secrets/bitwarden.sh
index 6075aaf0..ae578b84 100644
--- a/lib/secrets/bitwarden.sh
+++ b/lib/secrets/bitwarden.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
# Function to resolve Bitwarden secret
resolve_bitwarden_secret() {
diff --git a/lib/secrets/gcp.sh b/lib/secrets/gcp.sh
index 2edff3d2..cf599b12 100644
--- a/lib/secrets/gcp.sh
+++ b/lib/secrets/gcp.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
# Function to resolve GCP secret
resolve_gcp_secret() {
diff --git a/lib/worker_config.sh b/lib/worker_config.sh
index fdfed67c..274d96c4 100644
--- a/lib/worker_config.sh
+++ b/lib/worker_config.sh
@@ -1,14 +1,14 @@
#!/bin/bash
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
+# shellcheck source=${WORKER_LIB_DIR}/utils.sh disable=SC1091
+source "${WORKER_LIB_DIR}/utils.sh"
+# shellcheck source=${WORKER_LIB_DIR}/env_handler.sh disable=SC1091
+source "${WORKER_LIB_DIR}/env_handler.sh"
# Paths for configurations
-BUILT_IN_CONFIG="/usr/local/configs/worker/default.yaml"
-# Dynamically find user configuration in any subfolder of $HOME
-# shellcheck disable=SC2227
-USER_CONFIG=$(find "$HOME" -name 'worker.yaml' 2>/dev/null -print | head -n 1)
-MERGED_CONFIG="/usr/local/configs/worker/merged_worker.yaml"
+BUILT_IN_CONFIG="${WORKER_CONFIG_DIR}/worker.yaml" # Built-in default config
+USER_CONFIG="${HOME}/.config/worker/worker.yaml" # Optional user config
+MERGED_CONFIG="${WORKER_CONFIG_DIR}/worker.merged.yaml" # Result of merging both configs
# Ensure `yq` is available
if ! command -v yq >/dev/null 2>&1; then
@@ -32,23 +32,33 @@ merge_worker_configs() {
touch "$MERGED_CONFIG" || { log_error "Worker configuration" "Failed to create merged configuration file at $MERGED_CONFIG"; return 1; }
fi
- # Ensure built-in config exists
- ensure_config_exists "$BUILT_IN_CONFIG" || return 1
+ # Ensure built-in config exists and has actors section
+ if ! ensure_config_exists "$BUILT_IN_CONFIG"; then
+ log_error "Worker configuration" "Built-in configuration not found"
+ return 1
+ fi
- # If a user-provided configuration exists (and path is not empty), merge it
- if [[ -f "$USER_CONFIG" && -n "$USER_CONFIG" ]]; then
- log_success "Worker configuration" "User configuration detected at $USER_CONFIG"
+ # First copy built-in config (with actors) to merged config
+ if ! cp "$BUILT_IN_CONFIG" "$MERGED_CONFIG"; then
+ log_error "Worker configuration" "Failed to copy built-in configuration"
+ return 1
+ fi
- if ! yq eval-all 'select(fileIndex == 0) * select(fileIndex == 1)' "$BUILT_IN_CONFIG" "$USER_CONFIG" > "$MERGED_CONFIG"; then
- log_error "Worker configuration" "Failed to merge configurations. yq returned an error."
+ # If user config exists, merge env and secrets sections
+ if [[ -f "$USER_CONFIG" && -s "$USER_CONFIG" ]]; then
+ # Use yq to merge configs, preserving actors from built-in
+ if ! yq eval-all 'select(fileIndex == 0) * select(fileIndex == 1)' "$MERGED_CONFIG" "$USER_CONFIG" > "${MERGED_CONFIG}.tmp"; then
+ log_error "Worker configuration" "Failed to merge configurations"
return 1
fi
- else
- log_info "No worker configuration provided."
- # Copy the built-in configuration to the merged configuration
- if ! cp "$BUILT_IN_CONFIG" "$MERGED_CONFIG"; then
- log_error "Worker configuration" "Failed to copy built-in configuration to merged configuration."
+ # Check if merge was successful
+ if [ -s "${MERGED_CONFIG}.tmp" ]; then
+ mv "${MERGED_CONFIG}.tmp" "$MERGED_CONFIG"
+ return 0
+ else
+ rm -f "${MERGED_CONFIG}.tmp"
+ log_error "Worker configuration" "Failed to merge configurations - empty result"
return 1
fi
fi
@@ -72,20 +82,23 @@ load_and_parse_config() {
export_variables_from_config() {
local config_json="$1"
- # Extract the `variables` section
- local variables
- variables=$(echo "$config_json" | jq -r '.config.env // empty')
- if [[ -z "$variables" || "$variables" == "null" ]]; then
- log_info "No variables found in the configuration."
+ # Extract only environment variables
+ local env_vars
+ env_vars=$(echo "$config_json" | jq -r '.config.env // empty')
+
+ if [[ -z "$env_vars" ]]; then
+ log_info "No environment variables found in the configuration."
return 0
- else
- log_success "Worker configuration" "Found variables in the configuration. Exporting..."
fi
- # Iterate over variables and export them into the main shell
- while IFS="=" read -r key value; do
- eval "export $key=\"$value\""
- done < <(echo "$variables" | jq -r 'to_entries[] | "\(.key)=\(.value)"')
+ # Generate environment file
+ if [[ -n "$env_vars" && "$env_vars" != "null" ]]; then
+ log_success "Worker configuration" "Found environment variables in the configuration."
+ generate_env_file
+ fi
+
+ # Load environment variables
+ load_environment
}
# Function to extract a specific section from the JSON configuration
diff --git a/src/examples/simple-config/.config/worker/worker.yaml b/src/examples/simple-config/.config/worker/worker.yaml
new file mode 100644
index 00000000..601c9ef7
--- /dev/null
+++ b/src/examples/simple-config/.config/worker/worker.yaml
@@ -0,0 +1,9 @@
+---
+kind: workerConfig
+version: udx.io/worker-v1/config
+config:
+ env:
+ CREATED: "2025-02-21T07:57:59Z"
+ secrets:
+ TEST_ENV: "azure/kv-udx-worker-tooling/test-secret"
+ ANOTHER_TEST_ENV: "azure/kv-udx-worker-tooling/test-secret"
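+  # Secret values are references of the form <provider>/<vault>/<secret-name>;
+  # they are resolved by lib/secrets.sh through the matching provider module
+  # (e.g. azure, gcp, bitwarden) and exported as environment variables.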
diff --git a/src/tests/configs/services.yaml b/src/examples/simple-service/.config/worker/services.yaml
similarity index 53%
rename from src/tests/configs/services.yaml
rename to src/examples/simple-service/.config/worker/services.yaml
index c197b511..11cbf511 100644
--- a/src/tests/configs/services.yaml
+++ b/src/examples/simple-service/.config/worker/services.yaml
@@ -1,7 +1,9 @@
---
kind: workerService
version: udx.io/worker-v1/service
+
services:
- - name: "test_service"
- command: "tail -f /dev/null"
+ - name: "index"
+ command: "/home/udx/index.sh"
autostart: true
+ autorestart: true
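+
+# Services are run under supervisord; autostart and autorestart correspond to
+# the supervisor program options of the same name, and command should point to
+# an executable path that exists inside the container.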
diff --git a/src/examples/simple-service/index.sh b/src/examples/simple-service/index.sh
new file mode 100755
index 00000000..42d92659
--- /dev/null
+++ b/src/examples/simple-service/index.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+# Simple service that prints a message every 5 seconds
+echo "Starting simple service..."
+
+# Trap SIGTERM for graceful shutdown
+trap 'echo "Received shutdown signal, exiting..."; exit 0' SIGTERM
+
+# Main loop
+while true; do
+ echo "[$(date '+%Y-%m-%d %H:%M:%S')] Simple service is running..."
+ sleep 5
+done
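+
+# This script is the command for the "index" service declared in
+# .config/worker/services.yaml above; supervisord's default stop signal is
+# SIGTERM, which the trap above turns into a clean exit.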
diff --git a/src/scripts/main.sh b/src/scripts/main.sh
deleted file mode 100644
index d317945d..00000000
--- a/src/scripts/main.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-# Script to run as a supervisor service in a loop
-while true; do
- echo "Service is running at $(date)"
- sleep 5
-done
\ No newline at end of file
diff --git a/src/tests/configs/worker.yaml b/src/tests/configs/worker.yaml
deleted file mode 100644
index 1f93e288..00000000
--- a/src/tests/configs/worker.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-kind: workerConfig
-version: udx.io/worker-v1/config
-config:
- env:
- AZURE_CLIENT_ID: "12345678-1234-1234-1234-1234567890ab"
- AZURE_TENANT_ID: "abcdef12-3456-7890-abcd-ef1234567890"
- AZURE_SUBSCRIPTION_ID: "1234abcd-5678-90ef-abcd-12345678abcd"
- AZURE_RESOURCE_GROUP: "rg-example"
- APIM_SERVICE_NAME: "example-apim"
- ACR_REPO_NAME: "exampleacr"
- STORAGE_ACCOUNT_NAME: "examplestorage"
- KEY_VAULT_NAME: "examplekv"
- MANAGED_IDENTITY_NAME: "exampleidentity"
-
- secrets:
- TEST_SECRET: "azure/kv-udx-worker-tooling/test-secret"
diff --git a/src/tests/main.sh b/src/tests/main.sh
index c207b879..88459e5a 100755
--- a/src/tests/main.sh
+++ b/src/tests/main.sh
@@ -1,20 +1,83 @@
#!/bin/bash
-# Source utils.sh for logging functions
-# shellcheck disable=SC1091
-source /usr/local/lib/utils.sh
+# Main test entrypoint that runs inside the container
-log_info "Main" "Running all test suites"
+# Source test helpers
+# shellcheck source=./test_helpers.sh disable=SC1091
+source "/home/udx/tests/test_helpers.sh"
-# Find and execute all test scripts in the tasks directory
-for test_script in ./tasks/*.sh; do
- log_info "Running $(basename "$test_script")..."
- if bash "$test_script"; then
- log_success "$(basename "$test_script")" "Test completed successfully"
+# Exit on any error
+set -e
+
+# Initialize test environment
+print_header "Setting up test environment"
+
+# Setup test config and examples
+CONFIG_DIR=/home/udx/.config/worker
+
+# Create necessary directories
+mkdir -p "$CONFIG_DIR"
+
+# Config files are already mounted at the right locations by the Makefile
+
+# Export test environment variables
+export TEST_CONFIG_DIR=/home/udx/.config/worker
+export TEST_CONFIG_FILE="$TEST_CONFIG_DIR/worker.yaml"
+
+# Counter for test results
+TOTAL=0
+PASSED=0
+FAILED=0
+
+# Directory containing test files
+TEST_DIR="/home/udx/tests"
+cd "$TEST_DIR"
+
+# Don't exit on test failures
+set +e
+
+# Run test modules in numeric order
+for test_file in modules/[0-9]*.sh; do
+ # Skip if no files found
+ [ -e "$test_file" ] || continue
+
+ # Extract just the filename
+ test_name=$(basename "$test_file")
+
+ print_header "Running $test_name"
+ TOTAL=$((TOTAL + 1))
+
+ # Run test and capture all output
+ TEST_OUTPUT=$($test_file 2>&1)
+ TEST_RESULT=$?
+
+ if [ $TEST_RESULT -eq 0 ]; then
+ print_success "$test_name passed"
+ PASSED=$((PASSED + 1))
+ # Show test output even on success
+ echo "$TEST_OUTPUT"
else
- log_error "$(basename "$test_script")" "Test failed"
- exit 1
+ print_error "$test_name failed (exit code: $TEST_RESULT)"
+ print_error "Test output:"
+ echo "$TEST_OUTPUT"
+ FAILED=$((FAILED + 1))
fi
+
+ echo # Add blank line between tests
done
-log_success "Main" "All test suites completed successfully"
\ No newline at end of file
+
+# Print summary
+print_header "Test Summary"
+if [ "$FAILED" -eq 0 ]; then
+ print_success "All $TOTAL tests passed successfully"
+else
+ print_error "$FAILED of $TOTAL tests failed"
+ printf "${GREEN}Passed: %d${NC}\n" "$PASSED"
+ printf "${RED}Failed: %d${NC}\n" "$FAILED"
+fi
+
+# Exit with failure if any tests failed
+[ "$FAILED" -eq 0 ] || exit 1
diff --git a/src/tests/modules/10_config.sh b/src/tests/modules/10_config.sh
new file mode 100755
index 00000000..cb717822
--- /dev/null
+++ b/src/tests/modules/10_config.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# Source test helpers
+# shellcheck source=../test_helpers.sh disable=SC1091
+source "/home/udx/tests/test_helpers.sh"
+
+# Test configuration commands
+print_header "Configuration Tests"
+
+# Test config show
+print_info "Testing: config show"
+CONFIG_OUTPUT=$(worker config show)
+if ! echo "$CONFIG_OUTPUT" | grep -q "kind: workerConfig"; then
+ print_error "config show should display configuration"
+ exit 1
+fi
+
+# Test config show with format json
+print_info "Testing: config show --format json"
+CONFIG_JSON=$(worker config show --format json)
+if ! echo "$CONFIG_JSON" | jq -e '.config.env' > /dev/null; then
+ print_error "config show json format should include env section"
+ exit 1
+fi
+
+# Test config locations
+print_info "Testing: config locations"
+if ! worker config locations | grep -q "/home/udx/.config/worker"; then
+ print_error "config locations should show config paths"
+ exit 1
+fi
+
+# All tests passed
+print_success "All configuration tests passed"
diff --git a/src/tests/modules/20_env.sh b/src/tests/modules/20_env.sh
new file mode 100755
index 00000000..31ec27f6
--- /dev/null
+++ b/src/tests/modules/20_env.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Source test helpers
+# shellcheck source=../test_helpers.sh disable=SC1091
+source "/home/udx/tests/test_helpers.sh"
+
+# Test environment commands
+print_header "Environment Tests"
+
+# Get environment variables from config
+print_info "Getting environment variables from config"
+CONFIG_ENV=$(worker config show --format json | jq -r '.config.env | keys[]')
+if [ -z "$CONFIG_ENV" ]; then
+ print_error "No environment variables found in config"
+ exit 1
+fi
+
+# Test environment show
+print_info "Testing: env show"
+ENV_OUTPUT=$(worker env show)
+for var in $CONFIG_ENV; do
+ if ! echo "$ENV_OUTPUT" | grep -q "$var="; then
+ print_error "env show missing variable: $var"
+ exit 1
+ fi
+done
+
+# Test environment set/get
+print_info "Testing: env set/get"
+worker env set TEST_VAR "test value"
+if ! worker env show | grep -q "TEST_VAR=test value"; then
+ print_error "env set/get not working"
+ exit 1
+fi
+
+# Test environment JSON output
+print_info "Testing: env show --format json"
+JSON_OUTPUT=$(worker env show --format json)
+for var in $CONFIG_ENV; do
+ if ! echo "$JSON_OUTPUT" | jq -e --arg var "$var" 'has($var)' > /dev/null; then
+ print_error "JSON output missing variable: $var"
+ exit 1
+ fi
+done
+
+# All tests passed
+print_success "All environment tests passed"
diff --git a/src/tests/modules/30_auth.sh b/src/tests/modules/30_auth.sh
new file mode 100755
index 00000000..ef011cba
--- /dev/null
+++ b/src/tests/modules/30_auth.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+# Source test helpers
+# shellcheck source=../test_helpers.sh disable=SC1091
+source "/home/udx/tests/test_helpers.sh"
+
+# Test authentication commands
+print_header "Authentication Tests"
+
+# Test auth status
+print_info "Testing: auth status"
+
+# Capture both stdout and stderr
+AUTH_OUTPUT=$(worker auth status 2>&1)
+
+# Test that we got some output
+if [ -z "$AUTH_OUTPUT" ]; then
+ print_error "auth status produced no output"
+ exit 1
+fi
+
+# Test that output contains provider status
+if ! printf "%s" "$AUTH_OUTPUT" | grep -q "Auth:"; then
+ print_error "auth status should show provider status"
+ exit 1
+fi
+
+# Test auth status json format
+print_info "Testing: auth status --format json"
+AUTH_JSON=$(worker auth status --format json)
+if ! echo "$AUTH_JSON" | jq -e '.[] | select(.provider == "aws") | .status' > /dev/null; then
+ print_error "auth status json should include provider status"
+ exit 1
+fi
+
+# All tests passed
+print_success "All authentication tests passed"
diff --git a/src/tests/modules/40_service.sh b/src/tests/modules/40_service.sh
new file mode 100755
index 00000000..e11faf05
--- /dev/null
+++ b/src/tests/modules/40_service.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# Source test helpers
+# shellcheck source=../test_helpers.sh disable=SC1091
+source "/home/udx/tests/test_helpers.sh"
+
+# Test service commands
+print_header "Service Tests"
+
+# Test service list
+print_info "Testing: service list"
+SERVICE_OUTPUT=$(worker service list 2>&1)
+
+# Check that we get some output
+if [ -z "$SERVICE_OUTPUT" ]; then
+ print_error "service list produced no output"
+ exit 1
+fi
+
+# Test service list json format
+print_info "Testing: service list --format json"
+LIST_JSON=$(worker service list --format json 2>&1)
+if ! echo "$LIST_JSON" | jq -e '.services' > /dev/null; then
+ print_error "service list json should include services array"
+ exit 1
+fi
+
+# Test service config
+print_info "Testing: service config"
+CONFIG_OUTPUT=$(worker service config 2>&1)
+
+# Check that we get some output
+if [ -z "$CONFIG_OUTPUT" ]; then
+ print_error "service config produced no output"
+ exit 1
+fi
+if echo "$STATUS_OUTPUT" | grep -q "Simple service is running"; then
+ print_error "service should be stopped"
+ exit 1
+fi
+
+# All tests passed
+print_success "All service tests passed"
diff --git a/src/tests/modules/50_sbom.sh b/src/tests/modules/50_sbom.sh
new file mode 100755
index 00000000..bf4c141b
--- /dev/null
+++ b/src/tests/modules/50_sbom.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+# Source test helpers
+# shellcheck source=../test_helpers.sh disable=SC1091
+source "/home/udx/tests/test_helpers.sh"
+
+# Test SBOM commands
+print_header "SBOM Tests"
+
+# Test SBOM generation
+print_info "Testing: sbom generate"
+if ! worker sbom generate > /tmp/test-sbom.json; then
+ print_error "sbom generate should create SBOM file"
+ exit 1
+fi
+
+# Test SBOM verify
+print_info "Testing: sbom verify"
+if ! worker sbom verify; then
+ print_error "sbom verify should check package integrity"
+ exit 1
+fi
+
+# Test SBOM format options
+print_info "Testing: sbom generate with format"
+if ! worker sbom generate --format json > /tmp/test-sbom.json; then
+ print_error "sbom generate with json format failed"
+ exit 1
+fi
+
+# Test SBOM type options
+print_info "Testing: sbom generate with type"
+for type in system python all; do
+ if ! worker sbom generate --type "$type" > "/tmp/test-sbom-$type.json"; then
+ print_error "sbom generate for type $type failed"
+ exit 1
+ fi
+ rm -f "/tmp/test-sbom-$type.json"
+done
+
+# Cleanup
+rm -f /tmp/test-sbom.json
+
+# All tests passed
+print_success "All SBOM tests passed"
diff --git a/src/tests/modules/60_health.sh b/src/tests/modules/60_health.sh
new file mode 100755
index 00000000..2ba2e12d
--- /dev/null
+++ b/src/tests/modules/60_health.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# Source test helpers
+# shellcheck source=../test_helpers.sh disable=SC1091
+source "/home/udx/tests/test_helpers.sh"
+
+# Test health check commands
+print_header "Health Check Tests"
+
+# Test health status
+print_info "Testing: health status"
+HEALTH_OUTPUT=$(worker health status 2>&1)
+
+# Check for health check running message
+if ! echo "$HEALTH_OUTPUT" | grep -q "Running health check"; then
+ print_error "health status should show running message"
+ exit 1
+fi
+
+# Check for successful completion
+if ! echo "$HEALTH_OUTPUT" | grep -q "All health checks passed"; then
+ print_error "health status should show successful completion"
+ exit 1
+fi
+
+# Test health status json format
+print_info "Testing: health status --format json"
+HEALTH_JSON=$(worker health status --format json 2>&1 | sed -n '/^{/,/^}/p')
+
+# Verify JSON structure
+if ! echo "$HEALTH_JSON" | jq -e '.status' > /dev/null; then
+ print_error "health status json should include status field"
+ exit 1
+fi
+
+# Verify health metrics are present
+if ! echo "$HEALTH_JSON" | jq -e '.disk.usage_percent' > /dev/null; then
+ print_error "health status json should include disk usage"
+ exit 1
+fi
+
+# All tests passed
+print_success "All health check tests passed"
diff --git a/src/tests/tasks/10_dependencies.sh b/src/tests/tasks/10_dependencies.sh
deleted file mode 100755
index 3bcc99dc..00000000
--- a/src/tests/tasks/10_dependencies.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
-
-log_info "Dependencies: Validating required dependencies and their versions"
-
-# Function to test if a command is available and show its version
-check_command() {
- local cmd_path
- cmd_path=$(command -v "$1")
- if [ -x "$cmd_path" ]; then
- log_success "$1" "$1 is installed at $cmd_path"
- local version
- version=$($1 --version 2>&1 | head -n 1)
- log_success "$1" "Version: $version"
- else
- log_error "$1" "Not installed or not in PATH"
- exit 1
- fi
-}
-
-# Verify gcloud, aws, az, bw, yq, and jq commands are available
-log_info "Checking cloud provider CLIs..."
-check_command gcloud
-check_command aws
-check_command az
-
-log_info "Checking utility tools..."
-check_command bw
-check_command yq
-check_command jq
-
-log_success "Dependencies" "All dependencies validated successfully"
\ No newline at end of file
diff --git a/src/tests/tasks/20_config.sh b/src/tests/tasks/20_config.sh
deleted file mode 100644
index 8b80d78d..00000000
--- a/src/tests/tasks/20_config.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
-
-log_info "Starting validation of config..."
-
-# Path to the merged configuration file
-MERGED_CONFIG="/usr/local/configs/worker/merged_worker.yaml"
-
-# Test configure_environment function
-test_configure_environment() {
- log_info "Running test: configure_environment"
-
- # Load the merged configuration
- merged_config=$(cat "$MERGED_CONFIG")
-
- # Extract environment variables from the merged configuration
- env_vars=$(echo "$merged_config" | yq eval '.config.env' -)
-
- # Verify environment variables
- for key in $(echo "$env_vars" | yq eval 'keys' -); do
- # Check if key is empty or not set
- if [[ -z "$key" || "$key" == "null" || "$key" == "-" ]]; then
- continue
- fi
-
- value=$(echo "$env_vars" | yq eval ".${key}" -)
- actual_value="${!key}"
-
- if [[ "$actual_value" != "$value" ]]; then
- log_error "Config" "$key is not set correctly. Expected: $value, Got: $actual_value"
- return 1
- else
- log_success "Config" "$key is set correctly"
- fi
- done
-
- log_success "Config" "configure_environment test passed"
- return 0
-}
-
-# Run the test
-if test_configure_environment; then
- log_success "Config" "All config tests passed successfully"
-else
- log_error "Config" "Config tests failed"
- exit 1
-fi
\ No newline at end of file
diff --git a/src/tests/tasks/30_secrets.sh b/src/tests/tasks/30_secrets.sh
deleted file mode 100644
index e0deae41..00000000
--- a/src/tests/tasks/30_secrets.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash
-
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
-
-log_info "Starting validation of secrets fetching..."
-
-# Path to the merged configuration file
-MERGED_CONFIG="/usr/local/configs/worker/merged_worker.yaml"
-
-# Test verify_secrets function
-test_verify_secrets() {
- log_info "Running test: verify_secrets"
-
- # Load the merged configuration
- merged_config=$(cat "$MERGED_CONFIG")
-
- # Extract secrets from the merged configuration
- secrets=$(echo "$merged_config" | yq eval '.config.secrets' -)
-
- # Check if secrets is empty or null
- if [[ -z "$secrets" || "$secrets" == "null" ]]; then
- log_info "No secrets found in the configuration."
- return 0
- fi
-
- # Verify secrets as environment variables
- for secret_key in $(echo "$secrets" | yq eval 'keys' -); do
-
- # Check if the key is valid
- if [[ -z "$secret_key" || "$secret_key" == "-" ]]; then
- continue
- fi
-
- expected_reference=$(echo "$secrets" | yq eval ".${secret_key}" -)
- actual_value="${!secret_key}"
-
- # Verify that the environment variable is set and different from the reference
- if [[ -z "$actual_value" || "$actual_value" == "$expected_reference" ]]; then
- log_error "Secrets" "$secret_key is not replaced correctly. Got: $actual_value"
- return 1
- else
- log_success "Secrets" "$secret_key is resolved correctly"
- fi
- done
-
- log_success "Secrets" "verify_secrets test passed"
- return 0
-}
-
-# Run the test
-if test_verify_secrets; then
- log_success "Secrets" "All secrets fetching tests passed successfully"
-else
- log_error "Secrets" "Secrets fetching tests failed"
- exit 1
-fi
\ No newline at end of file
diff --git a/src/tests/tasks/40_test_service.sh b/src/tests/tasks/40_test_service.sh
deleted file mode 100644
index e06b638b..00000000
--- a/src/tests/tasks/40_test_service.sh
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/bin/bash
-
-# shellcheck source=/usr/local/lib/utils.sh disable=SC1091
-source /usr/local/lib/utils.sh
-
-log_info "Test" "Starting service tests..."
-
-# Test 1: Check service configuration
-log_info "Test" "Testing service configuration..."
-if ! worker service config | grep -q "test_service"; then
- log_error "Test" "Service configuration test failed: test_service not found in config"
- exit 1
-fi
-log_success "Test" "Service configuration test passed"
-
-# Test 2: Check service list and wait for it to be running
-log_info "Test" "Testing service list and waiting for service to be ready..."
-
-# Wait for up to 30 seconds for the service to be fully running
-for i in {1..30}; do
- status_output=$(worker service list 2>&1)
- if echo "$status_output" | grep "test_service" | grep -q "RUNNING"; then
- log_success "Test" "Service is now running"
- break
- fi
- if [ "$i" -eq 30 ]; then
- log_error "Test" "Timeout waiting for service to start"
- log_error "Test" "Current status: $status_output"
- exit 1
- fi
- sleep 1
-done
-log_success "Test" "Service list test passed"
-
-# Test 3: Check service status
-log_info "Test" "Testing service status..."
-status_output=$(worker service status test_service)
-if ! echo "$status_output" | grep -q "RUNNING"; then
- log_error "Test" "Service status test failed: test_service not running"
- log_error "Test" "Current status: $status_output"
- exit 1
-fi
-log_success "Test" "Service status test passed"
-
-# Test 4: Test service stop/start
-log_info "Test" "Testing service stop..."
-worker service stop test_service
-sleep 2
-if worker service status test_service | grep -q "RUNNING"; then
- log_error "Test" "Service stop test failed: test_service still running"
- exit 1
-fi
-log_success "Test" "Service stop test passed"
-
-log_info "Test" "Testing service start..."
-worker service start test_service
-sleep 2
-if ! worker service status test_service | grep -q "RUNNING"; then
- log_error "Test" "Service start test failed: test_service not running"
- exit 1
-fi
-log_success "Test" "Service start test passed"
-
-# Test 5: Check service logs
-log_info "Test" "Testing service logs..."
-
-# Wait a bit for the service to initialize after restart
-sleep 2
-
-# First check if log file exists
-log_file="/var/log/supervisor/test_service.out.log"
-if [ ! -f "$log_file" ]; then
- log_error "Test" "Service logs test failed: log file not found at $log_file"
- exit 1
-fi
-
-# Try a simple tail command first
-if ! tail -n 1 "$log_file" > /dev/null 2>&1; then
- log_error "Test" "Service logs test failed: cannot read log file"
- exit 1
-fi
-
-# Now test the worker service logs command with --nostream
-if ! worker service logs test_service --lines=1 --nostream > /dev/null 2>&1; then
- log_error "Test" "Service logs test failed: worker service logs command failed"
- exit 1
-fi
-
-log_success "Test" "Service logs test passed"
-
-log_success "Test" "All service tests completed successfully"
diff --git a/src/tests/test_helpers.sh b/src/tests/test_helpers.sh
new file mode 100755
index 00000000..5929c39c
--- /dev/null
+++ b/src/tests/test_helpers.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Test helper functions and common utilities
+
+# Colors and symbols
+GREEN='\033[0;32m'
+RED='\033[0;31m'
+BLUE='\033[0;34m'
+YELLOW='\033[1;33m'
+NC='\033[0m'
+CHECK="β"
+CROSS="β"
+INFO="βΉοΈ"
+WARN="β οΈ"
+
+# Helper functions
+print_header() {
+ printf "\n${BLUE}=== %s ===${NC}\n" "$1"
+}
+
+print_success() {
+ printf "${GREEN}%s %s${NC}\n" "$CHECK" "$1"
+}
+
+print_error() {
+ printf "${RED}%s %s${NC}\n" "$CROSS" "$1"
+}
+
+print_info() {
+ printf "${BLUE}%s %s${NC}\n" "$INFO" "$1"
+}
+
+print_warning() {
+ printf "${YELLOW}%s %s${NC}\n" "$WARN" "$1"
+}
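+
+# Illustrative usage from a test module (see src/tests/modules/*.sh):
+#   print_header "My tests"
+#   print_info "Testing: something"
+#   print_success "something passed"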