Skip to content

Commit

Permalink
Rewrite ;)
Browse files Browse the repository at this point in the history
Signed-off-by: Jason Cameron <[email protected]>
  • Loading branch information
JasonLovesDoggo committed Nov 3, 2024
1 parent 839e023 commit a056bed
Show file tree
Hide file tree
Showing 12 changed files with 770 additions and 290 deletions.
2 changes: 1 addition & 1 deletion .idea/discord.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

163 changes: 163 additions & 0 deletions cmd/main.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,163 @@
package main

import (
"context"
"flag"
"github.com/mcpt/Sentinel/compression"
"log"
"os"
"os/signal"
"path/filepath"
"sync"
"syscall"

"github.com/mcpt/Sentinel/config"
"github.com/mcpt/Sentinel/handlers"
"github.com/mcpt/Sentinel/storage"
"github.com/robfig/cron/v3"
)

// main loads configuration, wires up the enabled backup handlers and the
// S3 uploader, then either performs a single backup immediately (when no
// schedule is configured) or runs backups on a cron schedule until the
// process receives SIGINT/SIGTERM.
func main() {
	configFile := flag.String("config", "config.toml", "Path to configuration file")
	flag.Parse()

	// Load configuration
	if err := config.Load(*configFile); err != nil {
		log.Fatalf("Failed to load configuration: %v", err)
	}

	// Initialize backup handlers
	var backupHandlers []handlers.BackupHandler

	// MySQL handler
	if config.Cfg.MySQL.Enabled {
		mysqlHandler, err := handlers.NewMySQLHandler()
		if err != nil {
			log.Fatalf("Failed to initialize MySQL handler: %v", err)
		}
		backupHandlers = append(backupHandlers, mysqlHandler)
	}

	// Filesystem handler
	if config.Cfg.Filesystem.Enabled {
		fsHandler, err := handlers.NewFileSystemHandler()
		if err != nil {
			log.Fatalf("Failed to initialize filesystem handler: %v", err)
		}
		backupHandlers = append(backupHandlers, fsHandler)
	}

	// Initialize S3 uploader
	s3Uploader, err := storage.NewS3Uploader()
	if err != nil {
		log.Fatalf("Failed to initialize S3 uploader: %v", err)
	}

	// No schedule configured: run a single backup and exit.
	if config.Cfg.Schedule == "" {
		if err := performBackup(backupHandlers, s3Uploader); err != nil {
			log.Fatalf("Backup failed: %v", err)
		}
		return
	}

	// Otherwise run backups on the configured cron schedule.
	c := cron.New()
	_, err = c.AddFunc(config.Cfg.Schedule, func() {
		if err := performBackup(backupHandlers, s3Uploader); err != nil {
			log.Printf("Backup failed: %v", err)
		}
	})
	if err != nil {
		log.Fatalf("Failed to schedule backup: %v", err)
	}

	c.Start()
	log.Printf("Backup system started with schedule: %s", config.Cfg.Schedule)

	// Block until an interrupt/termination signal, then stop the scheduler.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
	<-sigChan

	c.Stop()
}

// performBackup runs every backup handler concurrently, compresses the
// resulting files into a single archive under the configured temp
// directory, uploads the archive to S3, and cleans up intermediate files.
// It returns the first error encountered.
func performBackup(handlerList []handlers.BackupHandler, uploader *storage.S3Uploader) error {
	ctx := context.Background()
	var backupFiles []string
	var wg sync.WaitGroup
	var mu sync.Mutex
	errs := make(chan error, len(handlerList))

	backupPath := filepath.Join(config.Cfg.TempDir, "backup")

	// Create temporary backup directory
	if err := os.MkdirAll(config.Cfg.TempDir, 0755); err != nil {
		return err
	}
	defer func(path string) {
		// Safety guard: validateConfig defaults an empty TempDir to
		// os.TempDir(), and recursively deleting the system temp dir
		// would destroy unrelated files. Skip cleanup in that case.
		if path == os.TempDir() {
			return
		}
		if err := os.RemoveAll(path); err != nil {
			log.Printf("Failed to remove temporary backup directory: %v", err)
		}
	}(config.Cfg.TempDir)

	// Perform backups concurrently; each handler reports failure on errs.
	for _, h := range handlerList {
		wg.Add(1)
		go func(handler handlers.BackupHandler) {
			defer wg.Done()

			backupFile, err := handler.Backup(ctx)
			if err != nil {
				errs <- err
				return
			}

			// backupFiles is shared by all handler goroutines.
			mu.Lock()
			backupFiles = append(backupFiles, backupFile)
			mu.Unlock()
		}(h)
	}

	wg.Wait()
	close(errs)

	// Fail on the first handler error; the channel only ever carries
	// non-nil values, but keep the check for safety.
	for err := range errs {
		if err != nil {
			return err
		}
	}

	// Create final archive
	compressor, err := compression.NewCompressor(config.Cfg.Compression.Format, config.Cfg.Compression.Level)
	if err != nil {
		return err
	}
	if err := compressor.Compress(backupFiles, backupPath); err != nil {
		return err
	}

	// Upload final archive
	if err := uploader.UploadDirectory(ctx, backupPath, ""); err != nil {
		return err
	}

	// Cleanup: remove the per-handler backup files and the archive.
	for _, file := range backupFiles {
		if err := os.Remove(file); err != nil {
			return err
		}
	}
	return os.RemoveAll(backupPath)
}
86 changes: 86 additions & 0 deletions compression/compressor.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
package compression

import (
"compress/gzip"
"compress/zlib"
"fmt"
"io"
"os"
)

type Compressor interface {
Compress(src []string, dst string) error
}

type ZlibCompressor struct {
level int // 1-9 see https://pkg.go.dev/compress/flate#pkg-constants
}
type GzipCompressor struct{}

func NewCompressor(format string, level int) (Compressor, error) {
switch format {
case "zlib":
return &ZlibCompressor{level: level}, nil
case "gzip":
return &GzipCompressor{}, nil
default:
return nil, fmt.Errorf("unsupported compression format: %s", format)
}
}

func (c *ZlibCompressor) Compress(src []string, dst string) error {
f, err := os.Create(dst)
if err != nil {
return err
}
defer f.Close()

zw, err := zlib.NewWriterLevel(f, c.level)
if err != nil {
return err
}
defer zw.Close()

for _, file := range src {
f, err := os.Open(file)
if err != nil {
return err
}
defer f.Close()

_, err = io.Copy(zw, f)
if err != nil {
return err
}
}
err = zw.Flush()
if err != nil {
return err
}
return nil
}

func (c *GzipCompressor) Compress(src []string, dst string) error {
f, err := os.Create(dst)
if err != nil {
return err
}
defer f.Close()

gw := gzip.NewWriter(f)
defer gw.Close()

for _, file := range src {
f, err := os.Open(file)
if err != nil {
return err
}
defer f.Close()

_, err = io.Copy(gw, f)
if err != nil {
return err
}
}
return nil
}
40 changes: 40 additions & 0 deletions config.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
# Backup system configuration
#schedule = "0 4 * * *" # Daily at 4 AM
temp_dir = "/tmp/backups"

[compression]
format = "zlib" # or "gzip"
level = 3 # Compression level (1-9)

[mysql]
enabled = true
host = "localhost"
port = "3306"
user = "backup_user"
password = "backup_password"
database = "myapp"

[filesystem]
enabled = true
base_path = "/var/www/myapp"
include_patterns = [
"*.txt",
"*.pdf",
"config/**",
"data/**"
]
exclude_patterns = [
".git/**",
"node_modules/**",
"tmp/**",
"*.tmp"
]

[s3]
endpoint = "https://s3.amazonaws.com"
region = "us-east-1"
bucket = "my-backup-bucket"
access_key_id = "your_access_key"
secret_access_key = "your_secret_key"
max_concurrency = 10
part_size = 0
72 changes: 72 additions & 0 deletions config/config.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
package config

import (
"fmt"
"os"

"github.com/BurntSushi/toml"
)

// Config is the root configuration for the backup system, decoded from a
// TOML file by Load and exposed through the package-level Cfg variable.
type Config struct {
	// Schedule is a cron expression; when empty, the backup runs once
	// immediately instead of being scheduled (see cmd/main.go).
	Schedule string `toml:"schedule"`
	// TempDir is the staging directory for backups; defaults to
	// os.TempDir() when left empty (see validateConfig).
	TempDir string `toml:"temp_dir"`
	Compression struct {
		Format string `toml:"format"` // "zlib" or "gzip" — the formats compression.NewCompressor accepts
		Level  int    `toml:"level"`  // compression level passed through to the compressor
	} `toml:"compression"`

	// MySQL configures the optional MySQL backup handler.
	MySQL struct {
		Enabled  bool   `toml:"enabled"`
		Host     string `toml:"host"`
		Port     string `toml:"port"`
		User     string `toml:"user"`
		Password string `toml:"password"`
		Database string `toml:"database"`
	} `toml:"mysql"`

	// Filesystem configures the optional filesystem backup handler.
	Filesystem struct {
		Enabled         bool     `toml:"enabled"`
		BasePath        string   `toml:"base_path"`
		IncludePatterns []string `toml:"include_patterns"`
		ExcludePatterns []string `toml:"exclude_patterns"`
	} `toml:"filesystem"`

	// S3 holds credentials and tuning for the S3 upload target.
	S3 struct {
		Endpoint        string `toml:"endpoint"`
		Region          string `toml:"region"`
		Bucket          string `toml:"bucket"`
		AccessKeyID     string `toml:"access_key_id"`
		SecretAccessKey string `toml:"secret_access_key"`
		MaxConcurrency  int    `toml:"max_concurrency"`
		PartSize        int64  `toml:"part_size"`
	} `toml:"s3"`
}

// Cfg is the process-wide configuration populated by Load.
var Cfg Config

// Load decodes the TOML file at path into Cfg and applies defaults and
// validation. It must be called before Cfg is read.
func Load(path string) error {
	if _, err := toml.DecodeFile(path, &Cfg); err != nil {
		// %w keeps the underlying decode error inspectable via errors.Is/As.
		return fmt.Errorf("decoding config file %q: %w", path, err)
	}

	// Validate configuration
	if err := validateConfig(&Cfg); err != nil {
		return fmt.Errorf("invalid configuration: %w", err)
	}

	return nil
}

// validateConfig fills in defaults for optional settings. It currently
// never returns a non-nil error, but keeps the error return so future
// validations can reject a bad configuration at load time.
func validateConfig(config *Config) error {
	if config.TempDir == "" {
		config.TempDir = os.TempDir()
	}

	if config.Compression.Format == "" {
		// Default to a format compression.NewCompressor actually supports;
		// the previous default "zstd" was rejected there, making an empty
		// format fail at backup time instead of load time.
		config.Compression.Format = "gzip"
	}

	return nil
}
Loading

0 comments on commit a056bed

Please sign in to comment.