Commit

Update handlers
Signed-off-by: Jason Cameron <[email protected]>
JasonLovesDoggo committed Nov 3, 2024
1 parent a056bed commit 54433c1
Showing 7 changed files with 64 additions and 46 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
@@ -11,7 +11,7 @@ RUN CGO_ENABLED=0 GOOS=linux go build -o sentinel

FROM alpine:latest

-RUN apk --no-cache add ca-certificates mariadb-client
+RUN apk --no-cache add ca-certificates mariadb-client tar

WORKDIR /root/

45 changes: 25 additions & 20 deletions cmd/main.go
@@ -3,7 +3,7 @@ package main
import (
"context"
"flag"
"github.com/mcpt/Sentinel/compression"
"fmt"
"log"
"os"
"os/signal"
@@ -98,12 +98,6 @@ func performBackup(handlerList []handlers.BackupHandler, uploader *storage.S3Upl
if err := os.MkdirAll(config.Cfg.TempDir, 0755); err != nil {
return err
}
-defer func(path string) {
-err := os.RemoveAll(path)
-if err != nil {
-log.Printf("Failed to remove temporary backup directory: %v", err)
-}
-}(config.Cfg.TempDir)

// Perform backups concurrently
for _, h := range handlerList {
@@ -132,20 +126,31 @@ }
}
}

-// Create final archive
-compressor, err := compression.NewCompressor(config.Cfg.Compression.Format, config.Cfg.Compression.Level)
-if err != nil {
-return err
-}
-err = compressor.Compress(backupFiles, backupPath)
-if err != nil {
-return err
-}
+//// Create final archive
+//compressor, err := compression.NewCompressor(config.Cfg.Compression.Format, config.Cfg.Compression.Level)
+//if err != nil {
+// return err
+//}
+//fmt.Printf("Compressing backups: %s\n", backupFiles)
+//err = compressor.Compress(backupFiles, backupPath)
+//if err != nil {
+// return err
+//}

// Upload final archive
-if err := uploader.UploadDirectory(ctx, backupPath, ""); err != nil {
-return err
+if config.Cfg.Debug {
+fmt.Printf("Uploading backup to S3: %s\n", backupPath)
+
}
+for _, file := range backupFiles {
+fmt.Printf("Uploading backup file: %s\n", file)
+if err := uploader.UploadFile(ctx, file); err != nil {
+return err
+}
+}
+//if err := uploader.UploadFile(ctx, backupPath); err != nil {
+// return err
+//}

// Cleanup
for _, file := range backupFiles {
@@ -154,9 +159,9 @@ func performBackup(handlerList []handlers.BackupHandler, uploader *storage.S3Upl
return err
}
}
-err = os.RemoveAll(backupPath)
+err := os.RemoveAll(config.Cfg.TempDir)
if err != nil {
-return err
+log.Printf("Failed to remove temporary backup directory: %v", err)
}

return nil
10 changes: 6 additions & 4 deletions config.toml
@@ -1,22 +1,24 @@
# Backup system configuration
#schedule = "0 4 * * *" # Daily at 4 AM
temp_dir = "/tmp/backups"
temp_dir = "" # Will create a new random temp directory if not specified
debug = false

[compression]
format = "zlib" # or "gzip"
format = "gzip" # or "gzip"
level = 3 # Compression level (1-9)

[mysql]
-enabled = true
+enabled = false
host = "localhost"
port = "3306"
user = "backup_user"
password = "backup_password"
database = "myapp"
+#docker_container = "db"

[filesystem]
enabled = true
base_path = "/var/www/myapp"
base_path = "D:\\projects\\mcpt\\backups\\test"
include_patterns = [
"*.txt",
"*.pdf",
24 changes: 17 additions & 7 deletions config/config.go
@@ -2,6 +2,7 @@ package config

import (
"fmt"
"log"
"os"

"github.com/BurntSushi/toml"
@@ -10,18 +11,20 @@ import (
type Config struct {
Schedule string `toml:"schedule"`
TempDir string `toml:"temp_dir"`
+Debug bool `toml:"debug"`
Compression struct {
Format string `toml:"format"` // "zstd" or "lz4"
Level int `toml:"level"`
} `toml:"compression"`

MySQL struct {
-Enabled bool `toml:"enabled"`
-Host string `toml:"host"`
-Port string `toml:"port"`
-User string `toml:"user"`
-Password string `toml:"password"`
-Database string `toml:"database"`
+Enabled bool `toml:"enabled"`
+Host string `toml:"host"`
+Port string `toml:"port"`
+User string `toml:"user"`
+Password string `toml:"password"`
+Database string `toml:"database"`
+DockerContainer string `toml:"docker_container"`
} `toml:"mysql"`

Filesystem struct {
@@ -59,9 +62,16 @@ func Load(path string) error {
}

func validateConfig(config *Config) error {
+if config.Debug {
+fmt.Println("Debug mode enabled")
+}

if config.TempDir == "" {
-config.TempDir = os.TempDir()
+dir, err := os.MkdirTemp("", "backups")
+if err != nil {
+log.Fatal(err)
+}
+config.TempDir = dir
}

if config.Compression.Format == "" {
6 changes: 2 additions & 4 deletions handlers/filesystem.go
@@ -9,7 +9,6 @@ import (
"os"
"os/exec"
"path/filepath"
"time"
)

type FileSystemHandler struct {
@@ -23,7 +22,7 @@ func NewFileSystemHandler() (*FileSystemHandler, error) {
}

func (h *FileSystemHandler) Backup(ctx context.Context) (string, error) {
-tempDir := filepath.Join(config.Cfg.TempDir, "fs_backup")
+tempDir := filepath.Join(config.Cfg.TempDir, "filesystem")
if err := os.MkdirAll(tempDir, 0755); err != nil {
return "", fmt.Errorf("failed to create temp directory: %v", err)
}
@@ -81,8 +80,7 @@ func (h *FileSystemHandler) Backup(ctx context.Context) (string, error) {
}

// Create tar.gz archive
-timestamp := time.Now().Format("20060102_150405")
-archivePath := filepath.Join(config.Cfg.TempDir, fmt.Sprintf("fs_backup_%s.tar.gz", timestamp))
+archivePath := filepath.Join(config.Cfg.TempDir, "filesystem.tar.gz")

cmd := exec.Command("tar", "-czf", archivePath, "-C", tempDir, ".")
if err := cmd.Run(); err != nil {
1 change: 0 additions & 1 deletion handlers/mysql.go
@@ -23,7 +23,6 @@ func NewMySQLHandler() (*MySQLHandler, error) {
func (h *MySQLHandler) Backup(ctx context.Context) (string, error) {
timestamp := time.Now().Format("20060102_150405")
filename := filepath.Join(h.tempDir, fmt.Sprintf("mysql_%s.sql", timestamp))
-
cmd := exec.CommandContext(ctx, "mysqldump",
"-h", config.Cfg.MySQL.Host,
"-P", config.Cfg.MySQL.Port,
22 changes: 13 additions & 9 deletions storage/s3.go
@@ -68,7 +68,7 @@ func NewS3Uploader() (*S3Uploader, error) {
}

// UploadDirectory uploads an entire directory to S3
-func (u *S3Uploader) UploadDirectory(ctx context.Context, localPath string, s3Prefix string) error {
+func (u *S3Uploader) UploadDirectory(ctx context.Context, localPath string) error {
// Create a buffered channel to control concurrency
uploadChan := make(chan string, cfg.Cfg.S3.MaxConcurrency)
errChan := make(chan error, 1)
@@ -80,7 +80,7 @@ func (u *S3Uploader) UploadDirectory(ctx context.Context, localPath string, s3Pr
go func() {
defer wg.Done()
for path := range uploadChan {
-if err := u.uploadFile(ctx, path, localPath, s3Prefix); err != nil {
+if err := u.uploadFile(ctx, path, localPath); err != nil {
select {
case errChan <- err:
default:
@@ -128,7 +128,7 @@ func (u *S3Uploader) UploadDirectory(ctx context.Context, localPath string, s3Pr
}

// uploadFile handles the upload of a single file to S3
-func (u *S3Uploader) uploadFile(ctx context.Context, filePath, basePath, s3Prefix string) error {
+func (u *S3Uploader) uploadFile(ctx context.Context, filePath, basePath string) error {
file, err := os.Open(filePath)
if err != nil {
return fmt.Errorf("failed to open file %s: %v", filePath, err)
@@ -137,13 +137,14 @@ func (u *S3Uploader) uploadFile(ctx context.Context, filePath, basePath, s3Prefi

// Calculate relative path for S3 key
relPath, err := filepath.Rel(basePath, filePath)
fmt.Printf("relPath: %s\n", filePath)
if err != nil {
return fmt.Errorf("failed to get relative path: %v", err)
}

-// Create S3 key with prefix and timestamp
-timestamp := time.Now().Format("20060102-150405")
-s3Key := filepath.Join(s3Prefix, timestamp, relPath)
+// use a human-readable timestamp
+timestamp := time.Now().Format("2006-01-02 15:04:05")
+s3Key := filepath.Join(timestamp, relPath)

// Create a pipe for streaming
pr, pw := io.Pipe()
@@ -160,7 +161,10 @@ func (u *S3Uploader) uploadFile(ctx context.Context, filePath, basePath, s3Prefi
Key: aws.String(s3Key),
Body: pr,
})
-pr.Close()
+err := pr.Close()
+if err != nil {
+log.Printf("Failed to close pipe: %v", err)
+}
}()

// Copy file to pipe
@@ -181,6 +185,6 @@ func (u *S3Uploader) uploadFile(ctx context.Context, filePath, basePath, s3Prefi
}

// UploadFile uploads a single file to S3
-func (u *S3Uploader) UploadFile(ctx context.Context, filePath, s3Prefix string) error {
-return u.uploadFile(ctx, filePath, filepath.Dir(filePath), s3Prefix)
+func (u *S3Uploader) UploadFile(ctx context.Context, filePath string) error {
+return u.uploadFile(ctx, filePath, filepath.Dir(filePath))
}
