diff --git a/Dockerfile b/Dockerfile
index 67f10b0..b3bdd0e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -9,8 +9,6 @@ ADD run.sh /run.sh
 RUN chmod 755 /*.sh
 
 ENV S3_BUCKET_NAME docker-backups.example.com
-ENV AWS_ACCESS_KEY_ID **DefineMe**
-ENV AWS_SECRET_ACCESS_KEY **DefineMe**
 ENV AWS_DEFAULT_REGION us-east-1
 ENV PATHS_TO_BACKUP /paths/to/backup
 ENV BACKUP_NAME backup
diff --git a/README.md b/README.md
index 3e997d2..3690c4b 100644
--- a/README.md
+++ b/README.md
@@ -48,6 +48,8 @@ RESTORE=false
 
 `dockup` will use your AWS credentials to create a new bucket with name as per the environment variable `S3_BUCKET_NAME`, or if not defined, using the default name `docker-backups.example.com`. The paths in `PATHS_TO_BACKUP` will be tarballed, gzipped, time-stamped and uploaded to the S3 bucket.
 
+Both `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` are optional if your Docker container is executed in an environment where these credentials are provided -- for example, an AWS IAM role.
+
 ## Restore
 To restore your data simply set the `RESTORE` environment variable to `true` - this will restore the latest backup from S3 to your volume.
 
diff --git a/tutum.yml b/tutum.yml
index 35837f0..2458ac0 100644
--- a/tutum.yml
+++ b/tutum.yml
@@ -2,8 +2,8 @@ dockup:
   image: 'tutum/dockup:latest'
   autodestroy: always
   environment:
-    - AWS_ACCESS_KEY_ID= #MUST CHANGE THIS
-    - AWS_SECRET_ACCESS_KEY= #MUST CHANGE THIS
+    # AWS_ACCESS_KEY_ID= #MAY OPTIONALLY PROVIDE THIS
+    # AWS_SECRET_ACCESS_KEY= #MAY OPTIONALLY PROVIDE THIS
     - AWS_DEFAULT_REGION=us-east-1 #OPTIONALLY CHANGE THIS
     - BACKUP_NAME=backup #OPTIONALLY CHANGE THIS
     - PATHS_TO_BACKUP=/first/path/here /second/path/here #MUST CHANGE THIS
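
For illustration only, a hypothetical invocation matching the change above: if the container runs on a host whose IAM role already grants access to the S3 bucket, the AWS credential variables can be omitted entirely. The bucket name, region, and paths below are placeholders taken from the Dockerfile defaults, not values mandated by this change.

    # Assumes the host's IAM role provides S3 access; no AWS_ACCESS_KEY_ID /
    # AWS_SECRET_ACCESS_KEY are passed in.
    docker run --rm \
      -e S3_BUCKET_NAME=docker-backups.example.com \
      -e AWS_DEFAULT_REGION=us-east-1 \
      -e BACKUP_NAME=backup \
      -e PATHS_TO_BACKUP=/paths/to/backup \
      -v /paths/to/backup:/paths/to/backup \
      tutum/dockup:latest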