diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 2e9c35aa5..f167c5512 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,7 +1,7 @@ -# This file is sensitive since rules below may be used for restricting who can -# make access control changes. -/.github/CODEOWNERS @GSA-TTS/FAC-admins - - -# Changes to the following Terraform directory will impact access control in cloud.gov spaces. Any PR involving these files should get a review from someone in FAC-admins. -/terraform/meta/ @GSA-TTS/FAC-admins +# This file is sensitive since rules below may be used for restricting who can +# make access control changes. +/.github/CODEOWNERS @GSA-TTS/FAC-admins +/.github/workflows/fac-restore-util.yml @GSA-TTS/FAC-admins @asteel-gsa + +# Changes to the following Terraform directory will impact access control in cloud.gov spaces. Any PR involving these files should get a review from someone in FAC-admins. +/terraform/meta/ @GSA-TTS/FAC-admins diff --git a/.github/workflows/deploy-application.yml b/.github/workflows/deploy-application.yml index 47367e448..a31920be9 100644 --- a/.github/workflows/deploy-application.yml +++ b/.github/workflows/deploy-application.yml @@ -1,148 +1,128 @@ ---- -name: Deploy application to cloud.gov -on: - workflow_call: - inputs: - environment: - required: true - type: string - -jobs: - push-with-creds: - name: Deploy to cloud.gov with updated credentials - runs-on: ubuntu-latest - environment: ${{ inputs.environment }} - env: - space: ${{ inputs.environment }} - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Restore npm cache - uses: actions/cache@v4 - id: cache-npm - with: - path: ~/.npm - key: fac-build-npm-${{ hashFiles('**/package-lock.json') }} - restore-keys: | - fac-build-npm- - fac-build- - - - name: Set up Python 3.10 - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - - name: Restore pip cache - uses: actions/cache@v4 - id: cache-pip - with: - path: | - ~/.cache/pip - /opt/hostedtoolcache/Python/ - key: fac-build-pip-${{ hashFiles('**/requirements.txt') }}-${{ hashFiles('**/dev-requirements.txt') }} - restore-keys: | - fac-build-pip-${{ hashFiles('**/requirements.txt') }}- - fac-build-pip- - fac-build- - - - name: Install npm dependencies - working-directory: ./backend - run: npm ci --production - - - name: Compile JS/CSS assets - working-directory: ./backend - run: npm run build - - - name: Update service keys - uses: cloud-gov/cg-cli-tools@main - env: - SAM_API_KEY: ${{ secrets.SAM_API_KEY }} - DJANGO_SECRET_LOGIN_KEY: $${{ secrets.DJANGO_SECRET_LOGIN_KEY }} - LOGIN_CLIENT_ID: $${{ secrets.LOGIN_CLIENT_ID }} - SECRET_KEY: ${{ secrets.SECRET_KEY }} - with: - cf_username: ${{ secrets.CF_USERNAME }} - cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: gsa-tts-oros-fac - cf_space: ${{ env.space }} - cf_command: update-user-provided-service fac-key-service -p '"{\"SAM_API_KEY\":\"${{ secrets.SAM_API_KEY }}\", \"DJANGO_SECRET_LOGIN_KEY\":\"${{ secrets.DJANGO_SECRET_LOGIN_KEY }}\", \"LOGIN_CLIENT_ID\":\"${{ secrets.LOGIN_CLIENT_ID }}\", \"SECRET_KEY\":\"${{ secrets.SECRET_KEY}}\"}"' - - - name: Bind backup s3 bucket to prod app - if: startsWith(github.ref, 'refs/tags/v1.') - uses: cloud-gov/cg-cli-tools@main - with: - cf_username: ${{ secrets.CF_USERNAME }} - cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: gsa-tts-oros-fac - cf_space: ${{ env.space }} - command: cf bind-service gsa-fac backups - - - name: Backup the database (Prod Only) - if: startsWith(github.ref, 'refs/tags/v1.') - uses: 
cloud-gov/cg-cli-tools@main - with: - cf_username: ${{ secrets.CF_USERNAME }} - cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: gsa-tts-oros-fac - cf_space: ${{ env.space }} - command: cf run-task gsa-fac -k 2G -m 2G --name pg_backup --command "./backup_database.sh ${{ env.space }}" - - - name: Deploy Preview to cloud.gov - if: ${{ inputs.environment == 'preview' }} - uses: cloud-gov/cg-cli-tools@main - with: - cf_username: ${{ secrets.CF_USERNAME }} - cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: gsa-tts-oros-fac - cf_space: ${{ env.space }} - cf_manifest: backend/manifests/manifest-preview.yml - cf_vars_file: backend/manifests/vars/vars-${{ env.space }}.yml - command: bin/ops/deploy_preview.sh - - - name: Deploy fac to cloud.gov - if: ${{ inputs.environment != 'preview' }} - uses: cloud-gov/cg-cli-tools@main - with: - cf_username: ${{ secrets.CF_USERNAME }} - cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: gsa-tts-oros-fac - cf_space: ${{ env.space }} - cf_manifest: backend/manifests/manifest-fac.yml - cf_vars_file: backend/manifests/vars/vars-${{ env.space }}.yml - command: bin/ops/deploy.sh - - - name: Unbind backup s3 bucket from prod app - if: startsWith(github.ref, 'refs/tags/v1.') - uses: cloud-gov/cg-cli-tools@main - with: - cf_username: ${{ secrets.CF_USERNAME }} - cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: gsa-tts-oros-fac - cf_space: ${{ env.space }} - command: cf unbind-service gsa-fac backups - - - name: Load historical data - uses: cloud-gov/cg-cli-tools@main - with: - cf_username: ${{ secrets.CF_USERNAME }} - cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: gsa-tts-oros-fac - cf_space: ${{ env.space }} - command: cf run-task gsa-fac -k 6G -m 1G --name load_data --command "./load_data.sh" - - # This has to happen after an application deployment because the manifest (currently) is responsible - # for binding the "logdrain service" to the "gsa-fac application". This also needs to be done - # based on the suspicion that fluentbit cannot register the incoming logs when it is initially - # created, resulting in a 502. Restarting the application after everything is configured results - # in a 201, or, the expected status when transmitting logs. 
- - name: Restart the logshipper application - uses: cloud-gov/cg-cli-tools@main - with: - cf_username: ${{ secrets.CF_USERNAME }} - cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: gsa-tts-oros-fac - cf_space: ${{ env.space }} - command: cf restart logshipper - +--- +name: Deploy application to cloud.gov +on: + workflow_call: + inputs: + environment: + required: true + type: string + +jobs: + push-with-creds: + name: Deploy to cloud.gov with updated credentials + runs-on: ubuntu-latest + environment: ${{ inputs.environment }} + env: + space: ${{ inputs.environment }} + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Restore npm cache + uses: actions/cache@v4 + id: cache-npm + with: + path: ~/.npm + key: fac-build-npm-${{ hashFiles('**/package-lock.json') }} + restore-keys: | + fac-build-npm- + fac-build- + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Restore pip cache + uses: actions/cache@v4 + id: cache-pip + with: + path: | + ~/.cache/pip + /opt/hostedtoolcache/Python/ + key: fac-build-pip-${{ hashFiles('**/requirements.txt') }}-${{ hashFiles('**/dev-requirements.txt') }} + restore-keys: | + fac-build-pip-${{ hashFiles('**/requirements.txt') }}- + fac-build-pip- + fac-build- + + - name: Install npm dependencies + working-directory: ./backend + run: npm ci --production + + - name: Compile JS/CSS assets + working-directory: ./backend + run: npm run build + + - name: Update service keys + uses: cloud-gov/cg-cli-tools@main + env: + SAM_API_KEY: ${{ secrets.SAM_API_KEY }} + DJANGO_SECRET_LOGIN_KEY: $${{ secrets.DJANGO_SECRET_LOGIN_KEY }} + LOGIN_CLIENT_ID: $${{ secrets.LOGIN_CLIENT_ID }} + SECRET_KEY: ${{ secrets.SECRET_KEY }} + with: + cf_username: ${{ secrets.CF_USERNAME }} + cf_password: ${{ secrets.CF_PASSWORD }} + cf_org: gsa-tts-oros-fac + cf_space: ${{ env.space }} + cf_command: update-user-provided-service fac-key-service -p '"{\"SAM_API_KEY\":\"${{ secrets.SAM_API_KEY }}\", \"DJANGO_SECRET_LOGIN_KEY\":\"${{ secrets.DJANGO_SECRET_LOGIN_KEY }}\", \"LOGIN_CLIENT_ID\":\"${{ secrets.LOGIN_CLIENT_ID }}\", \"SECRET_KEY\":\"${{ secrets.SECRET_KEY}}\"}"' + + - name: Backup the database + # if: startsWith(github.ref, 'refs/tags/v1.') + uses: cloud-gov/cg-cli-tools@main + with: + cf_username: ${{ secrets.CF_USERNAME }} + cf_password: ${{ secrets.CF_PASSWORD }} + cf_org: gsa-tts-oros-fac + cf_space: ${{ env.space }} + command: cf run-task gsa-fac -k 7G -m 3G --name deploy_backup --command "./fac-backup-util.sh v0.1.3 deploy_backup" + + - name: Deploy Preview to cloud.gov + if: ${{ inputs.environment == 'preview' }} + uses: cloud-gov/cg-cli-tools@main + with: + cf_username: ${{ secrets.CF_USERNAME }} + cf_password: ${{ secrets.CF_PASSWORD }} + cf_org: gsa-tts-oros-fac + cf_space: ${{ env.space }} + cf_manifest: backend/manifests/manifest-preview.yml + cf_vars_file: backend/manifests/vars/vars-${{ env.space }}.yml + command: bin/ops/deploy_preview.sh + + - name: Deploy fac to cloud.gov + if: ${{ inputs.environment != 'preview' }} + uses: cloud-gov/cg-cli-tools@main + with: + cf_username: ${{ secrets.CF_USERNAME }} + cf_password: ${{ secrets.CF_PASSWORD }} + cf_org: gsa-tts-oros-fac + cf_space: ${{ env.space }} + cf_manifest: backend/manifests/manifest-fac.yml + cf_vars_file: backend/manifests/vars/vars-${{ env.space }}.yml + command: bin/ops/deploy.sh + + - name: Load historical data + uses: cloud-gov/cg-cli-tools@main + with: + cf_username: ${{ secrets.CF_USERNAME }} + cf_password: ${{ secrets.CF_PASSWORD }} + 
cf_org: gsa-tts-oros-fac + cf_space: ${{ env.space }} + command: cf run-task gsa-fac -k 6G -m 1G --name load_data --command "./load_data.sh" + + # This has to happen after an application deployment because the manifest (currently) is responsible + # for binding the "logdrain service" to the "gsa-fac application". This also needs to be done + # based on the suspicion that fluentbit cannot register the incoming logs when it is initially + # created, resulting in a 502. Restarting the application after everything is configured results + # in a 201, or, the expected status when transmitting logs. + - name: Restart the logshipper application + uses: cloud-gov/cg-cli-tools@main + with: + cf_username: ${{ secrets.CF_USERNAME }} + cf_password: ${{ secrets.CF_PASSWORD }} + cf_org: gsa-tts-oros-fac + cf_space: ${{ env.space }} + command: cf restart logshipper + diff --git a/.github/workflows/fac-backup-scheduler.yml b/.github/workflows/fac-backup-scheduler.yml new file mode 100644 index 000000000..72683b449 --- /dev/null +++ b/.github/workflows/fac-backup-scheduler.yml @@ -0,0 +1,24 @@ +--- +name: Backup Environments +on: + schedule: + # Invoke every 2 hours + - cron: '0 */2 * * *' + workflow_dispatch: null + +jobs: + backup-environment: + strategy: + fail-fast: false + matrix: + environment: + - name: dev + - name: staging + - name: production + uses: ./.github/workflows/fac-backup-util-scheduled.yml + secrets: inherit + with: + environment: ${{ matrix.environment.name }} + util_version: "v0.1.3" + backup_operation: "scheduled_backup" + diff --git a/.github/workflows/fac-backup-util-scheduled.yml b/.github/workflows/fac-backup-util-scheduled.yml new file mode 100644 index 000000000..d98a5a1be --- /dev/null +++ b/.github/workflows/fac-backup-util-scheduled.yml @@ -0,0 +1,36 @@ +--- +name: Backup the database with fac-backup-utility +### Common Commands: +# ./fac-backup-util.sh v0.1.3 scheduled_backup +# ./fac-backup-util.sh v0.1.3 daily_backup +on: + workflow_call: + inputs: + environment: + required: true + type: string + util_version: + description: Version for fac backup utility to use (ex. vX.Y.Z) + required: true + type: string + backup_operation: + description: Operation for fac-backup-utility + required: true + type: string +jobs: + fac-backup-scheduled: + name: Perform Database Backup + runs-on: ubuntu-latest + environment: ${{ inputs.environment }} + env: + space: ${{ inputs.environment }} + steps: + - name: Backup FAC Database and Sync Media + uses: cloud-gov/cg-cli-tools@main + with: + cf_username: ${{ secrets.CF_USERNAME }} + cf_password: ${{ secrets.CF_PASSWORD }} + cf_org: gsa-tts-oros-fac + cf_space: ${{ env.space }} + command: cf run-task gsa-fac -k 7G -m 3G --name backup_util_scheduled --command "./fac-backup-util.sh ${{ inputs.util_version }} ${{ inputs.backup_operation }}" + diff --git a/.github/workflows/fac-backup-util.yml b/.github/workflows/fac-backup-util.yml new file mode 100644 index 000000000..a3ff9767e --- /dev/null +++ b/.github/workflows/fac-backup-util.yml @@ -0,0 +1,43 @@ +--- +name: Backup the database with fac-backup-utility +### Common Commands: +# ./fac-backup-util.sh v0.1.3 initial_backup +# ./fac-backup-util.sh v0.1.3 deploy_backup +on: + workflow_dispatch: + inputs: + environment: + required: true + type: choice + options: + - 'dev' + - 'preview' + - 'staging' + - 'production' + util_version: + description: Version for fac backup utility to use (ex. 
vX.Y.Z) + required: true + type: string + backup_operation: + description: Operation for fac-backup-utility + required: true + type: choice + options: + - 'initial_backup' + - 'deploy_backup' +jobs: + fac-backup: + name: Perform Database Backup + runs-on: ubuntu-latest + environment: ${{ inputs.environment }} + env: + space: ${{ inputs.environment }} + steps: + - name: Backup FAC Database and Sync Media + uses: cloud-gov/cg-cli-tools@main + with: + cf_username: ${{ secrets.CF_USERNAME }} + cf_password: ${{ secrets.CF_PASSWORD }} + cf_org: gsa-tts-oros-fac + cf_space: ${{ env.space }} + command: cf run-task gsa-fac -k 7G -m 3G --name deploy_backup_util --command "./fac-backup-util.sh ${{ inputs.util_version }} ${{ inputs.backup_operation }}" diff --git a/.github/workflows/scheduled-dev-snapshot.yml b/.github/workflows/scheduled-dev-snapshot.yml deleted file mode 100644 index 993d72976..000000000 --- a/.github/workflows/scheduled-dev-snapshot.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -name: Development Media Snapshot -on: - schedule: - # Invoke at 9 UTC every monday - - cron: '0 9 * * 1' - workflow_dispatch: null - -jobs: - dev-media-snapshot: - uses: ./.github/workflows/tar-s3-media.yml - secrets: inherit - with: - environment: "dev" - diff --git a/.github/workflows/scheduled-dev-sync.yml b/.github/workflows/scheduled-dev-sync.yml deleted file mode 100644 index 01725a2e6..000000000 --- a/.github/workflows/scheduled-dev-sync.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -name: Sync Dev Media Files -on: - schedule: - # Invoke every 2 hours - - cron: '0 */2 * * *' - workflow_dispatch: null - -jobs: - dev-media-sync: - uses: ./.github/workflows/sync-s3-media.yml - secrets: inherit - with: - environment: "dev" - diff --git a/.github/workflows/scheduled-production-snapshot.yml b/.github/workflows/scheduled-production-snapshot.yml deleted file mode 100644 index f73e826c8..000000000 --- a/.github/workflows/scheduled-production-snapshot.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -name: Production Media Snapshot -on: - #schedule: - # Invoke at 9 UTC every monday - #- cron: '0 9 * * 1' - workflow_dispatch: null - -jobs: - production-media-snapshot: - uses: ./.github/workflows/tar-s3-media.yml - secrets: inherit - with: - environment: "production" - diff --git a/.github/workflows/scheduled-production-sync.yml b/.github/workflows/scheduled-production-sync.yml deleted file mode 100644 index 2521c9ce3..000000000 --- a/.github/workflows/scheduled-production-sync.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -name: Sync Production Media Files -on: - schedule: - # Invoke every 2 hours - - cron: '0 */2 * * *' - workflow_dispatch: null - -jobs: - production-media-sync: - uses: ./.github/workflows/sync-s3-media.yml - secrets: inherit - with: - environment: "production" - diff --git a/.github/workflows/sync-s3-media.yml b/.github/workflows/sync-s3-media.yml deleted file mode 100644 index 0d14ff9b8..000000000 --- a/.github/workflows/sync-s3-media.yml +++ /dev/null @@ -1,64 +0,0 @@ ---- -name: Perform Media and Database Backups -on: - workflow_dispatch: - inputs: - environment: - required: true - type: string - workflow_call: - inputs: - environment: - required: true - type: string - -jobs: - backup-media: - name: Perform Media Backups - runs-on: ubuntu-latest - environment: ${{ inputs.environment }} - env: - space: ${{ inputs.environment }} - steps: - - name: Backup media files - uses: cloud-gov/cg-cli-tools@main - with: - cf_username: ${{ secrets.CF_USERNAME }} - cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: gsa-tts-oros-fac 
- cf_space: ${{ env.space }} - command: cf run-task gsa-fac -k 2G -m 2G --name s3_sync --command "./s3-sync.sh" - - backup-dev-database: - if: ${{ inputs.environment == 'dev' }} - name: Perform Dev Database Backups - runs-on: ubuntu-latest - environment: ${{ inputs.environment }} - env: - space: ${{ inputs.environment }} - steps: - - name: Backup Dev Database - uses: cloud-gov/cg-cli-tools@main - with: - cf_username: ${{ secrets.CF_USERNAME }} - cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: gsa-tts-oros-fac - cf_space: ${{ env.space }} - command: cf run-task gsa-fac -k 7G -m 2G --name pg_backup --command "./backup_database.sh ${{ env.space }}" - - backup-prod-database: - if: ${{ inputs.environment == 'production' }} - name: Perform Prod Database Backups - runs-on: ubuntu-latest - environment: ${{ inputs.environment }} - env: - space: ${{ inputs.environment }} - steps: - - name: Backup the database (Prod Only) - uses: cloud-gov/cg-cli-tools@main - with: - cf_username: ${{ secrets.CF_USERNAME }} - cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: gsa-tts-oros-fac - cf_space: ${{ env.space }} - command: cf run-task gsa-fac -k 7G -m 2G --name pg_backup --command "./backup_database.sh ${{ env.space }}" diff --git a/.github/workflows/tar-s3-media.yml b/.github/workflows/tar-s3-media.yml deleted file mode 100644 index 00f648b16..000000000 --- a/.github/workflows/tar-s3-media.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -name: Perform a tar snapshot of the media -on: - workflow_dispatch: - inputs: - environment: - required: true - type: string - workflow_call: - inputs: - environment: - required: true - type: string - -jobs: - backup-media: - name: Perform Media Backups - runs-on: ubuntu-latest - environment: ${{ inputs.environment }} - env: - space: ${{ inputs.environment }} - steps: - - name: Backup media files - uses: cloud-gov/cg-cli-tools@main - with: - cf_username: ${{ secrets.CF_USERNAME }} - cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: gsa-tts-oros-fac - cf_space: ${{ env.space }} - command: cf run-task gsa-fac -k 2G -m 2G --name s3_tar_snapshot --command "./s3-tar-snapshot.sh" diff --git a/.github/workflows/terraform-apply-env.yml b/.github/workflows/terraform-apply-env.yml index 2e8160095..d50ffd12b 100644 --- a/.github/workflows/terraform-apply-env.yml +++ b/.github/workflows/terraform-apply-env.yml @@ -62,24 +62,3 @@ jobs: bucket=${{ secrets.terraform_BUCKET }}, region=${{ secrets.terraform_REGION }}, key=${{ env.KEY }}, - - - - name: Unshare backups s3 bucket to staging space - if: ${{ inputs.environment == 'meta' }} - uses: cloud-gov/cg-cli-tools@main - with: - cf_username: ${{ secrets.CF_USERNAME }} - cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: gsa-tts-oros-fac - cf_space: production - command: cf unshare-service backups -s staging -f - - - name: Share backups s3 bucket to staging space - if: ${{ inputs.environment == 'meta' }} - uses: cloud-gov/cg-cli-tools@main - with: - cf_username: ${{ secrets.CF_USERNAME }} - cf_password: ${{ secrets.CF_PASSWORD }} - cf_org: gsa-tts-oros-fac - cf_space: production - command: cf share-service backups -s staging diff --git a/backend/Makefile b/backend/Makefile index fba24d34b..8e78d3466 100644 --- a/backend/Makefile +++ b/backend/Makefile @@ -90,7 +90,7 @@ docker-lint: ghcr-first-run: docker compose -f docker-compose-web.yml run web python manage.py makemigrations docker compose -f docker-compose-web.yml run web python manage.py migrate - docker compose -f docker-compose-web.yml run web python manage.py load_test_data + docker compose 
-f docker-compose-web.yml run web python manage.py loaddata # Run Django tests with docker ghcr-test: diff --git a/backend/docker-compose-web.yml b/backend/docker-compose-web.yml index b126572e9..98deabef2 100644 --- a/backend/docker-compose-web.yml +++ b/backend/docker-compose-web.yml @@ -15,6 +15,27 @@ services: interval: 10s timeout: 5s retries: 10 + db2: + image: "postgres:15" + environment: + POSTGRES_HOST_AUTH_METHOD: "trust" + volumes: + - postgres-data2:/var/lib/postgresql/data/ + ports: + - "5431:5432" + healthcheck: + test: ["CMD-SHELL", "pg_isready -d postgres -U postgres"] + interval: 10s + timeout: 5s + retries: 10 + + historic-data: + image: ghcr.io/gsa-tts/fac-historic-public-csvs/load-historic-public-data:20230912 + depends_on: + db: + condition: service_healthy + environment: + DATABASE_URL: postgres://postgres@db/postgres web: image: ghcr.io/gsa-tts/fac/web-container:latest @@ -22,6 +43,8 @@ services: depends_on: db: condition: service_healthy + db2: + condition: service_healthy minio: condition: service_started clamav-rest: @@ -72,6 +95,7 @@ services: - "3000" environment: PGRST_DB_URI: postgres://postgres@db:5432/postgres + PGRST2_DB_URI: postgres://postgres@db:5431/postgres PGRST_OPENAPI_SERVER_PROXY_URI: http://127.0.0.1:3000 PGRST_DB_ANON_ROLE: anon # See https://postgrest.org/en/stable/references/api/schemas.html#multiple-schemas for multiple schemas @@ -83,4 +107,5 @@ services: volumes: postgres-data: + postgres-data2: minio-vol: diff --git a/backend/docker-compose.yml b/backend/docker-compose.yml index 514697f71..f8ee53cad 100644 --- a/backend/docker-compose.yml +++ b/backend/docker-compose.yml @@ -17,6 +17,19 @@ services: interval: 10s timeout: 5s retries: 10 + db2: + image: "postgres:15" + environment: + POSTGRES_HOST_AUTH_METHOD: "trust" + volumes: + - postgres-data2:/var/lib/postgresql/data/ + ports: + - "5431:5432" + healthcheck: + test: ["CMD-SHELL", "pg_isready -d postgres -U postgres"] + interval: 10s + timeout: 5s + retries: 10 #--------------------------------------------- # Historic data @@ -40,6 +53,8 @@ services: depends_on: db: condition: service_healthy + db2: + condition: service_healthy minio: condition: service_started clamav-rest: @@ -107,6 +122,7 @@ services: - "3000" environment: PGRST_DB_URI: postgres://postgres@db:5432/postgres + PGRST2_DB_URI: postgres://postgres@db:5431/postgres PGRST_OPENAPI_SERVER_PROXY_URI: http://127.0.0.1:3000 PGRST_DB_ANON_ROLE: anon # See https://postgrest.org/en/stable/references/api/schemas.html#multiple-schemas for multiple schemas @@ -119,4 +135,5 @@ services: condition: service_healthy volumes: postgres-data: + postgres-data2: minio-vol: diff --git a/backend/fac-backup-util.sh b/backend/fac-backup-util.sh new file mode 100755 index 000000000..ab6e2165e --- /dev/null +++ b/backend/fac-backup-util.sh @@ -0,0 +1,74 @@ +#!/bin/bash +set -e +source tools/util_startup.sh +source tools/setup_env.sh +setup_env +version=$1 +run_option=$2 +s3_name="fac-private-s3" +backup_s3_name="backups" +db_name="fac-db" +backup_db_name="fac-snapshot-db" +initial_date=$(date +%Y%m%d%H%M) +scheduled_date=$(date +%m-%d-%H) +daily_date=$(date +%m-%d) +mkdir tmp && cd tmp || return + +GetUtil() { + curl -x "$https_proxy" -L "https://github.com/GSA-TTS/fac-backup-utility/releases/download/$version/gov.gsa.fac.cgov-util-$version-linux-amd64.tar.gz" -O + tar -xvf "gov.gsa.fac.cgov-util-$version-linux-amd64.tar.gz" && rm "gov.gsa.fac.cgov-util-$version-linux-amd64.tar.gz" +} +InstallAWS() { + ./gov.gsa.fac.cgov-util install_aws +} 
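+# The helpers below wrap gov.gsa.fac.cgov-util subcommands: AWSS3Sync syncs
+# one bucket's contents into another, RDSToS3Dump writes a table dump of a
+# bound database to an S3 prefix, and RDSToRDS copies tables between the
+# live database and the snapshot database.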
+AWSS3Sync() { + ./gov.gsa.fac.cgov-util s3_sync --source_s3 s3://"$1"/ --dest_s3 s3://"$2"/ +} +RDSToS3Dump() { + ./gov.gsa.fac.cgov-util db_to_s3 --db "$1" --s3path s3://"$2"/"$3"/ +} +RDSToRDS() { + ./gov.gsa.fac.cgov-util db_to_db --src_db "$1" --dest_db "$2" --operation "$3" +} + +if [ "$run_option" == "initial_backup" ]; then + GetUtil + InstallAWS + gonogo "install_aws" + RDSToS3Dump "$db_name" "$backup_s3_name" "initial/$initial_date" + gonogo "db_to_s3" + RDSToRDS "$db_name" "$backup_db_name" "initial" + gonogo "db_to_db" + AWSS3Sync "$s3_name" "$backup_s3_name" + gonogo "s3_sync" +elif [ "$run_option" == "deploy_backup" ]; then + GetUtil + InstallAWS + gonogo "install_aws" + RDSToRDS "$db_name" "$backup_db_name" "backup" + gonogo "db_to_db" + AWSS3Sync "$s3_name" "$backup_s3_name" + gonogo "s3_sync" +elif [ "$run_option" == "scheduled_backup" ]; then + GetUtil + InstallAWS + gonogo "install_aws" + RDSToS3Dump "$db_name" "$backup_s3_name" "scheduled/$scheduled_date" + gonogo "db_to_s3" + AWSS3Sync "$s3_name" "$backup_s3_name" + gonogo "s3_sync" +elif [ "$run_option" == "daily_backup" ]; then + GetUtil + InstallAWS + gonogo "install_aws" + RDSToS3Dump "$db_name" "$backup_s3_name" "daily/$daily_date" + gonogo "db_to_s3" + AWSS3Sync "$s3_name" "$backup_s3_name" + gonogo "s3_sync" +elif [ "$run_option" == "media_sync" ]; then + GetUtil + InstallAWS + gonogo "install_aws" + AWSS3Sync "$s3_name" "$backup_s3_name" + gonogo "s3_sync" +fi diff --git a/backend/manifests/manifest-fac.yml b/backend/manifests/manifest-fac.yml index 2b679e4a5..b29d59b53 100644 --- a/backend/manifests/manifest-fac.yml +++ b/backend/manifests/manifest-fac.yml @@ -26,3 +26,5 @@ applications: - https-proxy-creds - smtp-proxy-creds - fac-logdrain + - fac-snapshot-db + - backups diff --git a/backend/manifests/manifest-preview.yml b/backend/manifests/manifest-preview.yml index 2b679e4a5..2d51575d1 100644 --- a/backend/manifests/manifest-preview.yml +++ b/backend/manifests/manifest-preview.yml @@ -1,28 +1,30 @@ ---- -applications: - - name: ((app_name)) - buildpacks: - - https://github.com/cloudfoundry/apt-buildpack - - python_buildpack - memory: ((mem_amount)) - path: ../ - timeout: 180 - disk_quota: 2G - env: - ENV: ((cf_env_name)) - DJANGO_BASE_URL: https://((endpoint)) - ALLOWED_HOSTS: ((endpoint)) fac-((env_name)).app.cloud.gov - AV_SCAN_URL: https://fac-av-((service_name)).apps.internal:61443/scan - # DISABLE_COLLECTSTATIC: true - routes: - - route: fac-((env_name)).app.cloud.gov - instances: ((instances)) - services: - - fac-db - - fac-public-s3 - - fac-private-s3 - - fac-key-service - - newrelic-creds - - https-proxy-creds - - smtp-proxy-creds - - fac-logdrain +--- +applications: + - name: ((app_name)) + buildpacks: + - https://github.com/cloudfoundry/apt-buildpack + - python_buildpack + memory: ((mem_amount)) + path: ../ + timeout: 180 + disk_quota: 2G + env: + ENV: ((cf_env_name)) + DJANGO_BASE_URL: https://((endpoint)) + ALLOWED_HOSTS: ((endpoint)) fac-((env_name)).app.cloud.gov + AV_SCAN_URL: https://fac-av-((service_name)).apps.internal:61443/scan + # DISABLE_COLLECTSTATIC: true + routes: + - route: fac-((env_name)).app.cloud.gov + instances: ((instances)) + services: + - fac-db + - fac-public-s3 + - fac-private-s3 + - fac-key-service + - newrelic-creds + - https-proxy-creds + - smtp-proxy-creds + - fac-logdrain + - fac-snapshot-db + - backups diff --git a/backend/s3-sync.sh b/backend/s3-sync.sh deleted file mode 100755 index 26aa2ed4c..000000000 --- a/backend/s3-sync.sh +++ /dev/null @@ -1,37 +0,0 @@ 
-#!/bin/bash - -# This requires: cf bind-service gsa-fac fac-private-s3 -c '{"additional_instances": ["backups"]}' - -# Grab AWS cli -# awscli.amazonaws.com needs to be added to the proxy allow list -curl -x $https_proxy -L "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" -unzip awscliv2.zip && rm awscliv2.zip -./aws/install -i ~/usr -b ~/bin -/home/vcap/app/bin/aws --version - -# Get the fac-private-s3 bucket -export S3CREDS="$(echo $VCAP_SERVICES|jq -r '.s3')" -export FACPRIVS3="$(echo $S3CREDS|jq '.[]|select(.name=="fac-private-s3")'|jq '.credentials')" -export AWS_ACCESS_KEY_ID="$(echo "$FACPRIVS3"|jq -r '.access_key_id')" -export AWS_SECRET_ACCESS_KEY="$(echo "$FACPRIVS3"|jq -r '.secret_access_key')" -export FAC_MEDIA_BUCKET="$(echo "$FACPRIVS3"|jq -r '.bucket')" -export AWS_DEFAULT_REGION='us-gov-west-1' - -# Get the backups bucket -export FACBACKUPS="$(echo $S3CREDS|jq '.[]|select(.name=="backups")'|jq '.credentials')" -export BACKUPS_BUCKET="$(echo "$FACBACKUPS"|jq -r '.bucket')" - -date=$(date +%Y%m%d%H%M) - -# Grab the s3 tar binary -# objects.githubusercontent.com needs to be added to the proxy allow list -curl -x $https_proxy -L "https://github.com/awslabs/amazon-s3-tar-tool/releases/download/v1.0.14/s3tar-linux-amd64.zip" -o "s3tar-linux-amd64.zip" -unzip s3tar-linux-amd64.zip && rm s3tar-linux-amd64.zip - -# Unset the proxy so that s3tar-tool and aws-cli can function. Without doing this, none of the subsequent commands will work -unset https_proxy - -# Sync the whole media bucket to backup. -# This provides us with a current backup of all the files individually. -# If nothing has changed, this runs really quickly. -/home/vcap/app/bin/aws s3 sync s3://${FAC_MEDIA_BUCKET} s3://${BACKUPS_BUCKET} diff --git a/backend/s3-tar-snapshot.sh b/backend/s3-tar-snapshot.sh deleted file mode 100755 index 8c7e41e46..000000000 --- a/backend/s3-tar-snapshot.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -# This requires: cf bind-service gsa-fac fac-private-s3 -c '{"additional_instances": ["backups"]}' - -# Grab AWS cli -# awscli.amazonaws.com needs to be added to the proxy allow list -curl -x $https_proxy -L "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" -unzip awscliv2.zip && rm awscliv2.zip -./aws/install -i ~/usr -b ~/bin -/home/vcap/app/bin/aws --version - -# Get the fac-private-s3 bucket -export S3CREDS="$(echo $VCAP_SERVICES|jq -r '.s3')" -export FACPRIVS3="$(echo $S3CREDS|jq '.[]|select(.name=="fac-private-s3")'|jq '.credentials')" -export AWS_ACCESS_KEY_ID="$(echo "$FACPRIVS3"|jq -r '.access_key_id')" -export AWS_SECRET_ACCESS_KEY="$(echo "$FACPRIVS3"|jq -r '.secret_access_key')" -export FAC_MEDIA_BUCKET="$(echo "$FACPRIVS3"|jq -r '.bucket')" -export AWS_DEFAULT_REGION='us-gov-west-1' - -# Get the backups bucket -export FACBACKUPS="$(echo $S3CREDS|jq '.[]|select(.name=="backups")'|jq '.credentials')" -export BACKUPS_BUCKET="$(echo "$FACBACKUPS"|jq -r '.bucket')" - -date=$(date +%Y%m%d%H%M) - -# Grab the s3 tar binary -# objects.githubusercontent.com needs to be added to the proxy allow list -curl -x $https_proxy -L "https://github.com/awslabs/amazon-s3-tar-tool/releases/download/v1.0.14/s3tar-linux-amd64.zip" -o "s3tar-linux-amd64.zip" -unzip s3tar-linux-amd64.zip && rm s3tar-linux-amd64.zip - -# Unset the proxy so that s3tar-tool and aws-cli can function. 
Without doing this, none of the subsequent commands will work
-unset https_proxy
-
-# Create a single tar in the backups bucket
-./s3tar-linux-amd64 --region $AWS_DEFAULT_REGION -cvf s3://${BACKUPS_BUCKET}/mediabackups/$date/archive.tar s3://${FAC_MEDIA_BUCKET} --storage-class INTELLIGENT_TIERING
-# List out the contents
-./s3tar-linux-amd64 --region $AWS_DEFAULT_REGION -tvf s3://${BACKUPS_BUCKET}/mediabackups/$date/archive.tar
diff --git a/docs/backups_and_restores.md b/docs/backups_and_restores.md
new file mode 100644
index 000000000..55c00bee1
--- /dev/null
+++ b/docs/backups_and_restores.md
@@ -0,0 +1,89 @@
+### Preparation steps
+```sh
+cf t -o <org> -s <space>
+```
+
+Bind the backups bucket to the application
+```sh
+cf bind-service gsa-fac backups
+```
+
+Restart the app so changes occur and wait for the instance to come back up
+```sh
+cf restart gsa-fac --strategy rolling
+```
+
+Unbind the existing fac-private-s3 bucket from the app
+```sh
+cf unbind-service gsa-fac fac-private-s3
+```
+
+Rebind the fac-private-s3 bucket with the backups bucket as an additional instance
+```sh
+cf bind-service gsa-fac fac-private-s3 -c '{"additional_instances": ["backups"]}'
+```
+
+Restart the app so changes occur and wait for the instance to come back up
+```sh
+cf restart gsa-fac --strategy rolling
+```
+
+### Database Backups
+
+Information regarding the fac-backup-utility can be found [at the repository](https://github.com/GSA-TTS/fac-backup-utility).
+Database backups occur in the following ways:
+1. An initial backup, where a backup has not been run in the target environment. The `initial_backup` input is important: when it runs the `db_to_db` command, it will not truncate the target table, as the table does not exist in the destination database.
+```bash
+./fac-backup-util.sh v0.1.3 initial_backup
+# Curl the utility
+# Install AWS
+# DB to S3 table dump (backups)
+# DB to DB table dump (fac-db -> fac-snapshot-db) [No Truncate, as tables dont exist]
+# AWS S3 sync (fac-private-s3 -> backups)
+```
+
+2. A deploy backup, where the `db_to_db` function is not called. This is a standard backup strategy before the application deploys, to ensure the contents of the primary s3 bucket are synced to the backups bucket and a table dump is stored in the backups bucket.
+```bash
+./fac-backup-util.sh v0.1.3 deploy_backup
+# Curl the utility
+# Install AWS
+# DB to S3 table dump (backups)
+# AWS S3 sync (fac-private-s3 -> backups)
+```
+
+3. A scheduled backup is run every two hours, across each environment, ensuring that we have a clean backup in s3 and rds, and that the bucket contents are in sync.
+```bash
+./fac-backup-util.sh v0.1.3 scheduled_backup
+# Curl the utility
+# Install AWS
+# DB to S3 table dump (fac-db -> backups)
+# DB to DB table dump (fac-db -> fac-snapshot-db) [Truncate target table before dump]
+# AWS S3 sync (fac-private-s3 -> backups)
+```
+
+### Restoring
+Restoring from backups can be run via workflow by designated individuals. There are two paths we can restore from.
+
+1. S3 Restore takes an `operation-mm-DD-HH` input (ex. `scheduled-06-04-10`), which is required for the backups to be restored. The utility looks in `s3://${bucket}/backups/operation-mm-DD-HH/` for its table dumps, and without supplying the target backups, it will not restore. Once it does a `--data-only` restoration, it will then sync the files from the backups bucket to the application bucket. We do this to ensure the contents of the application bucket are up to date, relative to the data in the database.
We know that if we use the latest folder in `/backups/` then the contents of the s3 bucket are the latest available from the prior backup.
+```bash
+./fac-restore-util.sh v0.1.3 s3_restore scheduled-06-04-10
+# Curl the utility
+# Install AWS
+# DB to S3 table dump (backups -> fac-db) [Truncate target table before --data-only pg_restore]
+# AWS S3 sync (backups -> fac-private-s3)
+```
+Potential options for restoration:
+```bash
+initial-YYYYmmddHHMM
+scheduled-mm-dd-HH
+daily-mm-dd
+```
+
+2. Database-to-database restoration can also occur, using `psql` to dump the tables from the cold store database to the live database.
+```bash
+./fac-restore-util.sh v0.1.3 db_restore
+# Curl the utility
+# Install AWS
+# DB to DB table dump (fac-snapshot-db -> fac-db) [Truncate target table before dump]
+# AWS S3 sync (fac-private-s3 -> backups)
+```
diff --git a/docs/dbbackups.md b/docs/dbbackups.md
deleted file mode 100644
index 9a0dddb9f..000000000
--- a/docs/dbbackups.md
+++ /dev/null
@@ -1,69 +0,0 @@
-# Database Backups
-
-Information regarding the django utility can be found [on the documentation page](https://django-dbbackup.readthedocs.io/en/master/commands.html)
-Database backups occur in the following ways:
-1. Django backups
-```bash
-python manage.py dbbbackup
-```
-2. Django restores
-```bash
-python manage.py dbrestore
-```
-3. Backups in the prod environment occur every deployment, [before the most recent code is applied](https://github.com/GSA-TTS/FAC/blob/fd3a59287d58aec06a78d6da3b42a5def8fc9c98/.github/workflows/deploy-application.yml#L72-L100)
-4. Manual steps are listed in the following document for where to catalog backups
- * [Deploying](./deploying.md)
- * Login via CF and tail the logs during a deployment (before it gets to deploy application stage)
- * Post the most recent dbbackup and mediabackup file names in https://github.com/GSA-TTS/FAC/issues/2221
-```bash
-cf login -a api.fr.cloud.gov --sso
-Select an org:
-1. gsa-tts-oros-fac
-Select a space:
-5.
production -cf logs gsa-fac -``` - -# Media Backups -```sh -cf t -s -``` - -Bind the backups bucket to the application -```sh -cf bind-service gsa-fac backups -``` - -Restart the app so changes occur and wait for the instance to come back up -```sh -cf restart gsa-fac --strategy rolling -``` - -Unbind the existing fac-private-s3 bucket from the app -```sh -cf unbind-service gsa-fac fac-private-s3 -``` - -Rebind the fac-private-s3 bucket with the backups bucket as an additional instance -```sh -cf bind-service gsa-fac fac-private-s3 -c '{"additional_instances": ["backups"]}' -``` - -Restart the app so changes occur and wait for the instance to come back up -```sh -cf restart gsa-fac --strategy rolling -``` - -Running things by hand: -[s3-sync](../backend/s3-sync.sh) -[s3-tar-snapshot](../backend/s3-tar-snapshot.sh) - -Tail the logs on the app -```sh -cf logs gsa-fac | grep "APP/TASK/media_backup" -``` - -Run the media backups via cf-tasks -```sh -cf run-task gsa-fac -k 2G -m 2G --name media_backup --command "./s3-sync.sh" -``` diff --git a/terraform/dev/dev.tf b/terraform/dev/dev.tf index 6067a043c..c2fd7cafa 100644 --- a/terraform/dev/dev.tf +++ b/terraform/dev/dev.tf @@ -20,3 +20,13 @@ module "dev" { } ) } + +module "dev-backups-bucket" { + source = "github.com/gsa-tts/terraform-cloudgov//s3?ref=v0.9.1" + + cf_org_name = var.cf_org_name + cf_space_name = "dev" + name = "backups" + s3_plan_name = "basic" + tags = ["s3"] +} diff --git a/terraform/dev/variables.tf b/terraform/dev/variables.tf index 5dc957c79..7dc8fe58e 100644 --- a/terraform/dev/variables.tf +++ b/terraform/dev/variables.tf @@ -1,3 +1,9 @@ +variable "cf_org_name" { + type = string + description = "name of the organization to configure" + default = "gsa-tts-oros-fac" +} + variable "new_relic_license_key" { type = string description = "the license key to use when setting up the New Relic agent" @@ -16,4 +22,4 @@ variable "new_relic_api_key" { variable "pgrst_jwt_secret" { type = string description = "the JWT signing secret for validating JWT tokens from api.data.gov" -} \ No newline at end of file +} diff --git a/terraform/meta/meta.tf b/terraform/meta/meta.tf index 28fc325d6..09affc85d 100644 --- a/terraform/meta/meta.tf +++ b/terraform/meta/meta.tf @@ -1,67 +1,68 @@ -# Since we're not platform admins, we need to look for user details in the -# context of our specific org. -data "cloudfoundry_org" "org" { - name = local.org_name -} - -# We need to include the meta deployer user in the set of users with the -# SpaceDeveloper role so it can manage the deployer service and binding in -# each space. We need to add it using the user ID rather than username in order -# to ensure it has the expected permissions. See -# https://github.com/cloudfoundry-community/terraform-provider-cloudfoundry/issues/436 -data "cloudfoundry_user" "meta_deployer" { - name = var.cf_user - org_id = data.cloudfoundry_org.org.id -} - -module "environments" { - for_each = local.spaces - source = "./bootstrap-env" - name = each.key - org_name = local.org_name - developers = concat(local.developers, [data.cloudfoundry_user.meta_deployer.id]) - managers = local.managers - reponame = "GSA-TTS/FAC" - allow_ssh = lookup(each.value, "allow_ssh", true) - populate_creds_locally = var.populate_creds_locally - # Apply egress ASGs if explicitly requested - # - # NOTE: This implies that we should have a 1:1 mapping between environments - # and spaces. But that doesn't seem to be the case... 
We want to be able to - # specify a staging configuration that spans across the `staging` and - # `staging-egress` spaces, eg to set up network policies and inject client - # credentials. So I think we'll be removing this option and having the - # bootstrap-env module manage both spaces in a future PR! - asgs = lookup(each.value, "allow_egress", false) ? tolist(local.egress_asgs) : tolist(local.internal_asgs) -} - -locals { - # Filters the list of spaces with a value of true for "uses_backups". We only want to share the bucket to those spaces. - spaces_that_use_backups = join(" ", [for key, config in local.spaces : lookup(config, "uses_backups", false) ? key : ""]) -} - -module "s3-backups" { - source = "github.com/18f/terraform-cloudgov//s3?ref=v0.5.1" - - cf_org_name = local.org_name - # TODO: This should be the key for the first space that says "is_production = - # true" rather than being hardcoded - cf_space_name = "production" - name = "backups" - s3_plan_name = "basic" -} - -# TODO: We should have a corresponding "unshar-backup-from-spaces" resource, in -# case a space is removed from the list - -# resource "null_resource" "share-backup-to-spaces" { -# provisioner "local-exec" { -# environment = { -# SPACES = "${local.spaces_that_use_backups}" -# } -# command = "for space in $SPACES ; do cf share-service backups -s $space; done" -# } -# depends_on = [ -# module.s3-backups -# ] -# } +# Since we're not platform admins, we need to look for user details in the +# context of our specific org. +data "cloudfoundry_org" "org" { + name = local.org_name +} + +# We need to include the meta deployer user in the set of users with the +# SpaceDeveloper role so it can manage the deployer service and binding in +# each space. We need to add it using the user ID rather than username in order +# to ensure it has the expected permissions. See +# https://github.com/cloudfoundry-community/terraform-provider-cloudfoundry/issues/436 +data "cloudfoundry_user" "meta_deployer" { + name = var.cf_user + org_id = data.cloudfoundry_org.org.id +} + +module "environments" { + for_each = local.spaces + source = "./bootstrap-env" + name = each.key + org_name = local.org_name + developers = concat(local.developers, [data.cloudfoundry_user.meta_deployer.id]) + managers = local.managers + reponame = "GSA-TTS/FAC" + allow_ssh = lookup(each.value, "allow_ssh", true) + populate_creds_locally = var.populate_creds_locally + # Apply egress ASGs if explicitly requested + # + # NOTE: This implies that we should have a 1:1 mapping between environments + # and spaces. But that doesn't seem to be the case... We want to be able to + # specify a staging configuration that spans across the `staging` and + # `staging-egress` spaces, eg to set up network policies and inject client + # credentials. So I think we'll be removing this option and having the + # bootstrap-env module manage both spaces in a future PR! + asgs = lookup(each.value, "allow_egress", false) ? tolist(local.egress_asgs) : tolist(local.internal_asgs) +} + +locals { + # Filters the list of spaces with a value of true for "uses_backups". We only want to share the bucket to those spaces. + spaces_that_use_backups = join(" ", [for key, config in local.spaces : lookup(config, "uses_backups", false) ? 
key : ""]) +} + +module "s3-backups" { + source = "github.com/gsa-tts/terraform-cloudgov//s3?ref=v0.9.1" + + cf_org_name = local.org_name + # TODO: This should be the key for the first space that says "is_production = + # true" rather than being hardcoded + cf_space_name = "production" + name = "backups" + s3_plan_name = "basic" + tags = ["s3"] +} + +# TODO: We should have a corresponding "unshar-backup-from-spaces" resource, in +# case a space is removed from the list + +# resource "null_resource" "share-backup-to-spaces" { +# provisioner "local-exec" { +# environment = { +# SPACES = "${local.spaces_that_use_backups}" +# } +# command = "for space in $SPACES ; do cf share-service backups -s $space; done" +# } +# depends_on = [ +# module.s3-backups +# ] +# } diff --git a/terraform/preview/preview.tf b/terraform/preview/preview.tf index 5bc758ebb..1fb6cd811 100644 --- a/terraform/preview/preview.tf +++ b/terraform/preview/preview.tf @@ -21,6 +21,16 @@ module "preview" { ) } +module "preview-backups-bucket" { + source = "github.com/gsa-tts/terraform-cloudgov//s3?ref=v0.9.1" + + cf_org_name = var.cf_org_name + cf_space_name = "preview" + name = "backups" + s3_plan_name = "basic" + tags = ["s3"] +} + import { to = module.preview.module.clamav.cloudfoundry_app.clamav_api id = "ed9b5108-1e31-44b8-9ba0-375e091c5589" diff --git a/terraform/preview/variables.tf b/terraform/preview/variables.tf index 2d752a800..7dc8fe58e 100644 --- a/terraform/preview/variables.tf +++ b/terraform/preview/variables.tf @@ -1,3 +1,9 @@ +variable "cf_org_name" { + type = string + description = "name of the organization to configure" + default = "gsa-tts-oros-fac" +} + variable "new_relic_license_key" { type = string description = "the license key to use when setting up the New Relic agent" diff --git a/terraform/shared/modules/cg-logshipper/cg-logshipper.tf b/terraform/shared/modules/cg-logshipper/cg-logshipper.tf index 5163b25f4..d35309a8c 100644 --- a/terraform/shared/modules/cg-logshipper/cg-logshipper.tf +++ b/terraform/shared/modules/cg-logshipper/cg-logshipper.tf @@ -8,7 +8,7 @@ data "cloudfoundry_space" "apps" { } module "s3-logshipper-storage" { - source = "github.com/18f/terraform-cloudgov//s3?ref=v0.9.1" + source = "github.com/gsa-tts/terraform-cloudgov//s3?ref=v0.9.1" cf_org_name = var.cf_org_name cf_space_name = var.cf_space_name diff --git a/terraform/shared/modules/env/clamav.tf b/terraform/shared/modules/env/clamav.tf index 831a96dbf..f8cc1d22d 100644 --- a/terraform/shared/modules/env/clamav.tf +++ b/terraform/shared/modules/env/clamav.tf @@ -8,7 +8,7 @@ data "docker_registry_image" "clamav" { } module "clamav" { - source = "github.com/18f/terraform-cloudgov//clamav?ref=v0.9.0" + source = "github.com/gsa-tts/terraform-cloudgov//clamav?ref=v0.9.0" # This generates eg "fac-av-staging.apps.internal", avoiding collisions with routes for other projects and spaces name = local.clam_name @@ -28,7 +28,7 @@ module "clamav" { } module "file_scanner_clamav" { - source = "github.com/18f/terraform-cloudgov//clamav?ref=v0.9.0" + source = "github.com/gsa-tts/terraform-cloudgov//clamav?ref=v0.9.0" # This generates eg "fac-av-staging-fs.apps.internal", avoiding collisions with routes for other projects and spaces name = local.fs_clam_name diff --git a/terraform/shared/modules/env/env.tf b/terraform/shared/modules/env/env.tf index 53963f0ff..8eb831953 100644 --- a/terraform/shared/modules/env/env.tf +++ b/terraform/shared/modules/env/env.tf @@ -1,47 +1,59 @@ -module "database" { - source = 
"github.com/18f/terraform-cloudgov//database?ref=v0.9.1" - - cf_org_name = var.cf_org_name - cf_space_name = var.cf_space_name - name = "fac-db" - recursive_delete = var.recursive_delete - tags = ["rds"] - rds_plan_name = var.database_plan - json_params = var.json_params -} - -module "s3-public" { - source = "github.com/18f/terraform-cloudgov//s3?ref=v0.9.1" - - cf_org_name = var.cf_org_name - cf_space_name = var.cf_space_name - name = "fac-public-s3" - recursive_delete = var.recursive_delete - s3_plan_name = "basic-public" - tags = ["s3"] -} - -module "s3-private" { - source = "github.com/18f/terraform-cloudgov//s3?ref=v0.9.1" - - cf_org_name = var.cf_org_name - cf_space_name = var.cf_space_name - name = "fac-private-s3" - recursive_delete = var.recursive_delete - s3_plan_name = "basic" - tags = ["s3"] -} - -# Stuff used for apps in this space -data "cloudfoundry_space" "apps" { - org_name = var.cf_org_name - name = var.cf_space_name -} - -data "cloudfoundry_domain" "public" { - name = "app.cloud.gov" -} - -data "cloudfoundry_domain" "private" { - name = "apps.internal" -} +module "database" { + source = "github.com/gsa-tts/terraform-cloudgov//database?ref=v0.9.1" + + cf_org_name = var.cf_org_name + cf_space_name = var.cf_space_name + name = "fac-db" + recursive_delete = var.recursive_delete + tags = ["rds"] + rds_plan_name = var.database_plan + json_params = var.json_params +} + +module "snapshot-database" { + source = "github.com/gsa-tts/terraform-cloudgov//database?ref=v0.9.1" + + cf_org_name = var.cf_org_name + cf_space_name = var.cf_space_name + name = "fac-snapshot-db" + recursive_delete = var.recursive_delete + tags = ["rds"] + rds_plan_name = var.database_plan + json_params = var.json_params +} + +module "s3-public" { + source = "github.com/gsa-tts/terraform-cloudgov//s3?ref=v0.9.1" + + cf_org_name = var.cf_org_name + cf_space_name = var.cf_space_name + name = "fac-public-s3" + recursive_delete = var.recursive_delete + s3_plan_name = "basic-public" + tags = ["s3"] +} + +module "s3-private" { + source = "github.com/gsa-tts/terraform-cloudgov//s3?ref=v0.9.1" + + cf_org_name = var.cf_org_name + cf_space_name = var.cf_space_name + name = "fac-private-s3" + recursive_delete = var.recursive_delete + s3_plan_name = "basic" + tags = ["s3"] +} + +# Stuff used for apps in this space +data "cloudfoundry_space" "apps" { + org_name = var.cf_org_name + name = var.cf_space_name +} + +data "cloudfoundry_domain" "public" { + name = "app.cloud.gov" +} + +data "cloudfoundry_domain" "private" { + name = "apps.internal" +} diff --git a/terraform/shared/modules/scanner/scanner.tf b/terraform/shared/modules/scanner/scanner.tf index 77bad6412..4a2fbdd26 100644 --- a/terraform/shared/modules/scanner/scanner.tf +++ b/terraform/shared/modules/scanner/scanner.tf @@ -31,7 +31,7 @@ resource "cloudfoundry_user_provided_service" "clam" { } module "quarantine" { - source = "github.com/18f/terraform-cloudgov//s3?ref=v0.9.1" + source = "github.com/gsa-tts/terraform-cloudgov//s3?ref=v0.9.1" cf_org_name = var.cf_org_name cf_space_name = var.cf_space_name diff --git a/terraform/staging/staging.tf b/terraform/staging/staging.tf index 43c1d4026..4627aec0b 100644 --- a/terraform/staging/staging.tf +++ b/terraform/staging/staging.tf @@ -21,3 +21,12 @@ module "staging" { ) } +module "staging-backups-bucket" { + source = "github.com/gsa-tts/terraform-cloudgov//s3?ref=v0.9.1" + + cf_org_name = var.cf_org_name + cf_space_name = "staging" + name = "backups" + s3_plan_name = "basic" + tags = ["s3"] +} diff --git 
a/terraform/staging/variables.tf b/terraform/staging/variables.tf index 5dc957c79..7dc8fe58e 100644 --- a/terraform/staging/variables.tf +++ b/terraform/staging/variables.tf @@ -1,3 +1,9 @@ +variable "cf_org_name" { + type = string + description = "name of the organization to configure" + default = "gsa-tts-oros-fac" +} + variable "new_relic_license_key" { type = string description = "the license key to use when setting up the New Relic agent" @@ -16,4 +22,4 @@ variable "new_relic_api_key" { variable "pgrst_jwt_secret" { type = string description = "the JWT signing secret for validating JWT tokens from api.data.gov" -} \ No newline at end of file +}
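
The new `fac-backup-util.yml` workflow is `workflow_dispatch`-only, so a one-off backup can also be kicked off from the command line rather than the Actions UI. A minimal sketch using the GitHub CLI, assuming `gh` is authenticated against GSA-TTS/FAC and the workflow exists on the branch passed to `--ref`; the input names and values mirror the workflow definition above:

```bash
# Trigger a one-off deploy-style backup of the dev environment.
gh workflow run fac-backup-util.yml \
  --ref main \
  -f environment=dev \
  -f util_version=v0.1.3 \
  -f backup_operation=deploy_backup

# Follow the run that was just queued for this workflow.
gh run watch "$(gh run list --workflow=fac-backup-util.yml --limit 1 --json databaseId --jq '.[0].databaseId')"
```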