From 680d99d5f0da59e813c1640cf3eb8dca44296ae0 Mon Sep 17 00:00:00 2001 From: Stijn van der Vegt Date: Sun, 24 Jan 2021 20:02:41 +0100 Subject: [PATCH 01/15] add github workflows with gitops flow --- .github/workflows/gitops.yml | 78 ++++++++++++++++++++++++++++++++++++ gitops_push | 24 +++++++++++ 2 files changed, 102 insertions(+) create mode 100644 .github/workflows/gitops.yml create mode 100644 gitops_push diff --git a/.github/workflows/gitops.yml b/.github/workflows/gitops.yml new file mode 100644 index 00000000..fc6aa8c6 --- /dev/null +++ b/.github/workflows/gitops.yml @@ -0,0 +1,78 @@ +name: GitopsDev + +# Run this workflow every time a new commit pushed to your repository +on: + workflow_dispatch: + inputs: + environmentValuesFile: + description: 'Gitops environment values file' + required: true + default: 'dev' + push: + branches: + - master + - development + - feature/* + - release/* + - release + +jobs: + gitops: + environment: gitops + env: + HELM_REPO_NAME: openstad-kubernetes + HELM_CHART_FOLDER: k8s/openstad + GIT_USER_EMAIL: github@ci.push + GIT_USER_NAME: GitHub + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + HELM_REPO: ${{ secrets.HELM_REPO }} + HELM_REPO_WITH_TOKEN: ${{ secrets.HELM_REPO_WITH_TOKEN }} + GITOPS_RELEASE_BRANCH: ${{ secrets.GITOPS_RELEASE_BRANCH }} + BRANCH_REF: ${{ github.ref }} + GITOPS_VALUES_FILE: k8s/openstad/environments/dev.values.yaml + + name: gitops commit + runs-on: ubuntu-latest + + services: + docker: + image: docker + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Set environment to acc + id: acc_values_file + shell: bash + if: contains(github.ref, 'release') + run: echo "GITOPS_VALUES_FILE=k8s/openstad/environments/acc.values.yaml" >> $GITHUB_ENV + + - name: Set environment to production + id: prod_values_file + shell: bash + if: startsWith(github.ref, 'refs/tags/') || github.ref == 'refs/heads/master' + run: echo "GITOPS_VALUES_FILE=k8s/openstad/environments/prod.values.yaml" >> $GITHUB_ENV + + - name: Set commit SHA & current branch + id: vars + shell: bash + run: | + echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" + echo "::set-output name=current_branch::$(git branch --show-current | sed "s/\//-/g")" + + - name: Install yq + run: sudo snap install yq --channel=v3/stable + + - name: Run build script + run: docker build -t ${{ secrets.DOCKER_PUBLIC_USERNAME }}/${{ secrets.DOCKER_IMAGE_NAME }}:${{ steps.vars.outputs.current_branch }}-${{ steps.vars.outputs.sha_short }}-${{ github.run_id}} . 
+ shell: bash + + - name: Run docker push script + run: | + sudo chmod a+x ./gitops_push + ./gitops_push + shell: bash + env: + IMAGE_TAG: ${{ secrets.DOCKER_PUBLIC_USERNAME }}/${{ secrets.DOCKER_IMAGE_NAME }}:${{ steps.vars.outputs.current_branch }}-${{ steps.vars.outputs.sha_short }}-${{ github.run_id}} diff --git a/gitops_push b/gitops_push new file mode 100644 index 00000000..1d5d5e39 --- /dev/null +++ b/gitops_push @@ -0,0 +1,24 @@ +#!/bin/bash + +echo "DOCKER LOGIN" +echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin; +echo "DOCKER PUSH TAG" +echo ${IMAGE_TAG} +docker push ${IMAGE_TAG} + +git config --global user.email ${GIT_USER_EMAIL} +git config --global user.name ${GIT_USER_NAME} + +git clone ${HELM_REPO} && cd ${HELM_REPO_NAME} && \ + +git remote add origin-ci ${HELM_REPO_WITH_TOKEN} > /dev/null 2>&1 + +git checkout ${GITOPS_RELEASE_BRANCH} + +/snap/bin/yq write -i ${GITOPS_VALUES_FILE} api.deploymentContainer.image ${IMAGE_TAG} && \ + +git add ${GITOPS_VALUES_FILE} && \ + +git commit -am "Release ${IMAGE_TAG}" && \ + +git push --quiet --set-upstream origin-ci ${GITOPS_RELEASE_BRANCH} From b0765849351b3509538544c5f34cc6c53216069e Mon Sep 17 00:00:00 2001 From: Stijn van der Vegt Date: Mon, 25 Jan 2021 09:06:45 +0100 Subject: [PATCH 02/15] Update .github/workflows/gitops.yml Co-authored-by: Rudi van Hierden --- .github/workflows/gitops.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/gitops.yml b/.github/workflows/gitops.yml index fc6aa8c6..69d37a61 100644 --- a/.github/workflows/gitops.yml +++ b/.github/workflows/gitops.yml @@ -66,7 +66,7 @@ jobs: run: sudo snap install yq --channel=v3/stable - name: Run build script - run: docker build -t ${{ secrets.DOCKER_PUBLIC_USERNAME }}/${{ secrets.DOCKER_IMAGE_NAME }}:${{ steps.vars.outputs.current_branch }}-${{ steps.vars.outputs.sha_short }}-${{ github.run_id}} . + run: docker build -t ${{ secrets.DOCKER_PUBLIC_USERNAME }}/${{ secrets.DOCKER_IMAGE_NAME }}:${{ steps.vars.outputs.current_branch }}-${{ steps.vars.outputs.sha_short }}-${{ github.run_id }} . 
shell: bash - name: Run docker push script From 8e62242836642677256adfdf190815c668c5f735 Mon Sep 17 00:00:00 2001 From: Stijn van der Vegt Date: Mon, 25 Jan 2021 09:06:50 +0100 Subject: [PATCH 03/15] Update .github/workflows/gitops.yml Co-authored-by: Rudi van Hierden --- .github/workflows/gitops.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/gitops.yml b/.github/workflows/gitops.yml index 69d37a61..931c25d4 100644 --- a/.github/workflows/gitops.yml +++ b/.github/workflows/gitops.yml @@ -1,4 +1,4 @@ -name: GitopsDev +name: Gitops # Run this workflow every time a new commit pushed to your repository on: From 56e48d62f22a397dc64af5532209c9b2971c39b8 Mon Sep 17 00:00:00 2001 From: Stijn van der Vegt Date: Mon, 25 Jan 2021 09:06:55 +0100 Subject: [PATCH 04/15] Update .github/workflows/gitops.yml Co-authored-by: Rudi van Hierden --- .github/workflows/gitops.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/gitops.yml b/.github/workflows/gitops.yml index 931c25d4..f78f16d4 100644 --- a/.github/workflows/gitops.yml +++ b/.github/workflows/gitops.yml @@ -75,4 +75,4 @@ jobs: ./gitops_push shell: bash env: - IMAGE_TAG: ${{ secrets.DOCKER_PUBLIC_USERNAME }}/${{ secrets.DOCKER_IMAGE_NAME }}:${{ steps.vars.outputs.current_branch }}-${{ steps.vars.outputs.sha_short }}-${{ github.run_id}} + IMAGE_TAG: ${{ secrets.DOCKER_PUBLIC_USERNAME }}/${{ secrets.DOCKER_IMAGE_NAME }}:${{ steps.vars.outputs.current_branch }}-${{ steps.vars.outputs.sha_short }}-${{ github.run_id }} From 1d8fba70a2fc2122db1789e40baa5b0af9e325a5 Mon Sep 17 00:00:00 2001 From: Rudi van Hierden Date: Wed, 12 May 2021 09:47:28 +0200 Subject: [PATCH 05/15] Run gitops actions on release-* branches --- .github/workflows/gitops.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/gitops.yml b/.github/workflows/gitops.yml index fc6aa8c6..2f00b60a 100644 --- a/.github/workflows/gitops.yml +++ b/.github/workflows/gitops.yml @@ -14,6 +14,7 @@ on: - development - feature/* - release/* + - release-* - release jobs: From 5c94461051e5705452ac5cdbc284fadbefcf7836 Mon Sep 17 00:00:00 2001 From: Rudi van Hierden Date: Fri, 8 Oct 2021 15:56:39 +0200 Subject: [PATCH 06/15] Add formId to submissions --- src/models/Submission.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/models/Submission.js b/src/models/Submission.js index d07d705d..6d953bcd 100644 --- a/src/models/Submission.js +++ b/src/models/Submission.js @@ -12,6 +12,11 @@ module.exports = function( db, sequelize, DataTypes ) { type : DataTypes.INTEGER, allowNull : true }, + + formId: { + type : DataTypes.TEXT, + allowNull : true + }, status: { type : DataTypes.ENUM('approved','pending','unapproved'), From a4fad313435166d82d31528fb0502d6011dc7692 Mon Sep 17 00:00:00 2001 From: Rudi van Hierden Date: Fri, 8 Oct 2021 15:57:21 +0200 Subject: [PATCH 07/15] Make submissions listable and viewable This allows the react-admin to list and view the submissions, and generate a CSV export. 
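As an illustration of the new scopes (a sketch only — the require path, the site id and the form id below are placeholders, not part of this change):

    // List one form's approved submissions for a site using the new
    // 'forSiteId' and 'filter' scopes, mirroring what the list route does.
    const db = require('./src/db');

    async function listFormSubmissions(siteId) {
      return db.Submission
        .scope(
          'defaultScope',
          { method: ['forSiteId', siteId] },
          { method: ['filter', { formId: 'example-form', status: 'approved' }, null] }
        )
        .findAll();
    }

Over HTTP the same include filter arrives as the JSON `filter` query parameter, which the list route parses and hands to the scope.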
Related ticket: https://trello.com/c/MHQ88xsW/81-inzendingen-resource-form-submission-beschikbaar-maken-voor-export --- src/models/Submission.js | 75 ++++++++++++++++++++++++++++++++++++ src/routes/api/submission.js | 22 +++++++---- 2 files changed, 89 insertions(+), 8 deletions(-) diff --git a/src/models/Submission.js b/src/models/Submission.js index 6d953bcd..c7aae1a2 100644 --- a/src/models/Submission.js +++ b/src/models/Submission.js @@ -1,4 +1,5 @@ const config = require('config'); +var Sequelize = require('sequelize'); module.exports = function( db, sequelize, DataTypes ) { var Submission = sequelize.define('submission', { @@ -83,6 +84,80 @@ module.exports = function( db, sequelize, DataTypes ) { attributes : ['role', 'nickName', 'firstName', 'lastName', 'email'] }] }, + forSiteId: function (siteId) { + return { + where: { + siteId: siteId, + } + }; + }, + filter: function (filtersInclude, filtersExclude) { + let conditions = { + [Sequelize.Op.and]: [] + }; + + const filterKeys = [ + { + 'key': 'id' + }, + { + 'key': 'status' + }, + { + 'key': 'formId' + }, + ]; + + filterKeys.forEach((filter, i) => { + //first add include filters + if (filtersInclude) { + let filterValue = filtersInclude[filter.key]; + + if (filtersInclude[filter.key]) { + if (filter.extraData) { + filterValue = Array.isArray(filterValue) ? filterValue : [filterValue]; + + const escapedKey = sequelize.escape(`$.${filter.key}`); + filterValue.forEach((value, key) => { + const escapedValue = sequelize.escape(value); + conditions[Sequelize.Op.and].push({ + [Sequelize.Op.and]: sequelize.literal(`extraData->${escapedKey}=${escapedValue}`) + }); + }); + + } else { + conditions[Sequelize.Op.and].push({ + [filter.key]: filterValue + }); + } + } + } + + //add exclude filters + if (filtersExclude) { + let excludeFilterValue = filtersExclude[filter.key]; + + if (excludeFilterValue) { + if (filter.extraData) { + excludeFilterValue = Array.isArray(excludeFilterValue) ? 
excludeFilterValue : [excludeFilterValue]; + + //filter out multiple conditions + const escapedKey = sequelize.escape(`$.${filter.key}`); + excludeFilterValue.forEach((value, key) => { + const escapedValue = sequelize.escape(value); + conditions[Sequelize.Op.and].push({ + [Sequelize.Op.and]: sequelize.literal(`extraData->${escapedKey}!=${escapedValue}`) + }); + }) + } + } + } + }); + + return { + where: sequelize.and(conditions) + } + }, }; } diff --git a/src/routes/api/submission.js b/src/routes/api/submission.js index a2b2fb0d..d5e80eef 100755 --- a/src/routes/api/submission.js +++ b/src/routes/api/submission.js @@ -13,16 +13,21 @@ router.route('/') // -------------- .get(auth.can('Submission', 'list')) .get(pagination.init) - .get(function(req, res, next) { + .get(function (req, res, next) { let where = {}; req.scope = ['defaultScope']; + + if (req.query.filter || req.query.exclude) { + req.scope.push({method: ['filter', JSON.parse(req.query.filter), req.query.exclude]}); + } + db.Submission .scope(...req.scope) - .findAndCountAll({ where, offset: req.dbQuery.offset, limit: req.dbQuery.limit }) - .then(function( result ) { - req.results = result.rows; - req.dbQuery.count = result.count; - return next(); + .findAndCountAll({where, offset: req.dbQuery.offset, limit: req.dbQuery.limit, order: req.dbQuery.order}) + .then(function (result) { + req.results = result.rows; + req.dbQuery.count = result.count; + return next(); }) .catch(next); }) @@ -64,8 +69,9 @@ router.route('/') db.Submission .scope(...req.scope) - // .find({ where }) - .find() + .findOne({ + where: {id: submissionId, siteId: req.params.siteId} + }) .then(found => { if ( !found ) throw new Error('Submission not found'); req.results = found; From e045f06a4416ccf2720db0f1a6c75651b1fe6a6a Mon Sep 17 00:00:00 2001 From: Rudi van Hierden Date: Fri, 8 Oct 2021 15:59:02 +0200 Subject: [PATCH 08/15] Update changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2b850355..e1bec174 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,8 @@ # Changelog +## UNRELEASED +* Make submissions listable & viewable, and allow them to be filtered by formId + ## v0.17.0 * Add option to anonymize only selected users at /api/site/:SITE_ID/user/:USER_ID/do-anonymizeall * Add automatic update of idea status after a given number of days From 19f8909df337e4ad28c75e79c3c7e56734f6a3b1 Mon Sep 17 00:00:00 2001 From: Rudi van Hierden Date: Mon, 11 Oct 2021 11:04:54 +0200 Subject: [PATCH 09/15] Refactor duplicate code for the sequelize filters to utility method Also changes some `var` to `const` at the top-level requires and remove some unnecessary & commented code. 
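To make the extracted helper's contract concrete, a direct call would look roughly like this (a sketch only; the keys and values are invented, and the return shape matches the implementation added below):

    const getSequelizeConditionsForFilters = require('./src/util/getSequelizeConditionsForFilters');
    const db = require('./src/db');

    // 'status' is matched as a plain column; 'theme' is flagged as extraData,
    // so it is compared through a literal on the JSON column (extraData->'$.theme').
    const filterKeys = [{ key: 'status' }, { key: 'theme', extraData: true }];

    const filters = getSequelizeConditionsForFilters(
      filterKeys,
      { status: 'OPEN', theme: 'green' }, // include filters
      db.sequelize,                       // Sequelize instance used for escaping/literals (accessor may differ per caller)
      null                                // no exclude filters
    );
    // filters is { where: sequelize.and(conditions) } and can be passed straight
    // to findAll()/findAndCountAll(), which is what the refactored scopes do.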
--- src/models/Idea.js | 79 ++------------------ src/models/Submission.js | 77 +------------------ src/util/getSequelizeConditionsForFilters.js | 57 ++++++++++++++ 3 files changed, 69 insertions(+), 144 deletions(-) create mode 100644 src/util/getSequelizeConditionsForFilters.js diff --git a/src/models/Idea.js b/src/models/Idea.js index ff689fda..85be967f 100644 --- a/src/models/Idea.js +++ b/src/models/Idea.js @@ -1,17 +1,17 @@ -var Sequelize = require('sequelize'); -var co = require('co') +const Sequelize = require('sequelize'); +const getSequelizeConditionsForFilters = require('./../util/getSequelizeConditionsForFilters'); +const co = require('co') , config = require('config') , moment = require('moment-timezone') , pick = require('lodash/pick') , Promise = require('bluebird'); -var sanitize = require('../util/sanitize'); -// var ImageOptim = require('../ImageOptim'); -var notifications = require('../notifications'); +const sanitize = require('../util/sanitize'); +const notifications = require('../notifications'); const merge = require('merge'); -var argVoteThreshold = config.ideas && config.ideas.argumentVoteThreshold; +const argVoteThreshold = config.ideas && config.ideas.argumentVoteThreshold; const userHasRole = require('../lib/sequelize-authorization/lib/hasRole'); const roles = require('../lib/sequelize-authorization/lib/roles'); const getExtraDataConfig = require('../lib/sequelize-authorization/lib/getExtraDataConfig'); @@ -653,10 +653,6 @@ module.exports = function (db, sequelize, DataTypes) { }, filter: function (filtersInclude, filtersExclude) { - let conditions = { - [Sequelize.Op.and]:[] - }; - const filterKeys = [ { 'key': 'id' @@ -677,67 +673,8 @@ module.exports = function (db, sequelize, DataTypes) { 'extraData': true }, ]; - - filterKeys.forEach((filter, i) => { - //first add include filters - if (filtersInclude) { - let filterValue = filtersInclude[filter.key]; - - if (filtersInclude[filter.key]) { - if (filter.extraData) { - filterValue = Array.isArray(filterValue) ? filterValue : [filterValue]; - - const escapedKey = sequelize.escape(`$.${filter.key}`); - filterValue.forEach((value, key)=>{ - const escapedValue = sequelize.escape(value); - conditions[Sequelize.Op.and].push({ - [Sequelize.Op.and] : sequelize.literal(`extraData->${escapedKey}=${escapedValue}`) - }); - }); - - } else { - conditions[Sequelize.Op.and].push({ - [filter.key] : filterValue - }); - } - } - } - - //add exclude filters - if (filtersExclude) { - let excludeFilterValue = filtersExclude[filter.key]; - - if (excludeFilterValue) { - if (filter.extraData) { - excludeFilterValue = Array.isArray(excludeFilterValue) ? 
excludeFilterValue : [excludeFilterValue]; - - //filter out multiple conditions - const escapedKey = sequelize.escape(`$.${filter.key}`); - excludeFilterValue.forEach((value, key)=>{ - const escapedValue = sequelize.escape(value); - conditions[Sequelize.Op.and].push({ - [Sequelize.Op.and] : sequelize.literal(`extraData->${escapedKey}!=${escapedValue}`) - }); - - - }) - - } else { - /* - TODO - conditions[Sequelize.Op.and].push({ - [filter.key] : filterValue - }); - */ - } - } - } - }); - - return { - where: sequelize.and(conditions) - //where: sequelize.and(conditions) - } + + return getSequelizeConditionsForFilters(filterKeys, filtersInclude, sequelize, filtersExclude); }, // vergelijk getRunning() diff --git a/src/models/Submission.js b/src/models/Submission.js index c7aae1a2..eac07f7f 100644 --- a/src/models/Submission.js +++ b/src/models/Submission.js @@ -1,5 +1,6 @@ const config = require('config'); -var Sequelize = require('sequelize'); +const Sequelize = require('sequelize'); +const getSequelizeConditionsForFilters = require('./../util/getSequelizeConditionsForFilters'); module.exports = function( db, sequelize, DataTypes ) { var Submission = sequelize.define('submission', { @@ -45,24 +46,6 @@ module.exports = function( db, sequelize, DataTypes ) { value = JSON.parse(value); } } catch(err) {} - let newValue = {}; - - value = value; - - /* - const configExtraData = []; - if (configExtraData) { - Object.keys(configExtraData).forEach((key) => { - if (configExtraData[key].allowNull === false && (typeof value[key] === 'undefined' || value[key] === '')) { // TODO: dit wordt niet gechecked als je het veld helemaal niet meestuurt - // zie validExtraData hieronder - // throw db.sequelize.ValidationError(`${key} is niet ingevuld`); - } - if (value[key] && configExtraData[key].values.indexOf(value[key]) != -1) { // TODO: alles is nu enum, maar dit is natuurlijk veel te simpel - newValue[key] = value[key]; - } - }); - } - */ this.setDataValue('submittedData', JSON.stringify(value)); } @@ -92,10 +75,6 @@ module.exports = function( db, sequelize, DataTypes ) { }; }, filter: function (filtersInclude, filtersExclude) { - let conditions = { - [Sequelize.Op.and]: [] - }; - const filterKeys = [ { 'key': 'id' @@ -107,56 +86,8 @@ module.exports = function( db, sequelize, DataTypes ) { 'key': 'formId' }, ]; - - filterKeys.forEach((filter, i) => { - //first add include filters - if (filtersInclude) { - let filterValue = filtersInclude[filter.key]; - - if (filtersInclude[filter.key]) { - if (filter.extraData) { - filterValue = Array.isArray(filterValue) ? filterValue : [filterValue]; - - const escapedKey = sequelize.escape(`$.${filter.key}`); - filterValue.forEach((value, key) => { - const escapedValue = sequelize.escape(value); - conditions[Sequelize.Op.and].push({ - [Sequelize.Op.and]: sequelize.literal(`extraData->${escapedKey}=${escapedValue}`) - }); - }); - - } else { - conditions[Sequelize.Op.and].push({ - [filter.key]: filterValue - }); - } - } - } - - //add exclude filters - if (filtersExclude) { - let excludeFilterValue = filtersExclude[filter.key]; - - if (excludeFilterValue) { - if (filter.extraData) { - excludeFilterValue = Array.isArray(excludeFilterValue) ? 
excludeFilterValue : [excludeFilterValue]; - - //filter out multiple conditions - const escapedKey = sequelize.escape(`$.${filter.key}`); - excludeFilterValue.forEach((value, key) => { - const escapedValue = sequelize.escape(value); - conditions[Sequelize.Op.and].push({ - [Sequelize.Op.and]: sequelize.literal(`extraData->${escapedKey}!=${escapedValue}`) - }); - }) - } - } - } - }); - - return { - where: sequelize.and(conditions) - } + + return getSequelizeConditionsForFilters(filterKeys, filtersInclude, sequelize, filtersExclude); }, }; } diff --git a/src/util/getSequelizeConditionsForFilters.js b/src/util/getSequelizeConditionsForFilters.js new file mode 100644 index 00000000..1c67dfd0 --- /dev/null +++ b/src/util/getSequelizeConditionsForFilters.js @@ -0,0 +1,57 @@ +const Sequelize = require('sequelize'); + +module.exports = (filterKeys, filtersInclude, sequelize, filtersExclude) => { + let conditions = { + [Sequelize.Op.and]: [] + }; + + filterKeys.forEach((filter, i) => { + //first add include filters + if (filtersInclude) { + let filterValue = filtersInclude[filter.key]; + + if (filtersInclude[filter.key]) { + if (filter.extraData) { + filterValue = Array.isArray(filterValue) ? filterValue : [filterValue]; + + const escapedKey = sequelize.escape(`$.${filter.key}`); + filterValue.forEach((value, key) => { + const escapedValue = sequelize.escape(value); + conditions[Sequelize.Op.and].push({ + [Sequelize.Op.and]: sequelize.literal(`extraData->${escapedKey}=${escapedValue}`) + }); + }); + + } else { + conditions[Sequelize.Op.and].push({ + [filter.key]: filterValue + }); + } + } + } + + //add exclude filters + if (filtersExclude) { + let excludeFilterValue = filtersExclude[filter.key]; + + if (excludeFilterValue) { + if (filter.extraData) { + excludeFilterValue = Array.isArray(excludeFilterValue) ? excludeFilterValue : [excludeFilterValue]; + + //filter out multiple conditions + const escapedKey = sequelize.escape(`$.${filter.key}`); + excludeFilterValue.forEach((value, key) => { + const escapedValue = sequelize.escape(value); + conditions[Sequelize.Op.and].push({ + [Sequelize.Op.and]: sequelize.literal(`extraData->${escapedKey}!=${escapedValue}`) + }); + }) + } + } + } + }); + + return { + where: sequelize.and(conditions) + } +} From c9f3e08692917730b494eafbf91ec9fcf70cde60 Mon Sep 17 00:00:00 2001 From: Rudi van Hierden Date: Mon, 11 Oct 2021 11:14:13 +0200 Subject: [PATCH 10/15] Refactor to reduce code complexity --- src/util/getSequelizeConditionsForFilters.js | 62 +++++++++----------- 1 file changed, 29 insertions(+), 33 deletions(-) diff --git a/src/util/getSequelizeConditionsForFilters.js b/src/util/getSequelizeConditionsForFilters.js index 1c67dfd0..e3774d2c 100644 --- a/src/util/getSequelizeConditionsForFilters.js +++ b/src/util/getSequelizeConditionsForFilters.js @@ -5,48 +5,44 @@ module.exports = (filterKeys, filtersInclude, sequelize, filtersExclude) => { [Sequelize.Op.and]: [] }; - filterKeys.forEach((filter, i) => { + filterKeys.forEach((filter) => { //first add include filters - if (filtersInclude) { + if (filtersInclude && filtersInclude[filter.key]) { let filterValue = filtersInclude[filter.key]; - if (filtersInclude[filter.key]) { - if (filter.extraData) { - filterValue = Array.isArray(filterValue) ? 
filterValue : [filterValue]; - - const escapedKey = sequelize.escape(`$.${filter.key}`); - filterValue.forEach((value, key) => { - const escapedValue = sequelize.escape(value); - conditions[Sequelize.Op.and].push({ - [Sequelize.Op.and]: sequelize.literal(`extraData->${escapedKey}=${escapedValue}`) - }); - }); - - } else { - conditions[Sequelize.Op.and].push({ - [filter.key]: filterValue - }); - } + if (!filter.extraData) { + return conditions[Sequelize.Op.and].push({ + [filter.key]: filterValue + }); } + + filterValue = Array.isArray(filterValue) ? filterValue : [filterValue]; + + const escapedKey = sequelize.escape(`$.${filter.key}`); + filterValue.forEach((value) => { + const escapedValue = sequelize.escape(value); + conditions[Sequelize.Op.and].push({ + [Sequelize.Op.and]: sequelize.literal(`extraData->${escapedKey}=${escapedValue}`) + }); + }); + } //add exclude filters - if (filtersExclude) { + if (filtersExclude && filtersExclude[filter.key]) { let excludeFilterValue = filtersExclude[filter.key]; - if (excludeFilterValue) { - if (filter.extraData) { - excludeFilterValue = Array.isArray(excludeFilterValue) ? excludeFilterValue : [excludeFilterValue]; - - //filter out multiple conditions - const escapedKey = sequelize.escape(`$.${filter.key}`); - excludeFilterValue.forEach((value, key) => { - const escapedValue = sequelize.escape(value); - conditions[Sequelize.Op.and].push({ - [Sequelize.Op.and]: sequelize.literal(`extraData->${escapedKey}!=${escapedValue}`) - }); - }) - } + if (filter.extraData) { + excludeFilterValue = Array.isArray(excludeFilterValue) ? excludeFilterValue : [excludeFilterValue]; + + //filter out multiple conditions + const escapedKey = sequelize.escape(`$.${filter.key}`); + excludeFilterValue.forEach((value) => { + const escapedValue = sequelize.escape(value); + conditions[Sequelize.Op.and].push({ + [Sequelize.Op.and]: sequelize.literal(`extraData->${escapedKey}!=${escapedValue}`) + }); + }) } } }); From 6f2e1728215a5091e7aa0205c889d676f263f608 Mon Sep 17 00:00:00 2001 From: Rudi van Hierden Date: Wed, 26 Jan 2022 11:06:43 +0100 Subject: [PATCH 11/15] Use multipart upload & chunking for mongodb s3 backups --- src/cron/mongodb_s3_backups.js | 232 +++++++++++++++++++++++---------- 1 file changed, 162 insertions(+), 70 deletions(-) diff --git a/src/cron/mongodb_s3_backups.js b/src/cron/mongodb_s3_backups.js index 37087350..52c55848 100644 --- a/src/cron/mongodb_s3_backups.js +++ b/src/cron/mongodb_s3_backups.js @@ -1,76 +1,168 @@ -const AWS = require('aws-sdk'); -const fs = require('fs'); // Needed for example below +const fs = require('fs'); const moment = require('moment') -const os = require('os'); -//const BACKUP_PATH = (ZIP_NAME) => path.resolve(os.tmpdir(), ZIP_NAME); -const { exec } = require('child_process'); -const log = require('debug')('app:cron'); -const db = require('../db'); - -// Purpose -// ------- -// Auto-close ideas that passed the deadline. -// accessKeyId: process.env.S3_KEY, - secretAccessKey: process.env.S3_SECRET -// Runs every night at 1:00. 
-// -function currentTime(timezoneOffset) { - if (timezoneOffset) { - return moment(moment(moment.now()).utcOffset(timezoneOffset, true).toDate()).format("YYYY-MM-DDTHH-mm-ss"); - } else { - return moment - .utc() - .format('YYYY-MM-DDTHH-mm-ss'); - } -} +const {exec} = require('child_process'); +const s3 = require('../services/awsS3'); const backupMongoDBToS3 = async () => { - if (process.env.S3_MONGO_BACKUPS === 'ON') { - const host = process.env.MONGO_DB_HOST || 'localhost'; - const port = process.env.MONGO_DB_PORT || 27017; - const tmpDbFile = 'db_mongo' - - // let DB_BACKUP_NAME = `mongodb_${currentTime()}.gz`; - - // Default command, does not considers username or password - let command = `mongodump -h ${host} --port=${port} --archive=${tmpDbFile}`; - - // When Username and password is provided - // / - //if (username && password) { - // command = `mongodump -h ${host} --port=${port} -d ${database} -p ${password} -u ${username} --quiet --gzip --archive=${BACKUP_PATH(DB_BACKUP_NAME)}`; - //} - - exec(command, (err, stdout, stderr) => { - if (err) { - // Most likely, mongodump isn't installed or isn't accessible - console.log('errere', err); - } else { - const spacesEndpoint = new AWS.Endpoint(process.env.S3_ENDPOINT); - - const created = moment().format('YYYY-MM-DD hh:mm:ss') - const fileContent = fs.readFileSync(tmpDbFile); - - const s3 = new AWS.S3({ - endpoint: spacesEndpoint, - accessKeyId: process.env.S3_KEY, - secretAccessKey: process.env.S3_SECRET - }); - - var params = { - Bucket: process.env.S3_BUCKET, - Key: "mongodb/mongo_" + created, - Body: fileContent, - ACL: "private" - }; - - s3.putObject(params, function(err, data) { - if (err) console.log(err, err.stack); - else console.log(data); - }); - } - }); - } + console.log('backing up to mongodb', process.env.S3_MONGO_BACKUPS); + + if (process.env.S3_MONGO_BACKUPS === 'ON') { + const host = process.env.MONGO_DB_HOST || 'localhost'; + const port = process.env.MONGO_DB_PORT || 27017; + const tmpDbFile = 'db_mongo'; + const isOnK8s = !!process.env.KUBERNETES_NAMESPACE; + const namespace = process.env.KUBERNETES_NAMESPACE; + const bucket = process.env.S3_BUCKET; + const removeTmpDbFile = () => { + try { + console.log ('removing tmp db file', tmpDbFile); + fs.unlinkSync(tmpDbFile); + } catch (e) { + console.error('error removing file', e); + } + }; + + // Default command, does not considers username or password + let command = `mongodump -h ${host} --port=${port} --archive=${tmpDbFile}`; + + + exec(command, async (err, stdout, stderr) => { + if (err) { + // Most likely, mongodump isn't installed or isn't accessible + console.error(`mongodump command error: ${err}`); + removeTmpDbFile(); + } else { + const created = moment().format('YYYY-MM-DD hh:mm:ss') + + const statsFile = fs.statSync(tmpDbFile); + console.info(`file size: ${Math.round(statsFile.size / 1024 / 1024)}MB`); + + const fileNameInS3 = isOnK8s ? `mongodb/${namespace}/mongo_${created}` : `mongodb/mongo_${created}`; + + const s3Client = s3.getClient(); + + // Each part must be at least 5 MB in size, except the last part. + let uploadId; + try { + const params = { + Bucket: bucket, + Key: fileNameInS3, + ACL: "private" + }; + const result = await s3Client.createMultipartUpload(params).promise(); + uploadId = result.UploadId; + console.info(`${fileNameInS3} multipart created with upload id: ${uploadId}`); + } catch (e) { + removeTmpDbFile(); + throw new Error(`Error creating S3 multipart. 
${e.message}`); + } + + const chunkSize = 10 * 1024 * 1024; // 10MB + const readStream = fs.createReadStream(tmpDbFile); // you can use a second parameter here with this option to read with a bigger chunk size than 64 KB: { highWaterMark: chunkSize } + + // read the file to upload using streams and upload part by part to S3 + const uploadPartsPromise = new Promise((resolve, reject) => { + const multipartMap = {Parts: []}; + + let partNumber = 1; + let chunkAccumulator = null; + + readStream.on('error', (err) => { + reject(err); + }); + + readStream.on('data', (chunk) => { + // it reads in chunks of 64KB. We accumulate them up to 10MB and then we send to S3 + if (chunkAccumulator === null) { + chunkAccumulator = chunk; + } else { + chunkAccumulator = Buffer.concat([chunkAccumulator, chunk]); + } + if (chunkAccumulator.length > chunkSize) { + // pause the stream to upload this chunk to S3 + readStream.pause(); + + const chunkMB = chunkAccumulator.length / 1024 / 1024; + + const params = { + Bucket: bucket, + Key: fileNameInS3, + PartNumber: partNumber, + UploadId: uploadId, + Body: chunkAccumulator, + ContentLength: chunkAccumulator.length, + }; + s3Client.uploadPart(params).promise() + .then((result) => { + console.info(`Data uploaded. Entity tag: ${result.ETag} Part: ${params.PartNumber} Size: ${chunkMB}`); + multipartMap.Parts.push({ETag: result.ETag, PartNumber: params.PartNumber}); + partNumber++; + chunkAccumulator = null; + // resume to read the next chunk + readStream.resume(); + }).catch((err) => { + removeTmpDbFile(); + console.error(`error uploading the chunk to S3 ${err.message}`); + reject(err); + }); + } + }); + + /*readStream.on('end', () => { + console.info('End of the stream'); + });*/ + + readStream.on('close', () => { + if (chunkAccumulator) { + const chunkMB = chunkAccumulator.length / 1024 / 1024; + + // upload the last chunk + const params = { + Bucket: bucket, + Key: fileNameInS3, + PartNumber: partNumber, + UploadId: uploadId, + Body: chunkAccumulator, + ContentLength: chunkAccumulator.length, + }; + + s3Client.uploadPart(params).promise() + .then((result) => { + console.info(`Last Data uploaded. Entity tag: ${result.ETag} Part: ${params.PartNumber} Size: ${chunkMB}`); + multipartMap.Parts.push({ETag: result.ETag, PartNumber: params.PartNumber}); + chunkAccumulator = null; + resolve(multipartMap); + }).catch((err) => { + removeTmpDbFile(); + console.error(`error uploading the last chunk to S3 ${err.message}`); + reject(err); + }); + } + }); + }); + + const multipartMap = await uploadPartsPromise; + + console.info(`All parts uploaded, completing multipart upload, parts: ${multipartMap.Parts.length} `); + + // gather all parts' tags and complete the upload + try { + const params = { + Bucket: bucket, + Key: fileNameInS3, + MultipartUpload: multipartMap, + UploadId: uploadId, + }; + const result = await s3Client.completeMultipartUpload(params).promise(); + console.info(`Upload multipart completed. Location: ${result.Location} Entity tag: ${result.ETag}`); + removeTmpDbFile(); + } catch (e) { + removeTmpDbFile(); + throw new Error(`Error completing S3 multipart. 
${e.message}`); + } + } + }); + } } From 7c195c9323c4055741634e3f4679729d7dc8b95d Mon Sep 17 00:00:00 2001 From: Rudi van Hierden Date: Wed, 26 Jan 2022 17:12:34 +0100 Subject: [PATCH 12/15] Refactor MongoDB S3 backup --- src/cron/mongodb_s3_backups.js | 148 ++++----------------------------- src/services/awsS3.js | 137 ++++++++++++++++++++++++++++++ 2 files changed, 155 insertions(+), 130 deletions(-) create mode 100644 src/services/awsS3.js diff --git a/src/cron/mongodb_s3_backups.js b/src/cron/mongodb_s3_backups.js index 52c55848..f0fbd1c0 100644 --- a/src/cron/mongodb_s3_backups.js +++ b/src/cron/mongodb_s3_backups.js @@ -2,6 +2,7 @@ const fs = require('fs'); const moment = require('moment') const {exec} = require('child_process'); const s3 = require('../services/awsS3'); +const util = require('util'); const backupMongoDBToS3 = async () => { console.log('backing up to mongodb', process.env.S3_MONGO_BACKUPS); @@ -9,156 +10,43 @@ const backupMongoDBToS3 = async () => { if (process.env.S3_MONGO_BACKUPS === 'ON') { const host = process.env.MONGO_DB_HOST || 'localhost'; const port = process.env.MONGO_DB_PORT || 27017; - const tmpDbFile = 'db_mongo'; + const tempFile = 'db_mongo'; const isOnK8s = !!process.env.KUBERNETES_NAMESPACE; const namespace = process.env.KUBERNETES_NAMESPACE; - const bucket = process.env.S3_BUCKET; - const removeTmpDbFile = () => { + const deleteTempFile = () => { try { - console.log ('removing tmp db file', tmpDbFile); - fs.unlinkSync(tmpDbFile); + console.log ('removing temp file', tempFile); + fs.unlinkSync(tempFile); } catch (e) { - console.error('error removing file', e); + console.error('error removing file', tempFile, e); } }; // Default command, does not considers username or password - let command = `mongodump -h ${host} --port=${port} --archive=${tmpDbFile}`; + let command = `mongodump -h ${host} --port=${port} --archive=${tempFile}`; + const promiseExec = util.promisify(exec); - exec(command, async (err, stdout, stderr) => { + return promiseExec(command, async (err, stdout, stderr) => { if (err) { // Most likely, mongodump isn't installed or isn't accessible console.error(`mongodump command error: ${err}`); - removeTmpDbFile(); + deleteTempFile(); } else { const created = moment().format('YYYY-MM-DD hh:mm:ss') - const statsFile = fs.statSync(tmpDbFile); + const statsFile = fs.statSync(tempFile); console.info(`file size: ${Math.round(statsFile.size / 1024 / 1024)}MB`); const fileNameInS3 = isOnK8s ? `mongodb/${namespace}/mongo_${created}` : `mongodb/mongo_${created}`; - - const s3Client = s3.getClient(); - - // Each part must be at least 5 MB in size, except the last part. - let uploadId; - try { - const params = { - Bucket: bucket, - Key: fileNameInS3, - ACL: "private" - }; - const result = await s3Client.createMultipartUpload(params).promise(); - uploadId = result.UploadId; - console.info(`${fileNameInS3} multipart created with upload id: ${uploadId}`); - } catch (e) { - removeTmpDbFile(); - throw new Error(`Error creating S3 multipart. 
${e.message}`); - } - - const chunkSize = 10 * 1024 * 1024; // 10MB - const readStream = fs.createReadStream(tmpDbFile); // you can use a second parameter here with this option to read with a bigger chunk size than 64 KB: { highWaterMark: chunkSize } - - // read the file to upload using streams and upload part by part to S3 - const uploadPartsPromise = new Promise((resolve, reject) => { - const multipartMap = {Parts: []}; - - let partNumber = 1; - let chunkAccumulator = null; - - readStream.on('error', (err) => { - reject(err); - }); - - readStream.on('data', (chunk) => { - // it reads in chunks of 64KB. We accumulate them up to 10MB and then we send to S3 - if (chunkAccumulator === null) { - chunkAccumulator = chunk; - } else { - chunkAccumulator = Buffer.concat([chunkAccumulator, chunk]); - } - if (chunkAccumulator.length > chunkSize) { - // pause the stream to upload this chunk to S3 - readStream.pause(); - - const chunkMB = chunkAccumulator.length / 1024 / 1024; - - const params = { - Bucket: bucket, - Key: fileNameInS3, - PartNumber: partNumber, - UploadId: uploadId, - Body: chunkAccumulator, - ContentLength: chunkAccumulator.length, - }; - s3Client.uploadPart(params).promise() - .then((result) => { - console.info(`Data uploaded. Entity tag: ${result.ETag} Part: ${params.PartNumber} Size: ${chunkMB}`); - multipartMap.Parts.push({ETag: result.ETag, PartNumber: params.PartNumber}); - partNumber++; - chunkAccumulator = null; - // resume to read the next chunk - readStream.resume(); - }).catch((err) => { - removeTmpDbFile(); - console.error(`error uploading the chunk to S3 ${err.message}`); - reject(err); - }); - } - }); - - /*readStream.on('end', () => { - console.info('End of the stream'); - });*/ - - readStream.on('close', () => { - if (chunkAccumulator) { - const chunkMB = chunkAccumulator.length / 1024 / 1024; - - // upload the last chunk - const params = { - Bucket: bucket, - Key: fileNameInS3, - PartNumber: partNumber, - UploadId: uploadId, - Body: chunkAccumulator, - ContentLength: chunkAccumulator.length, - }; - - s3Client.uploadPart(params).promise() - .then((result) => { - console.info(`Last Data uploaded. Entity tag: ${result.ETag} Part: ${params.PartNumber} Size: ${chunkMB}`); - multipartMap.Parts.push({ETag: result.ETag, PartNumber: params.PartNumber}); - chunkAccumulator = null; - resolve(multipartMap); - }).catch((err) => { - removeTmpDbFile(); - console.error(`error uploading the last chunk to S3 ${err.message}`); - reject(err); - }); - } - }); - }); - - const multipartMap = await uploadPartsPromise; - - console.info(`All parts uploaded, completing multipart upload, parts: ${multipartMap.Parts.length} `); - - // gather all parts' tags and complete the upload + try { - const params = { - Bucket: bucket, - Key: fileNameInS3, - MultipartUpload: multipartMap, - UploadId: uploadId, - }; - const result = await s3Client.completeMultipartUpload(params).promise(); - console.info(`Upload multipart completed. Location: ${result.Location} Entity tag: ${result.ETag}`); - removeTmpDbFile(); + await s3.uploadFile(tempFile, fileNameInS3); + deleteTempFile(); + console.log('successfully uploaded to s3'); } catch (e) { - removeTmpDbFile(); - throw new Error(`Error completing S3 multipart. 
${e.message}`); + deleteTempFile(); + throw e; } } }); @@ -181,6 +69,6 @@ module.exports = { cronTime: '0 0 1 * * *', runOnInit: true, onTick: async function() { - backupMongoDBToS3(); + return backupMongoDBToS3(); } }; diff --git a/src/services/awsS3.js b/src/services/awsS3.js new file mode 100644 index 00000000..91fb0bab --- /dev/null +++ b/src/services/awsS3.js @@ -0,0 +1,137 @@ +const AwsS3 = require('aws-sdk'); +const fs = require('fs'); + +const getClient = () => { + const spacesEndpoint = new AwsS3.Endpoint(process.env.S3_ENDPOINT); + + const s3Config = { + endpoint: spacesEndpoint, + accessKeyId: process.env.S3_KEY, + secretAccessKey: process.env.S3_SECRET + } + + if (process.env.S3_FORCE_PATH_STYLE) { + s3Config.s3ForcePathStyle = process.env.S3_FORCE_PATH_STYLE; + } + + return new AwsS3.S3(s3Config); +} + +const uploadFile = async (localFile, fileNameInS3) => { + + let uploadId; + const s3Client = getClient(); + + try { + const params = { + Bucket: process.env.S3_BUCKET, + Key: fileNameInS3, + ACL: "private" + }; + const result = await s3Client.createMultipartUpload(params).promise(); + uploadId = result.UploadId; + console.info(`${fileNameInS3} multipart created with upload id: ${uploadId}`); + } catch (e) { + throw new Error(`Error creating S3 multipart. ${e.message}`); + } + + const chunkSize = 10 * 1024 * 1024; // 10MB + const readStream = fs.createReadStream(localFile); + + // read the file to upload using streams and upload part by part to S3 + const uploadPartsPromise = new Promise((resolve, reject) => { + const multipartMap = {Parts: []}; + + let partNumber = 1; + let chunkAccumulator = null; + + readStream.on('error', (err) => { + reject(err); + }); + + readStream.on('data', (chunk) => { + // it reads in chunks of 64KB. We accumulate them up to 10MB and then we send to S3 + if (chunkAccumulator === null) { + chunkAccumulator = chunk; + } else { + chunkAccumulator = Buffer.concat([chunkAccumulator, chunk]); + } + if (chunkAccumulator.length > chunkSize) { + // pause the stream to upload this chunk to S3 + readStream.pause(); + + const chunkMB = chunkAccumulator.length / 1024 / 1024; + + const params = { + Bucket: process.env.S3_BUCKET, + Key: fileNameInS3, + PartNumber: partNumber, + UploadId: uploadId, + Body: chunkAccumulator, + ContentLength: chunkAccumulator.length, + }; + s3Client.uploadPart(params).promise() + .then((result) => { + console.info(`Data uploaded. Entity tag: ${result.ETag} Part: ${params.PartNumber} Size: ${chunkMB}`); + multipartMap.Parts.push({ETag: result.ETag, PartNumber: params.PartNumber}); + partNumber++; + chunkAccumulator = null; + // resume to read the next chunk + readStream.resume(); + }).catch((err) => { + console.error(`error uploading the chunk to S3 ${err.message}`); + reject(err); + }); + } + }); + + readStream.on('close', () => { + if (chunkAccumulator) { + const chunkMB = chunkAccumulator.length / 1024 / 1024; + + // upload the last chunk + const params = { + Bucket: process.env.S3_BUCKET, + Key: fileNameInS3, + PartNumber: partNumber, + UploadId: uploadId, + Body: chunkAccumulator, + ContentLength: chunkAccumulator.length, + }; + + s3Client.uploadPart(params).promise() + .then((result) => { + console.info(`Last Data uploaded. 
Entity tag: ${result.ETag} Part: ${params.PartNumber} Size: ${chunkMB}`); + multipartMap.Parts.push({ETag: result.ETag, PartNumber: params.PartNumber}); + chunkAccumulator = null; + resolve(multipartMap); + }).catch((err) => { + console.error(`error uploading the last chunk to S3 ${err.message}`); + reject(err); + }); + } + }); + }); + + const multipartMap = await uploadPartsPromise; + + console.info(`All parts uploaded, completing multipart upload, parts: ${multipartMap.Parts.length} `); + + // gather all parts' tags and complete the upload + try { + const params = { + Bucket: process.env.S3_BUCKET, + Key: fileNameInS3, + MultipartUpload: multipartMap, + UploadId: uploadId, + }; + const result = await s3Client.completeMultipartUpload(params).promise(); + console.info(`Upload multipart completed. Location: ${result.Location} Entity tag: ${result.ETag}`); + return result; + } catch (e) { + throw new Error(`Error completing S3 multipart. ${e.message}`); + } +} + + +module.exports = {getClient, uploadFile}; From 1e4dd83b276372e809c738da1c0dc433db177e06 Mon Sep 17 00:00:00 2001 From: Rudi van Hierden Date: Thu, 27 Jan 2022 09:35:21 +0100 Subject: [PATCH 13/15] Update changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d1da461d..809b2304 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,8 @@ # Changelog +## UNRELEASED +* Change mongoDB S3 backup to read the database dump in chunks and upload it to the S3 bucket using a MultiPart upload setup. + ## v0.20.0 (2021-12-20) * Make submissions listable & viewable, and allow them to be filtered by formId * Add choices guide stats endpoint From a8739d7d0ce8b6cb628aa92a7cefc97ad0df5f2a Mon Sep 17 00:00:00 2001 From: Rudi van Hierden Date: Thu, 27 Jan 2022 09:54:14 +0100 Subject: [PATCH 14/15] Add backup.js to run MySQL / MongoDB S3 backup This allows us to create a cronjob in Kubernetes which runs just this file, while also allowing us to skip the built-in backup cronjobs with the PREVENT_BACKUP_CRONJOBS environment variable set to `true`. This commit also prevents the normal mysql & mongodb backups to be run when the S3 variant is turned on. --- CHANGELOG.md | 1 + backup.js | 27 +++++++++++++++++++++++++++ src/cron/mongodb_backups.js | 4 ++++ src/cron/mongodb_s3_backups.js | 4 ++++ src/cron/mysql_backups.js | 5 +++++ src/cron/mysql_s3_backups.js | 29 ++++++++++++++--------------- 6 files changed, 55 insertions(+), 15 deletions(-) create mode 100644 backup.js diff --git a/CHANGELOG.md b/CHANGELOG.md index 809b2304..fcb10700 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## UNRELEASED * Change mongoDB S3 backup to read the database dump in chunks and upload it to the S3 bucket using a MultiPart upload setup. +* Allow s3 backups to be disabled from the cronjobs, but instead be run through a different entrypoint (backup.js) to allow kubernetes cronjobs to be used. 
## v0.20.0 (2021-12-20) * Make submissions listable & viewable, and allow them to be filtered by formId diff --git a/backup.js b/backup.js new file mode 100644 index 00000000..6797a975 --- /dev/null +++ b/backup.js @@ -0,0 +1,27 @@ +const backupMapping = { + 'mongo': 'mongodb_s3_backups', + 'mysql': 'mysql_s3_backups' +}; + +if (!!process.env.BACKUP_TYPE === false) { + console.error ('No backup type given in ENV variables'); + process.exit(1); +} + +if (Object.keys(backupMapping).indexOf(process.env.BACKUP_TYPE) === -1) { + console.error ('Backup type not supported'); + process.exit(1); +} + +const backup = require(`./src/cron/${backupMapping[process.env.BACKUP_TYPE]}`); + +async function run() { + try { + console.log(`${backupMapping[process.env.BACKUP_TYPE]}`); + await backup.onTick(); + } catch (err) { + console.error('Backup went wrong', err); + } +} + +run(); diff --git a/src/cron/mongodb_backups.js b/src/cron/mongodb_backups.js index 014082d2..92e8c053 100644 --- a/src/cron/mongodb_backups.js +++ b/src/cron/mongodb_backups.js @@ -10,6 +10,10 @@ module.exports = { cronTime: '0 0 1 * * *', runOnInit: true, onTick: function() { + if (!!process.env.PREVENT_BACKUP_CRONJOBS === true || process.env.S3_MONGO_BACKUPS === 'ON') { + return; + } + const objectStoreUrl = process.env.OBJECT_STORE_URL; const objectStoreUser = process.env.OBJECT_STORE_USER; const objectStorePass = process.env.OBJECT_STORE_PASS; diff --git a/src/cron/mongodb_s3_backups.js b/src/cron/mongodb_s3_backups.js index f0fbd1c0..0dd20b50 100644 --- a/src/cron/mongodb_s3_backups.js +++ b/src/cron/mongodb_s3_backups.js @@ -5,6 +5,10 @@ const s3 = require('../services/awsS3'); const util = require('util'); const backupMongoDBToS3 = async () => { + if (!!process.env.PREVENT_BACKUP_CRONJOBS === true) { + return; + } + console.log('backing up to mongodb', process.env.S3_MONGO_BACKUPS); if (process.env.S3_MONGO_BACKUPS === 'ON') { diff --git a/src/cron/mysql_backups.js b/src/cron/mysql_backups.js index a9ae59ce..b629efbb 100644 --- a/src/cron/mysql_backups.js +++ b/src/cron/mysql_backups.js @@ -10,6 +10,11 @@ module.exports = { cronTime: '0 0 1 * * *', runOnInit: true, onTick: function() { + // Do not run this cronjob if we have PREVENT_BACKUP_CRONJOBS set to true, or if the S3_DBS_TO_BACKUP is not empty (which means we want to backup to S3). + if (!!process.env.PREVENT_BACKUP_CRONJOBS === true || !!process.env.S3_DBS_TO_BACKUP === false) { + return; + } + const mysqlRootPw = process.env.MYSQL_ROOT_PASS; const objectStoreUrl = process.env.OBJECT_STORE_URL; const objectStoreUser = process.env.OBJECT_STORE_USER; diff --git a/src/cron/mysql_s3_backups.js b/src/cron/mysql_s3_backups.js index d0f49075..7810ac9a 100644 --- a/src/cron/mysql_s3_backups.js +++ b/src/cron/mysql_s3_backups.js @@ -1,9 +1,6 @@ -const AWS = require('aws-sdk'); -const fs = require('fs'); // Needed for example below const mysqldump = require('mysqldump'); const moment = require('moment') -const log = require('debug')('app:cron'); -const db = require('../db'); +const s3 = require('../services/awsS3'); // Purpose // ------- @@ -11,8 +8,14 @@ const db = require('../db'); // // Runs every night at 1:00. const backupMysqlToS3 = async () => { + if (!!process.env.PREVENT_BACKUP_CRONJOBS === true) { + return; + } + const dbsToBackup = process.env.S3_DBS_TO_BACKUP ? 
process.env.S3_DBS_TO_BACKUP.split(',') : false; - + const isOnK8s = !!process.env.KUBERNETES_NAMESPACE; + const namespace = process.env.KUBERNETES_NAMESPACE; + if (dbsToBackup) { dbsToBackup.forEach(async function(dbName) { // return the dump from the function and not to a file @@ -32,24 +35,20 @@ const backupMysqlToS3 = async () => { } }); - const spacesEndpoint = new AWS.Endpoint(process.env.S3_ENDPOINT); - const created = moment().format('YYYY-MM-DD hh:mm:ss') - const s3 = new AWS.S3({ - endpoint: spacesEndpoint, - accessKeyId: process.env.S3_KEY, - secretAccessKey: process.env.S3_SECRET - }); + const key = isOnK8s ? `mysql/${namespace}/${dbName}_${created}sql` : `mysql/${dbName}_${created}sql`; var params = { Bucket: process.env.S3_BUCKET, - Key: 'mysql/' + dbName + '_' + created + ".sql", + Key: key, Body: result.dump.data, ACL: "private" }; + + const client = s3.getClient(); - s3.putObject(params, function(err, data) { + client.putObject(params, function(err, data) { if (err) console.log(err, err.stack); else console.log(data); }); @@ -76,6 +75,6 @@ module.exports = { cronTime: '0 0 1 * * *', runOnInit: false, onTick: async function() { - backupMysqlToS3(); + return backupMysqlToS3(); } }; From 5420817dc6b93f268dd3bd2019f0f145e57bdf97 Mon Sep 17 00:00:00 2001 From: Rudi van Hierden Date: Thu, 27 Jan 2022 10:15:40 +0100 Subject: [PATCH 15/15] Refactor to reduce complexity & duplication --- src/cron/mongodb_s3_backups.js | 32 +++++++++++----------- src/services/awsS3.js | 50 ++++++++++++++-------------------- 2 files changed, 36 insertions(+), 46 deletions(-) diff --git a/src/cron/mongodb_s3_backups.js b/src/cron/mongodb_s3_backups.js index f0fbd1c0..b5ba66c8 100644 --- a/src/cron/mongodb_s3_backups.js +++ b/src/cron/mongodb_s3_backups.js @@ -13,6 +13,8 @@ const backupMongoDBToS3 = async () => { const tempFile = 'db_mongo'; const isOnK8s = !!process.env.KUBERNETES_NAMESPACE; const namespace = process.env.KUBERNETES_NAMESPACE; + const created = moment().format('YYYY-MM-DD hh:mm:ss') + const fileNameInS3 = isOnK8s ? `mongodb/${namespace}/mongo_${created}` : `mongodb/mongo_${created}`; const deleteTempFile = () => { try { console.log ('removing temp file', tempFile); @@ -32,23 +34,21 @@ const backupMongoDBToS3 = async () => { // Most likely, mongodump isn't installed or isn't accessible console.error(`mongodump command error: ${err}`); deleteTempFile(); - } else { - const created = moment().format('YYYY-MM-DD hh:mm:ss') - - const statsFile = fs.statSync(tempFile); - console.info(`file size: ${Math.round(statsFile.size / 1024 / 1024)}MB`); - - const fileNameInS3 = isOnK8s ? 
`mongodb/${namespace}/mongo_${created}` : `mongodb/mongo_${created}`; - - try { - await s3.uploadFile(tempFile, fileNameInS3); - deleteTempFile(); - console.log('successfully uploaded to s3'); - } catch (e) { - deleteTempFile(); - throw e; - } + return; + } + + const statsFile = fs.statSync(tempFile); + console.info(`file size: ${Math.round(statsFile.size / 1024 / 1024)}MB`); + + try { + await s3.uploadFile(tempFile, fileNameInS3); + deleteTempFile(); + console.log('successfully uploaded to s3'); + } catch (e) { + deleteTempFile(); + throw e; } + }); } } diff --git a/src/services/awsS3.js b/src/services/awsS3.js index 91fb0bab..758effc6 100644 --- a/src/services/awsS3.js +++ b/src/services/awsS3.js @@ -60,20 +60,10 @@ const uploadFile = async (localFile, fileNameInS3) => { // pause the stream to upload this chunk to S3 readStream.pause(); - const chunkMB = chunkAccumulator.length / 1024 / 1024; - - const params = { - Bucket: process.env.S3_BUCKET, - Key: fileNameInS3, - PartNumber: partNumber, - UploadId: uploadId, - Body: chunkAccumulator, - ContentLength: chunkAccumulator.length, - }; - s3Client.uploadPart(params).promise() + s3Client.uploadPart(createUploadPartParams(fileNameInS3, partNumber, uploadId, chunkAccumulator)).promise() .then((result) => { - console.info(`Data uploaded. Entity tag: ${result.ETag} Part: ${params.PartNumber} Size: ${chunkMB}`); - multipartMap.Parts.push({ETag: result.ETag, PartNumber: params.PartNumber}); + console.info(`Data uploaded. Entity tag: ${result.ETag} Part: ${partNumber} Size: ${chunkAccumulator.length}`); + multipartMap.Parts.push({ETag: result.ETag, PartNumber: partNumber}); partNumber++; chunkAccumulator = null; // resume to read the next chunk @@ -87,22 +77,11 @@ const uploadFile = async (localFile, fileNameInS3) => { readStream.on('close', () => { if (chunkAccumulator) { - const chunkMB = chunkAccumulator.length / 1024 / 1024; - // upload the last chunk - const params = { - Bucket: process.env.S3_BUCKET, - Key: fileNameInS3, - PartNumber: partNumber, - UploadId: uploadId, - Body: chunkAccumulator, - ContentLength: chunkAccumulator.length, - }; - - s3Client.uploadPart(params).promise() + s3Client.uploadPart(createUploadPartParams(fileNameInS3, partNumber, uploadId, chunkAccumulator)).promise() .then((result) => { - console.info(`Last Data uploaded. Entity tag: ${result.ETag} Part: ${params.PartNumber} Size: ${chunkMB}`); - multipartMap.Parts.push({ETag: result.ETag, PartNumber: params.PartNumber}); + console.info(`Last Data uploaded. 
Entity tag: ${result.ETag} Part: ${partNumber} Size: ${chunkAccumulator.length}`); + multipartMap.Parts.push({ETag: result.ETag, PartNumber: partNumber}); chunkAccumulator = null; resolve(multipartMap); }).catch((err) => { @@ -113,16 +92,16 @@ const uploadFile = async (localFile, fileNameInS3) => { }); }); - const multipartMap = await uploadPartsPromise; + const completedMultipartMap = await uploadPartsPromise; - console.info(`All parts uploaded, completing multipart upload, parts: ${multipartMap.Parts.length} `); + console.info(`All parts uploaded, completing multipart upload, parts: ${completedMultipartMap.Parts.length} `); // gather all parts' tags and complete the upload try { const params = { Bucket: process.env.S3_BUCKET, Key: fileNameInS3, - MultipartUpload: multipartMap, + MultipartUpload: completedMultipartMap, UploadId: uploadId, }; const result = await s3Client.completeMultipartUpload(params).promise(); @@ -133,5 +112,16 @@ const uploadFile = async (localFile, fileNameInS3) => { } } +function createUploadPartParams(fileNameInS3, partNumber, uploadId, chunkAccumulator) { + return { + Bucket: process.env.S3_BUCKET, + Key: fileNameInS3, + PartNumber: partNumber, + UploadId: uploadId, + Body: chunkAccumulator, + ContentLength: chunkAccumulator.length, + }; +} + module.exports = {getClient, uploadFile};
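For reference, the intended usage of the pieces introduced above: the Kubernetes backup job runs `BACKUP_TYPE=mongo node backup.js` (or `BACKUP_TYPE=mysql`), while `PREVENT_BACKUP_CRONJOBS=true` silences the built-in cron variants; other code can reuse the shared multipart helper directly. A minimal sketch (the local file and S3 key are placeholders; S3_ENDPOINT, S3_KEY, S3_SECRET and S3_BUCKET must be set as above):

    // Hypothetical caller of the shared multipart upload helper (src/services/awsS3.js)
    const s3 = require('./src/services/awsS3');

    async function run() {
      try {
        // Streams the local file in ~10MB parts and completes the multipart upload.
        const result = await s3.uploadFile('db_mongo', 'mongodb/manual/mongo_2022-01-27');
        console.log('uploaded to', result.Location);
      } catch (err) {
        console.error('upload failed:', err.message);
        process.exitCode = 1;
      }
    }

    run();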