From 1c356962d4a003443eb02ddc320eed335b4297bc Mon Sep 17 00:00:00 2001 From: Rishi Kumar Date: Fri, 30 Aug 2024 04:10:00 +0530 Subject: [PATCH] chore: fix scripts and make the guide better --- docs/pg-migration-guide/README.md | 112 ++++++++++-------- modules/postgresql/gcp/bin/create-dms.sh | 57 +++++++-- .../gcp/bin/postgres-perms-update.sh | 21 +++- .../postgresql/gcp/bin/terraform-db-swap.sh | 1 + .../postgresql/gcp/bin/terraform-state-rm.sh | 5 +- modules/postgresql/gcp/main.tf | 2 +- modules/postgresql/gcp/outputs.tf | 16 ++- 7 files changed, 146 insertions(+), 68 deletions(-) diff --git a/docs/pg-migration-guide/README.md b/docs/pg-migration-guide/README.md index f611369d..ba3e1717 100644 --- a/docs/pg-migration-guide/README.md +++ b/docs/pg-migration-guide/README.md @@ -7,65 +7,77 @@ Before proceeding, please review the [known limitations](https://cloud.google.co - Decide upon a instance to upgrade: - We are choosing the `rishi-pg14-volcano-staging-pg-a34e9984` instance, a PostgreSQL 14 instance managed via the `galoy-infra/modules/postgresql/gcp` Terraform module. ![decide-source](./assets/decide-source-instance.png) -- On the terraform file of the decided instance, enable the `prep_upgrade_as_source_db` flag +- On the tofu file of the decided instance, enable the `prep_upgrade_as_source_db` flag ```hcl module "postgresql_migration_source" { -source = "git::https://github.com/GaloyMoney/galoy-infra.git//modules/postgresql/gcp?ref=" +source = "git::https://github.com/GaloyMoney/galoy-infra.git//modules/postgresql/gcp?ref=" # source = "../../../modules/postgresql/gcp" -instance_name          = "${var.name_prefix}-pg" -vpc_name               = "${var.name_prefix}-vpc" -gcp_project            = var.gcp_project -destroyable            = var.destroyable_postgres -user_can_create_db     = true -databases              = ["test"] -replication            = true -provision_read_replica = true -database_version       = "POSTGRES_14" +instance_name           = "${var.name_prefix}-pg" +vpc_name                = "${var.name_prefix}-vpc" +gcp_project             = var.gcp_project +destroyable             = var.destroyable_postgres +user_can_create_db      = true +databases               = ["test"] +replication             = true +provision_read_replica = true +database_version        = "POSTGRES_14" // Enable it as follows prep_upgrade_as_source_db   = true } ``` -The `prep_upgrade_as_source_db` flag configures the source database, initialises a new postgres destination and creates a connection profile with the migration user as required by the Database Migration Service. +The `prep_upgrade_as_source_db` flag configures the source database, initialises a new postgres destination and creates two connection profiles for source and destination as required by the Database Migration Service. 
-- ** The full specification of how the source instance needs to be configured can be found [Here](https://cloud.google.com/database-migration/docs/postgres/configure-source-database#configure-your-source-instance-postgres)
-- ** The specification for connection profile can be found [here](https://cloud.google.com/database-migration/docs/postgres/create-source-connection-profile)
-
-# Step 2: Start Database Migration Process
+Also add the following outputs, which we will need in later steps:
 
-> Reference for [Database Migration Service](https://cloud.google.com/sdk/gcloud/reference/database-migration/migration-jobs)
-
-Before proceeding with the DMS creation we will expose the required things by gcloud using the `output` block, add these output blocks to your main terraform file.
 ```sh
 output "source_connection_profile_id" {
-description = "The ID of the source connection profile"
-value = <module>.connection_profile_credentials["source_connection_profile_id"]
+  value = <module>.connection_profile_credentials["source_connection_profile_id"]
 }
 
 output "destination_connection_profile_id" {
-description = "The ID of the destination connection profile"
-value = <module>.connection_profile_credentials["destination_connection_profile_id"]
+  value = <module>.connection_profile_credentials["destination_connection_profile_id"]
 }
 
 output "vpc" {
-value = <module>.vpc
+  value = <module>.vpc
+}
+
+output "migration_destination_instance" {
+  value = <module>.migration_destination_instance
+  sensitive = true
 }
 
-output "migration_destination_database_creds" {
-value = <module>.migration_destination_database_creds
-sensitive = true
+output "source_instance" {
+  value = <module>.source_instance["conn"]
+  sensitive = true
 }
 
-output "source-instance-admin-creds" {
-value = <module>.admin-creds
-sensitive = true
+output "migration_sql_command" {
+  value = <module>.migration_sql_command
+  sensitive = true
 }
 ```
+
+Run:
+
+```sh
+$ tofu apply
+```
+
+- ** The full specification of how the source instance needs to be configured can be found [Here](https://cloud.google.com/database-migration/docs/postgres/configure-source-database#configure-your-source-instance-postgres)
+- ** The specification for connection profile can be found [here](https://cloud.google.com/database-migration/docs/postgres/create-source-connection-profile)
+
+# Step 2: Start Database Migration Process
+
+> Reference for [Database Migration Service](https://cloud.google.com/sdk/gcloud/reference/database-migration/migration-jobs)
+
+Run the `create-dms.sh` script to create the migration job and start demoting the destination instance. It reads the connection profile IDs and the VPC from the outputs we added in Step 1, and takes the tofu directory, GCP project, region and job name as arguments.
 ```sh
 # run the create-dms.sh script located in modules/postgresql/gcp/bin
-$ ./create-dms.sh
+$ ./create-dms.sh <tofu-dir> <gcp-project> us-east1 test-migration
 Enter the region: us-east1
 Enter the job name: test-migration
 Creating migration job 'test-migration' in region 'us-east1'...
@@ -114,7 +126,7 @@ Migration job 'test-migration' has started demoting the destination instance.
 The destination instance is being demoted. Run the following command after the process has completed:
 
 # The script will specify which command you need to run after the demotion is completed.
-gcloud database-migration migration-jobs start "test-migration" --region="us-east1"
+$ gcloud database-migration migration-jobs start "test-migration" --region="us-east1"
 ```
 
 > Run the start command that is prompted
@@ -133,21 +145,19 @@ $ gcloud database-migration migration-jobs describe "test-job" --region=us-east1
 
 > - Migration does not transfer privileges and users. Create users manually based on the old database.
 > - Once you migrated the database using DMS all objects and schema owner will become `cloudsqlexternalsync` by default.
 
-### Step 3.5: Handing the non-migrated settings and syncing state via `terraform`
+### Step 3.5: Handling the non-migrated settings and syncing state via `tofu`
 
 #### Step 3.5.1
 
 Log in to the `destination instance` as the `postgres` user and change the name of the `cloudsqlexternalsync` user to the `<admin-user>`.
 The value of `<admin-user>` and `destination-connection-string` can be found by running
 
 ```sh
-terraform output --json source-instance-admin-creds
-terraform output --json migration_destination_database_creds
-```
-
-```sh
-$ psql 
-postgres=> ALTER ROLE cloudsqlexternalsync RENAME TO '';
-postgres=> ALTER ROLE "" PASSWORD '';
+$ tofu output --json migration_sql_command
+# you will see the following keys:
+# psql_login <- psql command to log in to the destination database as the postgres user
+# rename_admin_user <- psql command to rename cloudsqlexternalsync to the admin user
+# set_admin_password <- psql command to change the admin user password
+# Run the commands, then proceed to the next step
 ```
 
 #### Step 3.5.2
 
Manipulate the old state to reflect the new state by running the two scripts loc
 
 ```sh
 $ ./terraform-db-swap.sh
-# This will ask for your terraform module name
+# Pass the tofu directory and your module prefix as arguments
 # And swap the state between the newer and old instance
 $ ./terraform-state-rm.sh
-# This will ask for your terraform module name, give it the same name as you gave before
-# This will remove all the conflicting state which terraform will try to remove manually
+# Pass the same tofu directory and module prefix as before
+# This removes all the conflicting state which tofu would otherwise try to destroy
 ```
 
 #### Step 3.5.3
@@ -195,7 +205,7 @@ module "postgresql" {
 Finally, do a
 
 ```sh
-terraform apply
+$ tofu apply
 ```
 
 The destination instance should be exactly as with the source PostgreSQL instance, expect backups which we will enable after promotion, and database artifacts which we will fix in the next step.
@@ -204,9 +214,15 @@
 
 Change the owners of the tables and schemas to the correct owner using the psql command:
 
+Get the connection string of the new PostgreSQL 15 instance by running:
+```sh
+$ tofu output --raw source_instance > pg_connection.txt
+```
+
+
 ```sh
-#TODO | Need to do a dry run
-./postgres-perms-update.sh
+#TODO | Need to do a dry run again
+$ ./postgres-perms-update.sh <database-name>
 ```
 
 # Step 4: Promote the instance
@@ -242,7 +258,7 @@ module "postgresql" {
   pre_promotion = false # <-we can also remove this line completely
 }
 ```
-Do a `terraform apply`
+Do a `tofu apply`
 
 # Step 6: Delete all the dangling resources
 
diff --git a/modules/postgresql/gcp/bin/create-dms.sh b/modules/postgresql/gcp/bin/create-dms.sh
index 9f965cdb..d2ef6b91 100755
--- a/modules/postgresql/gcp/bin/create-dms.sh
+++ b/modules/postgresql/gcp/bin/create-dms.sh
@@ -1,24 +1,56 @@
-#!/bin/bash
+#!/usr/bin/env bash
+set -ex
+
+# the directory we want to run the script in
+dir=${1}
+# the gcp project
+PROJECT=${2}
+# the gcp region
+REGION=${3}
+# the migration job name
+JOB_NAME=${4}
+
 TYPE="CONTINUOUS"
 
-# Get user input for region and job name
-read -p "Enter the region: " REGION
-read -p "Enter the job name: " JOB_NAME
+pushd ${dir}
 
-# Validate user input
-if [ -z "$REGION" ] || [ -z "$JOB_NAME" ]; then
-  echo "Error: Region and job name cannot be empty."
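+# Usage (all arguments are positional):
+#   ./create-dms.sh <tofu-dir> <gcp-project> <region> <job-name>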
+if [ -z "$PROJECT" ]; then + echo "Error: PROJECT cannot be empty." + exit 1 +fi +if [ -z "$REGION" ]; then + echo "Error: REGION cannot be empty." + exit 1 +fi +if [ -z "$JOB_NAME" ]; then + echo "Error: JOB_NAME cannot be empty." exit 1 fi +f [ ! -d "$dir" ]; then + echo "Error: Directory '$dir' does not exist." + exit 1 +fi + +# Function to check if a command exists +command_exists() { + command -v "$1" >/dev/null 2>&1 +} +# Set the command to use, defaulting to 'terraform' if 'tofu' is not available +if command_exists tofu; then + cmd="tofu" +else + cmd="terraform" +fi # Get Terraform outputs -SOURCE_ID=$(terraform output -raw source_connection_profile_id) -DEST_ID=$(terraform output -raw destination_connection_profile_id) -VPC=$(terraform output -raw vpc) +SOURCE_ID=$($cmd output -raw source_connection_profile_id) +DEST_ID=$($cmd output -raw destination_connection_profile_id) +VPC=$($cmd output -raw vpc) # Construct and run the gcloud command to create the migration job echo "Creating migration job '$JOB_NAME' in region '$REGION'..." gcloud database-migration migration-jobs create "$JOB_NAME" \ + --project="$PROJECT" \ --region="$REGION" \ --type="$TYPE" \ --source="$SOURCE_ID" \ @@ -35,6 +67,7 @@ fi # Demote the destination echo "Demoting the destination for migration job '$JOB_NAME'..." gcloud database-migration migration-jobs demote-destination "$JOB_NAME" \ + --project="$PROJECT" \ --region="$REGION" if [ $? -eq 0 ]; then @@ -46,4 +79,6 @@ fi # Mention instructions on how to start the DMS echo -e "\nThe destination instance is being demoted. Run the following command after the process has completed:" -echo -e "\n$ gcloud database-migration migration-jobs start \"$JOB_NAME\" --region=\"$REGION\"\n" \ No newline at end of file +echo -e "\n$ gcloud database-migration migration-jobs start \"$JOB_NAME\" --project=\"$PROJECT\" --region=\"$REGION\"\n" + +popd diff --git a/modules/postgresql/gcp/bin/postgres-perms-update.sh b/modules/postgresql/gcp/bin/postgres-perms-update.sh index e4b1ed9f..a9fd9d64 100755 --- a/modules/postgresql/gcp/bin/postgres-perms-update.sh +++ b/modules/postgresql/gcp/bin/postgres-perms-update.sh @@ -1,11 +1,12 @@ -#!/bin/bash +#!/usr/bin/env bash +set -ex -# Prompt user for input -read -p "Enter database name: " DB_NAME -read -p "Enter new owner: " NEW_OWNER -read -p "Enter PostgreSQL connection string: " PG_CON +DB_NAME=${1} +NEW_OWNER=${DB_NAME}-user +# READ PG_CON from a file +PG_CON=$(cat pg_connection.txt) -PSQL_CMD="psql $PG_CON -d $DB_NAME -At -c" +PSQL_CMD="psql $PG_CON -At -c" $PSQL_CMD "ALTER DATABASE postgres OWNER TO cloudsqlsuperuser;" $PSQL_CMD "ALTER SCHEMA public OWNER TO cloudsqlsuperuser;" @@ -19,4 +20,12 @@ for table in $tables; do $PSQL_CMD "ALTER TABLE public.\"$table\" OWNER TO \"$NEW_OWNER\";" done +# Get list of all sequences in the database +sequences=$($PSQL_CMD "SELECT sequence_name FROM information_schema.sequences WHERE sequence_schema = 'public';") + +# Loop through each sequence and change the owner +for sequence in $sequences; do + $PSQL_CMD "ALTER SEQUENCE public.\"$sequence\" OWNER TO \"$NEW_OWNER\";" +done + echo "Ownership of all tables in $DB_NAME has been granted to $NEW_OWNER." 
diff --git a/modules/postgresql/gcp/bin/terraform-db-swap.sh b/modules/postgresql/gcp/bin/terraform-db-swap.sh
index 52440483..1860f785 100755
--- a/modules/postgresql/gcp/bin/terraform-db-swap.sh
+++ b/modules/postgresql/gcp/bin/terraform-db-swap.sh
@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+set -ex
 
 dir=${1}
 module_prefix=${2}
diff --git a/modules/postgresql/gcp/bin/terraform-state-rm.sh b/modules/postgresql/gcp/bin/terraform-state-rm.sh
index 715bd09e..ce4a7dd0 100755
--- a/modules/postgresql/gcp/bin/terraform-state-rm.sh
+++ b/modules/postgresql/gcp/bin/terraform-state-rm.sh
@@ -1,8 +1,10 @@
 #!/usr/bin/env bash
+set -ex
+
 dir=${1}
 module_prefix=${2}
 
-pushd ${1}
+pushd ${dir}
 
 # Function to check if a command exists
 command_exists() {
@@ -21,4 +23,5 @@ $cmd state rm "${module_prefix}.module.migration"
 
 # remove admin user
 $cmd state rm "${module_prefix}.google_sql_user.admin"
+
 popd
diff --git a/modules/postgresql/gcp/main.tf b/modules/postgresql/gcp/main.tf
index 98663994..3ea56782 100644
--- a/modules/postgresql/gcp/main.tf
+++ b/modules/postgresql/gcp/main.tf
@@ -13,7 +13,7 @@ resource "google_sql_database_instance" "instance" {
   project             = local.gcp_project
   database_version    = local.database_version
   region              = local.region
-  deletion_protection = !local.destroyable
+  deletion_protection = local.prep_upgrade_as_source_db ? false : !local.destroyable
 
   settings {
     tier = local.tier
diff --git a/modules/postgresql/gcp/outputs.tf b/modules/postgresql/gcp/outputs.tf
index c18d8829..b963d24b 100644
--- a/modules/postgresql/gcp/outputs.tf
+++ b/modules/postgresql/gcp/outputs.tf
@@ -48,8 +48,24 @@ output "vpc" {
   value = "projects/${local.gcp_project}/global/networks/${local.vpc_name}"
 }
 
-output "migration_destination_database_creds" {
+output "migration_destination_instance" {
   value = local.prep_upgrade_as_source_db ? {
     conn = "postgres://postgres:${module.migration[0].postgres_user_password}@${module.migration[0].destination_instance_private_ip_address}:5432/postgres"
   } : {}
 }
+
+output "source_instance" {
+  value = {
+    conn = "postgres://${google_sql_user.admin.name}:${random_password.admin.result}@${google_sql_database_instance.instance.private_ip_address}:5432/postgres"
+  }
+  sensitive = true
+}
+
+output "migration_sql_command" {
+  value = local.prep_upgrade_as_source_db ? {
+    psql_login         = "psql postgres://postgres:${module.migration[0].postgres_user_password}@${module.migration[0].destination_instance_private_ip_address}:5432/postgres"
+    rename_admin_user  = "ALTER ROLE cloudsqlexternalsync RENAME TO \"${google_sql_user.admin.name}\";"
+    set_admin_password = "ALTER ROLE \"${google_sql_user.admin.name}\" PASSWORD '${random_password.admin.result}';"
+  } : {}
+  sensitive = true
+}
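+
+# Note: these commands are meant to be surfaced through root-level outputs and
+# consumed manually, e.g. `tofu output --json migration_sql_command`, then run
+# with psql against the destination instance (see docs/pg-migration-guide/README.md).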