diff --git a/__fixtures__/productConfig.mjs b/__fixtures__/productConfig.mjs
index 262a63fa7..b7c8f0f57 100644
--- a/__fixtures__/productConfig.mjs
+++ b/__fixtures__/productConfig.mjs
@@ -436,4 +436,26 @@ export const PRODUCT_CONFIG = {
// versionedDocs: true,
// websiteDir: 'website',
// },
+ 'well-architected-framework': {
+ /**
+ * ✅ Initial migration attempt: SEEMS TO WORK
+ *
+ * Maybe worth noting: versioned docs is not enabled for `terraform-docs-common`.
+ * `branchForLatest` is set to `main`. We treat the single version
+ * as `v0.0.x` in our version metadata in the current content API:
+ * https://content.hashicorp.com/api/content/terraform-docs-common/version-metadata?partial=true
+ */
+ /**
+ * TODO: `terraform-docs-common` has _both_ an `img` folder, _and_ a
+ * `public` folder. Need to investigate how these are used, and whether
+ * we need to move both over (eg assetDirs could be an array?)
+ */
+ assetDir: 'img',
+ contentDir: 'docs',
+ dataDir: 'data',
+ productSlug: 'well-architected-framework',
+ semverCoerce: semver.coerce,
+ versionedDocs: false,
+ websiteDir: 'website',
+ },
}
diff --git a/app/utils/productConfig.mjs b/app/utils/productConfig.mjs
index 58f1cbcd0..d65dbed3b 100644
--- a/app/utils/productConfig.mjs
+++ b/app/utils/productConfig.mjs
@@ -440,4 +440,26 @@ export const PRODUCT_CONFIG = {
// versionedDocs: true,
// websiteDir: 'website',
// },
+ 'well-architected-framework': {
+ /**
+ * ✅ Initial migration attempt: SEEMS TO WORK
+ *
+ * Maybe worth noting: versioned docs is not enabled for `terraform-docs-common`.
+ * `branchForLatest` is set to `main`. We treat the single version
+ * as `v0.0.x` in our version metadata in the current content API:
+ * https://content.hashicorp.com/api/content/terraform-docs-common/version-metadata?partial=true
+ */
+ /**
+ * TODO: `terraform-docs-common` has _both_ an `img` folder, _and_ a
+ * `public` folder. Need to investigate how these are used, and whether
+ * we need to move both over (eg assetDirs could be an array?)
+ */
+ assetDir: 'img',
+ contentDir: 'docs',
+ dataDir: 'data',
+ productSlug: 'well-architected-framework',
+ semverCoerce: semver.coerce,
+ versionedDocs: false,
+ websiteDir: 'website',
+ },
}
diff --git a/content/well-architected-framework/data/docs-nav-data.json b/content/well-architected-framework/data/docs-nav-data.json
new file mode 100644
index 000000000..be597161c
--- /dev/null
+++ b/content/well-architected-framework/data/docs-nav-data.json
@@ -0,0 +1,253 @@
+[
+ {
+ "title": "What is well-architected framework?",
+ "path": "implement-cloud-operating-model"
+ },
+ {
+ "title": "Cloud operating model",
+ "path": "cloud-operating-model"
+ },
+ {
+ "title": "Implementation resources",
+ "routes": [
+ {
+ "title": "Terraform reliability",
+ "path": "implementation-resources/terraform-reliability"
+ },
+ {
+ "title": "Packer reliability",
+ "path": "implementation-resources/packer-reliability"
+ },
+ {
+ "title": "Vault reliability",
+ "path": "implementation-resources/vault-reliability"
+ },
+ {
+ "title": "Consul reliability",
+ "path": "implementation-resources/consul-reliability"
+ },
+ {
+ "title": "Nomad reliability",
+ "path": "implementation-resources/nomad-reliability"
+ }
+ ]
+ },
+ {
+ "divider": true
+ },
+ {
+ "heading": "Framework pillars"
+ },
+ {
+ "title": "Automate and define processes",
+ "routes": [
+ {
+ "title": "Introduction",
+ "path": "operational-excellence/introduction"
+ },
+ {
+ "title": "Application deployments",
+ "routes": [
+ {
+ "title": "Application deployments",
+ "path": "automate-and-define-processes/application-deployments/application-deployments"
+ },
+ {
+ "title": "Evaluate existing deployments",
+ "path": "automate-and-define-processes/application-deployments/evaluate-existing-deployments"
+ },
+ {
+ "title": "Integrate CI/CD pipelines",
+ "path": "automate-and-define-processes/application-deployments/ci-cd"
+ },
+ {
+ "title": "Deploy applications",
+ "path": "automate-and-define-processes/application-deployments/deploy"
+ },
+ {
+ "title": "Package applications",
+ "path": "automate-and-define-processes/application-deployments/package"
+ },
+ {
+ "title": "Write and test applications",
+ "path": "automate-and-define-processes/application-deployments/testing"
+ }
+ ]
+ },
+ {
+ "title": "Automate infrastructure",
+ "routes": [
+ {
+ "title": "Automate infrastructure",
+ "path": "automate-and-define-processes/automate-infrastructure/automate"
+ },
+ {
+ "title": "Codify infrastructure",
+ "path": "automate-and-define-processes/automate-infrastructure/codify-infrastructure"
+ },
+ {
+ "title": "Use version control",
+ "path": "automate-and-define-processes/automate-infrastructure/use-version-control"
+ },
+ {
+ "title": "Identify reusable components",
+ "path": "automate-and-define-processes/automate-infrastructure/identify-reusable-components"
+ },
+ {
+ "title": "Atomic deployments",
+ "path": "automate-and-define-processes/automate-infrastructure/atomic-deployments"
+ },
+ {
+ "title": "Standardize workflows",
+ "path": "automate-and-define-processes/automate-infrastructure/standardize-workflows"
+ },
+ {
+ "title": "Plan for scale",
+ "path": "automate-and-define-processes/automate-infrastructure/scale"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "title": "Optimize resources",
+ "routes": [
+ {
+ "title": "Introduction",
+ "path": "optimize-resources/introduction"
+ }
+ ]
+ },
+ {
+ "title": "Design resilient systems",
+ "routes": [
+ {
+ "title": "Introduction",
+ "path": "design-resilient-systems/introduction"
+ },
+ {
+ "title": "Disaster recovery",
+ "routes": [
+ {
+ "title": "State management and disaster recovery",
+ "path": "design-resilient-systems/disaster-recovery/state-management-and-dr"
+ }
+ ]
+ },
+ {
+ "title": "Zero-downtime deployments",
+ "routes": [
+ {
+ "title": "Zero-downtime deployments",
+ "path": "design-resilient-systems/zero-downtime-deployments/zero-downtime-deployments"
+ },
+ {
+ "title": "Infrastructures",
+ "path": "design-resilient-systems/zero-downtime-deployments/infrastructure"
+ },
+ {
+ "title": "Application",
+ "path": "design-resilient-systems/zero-downtime-deployments/application"
+ },
+ {
+ "title": "Service mesh",
+ "path": "design-resilient-systems/zero-downtime-deployments/service-mesh"
+ }
+ ]
+ },
+ {
+ "title": "Fault tolerance",
+ "routes": [
+ {
+ "title": "Fault tolerant systems",
+ "path": "design-resilient-systems/fault-tolerance/fault-tolerant-systems"
+ },
+ {
+ "title": "Networking and communication protocols",
+ "path": "design-resilient-systems/fault-tolerance/networking-and-communication-protocols"
+ },
+ {
+ "title": "Redundancy and replication",
+ "path": "design-resilient-systems/fault-tolerance/redundancy-and-replication"
+ },
+ {
+ "title": "Resiliency and availability",
+ "path": "design-resilient-systems/fault-tolerance/resiliency-and-availability"
+ },
+ {
+ "title": "Scale and tune performance",
+ "path": "design-resilient-systems/fault-tolerance/scale"
+ },
+ {
+ "title": "Secure distributed systems",
+ "path": "design-resilient-systems/fault-tolerance/secure-distributed-systems"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "title": "Secure systems",
+ "routes": [
+ {
+ "title": "Introduction",
+ "path": "secure-systems/introduction"
+ },
+ {
+ "title": "Protect data",
+ "routes": [
+ {
+ "title": "Protect sensitive data",
+ "path": "secure-systems/protect-data/sensitive-data"
+ },
+ {
+ "title": "Data at-rest",
+ "path": "secure-systems/protect-data/data-at-rest"
+ },
+ {
+ "title": "Data in-transit",
+ "path": "secure-systems/protect-data/data-in-transit"
+ },
+ {
+ "title": "Tokenize data",
+ "path": "secure-systems/protect-data/tokenize-data"
+ }
+ ]
+ },
+ {
+ "title": "Prevent lateral movement",
+ "path": "secure-systems/prevent-lateral-movement"
+ },
+ {
+ "title": "Appendix",
+ "path": "secure-systems/appendix"
+ }
+ ]
+ },
+ {
+ "divider": true
+ },
+ {
+ "heading": "Reference architecture"
+ },
+ {
+ "title": "Terraform",
+ "href": "/well-architected-framework/terraform/enterprise-reference-architecture"
+ },
+ {
+ "title": "Vault (multi-cluster)",
+ "href": "/well-architected-framework/zero-trust-security/multi-cluster-architecture"
+ },
+ {
+ "title": "Vault (integrated storage)",
+ "href": "/well-architected-framework/zero-trust-security/raft-reference-architecture"
+ },
+ {
+ "title": "Consul",
+ "href": "/well-architected-framework/zero-trust-networking/reference-architecture"
+ },
+ {
+ "title": "Nomad",
+ "href": "/well-architected-framework/nomad/production-reference-architecture-vm-with-consul"
+ }
+]
diff --git a/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/application-deployments.mdx b/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/application-deployments.mdx
new file mode 100644
index 000000000..03ff8b729
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/application-deployments.mdx
@@ -0,0 +1,42 @@
+---
+page_title: Application deployments
+description: Learn how to deploy applications.
+---
+
+# Application deployments
+
+This guide describes the best practices for creating an application delivery
+strategy with HashiCorp tools. A well-designed deployment strategy helps you
+deliver your application to production faster and catch bugs before your users
+run into them. A complete deployment strategy has seven fundamental components:
+
+1. Write and test applications
+1. Package applications
+1. Provision infrastructure
+1. Deploy applications
+1. Connect and monitor applications
+1. Monitor infrastructure
+1. Securely access infrastructure
+
+There are also two components to consider throughout the entire deployment process:
+
+1. Define your security strategy
+1. Integrate with your CI/CD pipeline
+
+
+
+
+
+Your organization may have additional requirements for delivering your
+applications. For example, your security guidelines may require you to perform
+static code analysis on your codebase before you package it. After you package
+your application, your operations team may have a package registry that they
+require you to upload your application to.
+
+Once you identify and implement each required step of your deployment strategy,
+we recommend automating them in a CI/CD pipeline. These pipelines will help
+every team in your organization adopt your deployment strategy by enforcing your
+designed strategy.
+
+This guide gives our recommendations on how your organization can implement
+these seven steps, and covers how HashiCorp tools can fit into that cycle.
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/ci-cd.mdx b/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/ci-cd.mdx
new file mode 100644
index 000000000..19b8b60aa
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/ci-cd.mdx
@@ -0,0 +1,21 @@
+---
+page_title: Integrate CI/CD pipelines
+description: Learn how to integrate CI/CD pipelines into your application deployment strategy.
+---
+
+# Integrate CI/CD pipelines
+
+You can integrate each recommendation from this guide into your existing CI/CD
+pipeline. If you already have a solution to address some of these
+recommendations but are looking to improve other aspects of your application
+deployment strategy, each of these recommendations can be implemented
+individually.
+
+
+
+
+HashiCorp resources:
+
+- [Automate Packer with GitHub Actions](/packer/tutorials/cloud-production/github-actions)
+- [Automate Terraform with GitHub Actions](/terraform/tutorials/automation/github-actions)
+- [Retrieving CI/CD secrets from Vault](/well-architected-framework/security/security-cicd-vault)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/deploy.mdx b/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/deploy.mdx
new file mode 100644
index 000000000..b6cddc135
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/deploy.mdx
@@ -0,0 +1,62 @@
+---
+page_title: Deploy applications
+description: Learn how to deploy applications.
+---
+
+# Deploy applications
+
+Your application deployment process depends on how you package your application.
+For example, the process differs when you deploy your application on virtual
+machines (VM) versus deploying to a container orchestrator like Kubernetes or
+Nomad.
+
+If your applications run on virtual machines directly, we recommend that you
+package your application in the machine image itself rather than install it
+after deploying the VM. This immutable infrastructure approach means you know
+exactly what's running on the VM, simplifies scaling, and reduces deployment
+time. When you need to update the application, build a new machine image and
+deploy it as a new VM. This prevents ad-hoc changes from putting the VM in an
+unknown state.
+
+You can use Terraform to manage and deploy your applications to a container
+orchestrator like Nomad or Kubernetes. By using Terraform, you have a unified
+workflow to manage your application and infrastructure resources.
+
+### Kubernetes
+
+Kubernetes is a system for automating the deployment and scaling of
+containerized workloads. You tell Kubernetes the desired state of your workload,
+and it will automatically determine how to meet those requirements. The
+[Terraform
+Kubernetes provider](https://registry.terraform.io/providers/hashicorp/kubernetes/latest)
+lets you interact with the Kubernetes API to deploy and manage workloads. Helm
+is a popular packaging tool for Kubernetes workloads, and you can also deploy
+these packages with the [Helm Terraform
+provider](https://registry.terraform.io/providers/hashicorp/helm/latest/docs).
+
+HashiCorp resources:
+
+- [Manage Kubernetes resources via Terraform](/terraform/tutorials/kubernetes/kubernetes-provider)
+- [Deploy applications with the Helm provider](/terraform/tutorials/kubernetes/helm-provider)
+
+### Nomad
+
+[Nomad](/nomad) lets you orchestrate and manage workloads such as containers,
+standalone binaries, and batch jobs. Nomad [jobs](/nomad/docs/job-specification)
+define the entire workload to deploy to Nomad. Jobs include information about
+the applications to deploy, where to deploy them, how to network them, and more.
+To manage, update, and scale jobs, you can update your job file with the desired
+resources and Nomad will automatically reconcile the changes that you require.
+
+While you can manage workloads with the [Nomad CLI](/nomad/docs/commands) or
+API, we recommend that you use Terraform or Nomad Pack. The [Terraform
+Nomad provider](https://registry.terraform.io/providers/hashicorp/nomad/latest) lets
+you manage workloads using infrastructure-as-code, just as you would with your
+infrastructure. You can also use [Nomad
+Pack](https://github.com/hashicorp/nomad-pack), a package manager and templating
+tool for Nomad.
+
+HashiCorp resources:
+
+- [Get started with Nomad](/nomad/tutorials/get-started)
+- [Introduction to Nomad Pack](/nomad/tutorials/nomad-pack/nomad-pack-intro)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/evaluate-existing-deployments.mdx b/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/evaluate-existing-deployments.mdx
new file mode 100644
index 000000000..ea7e67422
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/evaluate-existing-deployments.mdx
@@ -0,0 +1,316 @@
+---
+page_title: Evaluate existing deployments
+description: Learn how to evaluate existing deployments.
+---
+
+# Evaluate existing deployments
+
+
+Manually deploying and configuring cloud infrastructure is a natural starting
+point for cloud practitioners. However, as applications mature, maintaining
+manually deployed infrastructure is unsustainable and is an industry
+anti-pattern.
+
+In this guide, you will learn how to identify manual cloud infrastructure
+deployments and progress from semi-automated to fully-automated deployments.
+
+In the following video, HashiCorp co-founder Armon Dadgar explains what
+infrastructure as code (IaC) is and why it's important.
+
+
+
+## Evaluate current deployments
+
+You should evaluate where your organization currently is in the automation
+maturity model. The automation maturity model describes the criteria from manual
+deployments to automated deployments and what you need to do to achieve each
+level of automation.
+
+- **Manual deployments**: Common methods include using the cloud UI dashboard or
+ connecting to a remote server and running commands through CLI or RDP. Manual
+ deployments introduce risks such as a lack of version control, human error,
+ inconsistent repeatability, hard-to-audit changes, and difficulty scaling.
+
+- **Semi-automation**: Instead of running deployment and configuration commands,
+ you run scripts, such as bash, python, and cloud-init or user-data.
+ Semi-automation gives you repeatability and basic version control. However,
+ sometimes, the script you run drifts from the version of the script in source
+ control.
+
+- **Fully-automated deployments**: You use version controlled scripts to deploy
+ your infrastructure in a repeatable and controlled manner. Automated
+ deployments use automation tools such as Ansible or Terraform, and run on
+ CI/CD systems.
+
+
+HashiCorp resources:
+- [Evaluating Your Current Provisioning Practices](/terraform/cloud-docs/recommended-practices/part2#your-current-configuration-and-provisioning-practices)
+
+## Identify manual processes
+
+You should review your infrastructure deployments to identify manual processes.
+You can then determine which processes you should focus on automating.
+
+The following are ways you can identify manual deployments in your cloud environment:
+
+- Start with the basics of your infrastructure. If your application runs on a
+ virtual server and you deploy it through the cloud UI, automating the creation
+ of that server with IaC is a good starting point.
+- Do you SSH or RDP into your servers to run commands? Common manual
+ configuration includes installing dependencies, setting up filesystems, and
+ installing security patches.
+- Review ticketing processes like JIRA or other help-desk software to find
+ recurring infrastructure requests, such as increasing server size, giving
+ users permissions, or creating new cloud infrastructure. Your organization
+ might be manually resolving these requests, and they are perfect examples of
+ manual processes to automate.
+- Ask your team what infrastructure processes they commonly work on.
+ Specifically, ask if any work is done through the cloud UI, as you should
+ automate that work.
+
+It is important to note that connecting to a system to run manual commands is
+sometimes unavoidable. In this case, you should use a secure connection method
+like HashiCorp Boundary. Boundary provides access to applications and critical
+systems with fine-grained authorizations without managing credentials or
+exposing your network.
+
+While it might seem overwhelming to find all of the things to automate, you can
+narrow the scope and focus on the following:
+
+- High-value processes such as database backups
+- Repeatable tasks like creating development and test environments
+- Error-prone operations such as security configurations
+- Time-consuming activities like large-scale system updates, performance testing, and audits.
+
+HashiCorp resources:
+- [Get started with HCP Boundary](/boundary/tutorials/get-started-hcp)
+
+## Implement semi-automated deployments
+
+Once you identify the manual deployments in your organization, you can start
+automating them. Going from manual to fully automated deployments can be
+challenging and overwhelming. It is common to migrate to semi-automated
+deployments before evolving into fully-automated ones.
+
+You should adopt the following semi-automated best practices:
+
+### Use version control systems (VCS)
+
+You should store your automation scripts and IaC in version control systems like
+GitHub or GitLab. Version control systems increase repeatability and reliability
+when deploying and configuring infrastructure. Each time you run a script, you
+should pull it down from your version control system to ensure it is up-to-date
+since the last time you ran it. You can also version scripts for environments.
+You can use tags in VCS for each environment, one for development, staging, and
+production.
+
+### Use scripts instead of manually running commands
+
+Infrastructure automation through scripting is a fundamental shift that can
+increase the reliability and security of your infrastructure and application.
+When you script your infrastructure, you reduce human error, enable scaling,
+increase consistency, and create a foundation for further automation.
+
+You can use infrastructure as code tools, such as Terraform and Ansible, to
+deploy and configure your infrastructure. Infrastructure as code lets you define
+your infrastructure using code and configuration files instead of manually
+configuring servers, networks, and other resources. This approach allows you to
+version, test, and deploy infrastructure changes just like you would with
+application code, making it easier to maintain consistency, automate
+deployments, and quickly recover from failures. By treating infrastructure as
+code, you can ensure your environments are reproducible, scalable, and
+maintainable.
+
+### Create application images with code
+
+You should automate building images that your application runs on. These images
+contain dependencies, security patches, and the application. Manual
+configuration can lead to installing incorrect dependencies, missing security
+patches, and inconsistent application installation and configuration.
+
+Packer and Dockerfiles create images using IaC and automation, provisioning with
+scripts and tools like Ansible. Packer is a tool for creating
+identical machine images for multiple platforms from a single source
+configuration. Packer is lightweight, runs on every major operating system, and
+is highly performant, creating machine images for multiple platforms in
+parallel.
+
+You can use Packer to build a golden image pipeline that creates a reusable base
+image called a golden image. A golden image is an image on top of which
+developers can build applications, letting them focus on the application itself
+instead of system dependencies and patches. A typical golden image includes
+common system, logging, and monitoring tools, recent security patches, and
+application dependencies.
+
+You can use this golden image as the base to install your application on. Once
+Packer installs and configures your application, you can use Terraform to deploy
+it. Terraform will tear down the existing server and replace it with a new one which
+runs the new version of your Packer generated image.
+
+HCP Packer further simplifies image creation and deployment by providing a
+central artifact registry for operations and development teams. HCP Packer
+stores metadata about the artifacts you build, including when you create the
+artifact, the associated platform, and which Git commit is associated with your
+build.
+
+### Create immutable infrastructure
+
+Immutable infrastructure is infrastructure that, once deployed, is never
+modified, only replaced. For example, in a mutable server, you update the server
+either by connecting to the server and running commands or by using a script.
+With immutable infrastructure, you would fully replace the server with a new
+one.
+
+Terraform, Packer, and Nomad facilitate immutable infrastructure.
+
+Terraform allows you to define and provision infrastructure environments, such
+as servers, virtual networks, and IAM roles and policies. When you change your
+IaC, Terraform will update your infrastructure components by updating the
+configuration or destroying and recreating it. To see what changes Terraform
+will apply, you can run `terraform plan` before running `terraform apply`.
+
+Packer creates and configures machine images that are ready to be run. You can
+use Terraform to deploy these images and `user_data` or other start-up scripts
+to ensure services start. With Packer, you can create new images when you need
+to update your application rather than update the existing infrastructure.
+
+Nomad orchestrates application workloads across clusters, treating application
+containers and instances as immutable. When you need to update your application,
+Nomad can replace running instances with new versions rather than modifying
+existing ones.
+
+HashiCorp co-founder and CTO Armon Dadgar explains the differences and
+trade-offs between mutable and immutable infrastructure.
+
+
+
+### Audit your cloud logs
+
+Once you start automating your infrastructure, it is important to make sure that
+infrastructure is not manually changed or deployed. By using tools like AWS
+CloudTrail, Azure Activity Log, or Google Cloud Audit Logs, you can uncover
+who is making manual changes through the console, when these changes occur, and
+what resources are being modified outside of automated processes.
+
+HashiCorp resources:
+- [Codify your entire infrastructure with Terraform](/well-architected-framework/operational-excellence/operational-excellence-manage-your-infrastructure-components-with-terraform)
+- [Best practices to automate infrastructure](/well-architected-framework/operational-excellence/operational-excellence-automate-infrastructure)
+- [Manage infrastructure and service monitoring](/well-architected-framework/reliability/reliability-deploy-application-monitoring-components)
+- Introduction to [Terraform](/terraform/intro), [Packer](/packer/docs/intro), and [Nomad](/nomad/intro)
+- [Build Immutable Infrastructure with Packer in CI/CD](/packer/guides/packer-on-cicd)
+- Learn to build a [golden image pipeline with HCP Packer](/packer/tutorials/cloud-production/golden-image-with-hcp-packer)
+
+External resources:
+- [Ansible community documentation](https://docs.ansible.com/?extIdCarryOver=true&sc_cid=701f2000001OH7YAAW)
+- Cloud logs documentation for [GCP](https://cloud.google.com/logging/docs/audit), [AWS](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html), and [Azure](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/data-platform-logs)
+
+## Implement fully-automated deployments
+
+A fully automated system is a holistic approach to managing the IaC lifecycle. A
+fully automated infrastructure deployment system will be composed of Git-driven
+infrastructure changes, implemented through a CI/CD system, with automated tests
+and monitoring.
+
+Fully automated systems build on the best practices of semi-automated systems
+(VCS, automation tools, audit). The following are additional best practices when
+you implement fully automated infrastructure.
+
+### Run Git-driven deployments through CI/CD
+
+When you manage your IaC through Git-driven development, you combine your IaC
+management with interactions to your Git VCS and CI/CD. When you want to modify
+your infrastructure, such as standing up or tearing down a server, you must
+commit a change to your VCS, which will kick off a CI/CD job to change your
+infrastructure. This process ensures that every infrastructure modification goes
+through proper testing, security scanning, and validation before reaching
+production.
+
+Using Git-driven development, you gain the following benefits:
+
+- Ties code to infrastructure which gives you documentation as code.
+- Audit trail of changes. The commit history provides a complete audit trail, showing who made what changes and when.
+- Allows you to scale infrastructure efficiently.
+- Automated infrastructure tests.
+
+With HCP Terraform, you can use the built-in VCS workflow to automatically
+trigger runs based on changes to your VCS repositories. The CLI-driven workflow
+allows you to quickly iterate on your configuration and work locally, while the
+VCS-driven workflow enables collaboration within teams by establishing your
+shared repositories as the source of truth for infrastructure configuration.
+
+You can manage your image creation with Git and CI/CD, similar to how you manage
+your other infrastructure. Once you commit a change to your Packer file, your
+CI/CD should trigger a Packer build. Upon completion, your CI/CD system should
+tag and upload your image to an image repository. You can use HCP Packer to
+store metadata about the images you build, including when you create the
+artifact, the associated platform, and which Git commit is associated with your
+build. HCP Packer allows your downstream processes, like Terraform, to consume
+these images efficiently.
+
+### Deploy self-service infrastructure
+
+Application developers can utilize IaC and temporary infrastructure without
+writing IaC. You increase application development velocity by creating a process
+for developers to quickly and reliably build the infrastructure they need to run
+their application code.
+
+HCP Waypoint is an internal developer platform (IDP) that allows platform teams
+to define golden patterns and workflows that enable a self-service experience
+for developers. Developers understand the requirements of their application,
+such as dependencies like MySQL and Redis, but they should not have to create and
+maintain the infrastructure on which their application runs.
+
+Platform teams define golden workflows for actions such as building an
+application, deploying to production, performing a rollback, and other
+workflows. Developers can execute these workflows with a simple UX while being
+abstracted from the details. HCP Waypoint integrates with your existing CI/CD
+systems while providing a consistent abstraction layer.
+
+Once developers deploy the templates to create infrastructure, they must
+maintain their application. HCP Waypoint actions allow developers to perform
+day 2 operations to maintain their application, such as rotating secrets,
+rebuilding a database index, or flushing an application's cache.
+
+### Use infrastructure monitoring and testing
+
+Infrastructure monitoring and testing are important to help prevent outages,
+security breaches, and performance issues before they impact your business. By
+continuously monitoring your infrastructure, you gain real-time visibility into
+system health, resource utilization, and performance metrics, allowing you to
+detect and address issues proactively rather than reactively.
+
+Testing complements monitoring by validating that your infrastructure works as
+intended before promoting it to production. Through comprehensive testing,
+including load testing, security scanning, and disaster recovery drills, you can
+verify that your infrastructure is not just running, but running correctly and
+securely.
+
+You can use Terraform and Packer to install and deploy monitoring agents into
+your application images. By automating the installation of the monitoring agents
+and deploying the application image, you can ensure you will have visibility
+over the infrastructure your application runs on.
+
+You can also use Terraform to configure cloud-native tools to monitor your cloud
+infrastructure stacks. You can create dashboards and alarms and automate
+responses to alerts.
+
+HashiCorp resources:
+- Learn how to use [VCS-driven workflow](/terraform/tutorials/cloud-get-started/cloud-vcs-change) with HCP Terraform
+- Learn how to [automate Packer with GitHub Actions](/packer/tutorials/cloud-production/github-actions)
+- [What is HCP Packer?](/hcp/docs/packer)
+- [What is HCP Waypoint?](/hcp/docs/waypoint)
+- [Get started with HCP Waypoint](/hcp/docs/waypoint)
+- [Manage infrastructure and service monitoring](/well-architected-framework/reliability/reliability-deploy-application-monitoring-components)
+
+External resources:
+- [What is GitOps?](https://about.gitlab.com/topics/gitops/)
+
+## Next steps
+
+In this document, you learned how to identify infrastructure that you should
+automate. You also learned the differences between semi and fully-automated
+infrastructure deployments. You now have the tools to go from manual to
+automated deployments.
+
+You can learn best practices on writing [Terraform with our Terraform code style
+guide](/terraform/language/style).
diff --git a/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/package.mdx b/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/package.mdx
new file mode 100644
index 000000000..41f772a00
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/package.mdx
@@ -0,0 +1,41 @@
+---
+page_title: Package applications
+description: Learn how to package applications.
+---
+
+# Package applications
+
+Packaging your application is the process of turning your source code,
+configuration, and dependencies into a single deployable artifact, such as a
+virtual machine image or Docker container.
+
+[Packer](/packer) lets you create identical artifacts for multiple platforms,
+such as AWS, Azure, and GCP, from a single source template. Packer templates
+define which source images to use and the steps to build your artifacts.
+
+HCP Packer stores and manages metadata associated with the artifacts you build
+with Packer, such as inheritance between artifacts. HCP Packer lets you label
+artifact versions with channels. If you find a security issue in an artifact and
+want to prevent people from deploying it, you can revoke a version of an
+artifact and point the label to a fixed, or previous version.
+
+You can use the [Terraform HCP
+provider](/packer/tutorials/hcp-get-started) to
+query your HCP Packer registry from your Terraform configuration and ensure that
+your infrastructure is using the most up-to-date artifact version.
+
+Together, this creates a golden image pipeline. This pipeline may look like the following example:
+
+1. Your DevOps team builds a base OS image that includes common dependencies between your applications.
+1. Your application team builds their application image on top of the base image.
+1. Your platform team deploys the application image with Terraform.
+1. If your security team finds a vulnerability, they can update the base image and revoke the vulnerable version so that no new images are built on top of it.
+1. Since HCP Packer knows the child images are now out of date, it shows which images need to be rebuilt on the new base image.
+
+
+
+HashiCorp resources:
+
+- [Get started with Packer](/packer/tutorials/aws-get-started)
+- [Get started with HCP Packer](/packer/tutorials/hcp-get-started)
+- [Build a golden image pipeline with HCP Packer](/packer/tutorials/cloud-production/golden-image-with-hcp-packer)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/testing.mdx b/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/testing.mdx
new file mode 100644
index 000000000..5e0af518e
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/automate-and-define-processes/application-deployments/testing.mdx
@@ -0,0 +1,31 @@
+---
+page_title: Write and test applications
+description: Learn how to evaluate existing deployments.
+---
+
+# Write and test applications
+
+We recommend that your application environments (development, test, and
+production) be as similar as possible. Inconsistent environments, such as
+different operating systems, external dependencies like databases, and
+configurations, may impact your application's behavior. These inconsistencies
+are usually more prominent between development and production environments.
+
+[Vagrant](/vagrant) lets you define a development environment in a configuration
+file called a [Vagrantfile](/vagrant/docs/vagrantfile). The Vagrantfile
+describes all the information about the development environment, including the
+base operating system image, setup scripts, and network configuration. When a
+developer runs the command `vagrant up`, Vagrant uses the Vagrantfile to build
+and configure a virtual machine, allowing you to reliably create consistent
+development environments.
+
+Using Vagrant, developers can automatically create development and test
+environments that mimic production as closely as possible. When your production
+environment changes, you can update the Vagrantfile to update the development
+environments. Using a common Vagrantfile also lets you create consistent
+development environments between teams for common tooling.
+
+HashiCorp resources:
+
+- [Get started with Vagrant](/vagrant/tutorials/getting-started)
+- [Use Vagrant with Packer](/packer/tutorials/aws-get-started/aws-get-started-post-processors-vagrant)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/atomic-deployments.mdx b/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/atomic-deployments.mdx
new file mode 100644
index 000000000..ec7ba87ad
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/atomic-deployments.mdx
@@ -0,0 +1,19 @@
+---
+page_title: Atomic deployments
+description: Learn how to deploy atomic infrastructure components.
+---
+
+# Deploy atomic infrastructure components
+
+Cloud infrastructure can be large and complex. You should deploy small updates to your infrastructure on a frequent cadence. Small and frequent deployments lower the risk of bad deployments and increase the ability to roll back changes.
+
+You can deploy infrastructure more frequently when you couple them with CI/CD pipelines. These pipelines increase the speed and reliability of your deployments, allowing you to ship updates to your services faster.
+
+Understanding the changes Terraform will apply to your infrastructure before you execute them is important. Terraform lets you preview changes with the `plan` command so you can understand the effects of your modifications on your infrastructure prior to deploying them. Many popular CI/CD products integrate with Terraform, allowing you to manage your infrastructure effectively.
+
+HashiCorp resources:
+- [Learn how to automate Terraform with GitHub Actions](/terraform/tutorials/automation/github-actions)
+- [Terraform plan](/terraform/cli/commands/plan)
+
+External resources:
+- [Infrastructure as Code with Terraform and GitLab](https://docs.gitlab.com/ee/user/infrastructure/iac/)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/automate.mdx b/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/automate.mdx
new file mode 100644
index 000000000..5ece74100
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/automate.mdx
@@ -0,0 +1,12 @@
+---
+page_title: Automate infrastructure
+description: Learn how to automate and define processes for your infrastructure.
+---
+
+# Automate infrastructure
+
+When you establish operational excellence, you enable your team to focus on development by creating safe, consistent, and reliable workflows for deployment. Standardized processes allow teams to work efficiently and more easily adapt to changes in technology or business requirements.
+
+Manually provisioning infrastructure is risky, inefficient, and difficult to scale. Operator error is inevitable, and while you can create audit logs of user actions, it can be hard to diagnose failures. As your organization grows, there will be a higher volume of changes to monitor and deploy, and manual processes will slow your development velocity. By standardizing on best practices and automating repeated workflows, you can more safely and efficiently introduce changes to your infrastructure.
+
+The documents in this section provide best practices and resources for achieving operational excellence. You will learn why you should incorporate infrastructure as code, version control, reusable components, atomic infrastructure, standard workflows, and planning for scale to accelerate your development process.
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/codify-infrastructure.mdx b/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/codify-infrastructure.mdx
new file mode 100644
index 000000000..cb3ba85b2
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/codify-infrastructure.mdx
@@ -0,0 +1,221 @@
+---
+page_title: Codify infrastructure
+description: Learn how to unify your cloud infrastructure with Terraform and HCL.
+---
+
+# Codify your entire infrastructure with Terraform
+
+Terraform lets you unify your infrastructure by using infrastructure as code (IaC) to create and manage your entire infrastructure stack. While organizations use Terraform to manage traditional infrastructure resources (for example, networking, servers, and databases), you can extend Terraform to manage your entire infrastructure stack using a single configuration language, tool, and implementation pattern. This may include vendors outside of traditional cloud providers such as vendors for CI/CD, observability, and databases. By adopting Terraform for your entire stack, your organization can consistently manage resources, minimize errors, and enhance deployment agility.
+
+The first section of this document explains the benefits of adopting Terraform and how it enables consistent workflows to manage your resources. The remaining sections dive into each provider category in the Terraform Registry so you can fully adopt Terraform to streamline your organization. These sections are not an exhaustive list of all the Terraform providers.
+
+The provider types covered in this document are:
+- Public cloud and Cloud automation
+- Container orchestration
+- Continuous Integration/Deployment (CI/CD)
+- Logging & Monitoring
+- Database
+- Version control systems
+- Security and authentication
+
+## Terraform-driven development
+
+Terraform uses providers to directly interact with cloud resources and services. Terraform creates and manages these resources by storing the state of your infrastructure. Terraform codifies cloud APIs into declarative configuration files, using HashiCorp Configuration Language (HCL). You define the desired state of your infrastructure in HCL and Terraform will deploy and configure the resources to match your configuration.
+
+In addition to configuring your networks, servers, and databases, you can also use Terraform to manage other resources like containers, machine images, and observability services. Terraform enables you to create an infrastructure delivery workflow where you create consistent infrastructure, application configurations, and images. This workflow consists of adopt, build, standardize, and scale.
+
+- **Adopt:** Compose infrastructure as code in a Terraform file using HCL to provision resources from any infrastructure provider.
+- **Build:** Infrastructure automation workflows to compose, collaborate, reuse, and provision infrastructure as code across IT operations and teams of developers.
+- **Standardize:** Establish guardrails for security, compliance, and cost management through role-based access controls, policy, enforcement, and audit.
+- **Scale:** Extend workflow automation to all teams in the organization with self-service infrastructure as code and integrate with VCS, ITSM, and CI/CD.
+
+The Terraform Registry is a single source for finding providers and their supporting documentation. You can find official, partner, and community providers, modules, policy libraries, and run tasks in the Terraform Registry. The Terraform Registry is a great first step in learning all the resources you can manage with Terraform.
+
+
+Modules are the main way to package and reuse resource configurations with Terraform. This is especially useful for standardizing your infrastructure deployments. You can find publicly available Terraform modules for configuring many kinds of common infrastructure in the Terraform Registry. These modules are free to use, and Terraform can download them automatically if you specify the appropriate source and version in a module call block.
+
+Your organization may produce modules tailored for your own infrastructure needs. HCP Terraform and Terraform Enterprise both include a private module registry for sharing modules internally within your organization.
+
+HashiCorp resources:
+- [Terraform providers registry](https://registry.terraform.io/browse/providers)
+- [Terraform providers documentation](/terraform/language/providers)
+- Learn to [write your own Terraform provider](/terraform/tutorials/providers-plugin-framework/providers-plugin-framework-provider)
+- Learn about [Terraform modules](/terraform/language/modules)
+- Learn HCL by [writing Terraform configuration](/terraform/tutorials/configuration-language)
+- Learn HCL by following our [Terraform certification prep](/terraform/tutorials/certification-003/associate-study-003#use-and-create-modules)
+
+### Use policy as code
+
+When you provision infrastructure, manage secrets, and manage your services, you want to ensure they follow certain behaviors. By managing policy with infrastructure as code, you can unify the process of maintaining and implementing policies across your entire infrastructure stack. This approach brings consistency, automation, and version control to policy management.
+
+HCP Terraform uses Sentinel to enable granular policy control for your infrastructure. Sentinel is a language and policy framework, which restricts Terraform actions to defined, allowed behaviors. Policy authors manage Sentinel policies in HCP Terraform with policy sets, which are groups of policies. Organization owners control the scope of policy sets by applying certain policy sets to the entire organization or by selecting workspaces.
+
+
+
+
+
+
+
+A common Sentinel policy is to help control cloud cost. By using these policies, in combination with HCP Terraform, you can ensure your IaC doesn’t create infrastructure that isn't appropriate for your budget.
+
+The following is an example of a policy that limits the creation of instances outside of the three listed instance types.
+
+```hcl hideClipboard
+import "tfplan"
+
+main = rule {
+ all tfplan.resources.aws_instance as _, instances {
+ all instances as _, r {
+ r.applied.instance_type in ["t2.micro", "t2.small", "t2.medium"]
+ }
+ }
+}
+```
+
+When organizations use Sentinel in HCP Terraform, they can ensure that all infrastructure changes adhere to corporate policies, security standards, and compliance requirements. This proactive approach to policy enforcement helps prevent misconfigurations, reduces security risks, and maintains consistency across the infrastructure lifecycle.
+
+You can also enforce OPA policies with HCP Terraform. These policies, written in Rego, can validate resource configurations, enforce tagging standards, control costs, and ensure adherence to company or regulatory requirements. Terraform runs OPA policy checks before each plan or apply, preventing implementation of non-compliant changes.
+
+HashiCorp resources:
+- [HCP Sentinel](https://www.hashicorp.com/sentinel)
+- [Get started with Sentinel](/sentinel/intro/getting-started)
+- [Sentinel language](/sentinel/docs/language)
+- Define [OPA policies with HCP Terraform](/terraform/cloud-docs/policy-enforcement/opa)
+
+## Public cloud and Cloud automation
+
+A good starting point when unifying your cloud infrastructure is understanding all the resources Terraform can manage in your cloud. The best way to do this is to review the cloud provider resources located in the Terraform Registry.
+
+Terraform integrates into most major clouds, including AWS, Azure, GCP, Oracle, VMWare, and OVHcloud. These cloud providers give you the resources to create a full cloud stack that’s ready to run your application. Cloud automation providers give you specialized cloud functions, and generally will not provide you with all the resources that the major cloud providers will. These providers include Helm, F5, Ansible, and more. A common pattern is to review the cloud documentation to determine what cloud infrastructure you want to deploy, and then refer to the provider documentation to learn which resource you should use.
+
+You can also visit the cloud provider modules. These modules give you pre-written code that you can use to deploy your infrastructure. For example, you can use the AWS S3-bucket module to deploy an S3 bucket by only passing in the required variable and authenticating to AWS.
+
+HashiCorp resources:
+- List of [public cloud](https://registry.terraform.io/browse/providers?category=public-cloud) and [cloud automation](https://registry.terraform.io/browse/providers?category=cloud-automation) providers in the Terraform Registry.
+- [Get started tutorials for AWS, Azure, and Google Cloud](/terraform/tutorials)
+- [Provision infrastructure with Terraform Cloud-Init](/terraform/tutorials/provision/cloud-init)
+
+## Container orchestration
+Terraform can automate the provisioning, configuration, and management of orchestration systems. These systems range from self-managed and cloud-managed orchestrators like Nomad and Kubernetes to cloud-native container services like AWS ECS.
+
+We recommend you separate the configuration to deploy, configure, and manage the orchestrator from the configuration to deploy services to the orchestrator. For example, you would have Terraform deploy your Kubernetes system in one configuration and then deploy services like NGINX in a different configuration.
+
+### Kubernetes
+
+Terraform allows you to manage Kubernetes as infrastructure as code. It can create and manage cloud resources such as virtual machines, networks, load balancers, and storage systems across various cloud providers like AWS, GCP, and Azure. For example, on AWS, Terraform can set up an EKS (Elastic Kubernetes Service) cluster, configure VPCs, subnets, and security groups, and provision EC2 instances to serve as worker nodes.
+
+Terraform can interact directly with Kubernetes to manage cluster resources. Terraform can use the Kubernetes provider to create and manage namespaces, deploy workloads, set up services, and configure ingress rules. It can also handle more complex Kubernetes concepts like role-based access control (RBAC) by enabling you to set up roles, role bindings, and service accounts.
+
+While self-managed Kubernetes is the **Container orchestration** category in the Terraform Registry, the cloud-managed versions are in the cloud-specific provider documentation. For example, the AWS EKS (Elastic Kubernetes Service) documentation is in the AWS provider documentation under `aws_eks_cluster`.
+
+Benefits of using Terraform:
+- Deploy orchestrator to multiple environments
+- Manage container workloads
+- Manage orchestrator configuration with IaC
+- Manage hardware orchestrator runs on
+
+HashiCorp resources:
+- List of [container orchestration](https://registry.terraform.io/browse/providers?category=container-orchestration) providers in the Terraform Registry
+- Tutorials to [manage Kubernetes with Terraform](/terraform/tutorials/kubernetes)
+- Deploy cloud native Kubernetes: [EKS](/terraform/tutorials/kubernetes/eks), [AKS](/terraform/tutorials/kubernetes/aks), and [GKE](/terraform/tutorials/kubernetes/gke)
+- [Deploy federated multi-cloud Kubernetes clusters](/terraform/tutorials/kubernetes/multicloud-kubernetes)
+- [Manage Kubernetes resources via Terraform](/terraform/tutorials/kubernetes/kubernetes-provider)
+- [Deploy applications with the Helm provider](/terraform/tutorials/kubernetes/helm-provider)
+- [HCP Terraform Operator for Kubernetes overview](/terraform/cloud-docs/integrations/kubernetes)
+
+## Continuous integration/deployment (CI/CD)
+
+When you use Terraform to manage your CI/CD, you are able to manage the CI/CD system’s configuration through version control and create a defined approval process for system modifications.
+
+An example is managing CI/CD build agents. CI/CD systems use build agents to run jobs like building software, or deploying infrastructure. CI/CD tools, like GitHub and GitLab, often offer build agents as a service, meaning they manage the build agent for you. However, you can manage the build agents yourself, which gives you control of hardware, operating system, and software tools. Packer can create an image and install the agent and other software. The build agent image can be a VM image or a container. Terraform can then create the build infrastructure, whether it’s VMs or container clusters, and deploy your build agents.
+
+Terraform can manage other components of your CI/CD systems. As CI/CD systems are vital to your organization's operational excellence, managing these systems as code will increase their reliability and security by adding a defined approval process and audit logs for configuration changes.
+
+Benefits of using Terraform:
+- Configure build agents and build infrastructure
+- Set up CI/CD in dev, staging, and production environments
+- Manage security and IAM as code
+- Manage build tokens and keys
+- Create approval process and audit logs for configuration changes
+
+HashiCorp resources:
+- List of [CI/CD](https://registry.terraform.io/browse/providers?category=ci-cd) providers in the Terraform Registry
+- Terraform [GitLab](https://registry.terraform.io/providers/gitlabhq/gitlab/latest/docs/resources/runner) and [GitHub](https://registry.terraform.io/providers/integrations/github/latest/docs) providers, with documentation to manage build agents
+- [Terraform GitHub Actions](/terraform/tutorials/automation/github-actions)
+
+External resources:
+- Learn how to [manage BuildKite CI/CD with Terraform](https://buildkite.com/blog/manage-your-ci-cd-resources-as-code-with-terraform)
+
+## Logging and monitoring
+
+You can use Terraform to deploy and manage logging and monitoring services. Instead of creating scripts to interact with monitoring tools APIs, you can directly configure monitoring solutions like Datadog and Grafana with their Terraform provider.
+
+With Terraform, you can practice monitoring as code (MaC): install and set up monitoring agents to collect metrics and logs of your infrastructure and applications, create dashboards to view the metrics, and set up alerts to notify you of issues. MaC gives you all of the benefits of infrastructure as code, such as version control and automated deployments, increasing your organization’s reliability and meeting RTO and RPO expectations.
+
+Benefits of using Terraform:
+- Use monitoring as code
+- Create dashboards and alerts with infrastructure as code
+
+HashiCorp resources:
+- List of [logging & monitoring](https://registry.terraform.io/browse/providers?category=logging-monitoring) providers in the Terraform Registry
+- [Manage infrastructure and service monitoring](/well-architected-framework/reliability/reliability-deploy-application-monitoring-components)
+- [Automate monitoring with the Terraform Datadog provider](/terraform/tutorials/use-case/datadog-provider)
+
+External resources:
+- Learn how to [manage Grafana with Terraform](https://grafana.com/docs/grafana-cloud/developer-resources/infrastructure-as-code/terraform/)
+
+## Database
+
+Terraform can deploy, manage, and interact with database management systems. You can use cloud providers, like AWS, GCP, or Azure, to deploy database services such as RDS, and vendor providers to deploy databases like Cockroach, and MongoDB.
+
+Terraform offers features beyond just deploying databases. It can manage database schemas, allowing you to version control database structures. You can also manage user permissions, providing consistent access controls across different environments. Terraform can also handle cross-database operations like creating read replicas or configuring cross-database access. It can also manage integrations of your database with other services, such as communication with VMs.
+
+Upon database creation, you can also data seed your database, which is great for creating test environments that DBAs can quickly use. With IaC, you can make sure you're seeding the same data across multiple environments, which will increase the reliability of your tests, as you know your data will be consistent.
+
+Benefits of using Terraform:
+- Automate seed and migrate data during database deployment
+- Provision and configure database instances
+- Manage cross-database and service integrations
+- Automate backup plans in multi-cloud/region
+
+HashiCorp resources:
+- List of [database providers](https://registry.terraform.io/browse/providers?category=database) in the Terraform Registry
+
+## Version control systems
+
+Version control systems (VCS), such as GitHub, GitLab, and Bitbucket, allow you to store and version your code. VCS fosters collaboration by making it easy to share and collaborate on code with peers. Terraform can manage the installation and configuration of on-prem VCS systems and configure VCS as a service.
+
+Automating VCS configuration with Terraform improves the reliability and security of your infrastructure by applying the same controls you use for managing applications and infrastructure. By managing your VCS as code, you can reduce manual configuration errors, integrate VCS configuration into CI/CD, and enforce security best practices at a granular level.
+
+Benefits of using Terraform:
+- Set default branches and branch protections
+- Create branches and repositories
+- Manage code releases
+- Create and manage security controls, users, and groups
+- Install and configure your VCS in the cloud and on-prem
+
+HashiCorp resources:
+- List of [version control systems](https://registry.terraform.io/browse/providers?category=version-control-systems) in the Terraform Registry
+- [Manage GitHub Users, Teams, and Repository Permissions](/terraform/tutorials/it-saas/github-user-teams)
+
+## Security and authentication
+
+Terraform can manage cloud native security along with vendor security tools. You can deploy identity and access management, encryption keys, network security like firewalls, and auditing. You can manage all these controls as code, allowing you to version and audit code changes.
+
+Terraform can also connect to secret management tools like HashiCorp Vault to use its security features. You will have more control over your secrets and can select a secret management solution that provides value to your specific organizational needs.
+
+Benefits of using Terraform:
+- Manage your cloud IAM
+- Create your security and auth with IaC
+- Integrate third-party security services into your cloud environment
+- Audit your security changes through VCS
+- Connect to secret management tools like Vault
+
+HashiCorp resources:
+- List of [security and authentication providers](https://registry.terraform.io/browse/providers?category=security-authentication) in the Terraform Registry
+
+## Next steps
+
+You can use Terraform to codify your entire infrastructure stack with a variety of providers in the Terraform Registry. When you use Terraform providers, you are able to use infrastructure as code, gaining benefits like reducing human error, version control, and automation.
+
+To learn more about how to use Terraform and HCL, visit our [Terraform code style guide](/terraform/language/style).
+
diff --git a/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/identify-reusable-components.mdx b/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/identify-reusable-components.mdx
new file mode 100644
index 000000000..aae84d36e
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/identify-reusable-components.mdx
@@ -0,0 +1,30 @@
+---
+page_title: Identify reusable components
+description: Learn how to identify reusable components.
+---
+
+# Identify reusable components
+
+If you repeatedly provision the same group of resources, you can refactor your configuration to use reusable collections instead. Reusable collections of components reduce the time it takes to deploy infrastructure by allowing developers to reuse configuration instead of writing it from scratch. Developers can also design reusable collections to comply with organizational best practices and security guidelines.
+
+Defining reusable collections of components, such as Terraform modules, also reduces your time to provision, giving engineers a configurable way to deploy commonly used resources. You can adapt these components to account for changes to service demand, modify them based on failure modes, and release updates to allow downstream users to deploy your updated configuration.
+
+A Terraform module is a set of Terraform configuration files in a single directory. Modules are reusable and customizable; you can wrap modules with configurations to fit your organization's standards. Creating a more modular infrastructure encourages your organization to decouple services by helping you focus on logically related resources. Decoupling can reduce the scope of failure and enable more efficient deployment due to reduced system dependencies.
+
+For example, if your team manages object storage for multiple applications that all follow your organization's common standards, such as security or lifecycle management, you can use an object storage Terraform module for your cloud providers. The object storage module can contain configuration such as lifecycle policies, or security standards. You can store and version the module in a version control repository or Terraform registry and share it across your organization for developers to access.
+
+Machine images can also benefit from following a reusable component workflow, commonly called a golden image. A golden image is an image on top of which developers can build applications, letting them focus on the application itself instead of system dependencies and patches. A typical golden image includes a common system, logging and monitoring tools, recent security patches, and application dependencies.
+
+You can create a golden image with Packer and make it available for your developers and operations teams. These teams can then use Packer to ingest the golden image and install their applications and other dependencies before deploying the image with tools like Terraform. You can integrate this process with a CI/CD system to create a complete application deployment workflow for deploying to your cloud infrastructure. You can also use this workflow for containers.
+
+If you have common infrastructure that your developers use to deploy their applications, you can use HCP Waypoint to accelerate deployments. Waypoint templates allow platform engineers to pre-define infrastructure in a Terraform no-code module. You can create a template with common infrastructure that complies with your organization's security, finance, scaling, and other policies.
+
+For example, a template can consist of a code repository template configured with your organization's default frontend framework, linting libraries, and CI/CD pipelines. A developer could use the Waypoint template to deploy their application and know that the underlying infrastructure is configured correctly. Other template examples include a production-ready Kubernetes cluster or backend API framework configured for serverless.
+
+HashiCorp resources:
+- [What is a Terraform module](/terraform/language/modules)
+- [Terraform module registry](https://registry.terraform.io/browse/modules)
+- [Learn how to create Terraform modules](/terraform/language/modules/develop)
+- [Terraform create and use no-code modules](/terraform/tutorials/cloud/no-code-provisioning)
+- [Learn how to reuse configuration with modules](/terraform/tutorials/modules)
+- [Build a golden image pipeline with HCP Packer](/packer/tutorials/cloud-production/golden-image-with-hcp-packer)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/scale.mdx b/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/scale.mdx
new file mode 100644
index 000000000..6e7a484b5
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/scale.mdx
@@ -0,0 +1,23 @@
+---
+page_title: Plan for scale
+description: Learn how to plan for variations in capacity and traffic by automating scaling events.
+---
+
+# Plan for scale
+
+You should plan for variations in capacity and traffic by automating scaling events. By using monitoring and alerting to track your infrastructure and service resource usage, you can proactively and dynamically respond to varying demands for your services, ensuring more reliability and resilience.
+
+Most major cloud providers have native auto-scaling features. You can use Terraform to manage the autoscaling configurations through the auto-scaling resources, such as the `aws_autoscaling_group` resource.
+
+Monitoring cost is an important factor when planning for scale. HashiCorp's Sentinel is a policy-as-code framework that allows you to introduce logic-based policy decisions to your systems. Codifying your policies offers the same benefits as IaC, allowing for collaborative development, visibility, and predictability in your operations. You can use Sentinel to help manage your infrastructure spending.
+
+HashiCorp resources:
+- [Manage infrastructure and service monitoring](/well-architected-framework/reliability/reliability-deploy-application-monitoring-components)
+- [Manage cloud native resources monitoring with Terraform](/well-architected-framework/reliability/reliability-deploy-application-monitoring-components#deploy-cloud-native)
+- [Monitor infrastructure cost with Sentinel](/terraform/tutorials/cloud-get-started/cost-estimation)
+- Learn about [HashiCorp Sentinel](https://www.hashicorp.com/sentinel)
+
+External resources:
+- [AWS Auto scaling](https://aws.amazon.com/autoscaling/)
+- [Azure Auto scaling](https://learn.microsoft.com/en-us/azure/azure-monitor/autoscale/autoscale-overview)
+- GCP Auto scaling [instances](https://cloud.google.com/compute/docs/autoscaler) and [load balancing](https://cloud.google.com/compute/docs/load-balancing-and-autoscaling)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/standardize-workflows.mdx b/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/standardize-workflows.mdx
new file mode 100644
index 000000000..4357c4a7e
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/standardize-workflows.mdx
@@ -0,0 +1,19 @@
+---
+page_title: Standardize workflows
+description: Learn how to standardize and automate workflows.
+---
+
+# Standardize and automate workflows
+
+One of the key HashiCorp principles is to design for workflows, not underlying technologies. Focusing on workflows gives you the flexibility to introduce new tools more easily to your organization as necessary. When establishing a culture of automation, you should also ensure that you regularly reflect on your operations procedures as your team evolves. You can more easily modify and adjust standardized automation procedures than inconsistent manual processes. This also allows you to review any operational failures and update your workflows accordingly.
+
+Terraform allows you to standardize your cloud infrastructure workflow to manage resources across cloud providers, so you do not need to learn provider-specific workflows. Standard cloud infrastructure workflows let your team work more efficiently and enable you to choose the best service for the job rather than tying you to any one platform. HCP Terraform has run triggers that can combine multiple workflows. When one workflow completes, such as creating a Kubernetes cluster, a second workflow can start automatically to create a Vault instance for the Kubernetes cluster to use.
+
+As highlighted in the [reusable components](#identify-reusable-components) section, you can use Packer to create a standard and automated workflow for multi-cloud deployments. Packer allows you to configure an image from a single configuration file called a template, which will create multiple images that can be ingested by cloud providers such as AWS, Azure, and GCP.
+
+HashiCorp resources:
+- [Terraform multi-cloud provisioning](https://www.terraform.io/use-cases/multi-cloud-deployment)
+- [Build a golden image pipeline with HCP Packer](/packer/tutorials/cloud-production/golden-image-with-hcp-packer)
+- [HCP Terraform Run triggers](/terraform/cloud-docs/workspaces/settings/run-triggers)
+- [Implement HashiCorp's Tao and principles](/well-architected-framework/operational-excellence/operational-excellence-tao)
+- [Get started with Terraform on AWS, Azure, GCP, and OCI](/terraform/tutorials)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/use-version-control.mdx b/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/use-version-control.mdx
new file mode 100644
index 000000000..14ea78466
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/automate-and-define-processes/automate-infrastructure/use-version-control.mdx
@@ -0,0 +1,25 @@
+---
+page_title: Use version control
+description: Learn how to version control your IaC.
+---
+
+# Use version control
+
+Version control systems add predictability and visibility to your infrastructure management process by creating a single source of truth for your infrastructure configuration. Storing your configuration in version control also allows you to revert infrastructure to previous commits, tags, or releases.
+
+Version control also helps facilitate collaboration between team members by allowing them to test out specific code versions locally or remotely. They will also be able to conduct code reviews by leaving comments or suggestions for the version they are testing.
+
+HCP Terraform streamlines your development process by integrating directly with your version control system and CI/CD pipelines. These integrations provide previews of your infrastructure changes before you apply them. This lets your team review and approve changes before you apply them.
+
+HashiCorp provides GitHub Actions that integrate with the HCP Terraform API. These actions let you create your own custom CI/CD workflows to meet your organization's needs.
+
+
+
+HashiCorp resources:
+- Learn how to use HCP Terraform to [automate Terraform with GitHub Actions](/terraform/tutorials/automation/github-actions).
+- [Why should I use version control for my infrastructure?](https://www.hashicorp.com/resources/why-use-version-control-for-infrastructure)
+- [Terraform code style guide](/terraform/language/style)
+- [Write Terraform tests](/terraform/tutorials/configuration-language/test)
+
+External resources:
+- [DevOps - Version your infrastructure](https://devops.com/version-your-infrastructure/)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/automate-and-define-processes/introduction.mdx b/content/well-architected-framework/docs/docs/automate-and-define-processes/introduction.mdx
new file mode 100644
index 000000000..e69de29bb
diff --git a/content/well-architected-framework/docs/docs/cloud-operating-model.mdx b/content/well-architected-framework/docs/docs/cloud-operating-model.mdx
new file mode 100644
index 000000000..9989ecd5e
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/cloud-operating-model.mdx
@@ -0,0 +1,80 @@
+---
+layout: docs
+page_title: What is the cloud operating model?
+description: The cloud operating model is a new approach for IT operations that organizations need to be successful with cloud adoption and thrive in an era of multi-cloud architecture.
+---
+
+# What is the cloud operating model?
+
+The cloud operating model is a new approach for IT operations that enables organizations to be successful with cloud adoption and thrive in an era of multi-cloud architecture.
+
+
+
+HashiCorp's [cloud operating model](https://www.hashicorp.com/cloud-operating-model) enables your organization to unlock the fastest path to value in a modern
+multi-cloud datacenter. When your organization adopts this common operating
+model, it enables people, processes, and tools to work most efficiently.
+
+The [Unlocking the cloud operating model](https://www.hashicorp.com/cloud-operating-model)
+whitepaper reviews each component of this approach and the path to standardizing
+application delivery across all layers necessary to support a cloud-based architecture.
+
+This document provides a brief overview of each component in that whitepaper and an introduction to **HashiCorp Well-Architected Framework**.
+
+### Multi-datacenter transition
+
+To begin, your organization must negotiate the initial challenge: the shift from largely dedicated servers in a private datacenter to a pool of compute capacity available on-demand, with many cloud providers.
+
+
+
+This approach allows your organization to optimize your infrastructure
+for consistent and reusable workflows across multiple clouds while addressing
+the common enterprise challenges of:
+
+- core business databases and internal applications that must stay private.
+- application delivery that's dependent on multiple teams.
+- technology changes from contained VM environments to cloud 'resources'.
+
+### Cloud operating model foundations
+
+Your IT organization can deliver on-demand resources when infrastructure is consistently provisioned, secured, connected, and run. For most enterprises, delivering on-demand resources will require a transition in each of those areas.
+
+
+
+- **Provision**. The infrastructure layer transitions from dedicated servers at
+ limited scale to a dynamic environment where organizations can adjust to increased demand
+ by spinning up thousands of servers and scaling them down when not in use.
+- **Secure**. The security layer transitions from a fundamentally “high-trust” world
+ with a strong perimeter and firewall to a “low-trust” or “zero-trust” environment
+ with no clear or static perimeter.
+- **Connect**. The networking layer transitions from heavily dependent on the physical
+ location and IP address of services and applications to using a dynamic registry of
+ services for discovery, segmentation, and composition.
+- **Run**. The runtime layer shifts from deploying artifacts to a static application server
+ to deploying applications with a scheduler atop a pool of infrastructure which is
+ provisioned on-demand. In addition, new applications have become collections of services
+ that are dynamically provisioned, and packaged in multiple ways: from virtual machines
+ to containers.
+
+### HashiCorp stack implementation
+
+Implementation of the HashiCorp stack eases the infrastructure transition by providing the
+consistency at each layer for your organization's people, processes, and tools. There are
+five steps in the journey that we have seen organizations adopt successfully.
+
+1. Multi-cloud infrastructure provisioning with Terraform.
+1. Multi-cloud security with Vault.
+1. Multi-cloud service networking with Consul.
+1. Multi-cloud application delivery with Nomad.
+1. Industrialized application delivery process.
+
+
+
+## HashiCorp Well-Architected Framework
+
+HashiCorp's well-architected framework extends the cloud operating model and provides step-by-step guidance for implementing the HashiStack.
+
+The goal of our framework is to enable enterprises to migrate their workloads to a
+multi-cloud architecture that is secure, reliable, high-performing, resilient,
+with automated and dynamic infrastructure. It will provide practitioners with a set of best practices that align with the cloud operating model based on a set of pillars.
+
+Continue to the next document to understand how to implement [HashiCorp Well-Architected Framework](/well-architected-framework/com/implement-cloud-operating-model) at your organization.
diff --git a/content/well-architected-framework/docs/docs/design-resilient-systems/disaster-recovery/state-management-and-dr.mdx b/content/well-architected-framework/docs/docs/design-resilient-systems/disaster-recovery/state-management-and-dr.mdx
new file mode 100644
index 000000000..fbe1709b8
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/design-resilient-systems/disaster-recovery/state-management-and-dr.mdx
@@ -0,0 +1,44 @@
+---
+page_title: State management and disaster recovery
+description: Use HashiCorp tools in the design of your fault-tolerant systems to avoid disruptions from a single point of failure, and ensure the business continuity of your mission-critical deployments.
+---
+
+# State management and disaster recovery
+
+Disaster recovery considerations are an important part of your organization's overall business continuity planning. You should consider both Recovery Point Objectives (RPO) and Recovery Time Objectives (RTO).
+
+At a minimum, your recovery plan should include the following:
+
+- State and change management.
+- Immutable infrastructure for quick and replicable deployment.
+- Automated backups stored on mounted or external storage, instead of local or ephemeral storage.
+
+HashiCorp Consul resources:
+
+- [Backup Consul data and state](/consul/tutorials/operate-consul/backup-and-restore) with the RTO and RPO set forth within each customer's disaster recovery policies.
+- Understand [disaster recovery for Consul on Kubernetes](/consul/tutorials/production-kubernetes/kubernetes-disaster-recovery).
+- Learn more about [Consul disaster recovery considerations](/consul/tutorials/operate-consul/disaster-recovery).
+- How to perform [disaster recovery for Consul clusters](/consul/tutorials/datacenter-operations/recovery-outage), including loss of quorum.
+- How to recover from disaster in a [federated primary datacenter](/consul/tutorials/operate-consul/recovery-outage-primary).
+
+HashiCorp Nomad resources:
+
+- [Failure recovery strategies](/nomad/tutorials/job-failure-handling/failures)
+- [Outage recovery](/nomad/tutorials/manage-clusters/outage-recovery)
+
+HashiCorp Terraform resources:
+
+- Automate backups of your [Terraform Enterprise deployment](/terraform/tutorials/recommended-patterns/pattern-backups) to ensure business continuity.
+- Understand the [Terraform Enterprise backup - recommended pattern](/terraform/tutorials/recommended-patterns/pattern-backups).
+
+HashiCorp Vault resources:
+
+- Learn how to enable automated backups of cluster data in [Vault Enterprise](/vault/tutorials/standard-procedures/sop-backup).
+- [Protect Vault cluster from data loss with backups](/vault/tutorials/standard-procedures/sop-backup)
+- Vault Enterprise supports multi-datacenter deployments where clusters replicate data across datacenters [disaster recovery](/vault/docs/enterprise/replication#disaster-recovery-dr-replication). Learn how to [enable disaster recovery replication](/vault/tutorials/enterprise/disaster-recovery), and [recover from catastrophic failure with disaster recovery replication](/vault/tutorials/enterprise/disaster-recovery-replication-failover).
+- [Recover from lost quorum in Vault clusters](/vault/tutorials/raft/raft-lost-quorum).
+
+External resources:
+
+- [Plan for Disaster Recovery](https://docs.aws.amazon.com/wellarchitected/latest/reliability-pillar/plan-for-disaster-recovery-dr.html)
+- [Identify and back up all data that needs to be backed up, or reproduce the data from sources](https://docs.aws.amazon.com/wellarchitected/latest/framework/rel_backing_up_data_identified_backups_data.html)
diff --git a/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/fault-tolerant-systems.mdx b/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/fault-tolerant-systems.mdx
new file mode 100644
index 000000000..66bf519f1
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/fault-tolerant-systems.mdx
@@ -0,0 +1,43 @@
+---
+page_title: Fault-tolerant systems
+description: Use HashiCorp tools in the design of your fault-tolerant systems to avoid disruptions from a single point of failure, and ensure the business continuity of your mission-critical deployments.
+---
+
+# Fault-tolerant systems
+
+A fault-tolerant system prevents disruptions from a single point of failure, and ensures high availability and business continuity for your mission-critical applications or infrastructure. When you design and implement fault-tolerant systems, you reduce costs, intelligently scale, and minimize downtime.
+
+Fault-tolerant systems also enhance your business outcomes and increase customer satisfaction in the following ways:
+
+- Ensures critical systems remain available even during failure to support business continuity and minimize the effects of disruptions.
+- Failover and redundancy help reduce risk of data loss, corruption, and damage from security breaches.
+- Improve the quality of service and deliver better user experience through consistent performance and responsiveness.
+
+HashiCorp tools feature fault-tolerance properties which enhance the overall resiliency and fault tolerance of applications they integrate with. Use the guidance and resources here to help design your fault-tolerant systems with HashiCorp tools.
+
+## Identify potential faults
+
+As you design a fault-tolerant system, your starting point should be to identify the types of failures which are possible, their effects, and mitigation strategies. Consider the following common faults in your designs:
+
+- Hardware failures in compute or storage systems
+- Software bugs that cause outages or otherwise block production
+- Network partitions between datacenters or individual cluster nodes
+- Upgrade issues that introduce regressions or unexpected configuration issues
+
+HashiCorp Consul resources:
+
+- [Fault Injection](/consul/docs/troubleshoot/fault-injection)
+- [Provide fault tolerance with redundancy zones](/consul/tutorials/operate-consul/redundancy-zones)
+
+HashiCorp Nomad resources:
+
+- [Failure scenarios](/nomad/tutorials/enterprise/production-reference-architecture-vm-with-consul#failure-scenarios)
+
+HashiCorp Vault resources:
+
+- [Set up fault tolerance with Vault redundancy zones](/vault/tutorials/raft/raft-redundancy-zones)
+
+External resources:
+
+- [Design your workload to withstand component failures](https://docs.aws.amazon.com/wellarchitected/latest/reliability-pillar/design-your-workload-to-withstand-component-failures.html)
+- [Failure scenarios](https://docs.aws.amazon.com/sap/latest/general/arch-guide-failure-scenarios.html)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/networking-and-communication-protocols.mdx b/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/networking-and-communication-protocols.mdx
new file mode 100644
index 000000000..716b2db55
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/networking-and-communication-protocols.mdx
@@ -0,0 +1,19 @@
+---
+page_title: Networking and communication protocols
+description: Use HashiCorp tools in the design of your fault-tolerant systems to avoid disruptions from a single point of failure, and ensure the business continuity of your mission-critical deployments.
+---
+
+# Networking and communication protocols
+
+Your fault-tolerant applications or infrastructure can benefit from choosing best-in-class, robust networking and communication protocols. Choose solutions which ensure your networks and protocols can survive failure, such as the following:
+
+- Load balancing and distribution solutions
+- Network isolation and segmentation techniques
+- Caching and content delivery solutions
+- Reliable multicast networking
+- Connection-oriented networking and service mesh solutions
+- Message queue and buffering solutions
+
+HashiCorp resources:
+
+- [What is zero trust security and zero trust networking?](/well-architected-framework/security/security-zero-trust-video)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/redundancy-and-replication.mdx b/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/redundancy-and-replication.mdx
new file mode 100644
index 000000000..0a65941d8
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/redundancy-and-replication.mdx
@@ -0,0 +1,30 @@
+---
+page_title: Redundancy and replication
+description: Use HashiCorp tools in the design of your fault-tolerant systems to avoid disruptions from a single point of failure, and ensure the business continuity of your mission-critical deployments.
+---
+
+# Redundancy and replication
+
+Your fault-tolerant system design can benefit from redundancy in key hardware and software components to ensure maximum availability and ideal user experience. State replication enables availability and performance by ensuring that a system can continue to operate when components fail.
+
+Some ways in which you can build redundancy into your designs include:
+
+- Duplicate critical service instances
+- Redundant data storage solutions from different vendors
+- Several network paths for communications
+
+HashiCorp Consul resources:
+
+- [Consul Multi-Cluster reference architecture](/consul/tutorials/production-multi-cluster/multi-cluster-reference-architecture)
+
+HashiCorp Nomad resources:
+
+- [Federation](/nomad/docs/concepts/architecture/federation)
+
+HashiCorp Vault resources:
+
+- [Vault multi-cluster architecture guide](/vault/tutorials/day-one-raft/multi-cluster-architecture)
+- [Performance replication](/vault/docs/enterprise/replication#performance-replication)
+- [Enable Performance Replication](/vault/tutorials/enterprise/performance-replication)
+- [Disaster recovery (DR) replication](/vault/docs/enterprise/replication#disaster-recovery-dr-replication)
+- [Recover from catastrophic failure with disaster recovery replication](/vault/tutorials/enterprise/disaster-recovery-replication-failover)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/resiliency-and-availability.mdx b/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/resiliency-and-availability.mdx
new file mode 100644
index 000000000..025ff21a1
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/resiliency-and-availability.mdx
@@ -0,0 +1,92 @@
+---
+page_title: Resiliency and availability
+description: Use HashiCorp tools in the design of your fault-tolerant systems to avoid disruptions from a single point of failure, and ensure the business continuity of your mission-critical deployments.
+---
+
+# Resiliency and availability
+
+As you plan for resiliency and availability, you must decide how robust your system architecture needs to be in terms of failure, degradation, and performance. The following are key considerations that you should include when planning for availability and resilience in your deployment:
+
+1. Identify all applications and infrastructure where availability is critical.
+1. Calculate the cost of your failure domain strategy.
+1. Decide your uptime goals.
+1. Compare your architecture and failure recovery plans to the business requirements (BCP).
+
+HashiCorp resources:
+
+- [Run a reliable Nomad cluster](/well-architected-framework/ir-nomad/implementation-resources-nomad-reliability)
+- [Run a reliable Vault cluster](/well-architected-framework/ir-vault/implementation-resources-vault-reliability)
+
+External resources:
+
+- [Fault tolerance and fault isolation](https://docs.aws.amazon.com/whitepapers/latest/availability-and-beyond-improving-resilience/fault-tolerance-and-fault-isolation.html)
+- [Designing resilient systems](https://cloud.google.com/compute/docs/tutorials/robustsystems)
+- [Getting Started with Reliability on Azure: Ensuring Cloud Applications Stay Up and Running](https://techcommunity.microsoft.com/blog/azurearchitectureblog/getting-started-with-reliability-on-azure-ensuring-cloud-applications-stay-up-an/4152905)
+- [Thinking like an architect: Understanding failure domains](https://www.ibm.com/blog/thinking-like-an-architect-understanding-failure-domains/)
+- [What Could Possibly Go Wrong?](https://deploy.equinix.com/blog/explaining-failure-domains-sre-lifeblood/)
+- [Uptime versus Availability: How to measure and improve reliability](https://www.pluralsight.com/resources/blog/tech-operations/uptime-availability-metrics-app-reliability)
+- [Business continuity versus disaster recovery: Which plan is right for you?](https://www.ibm.com/think/topics/business-continuity-vs-disaster-recovery-plan)
+- [Business Continuity Plan (BCP)](https://docs.aws.amazon.com/whitepapers/latest/disaster-recovery-workloads-on-aws/business-continuity-plan-bcp.html)
+
+## Consul
+
+Consul has a range of features that operate both locally and remotely that can help you offer a resilient service across datacenters. Each Consul datacenter depends on a set of Consul voting server agents. The voting servers ensure Consul has a consistent, fault-tolerant state by requiring a majority of voting servers, known as a quorum, to agree upon state changes.
+
+Consider the following factors when you use Consul to design your resilient architectures:
+
+- **Cluster quorum**: Consul uses the Raft protocol to achieve consensus with a quorum (or majority) of operational servers, and can tolerate failure in one or more servers depending on quorum size. A Consul cluster will enter a read-only state to prevent data inconsistency if it loses quorum. You can also use Redundancy Zones in your Consul deployments to run one voter and any number of non-voters in each defined zone.
+- **Data distribution and replication**: Consul replicates all data across cluster servers. Any server in the cluster can respond to read requests, but writes require consensus from a quorum of servers. Consul also automatically synchronizes data across the cluster when a failed server recovers.
+- **Cluster leader election**: When the cluster leader fails, Consul automatically conducts an election to elect a new leader. The leader election requires a quorum of voter participation by cluster servers, and quorum constraints prevent split-brain scenarios.
+- **Service discovery and resilience**: Consul clients keep local caches of service information, and can continue basic service discovery even when temporarily disconnected. Health checks also continue to function during partial cluster outages, and the anti-entropy system automatically repairs inconsistencies between the client's local state and cluster state.
+- **Network partition handling**: A Consul cluster uses a gossip protocol to detect network partitions, and uses quorum requirements to enforce data consistency during network partitions. Servers in the minority partition enter read-only mode to prevent split-brain scenarios, and when the partition heals, the cluster automatically resynchronizes data.
+- **Multi-datacenter support**: Each Consul datacenter independently operates with its own consensus group, and cross-datacenter replication continues even if some datacenters are unreachable. Consul also uses a WAN gossip pool to keep datacenter connectivity details.
+- **Backup and recovery**: You can recover from catastrophic failures with data snapshots.
+
+HashiCorp resources:
+
+- [Fault tolerance](/consul/docs/concept/reliability)
+- [Raft protocol](/consul/docs/concept/consensus)
+- [Redundancy zones](/consul/tutorials/operate-consul/redundancy-zones)
+- [Anti-Entropy Enforcement](/consul/docs/concept/consistency)
+- [Backup Consul data and state](/consul/tutorials/operate-consul/backup-and-restore)
+
+## Nomad
+
+Nomad is a simple and flexible scheduler and orchestrator that deploys and manages containers and non-containerized applications across on-premises and clouds at scale. Your Nomad deployments can achieve fault tolerance, and offer resilience and availability to your use cases through the following key properties:
+
+- **Cluster quorum**: Nomad uses the Raft protocol to achieve consensus with a quorum (or majority) of operational servers, and can tolerate failure in one or more servers depending on quorum size. If Nomad loses quorum, it enters a read-only state to prevent data inconsistency.
+- **Node failure handling**: Nomad relies on a heartbeat mechanism to automatically detect node failure. Failed nodes which do not heartbeat get marked as down, and Nomad maintains the desired state by automatically rescheduling new instances of failed jobs on to operational nodes.
+- **Job scheduling resilience**: Job scheduling supports automatic restarts for failed tasks with configurable restart attempts. Jobs can specify affinities and constraints to effectively spread across failure domains. Nomad's system jobs ensure instances run on all eligible nodes to support coverage, and rolling updates enable zero-downtime deployments with automatic rollbacks.
+- **Data persistence**: Nomad's implementation of the Raft protocol ensures consistent state across cluster nodes by replicating server state across the cluster. Snapshot functionality enables back up and restoration of cluster state, while client nodes keep local state for running allocations.
+- **Automatic scaling**: The Nomad Autoscaler is a separate tool that enables horizontal application and cluster scaling for Nomad clusters. Horizontal application autoscaling is the process of automatically controlling the number of instances of an application to gain work throughput to meet service-level agreements (SLA). Horizontal cluster autoscaling is the process of adding or removing Nomad clients from a cluster to ensure there is an appropriate amount of cluster resource for the scheduled applications.
+
+HashiCorp resources:
+
+- [Nomad reference architecture](/nomad/tutorials/enterprise/production-reference-architecture-vm-with-consul)
+- [Consensus protocol](/nomad/docs/concepts/consensus)
+- [Client Heartbeats](/nomad/docs/configuration/server#client-heartbeats)
+- [Scheduling](/nomad/docs/concepts/scheduling)
+- [Affinities and constraints](/nomad/docs/concepts/scheduling/placement#affinities-and-constraints)
+- [Nomad Autoscaler overview](/nomad/tools/autoscaling)
+- [Increase failure tolerance with spread](/nomad/tutorials/advanced-scheduling/spread)
+
+## Vault
+
+Vault is an identity-based secret and encryption management system that secures, stores, and controls access to tokens, passwords, certificates, and encryption keys for protecting secrets and other sensitive data.
+
+Vault clusters have some important resiliency and availability properties you should consider when you are designing fault-tolerant systems:
+
+- **High availability architecture**: A cluster of Vault servers can run in high availability (HA) mode with an active server and standby servers. When operating in HA mode, Vault can automatically failover a non-operational active server, and use leader election to choose a new active server.
+- **Automated Integrated Storage management**: Vault can automate the cluster management for Integrated Storage with the Autopilot feature to check server health, stabilize quorum, and periodically clean up failed servers.
+- **Performance standbys**: By default, Vault standby servers forward all requests they receive to the active server. Vault Enterprise offers extra functionality that allows HA servers to service read requests which do not mutate storage directly from a standby node. These performance standby servers can distribute the load, enabling your cluster to scale horizontally and reduce latency for read-heavy use cases.
+- **Replication**: Scaling your Vault cluster to meet performance demands is critical to ensure workloads operate efficiently. Vault Enterprise and HCP Vault Dedicated support multi-region deployments so you can replicate data to regional Vault clusters to support local workloads.
+
+HashiCorp resources:
+
+- [Vault with integrated storage reference architecture](/vault/tutorials/raft/raft-reference-architecture)
+- [Configure Vault cluster with Integrated Storage](/vault/tutorials/raft/raft-storage)
+- [Automate Integrated Storage management](/vault/tutorials/raft/raft-autopilot)
+- [Autopilot](/vault/docs/concepts/integrated-storage/autopilot)
+- [Automate upgrades with Vault Enterprise](/vault/tutorials/raft/raft-upgrade-automation)
+- [Performance standby nodes](/vault/docs/enterprise/performance-standby)
+- [Scale horizontally with performance standby nodes](/vault/tutorials/enterprise/performance-standbys)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/scale.mdx b/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/scale.mdx
new file mode 100644
index 000000000..371aed2b7
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/scale.mdx
@@ -0,0 +1,48 @@
+---
+page_title: Scale and tune performance
+description: Use HashiCorp tools in the design of your fault-tolerant systems to avoid disruptions from a single point of failure, and ensure the business continuity of your mission-critical deployments.
+---
+
+# Scale and tune performance
+
+Scale and performance are critical to fault-tolerant systems, and the interplay between them can involve complex, complementary, and conflicting relationships. For example, the same redundant components that enable fault-tolerance can also support load balancing to improve performance. When you geographically distribute replicas for disaster recovery, they can also support latency reduction goals. You can also benefit from the excess production capacity needed for fault-tolerance to offer extra performance headroom.
+
+Some examples of conflicting relationships and trade-offs between fault-tolerance and performance include consistency versus performance; stronger consistency guarantees for fault-tolerance often require extra network round trips. Synchronous replication for durability can affect latency, and complex recovery mechanisms can slow down regular operations. Resource overhead for redundancy incurs costs that could otherwise go toward further improving performance: health checking and monitoring consume processing and bandwidth, while state synchronization adds network overhead.
+
+Examples of design strategies you can use for scale and performance include:
+
+- **Bulkheads and circuit breakers**: To isolate components so that failures do not cascade, implement back-pressure mechanisms to prevent overload, and design for graceful degradation.
+- **Caching and state management**: To improve resilience and performance with local state to reduce network dependency, and cache hierarchies to offer fallback options.
+- **Monitoring and adaptation**: To catch potential failures, and inform automatic scaling and recovery systems which respond to overload and failure. Your capacity planning should account for both regular operation and operation in different failure modes.
+- **Testing and validation**: Load test and include testing of failure scenarios. Use chaos engineering principles to help understand behavior at scale. Include recovery scenarios in your performance benchmarks.
+
+HashiCorp Consul resources:
+
+- [Operating Consul at Scale](/consul/docs/manage/scale)
+- [Enhanced Read Scalability with Read Replicas](/consul/docs/manage/scale/read-replica)
+- [Scale Consul DNS](/consul/docs/discover/dns/scale)
+- [Monitor Consul server health and performance with metrics and logs](/consul/tutorials/observe-your-network/server-metrics-and-logs)
+
+HashiCorp Nomad resources:
+
+- [Autopilot](/nomad/tutorials/manage-clusters/autopilot)
+- [Horizontal cluster autoscaling](/nomad/tutorials/autoscaler/horizontal-cluster-scaling)
+- [On-demand batch job cluster autoscaling](/nomad/tutorials/autoscaler/horizontal-cluster-scaling-on-demand-batch)
+- [Scale a service](/nomad/tutorials/migrate-monolith/monolith-migration-autoscale)
+- [Monitoring Nomad](/nomad/docs/operations/monitoring-nomad)
+- [Nomad Autoscaler Telemetry](/nomad/tools/autoscaling/telemetry)
+
+HashiCorp Vault resources:
+
+- [Tune server performance](/vault/tutorials/archive/performance-tuning)
+- [Vault telemetry](/vault/docs/internals/telemetry)
+
+HashiCorp resources:
+
+- [React to metrics and monitoring](/well-architected-framework/reliability/reliability-react-to-monitoring)
+- [Manage infrastructure and service monitoring](/well-architected-framework/reliability/reliability-deploy-application-monitoring-components)
+
+External resources:
+
+- [What is chaos engineering?](https://www.ibm.com/think/topics/chaos-engineering)
+- [Principles of chaos engineering](https://principlesofchaos.org/)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/secure-distributed-systems.mdx b/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/secure-distributed-systems.mdx
new file mode 100644
index 000000000..996972de5
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/design-resilient-systems/fault-tolerance/secure-distributed-systems.mdx
@@ -0,0 +1,37 @@
+---
+page_title: Secure distributed systems
+description: Use HashiCorp tools in the design of your fault-tolerant systems to avoid disruptions from a single point of failure, and ensure the business continuity of your mission-critical deployments.
+---
+
+# Secure distributed systems
+
+Distributed systems involve several components communicating across networks, which presents an expanded attack surface with many potential points of vulnerability. A breach in one component can lead to a cascade through the system, potentially compromising data integrity, confidentiality, and availability. You must implement a robust design that includes security measures to prevent data breaches and service disruptions without impairing performance and reliability.
+
+HashiCorp resources:
+
+- [Prevent lateral movement](/well-architected-framework/security/security-prevent-lateral-movement)
+- [Best practices to protect sensitive data](/well-architected-framework/security/security-sensitive-data)
+- [What is zero trust security and zero trust networking?](/well-architected-framework/security/security-zero-trust-video)
+- [Securely identify digital entities with X.509 certificates](/well-architected-framework/security/security-secure-entities-with-x509)
+- [Secure HashiCorp tools with TLS](/well-architected-framework/security/security-secure-hashicorp-tools-with-tls)
+
+
+## Quorum
+
+When you deploy a distributed system like Consul, Nomad, or Vault, one of your primary design considerations should be cluster quorum. Quorum refers to the minimum number of nodes in a distributed system required to reach consensus regarding shared state or configuration. Without quorum, the system becomes unreliable and can experience leader election issues, data inconsistencies, or complete loss of consensus.
+
+In a Consul or Vault cluster using high availability integrated storage, quorum is a majority of members from a peer set. For a set of size `N`, quorum requires at least `(N/2)+1` members. For example, if there are 5 members in the peer set, you would need 3 nodes to form a quorum. If a quorum of Consul or Vault nodes is unavailable for any reason, the cluster becomes unavailable and can't commit new logs.
+
+We recommend using three to five servers for both Vault and Consul deployments, including cloud platforms like HashiCorp Cloud Platform (HCP). We recommend the non-voting or read replication feature available in the Enterprise or HCP editions for large deployments that need to scale reads without impacting write latency. A single server deployment is **highly discouraged** due to inevitable data loss in a failure scenario.
+
+The following are quorum sizes and node failure tolerances for a range of cluster node counts.
+
+| **Servers** | **Quorum** | **Node failure tolerance** |
+| ----------- | ---------- | -------------------------- |
+| 1 | 1 | 0 |
+| 2 | 2 | 0 |
+| 3 | 2 | 1 |
+| 4 | 3 | 1 |
+| 5 | 3 | 2 |
+| 6 | 4 | 2 |
+| 7 | 4 | 3 |
diff --git a/content/well-architected-framework/docs/docs/design-resilient-systems/introduction.mdx b/content/well-architected-framework/docs/docs/design-resilient-systems/introduction.mdx
new file mode 100644
index 000000000..0eec79418
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/design-resilient-systems/introduction.mdx
@@ -0,0 +1,6 @@
+---
+page_title: Introduction
+description: Learn how to automate and define processes for your infrastructure.
+---
+
+{/* TODO(review): placeholder content — replace with an introduction to designing resilient systems before publishing. */}
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/design-resilient-systems/zero-downtime-deployments/application.mdx b/content/well-architected-framework/docs/docs/design-resilient-systems/zero-downtime-deployments/application.mdx
new file mode 100644
index 000000000..bbfa09b6b
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/design-resilient-systems/zero-downtime-deployments/application.mdx
@@ -0,0 +1,92 @@
+---
+page_title: Application
+description: Learn how to deploy new versions of your infrastructure and applications without downtime.
+---
+
+# Application changes
+
+Application changes can use blue/green, canary, rolling, or a combination of the
+three. Your deployment method depends on whether you use virtual machines or
+containers, along with the criticality of your application.
+
+## Load balancers and proxies
+
+Load balancers and reverse proxies can manage your application by directing
+traffic between your blue and green environments. They can then direct a subset
+of users for canary deployments and testing and control traffic for rolling
+deployments.
+
+Regardless of your cloud provider, you can use Terraform to manage the
+deployment and control of load balancers and proxies.
+
+HashiCorp resources:
+- Read the [use Application Load Balancers for blue-green and canary deployments](/terraform/tutorials/aws/blue-green-canary-tests-deployments?utm_source=WEBSITE&utm_medium=WEB_BLOG&utm_offer=ARTICLE_PAGE) tutorial.
+
+External resources:
+- [AWS Fine-tuning blue/green deployments on application load balancer](https://aws.amazon.com/blogs/devops/blue-green-deployments-with-application-load-balancer/)
+- [Using AWS Load Balancer Controller for blue/green deployment, canary deployment and A/B testing](https://aws.amazon.com/blogs/containers/using-aws-load-balancer-controller-for-blue-green-deployment-canary-deployment-and-a-b-testing/)
+- [Azure Blue-Green deployments using Azure Traffic Manager](https://azure.microsoft.com/en-us/blog/blue-green-deployments-using-azure-traffic-manager/)
+- [F5 Flexible Load Balancing for Blue/Green Deployments and Beyond](https://www.f5.com/resources/solution-guides/flexible-load-balancing-for-blue-green-deployments-and-beyond)
+
+## Non-containerized applications
+
+Using a blue/green or rolling deployment is a good approach if you are deploying
+applications on virtual machines.
+
+Blue/green deployments deploys your new application version to your new green
+environment. Once you deploy your application, you can start testing the new
+version in-house, and once you deem it ready, you can switch production traffic
+over to it.
+
+For high-impact applications, we advise incorporating canary testing into your
+blue/green deployment strategy. This testing method allows you to validate your
+new version before fully transitioning your traffic, ensuring a stable and
+desired user experience.
+
+The following is an example of canary testing your green environment:
+
+After the green environment is ready, the load balancer sends a small fraction
+of the traffic to the green environment (in this example, 10%).
+
+
+
+If the canary test succeeds without errors, you can incrementally direct traffic
+to the green environment (50/50 — split traffic) over time. In the end state,
+you redirect all traffic to the green environment. After verifying the new
+deployment, you can destroy the old blue environment. The green environment is
+now your current production service.
+
+
+
+## Containerized applications
+
+Containers can use rolling, blue/green, and canary deployments through
+orchestration tools like Nomad and Kubernetes.
+
+Rolling deployments are a popular strategy for deploying applications using
+orchestration systems. With rolling deployments, the orchestrator gradually
+replaces old instances with new ones. Once the new instances are available and
+pass health checks, the orchestrator can direct traffic to the new instances and
+then destroy the old instances.
+
+Nomad supports rolling updates as a first-class feature. To enable rolling
+updates, you can annotate a job or task group with a high-level description of
+the update strategy using the `update` block.
+
+Kubernetes by default uses rolling updates. Kubernetes does this by
+incrementally replacing current pods with new ones. The new Pods are scheduled
+on Nodes with available resources, and Kubernetes waits for those new Pods to
+start before removing the old Pods.
+
+As described in [infrastructure changes](#infrastructure-changes), both Nomad
+and Kubernetes support blue/green deployments. Before sending all your traffic
+to your new cluster, you can use canary testing to ensure the new cluster is
+working as intended.
+
+HashiCorp resources:
+- Learn how to use blue/green deployments with the [Nomad blue/green and canary deployments](/nomad/tutorials/job-updates/job-blue-green-and-canary-deployments#blue-green-deployments) tutorial.
+- To learn about Nomad rolling updates, refer to the [Nomad's Rolling updates](/nomad/tutorials/job-updates/job-rolling-update) tutorial.
+- Learn about Nomad's `update` block. The [update strategy](/nomad/docs/job-specification/update) is used to control things like rolling upgrades and canary deployments.
+
+External resources:
+- [Kubernetes - Performing a rolling update](https://kubernetes.io/docs/tutorials/kubernetes-basics/update/update-intro/)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/design-resilient-systems/zero-downtime-deployments/infrastructure.mdx b/content/well-architected-framework/docs/docs/design-resilient-systems/zero-downtime-deployments/infrastructure.mdx
new file mode 100644
index 000000000..21257c30e
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/design-resilient-systems/zero-downtime-deployments/infrastructure.mdx
@@ -0,0 +1,52 @@
+---
+page_title: Infrastructure
+description: Learn how to deploy new versions of your infrastructure and applications without downtime.
+---
+
+# Infrastructure changes
+
+Properly managing changes to your infrastructure, such as updating network policies
+or upgrading your Kubernetes cluster, is important to ensure the reliability of
+your upgraded application and achieve zero-downtime deployments.
+
+Blue/green deployments are good for deploying your application on a new
+infrastructure. Blue/green deployments require two identical application
+infrastructure environments, a method for deploying your application to your two
+environments, and a way to route your traffic between them.
+
+The following diagram shows a basic blue/green deployment. The blue environment
+is the infrastructure where your current application runs. The green environment
+is identical except you upgraded it to host the new version of the application.
+This environment can be a set of servers or a new cluster running a new AMI or
+container.
+
+
+
+Your blue and green environments need to be as similar as possible.
+Infrastructure as code (IaC) lets you describe your environment as code and
+consistently deploy identical environments.
+
+IaC makes your operations more cost-effective by allowing you to easily build
+and remove resources when you do not need them. Using IaC also lets you spin up
+your green environment whenever you need it. Instead of letting your blue and
+green environments persist indefinitely or allocating time to build them, you
+can deploy your green infrastructure environment when you want to deploy your
+new software application. Once your green environment is stable, you can tear
+down your blue environment.
+
+HashiCorp's Terraform is an infrastructure as code tool that can help you deploy
+and manage blue/green infrastructure environments. By using Terraform modules,
+you can consistently deploy identical infrastructure using the same code but
+different environments through variables. You can also define feature toggles in
+your Terraform code to create a blue and green deployment environment
+simultaneously. You can then test your application in your new green
+environment, and then when you are ready, set the toggle in your code to destroy
+your blue environment.
+
+HashiCorp resources:
+- Read the [use Application Load Balancers for blue-green and canary deployments](/terraform/tutorials/aws/blue-green-canary-tests-deployments) tutorial.
+- [Feature Toggles, Blue-Green Deployments & Canary Tests with Terraform](https://www.hashicorp.com/blog/terraform-feature-toggles-blue-green-deployments-canary-test) blog by Rosemary Wang
+
+External resources:
+- [Blue Green Deployment](https://martinfowler.com/bliki/BlueGreenDeployment.html) blog by Martin Fowler
+- [Continuous Blue-Green Deployments With Kubernetes](https://semaphoreci.com/blog/continuous-blue-green-deployments-with-kubernetes) blog by Tomas Fernandez
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/design-resilient-systems/zero-downtime-deployments/service-mesh.mdx b/content/well-architected-framework/docs/docs/design-resilient-systems/zero-downtime-deployments/service-mesh.mdx
new file mode 100644
index 000000000..f4296dd87
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/design-resilient-systems/zero-downtime-deployments/service-mesh.mdx
@@ -0,0 +1,56 @@
+---
+page_title: Service mesh
+description: Learn how to deploy new versions of your infrastructure and applications without downtime.
+---
+
+# Service mesh deployments
+
+You can use service splitters to implement zero-downtime deployments. These
+components, often used in service mesh architectures, allow traffic to route
+between different versions of an application dynamically.
+
+You can use Consul to help make traffic splitting decisions. Consul proxy
+metrics gives you detailed health and performance information about your service
+mesh applications. This includes upstream/downstream network traffic metrics,
+ingress/egress request details, error rates, and additional performance
+information that you can use to understand your distributed applications.
+
+With blue/green deployments, you can configure a service splitter to initially
+direct all traffic to your application's "blue" (current) version. When you are
+ready to deploy the "green" (new) version, you can gradually adjust the splitter
+to shift traffic from blue to green.
+
+You can use Consul to manage traffic for zero downtime deployments using the
+following steps:
+
+First, you register your service’s blue and green versions with Consul and
+configure health checks to monitor the availability and health of each service
+instance. Once the instances are healthy, you can deploy your new version to the
+green stack, ensure it passes Consul's health checks, and then update traffic
+splitting or routing rules to shift traffic from the blue to the green service
+gradually.
+
+Since your green service is now receiving traffic, you should monitor the health
+and performance of both versions. If issues arise, you can roll back the traffic
+to your blue service. Once all health and performance checks pass, you can
+decommission the blue service to complete your blue/green deployment.
+
+With canary deployments, you can release new software gradually, and identify
+and reduce the potential blast radius of a failed software release. You first
+route a small fraction of the service to the new version. Similar to blue/green
+deployments, this can be done with a service splitter. When you confirm no
+errors, you slowly increase traffic to the new service until you fully promote
+the new environment.
+
+Amazon EKS and Azure Kubernetes Service can use Consul service mesh to observe
+traffic within your service mesh. This observability enables you to quickly
+understand how services interact with each other and effectively debug your
+services' traffic.
+
+HashiCorp resources:
+- [Deploy seamless canary deployments with service splitters](/consul/tutorials/control-network-traffic/service-splitters-canary-deployment) tutorial
+- [Register your services to Consul](/consul/tutorials/get-started-vms/virtual-machine-gs-service-discovery) tutorial
+- [Monitor your application health with distributed checks](/consul/tutorials/connect-services/monitor-applications-health-checks) tutorial
+- [Observe Consul service mesh traffic](/consul/tutorials/get-started-kubernetes/kubernetes-gs-observability?variants=consul-deploy%3Ahcp) tutorial
+- [Monitor application health and performance with Consul proxy metrics](/consul/tutorials/observe-your-network/proxy-metrics) tutorial
+- [Service splitting](/consul/docs/reference/config-entry/service-splitter) documentation
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/design-resilient-systems/zero-downtime-deployments/zero-downtime-deployments.mdx b/content/well-architected-framework/docs/docs/design-resilient-systems/zero-downtime-deployments/zero-downtime-deployments.mdx
new file mode 100644
index 000000000..a49f7e6ea
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/design-resilient-systems/zero-downtime-deployments/zero-downtime-deployments.mdx
@@ -0,0 +1,92 @@
+---
+page_title: Zero-downtime Deployments
+description: Learn how to deploy new versions of your infrastructure and applications without downtime.
+---
+
+# Zero-downtime Deployments
+
+Zero-downtime deployment strategies aim to reduce or eliminate downtime when you
+update your infrastructure or applications. These strategies involve deploying
+new versions incrementally rather than all at once to detect and resolve issues.
+Each strategy lets you test the new version in an environment with real user
+traffic. This helps validate the new release's performance and reliability.
+
+This guide covers best practices for popular zero-downtime deployment methods,
+such as blue/green, canary, and rolling deployments. It will help you decide
+which deployment method is best for your organization and provide the resources
+to implement that method.
+
+
+
+Stateful workloads like databases require additional work for blue/green,
+canary, and rolling deployments. Consult your database’s documentation while
+considering these zero-downtime strategies.
+
+
+
+## Deployment methods overview
+
+Blue/green, canary, and rolling deployments all improve application reliability
+and reduce risk. While they share similar goals, each approach offers unique
+advantages that make it more suitable for certain types of applications or
+organizational needs. By choosing the most appropriate deployment method,
+companies can ensure smoother updates and reduce the likelihood of service
+disruptions.
+
+- Blue/green deployments maintain two identical production environments
+ concurrently. This method allows you to shift traffic from the current version
+ (blue) to the upgraded version (green).
+- Canary deployments introduce new versions incrementally to a subset of users.
+ This approach lets you test upgrades with limited exposure, working alongside
+ other deployment systems.
+- Rolling deployments update applications gradually across multiple servers.
+ This technique ensures only a portion of your infrastructure changes at once,
+ reducing the risk of widespread issues.
+
+The difference between these strategies is how and where the application
+deploys. This involves the environment the application runs in, cost
+considerations, deployment methods, and traffic direction.
+
+- Environment setup:
+ - Blue/Green: Requires two nearly identical environments.
+ - Canary: Requires two nearly identical environments. Initially uses a small subset of users or servers.
+ - Rolling: Updates subsets of servers in batches.
+- Traffic switching:
+ - Blue/Green: Switches all traffic at once.
+ - Canary: Gradually increases traffic to the new version.
+ - Rolling: Sequentially updates and transitions traffic.
+- Rollback mechanism:
+ - Blue/Green: Switching back to the blue environment.
+ - Canary: Rollback involves reducing or stopping the canary deployment.
+ - Rolling: Rollback involves reverting batches, which can be more complex.
+
+Since all three zero-downtime strategies offer similar benefits and aim to
+achieve zero-downtime deployments, the changes you plan to make will be the most
+important consideration when determining which deployment to implement. The
+changes can be either infrastructure or application.
+
+Infrastructure changes involve setting up your environments so they are prepared
+to host your zero-downtime application. With blue/green deployments, you must
+have two identical environments. An infrastructure environment can range from
+creating a new green full stack (servers, networking, or databases) to creating
+a new cluster to run containers or adding a single green VM to an existing
+infrastructure stack.
+
+However, it is important to note that running two identical infrastructure
+environments can increase costs. You can run blue/green environments only in
+production to save money. You should also have an infrastructure lifecycle
+strategy, such as using infrastructure-as-code to deploy your green environment
+only when you plan to deploy your new application version.
+
+Application changes involve deploying and directing traffic to your new
+application version. You can configure your load balancer or reverse proxies to
+direct traffic to your green stack and perform canary testing or direct traffic
+in a controlled manner for rolling deployments.
+
+Service mesh deployments use service splitters to implement zero-downtime
+deployments. These components, often used in service mesh architectures, allow
+traffic to route between different versions of an application dynamically.
+
+External resources:
+- [AWS deployment strategies](https://docs.aws.amazon.com/whitepapers/latest/overview-deployment-options/deployment-strategies.html)
+- [Google Cloud application deployment and testing strategies](https://cloud.google.com/architecture/application-deployment-and-testing-strategies)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/implement-cloud-operating-model.mdx b/content/well-architected-framework/docs/docs/implement-cloud-operating-model.mdx
new file mode 100644
index 000000000..271c40d5b
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/implement-cloud-operating-model.mdx
@@ -0,0 +1,35 @@
+---
+layout: docs
+page_title: What is HashiCorp Well-Architected Framework?
+description: Implement HashiCorp Well-Architected Framework at your organization.
+---
+
+# What is HashiCorp Well-Architected Framework?
+
+HashiCorp Well-Architected Framework provides best-practice guidance for organizations. Specifically, it aims to help practitioners optimize their production HashiCorp deployments while meeting their organization's specific architectural needs. The well-architected framework starts at the [cloud operating model](https://www.hashicorp.com/cloud-operating-model), which sets the overarching goal of the framework: cloud migration enablement.
+
+
+
+The cloud operating model provides the fundamental approach to industrialize the application delivery process across each layer of the cloud: embracing the cloud operating model, and tuning people, processes, and tools to it. HashiCorp Well-Architected Framework enables practitioners to implement the cloud operating model's approach through sets of best practices organized under pillars.
+
+## Framework pillars
+
+HashiCorp Well-Architected Framework comprises pillars. Each pillar contains groupings of best practices. Below is an overview of each pillar.
+
+### Operational excellence
+
+The operational excellence pillar recommends strategies to enable your organization to build products quickly and efficiently, including shipping changes, updates, and upgrades. These strategies will help teams in your organization collaborate with each other without delays or friction, even in failure scenarios. They include recommendations for both team and infrastructure architecture.
+
+Get started with the [operational excellence pillar](/well-architected-framework/operational-excellence/operational-excellence-introduction).
+
+### Reliability
+
+The reliability pillar recommends strategies that help prevent disruptions from a single point of failure, ensuring high availability and business continuity of your mission-critical applications and infrastructure.
+
+Get started with the [reliability pillar](/well-architected-framework/reliability/reliability-introduction).
+
+### Security
+
+The security pillar recommends strategies to implement security best practices to secure your applications, protect your network, manage sensitive data, manage identity and access, and build security controls.
+
+Get started with the [security pillar](/well-architected-framework/security/security-introduction).
diff --git a/content/well-architected-framework/docs/docs/implementation-resources/consul-reliability.mdx b/content/well-architected-framework/docs/docs/implementation-resources/consul-reliability.mdx
new file mode 100644
index 000000000..1acc2cc3f
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/implementation-resources/consul-reliability.mdx
@@ -0,0 +1,64 @@
+---
+page_title: Run a reliable Consul cluster
+description: Learn how to run a reliable Consul cluster.
+---
+
+# Run a reliable Consul cluster
+
+This document outlines implementation resources for maintaining a reliable Consul cluster. When you implement proper reliability measures, you ensure high availability, fault tolerance, and consistent performance of your Consul infrastructure.
+
+The following sections cover architecture, monitoring, resource management, recovery, and resilience.
+
+## Architecture
+
+Learn about Consul Community Edition and Enterprise architecture and best practices for building reliable Consul clusters.
+
+- Improving a Consul [cluster resilience](/consul/docs/concept/reliability)
+- Recommendations for operating [Consul clusters at scale](/consul/docs/manage/scale)
+
+## Monitoring
+
+Monitor Consul to collect telemetry data to view performance, audits, and infrastructure usage to ensure Consul is reliable.
+
+- Monitor [service-to-service communication with Envoy](/consul/docs/observe/grafana/service-to-service)
+- Monitor [Raft metrics and logs for WAL](/consul/docs/deploy/server/wal/monitor-raft)
+- Monitor [Consul components](/consul/docs/monitor)
+- [Monitoring and alerts recommendations for Consul](/consul/docs/monitor/alerts)
+- Monitor [key metrics for Consul agent telemetry](/consul/docs/monitor/telemetry/agent)
+
+## Resource management
+
+Efficiently manage your Consul infrastructure, scaling, and performance.
+
+- Optimize Consul [server performance](/consul/docs/reference/architecture/server) for overall throughput and health.
+- Collect metrics with Consul [Dataplane Telemetry](/consul/docs/reference/dataplane/telemetry).
+- Learn about [capacity planning recommendations](/consul/docs/reference/architecture/capacity) when deploying and maintaining a Consul cluster in production.
+- Monitor [Raft metrics and logs for WAL](/consul/docs/deploy/server/wal/monitor-raft).
+
+## Recovery
+
+Recover Consul in the case of cluster degradation through the use of regular backups.
+
+- [General considerations for Consul disaster recovery](/consul/tutorials/operate-consul/disaster-recovery)
+- [Disaster recovery for Consul clusters](/consul/tutorials/operate-consul/recovery-outage)
+- [Disaster recovery for federated primary Consul datacenter](/consul/tutorials/operate-consul/recovery-outage-primary)
+- [Disaster recovery for Consul on Kubernetes](/consul/tutorials/production-kubernetes/kubernetes-disaster-recovery)
+- [Disaster recovery for Consul on multi-cluster deployments](/consul/tutorials/production-multi-cluster/multi-disaster-recovery)
+
+## Resilience
+
+Run a resilient Consul cluster to avoid application downtime.
+
+- Understand [fault tolerance in Consul clusters](/consul/docs/concept/reliability)
+- Provide [fault-tolerance with Consul Redundancy Zones](/consul/docs/manage/scale/redundancy-zone)
+- [Failover configuration overview](/consul/docs/manage-traffic/failover)
+- Gain insight into service mesh events and errors with [Consul proxy access logs](/consul/tutorials/observe-your-network/proxy-access-logs)
+
+## Next steps
+
+In this document, you learned about the HashiCorp resources for implementing and running a reliable Consul cluster. The following are implementation guides for other HashiCorp products.
+
+- [Vault implementation resources](/well-architected-framework/ir-vault)
+- [Nomad implementation resources](/well-architected-framework/ir-nomad)
+- [Packer implementation resources](/well-architected-framework/ir-packer)
+- [Terraform implementation resources](/well-architected-framework/ir-terraform)
diff --git a/content/well-architected-framework/docs/docs/implementation-resources/nomad-reliability.mdx b/content/well-architected-framework/docs/docs/implementation-resources/nomad-reliability.mdx
new file mode 100644
index 000000000..2dc19a6ad
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/implementation-resources/nomad-reliability.mdx
@@ -0,0 +1,50 @@
+---
+page_title: Run a reliable Nomad cluster
+description: Learn how to run a reliable Nomad cluster.
+---
+
+# Run a reliable Nomad cluster
+
+This document outlines implementation resources for maintaining reliable Nomad clusters. When you implement proper reliability measures, you ensure high availability, fault tolerance, and consistent performance of your Nomad infrastructure.
+
+The following sections cover architecture, monitoring, resource management, and recovery.
+
+## Architecture
+
+Learn about Nomad Community Edition and Enterprise architecture and best practices to build reliable Nomad environments.
+
+- [Reference architecture](/well-architected-framework/nomad/production-reference-architecture-vm-with-consul) for HashiCorp Nomad production deployments
+- Learn the technical details of Nomad with [Nomad system architecture](/nomad/docs/concepts/architecture)
+
+## Monitoring
+
+Monitor your Nomad environment to collect telemetry data to view performance, audits, and infrastructure usage and ensure Nomad's reliability.
+
+- [Monitor the Nomad client and server agents](/nomad/docs/operations/monitoring-nomad) using the metrics they collect.
+- Use [Nomad runtime metrics](/nomad/docs/operations/metrics-reference) to debug or understand the performance of your Nomad cluster.
+- [Monitor the underlying infrastructure](/well-architected-framework/reliability/reliability-deploy-application-monitoring-components) that Nomad runs on.
+
+## Resource management
+
+Efficiently manage your Nomad infrastructure, scaling, and performance.
+
+- Use [Nomad Bench](/nomad/docs/operations/benchmarking) to run test scenarios to collect metrics and data from Nomad clusters running at scale.
+- Scale with [Nomad Autoscaler](/nomad/tools/autoscaling), a horizontal application and cluster autoscaler for Nomad.
+- Manage resources quotas with [Sentinel](/nomad/tutorials/governance-and-policy/governance-and-policy#resource-quotas).
+
+## Recovery
+
+Recover Nomad in the case of cluster degradation through regular backups.
+
+- [Recover from a Nomad outage](/nomad/tutorials/manage-clusters/outage-recovery)
+- Generate a [snapshot of Nomad server state](/nomad/api-docs/operator/snapshot) for disaster recovery.
+- Learn about [failure recovery strategies](/nomad/tutorials/job-failure-handling/failures) for tasks and jobs.
+
+
+## Next steps
+
+In this document, you learned about the HashiCorp resources for implementing and running a reliable Nomad cluster. The following are implementation guides on the other HashiCorp products.
+
+- [Vault implementation resources](/well-architected-framework/ir-vault)
+- [Packer implementation resources](/well-architected-framework/ir-packer)
+- [Terraform implementation resources](/well-architected-framework/ir-terraform)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/implementation-resources/packer-reliability.mdx b/content/well-architected-framework/docs/docs/implementation-resources/packer-reliability.mdx
new file mode 100644
index 000000000..c873f2bbb
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/implementation-resources/packer-reliability.mdx
@@ -0,0 +1,34 @@
+---
+page_title: Run a reliable Packer pipeline
+description: Learn how to run a reliable Packer pipeline.
+---
+
+# Run a reliable Packer pipeline
+
+This document outlines implementation resources for maintaining reliable Packer pipelines. When you implement proper reliability measures, you ensure high availability, fault tolerance, and consistent performance of your Packer pipelines.
+
+## Automation
+
+Implement automation to build consistent and secure images.
+
+- [Build a golden image pipeline with HCP Packer](/packer/tutorials/cloud-production/golden-image-with-hcp-packer) to allow developers to focus on the application instead of system dependencies and patches.
+- Automate Packer with [GitHub Actions](/packer/tutorials/cloud-production/github-actions) and Packer templates.
+- Run unattended installations for [Debian](/packer/guides/automatic-operating-system-installs/preseed_ubuntu) and [Windows](/packer/guides/automatic-operating-system-installs/autounattend_windows).
+- Use Packer to [streamline your container or VM application deployments](/well-architected-framework/operational-excellence/operational-excellence-application-deployment).
+
+## Compliance
+
+Enforce your organization's compliance best practices and revoke compromised artifacts.
+
+- Schedule [artifact version revocation](/packer/tutorials/hcp/hcp-schedule-image-iterations-revocation).
+- Identify [compromised artifacts with HCP Terraform](/packer/tutorials/hcp/run-tasks-data-source-image-validation).
+- Enforce [artifact compliance with HCP Terraform](/packer/tutorials/hcp/run-tasks-resource-image-validation).
+- Revoke [an artifact and its descendants using inherited revocation with HCP Terraform](/packer/tutorials/hcp/revoke-image).
+
+## Next steps
+
+In this document, you learned about the HashiCorp resources for implementing and running a reliable Packer pipeline. The following are implementation guides on the other HashiCorp products.
+
+- [Vault implementation resources](/well-architected-framework/ir-vault)
+- [Nomad implementation resources](/well-architected-framework/ir-nomad)
+- [Terraform implementation resources](/well-architected-framework/ir-terraform)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/implementation-resources/terraform-reliability.mdx b/content/well-architected-framework/docs/docs/implementation-resources/terraform-reliability.mdx
new file mode 100644
index 000000000..eb4aab308
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/implementation-resources/terraform-reliability.mdx
@@ -0,0 +1,55 @@
+---
+page_title: Run a reliable Terraform Enterprise instance
+description: Learn how to run a reliable Terraform Enterprise instance.
+---
+
+# Run a reliable Terraform Enterprise instance
+
+This document outlines implementation resources for maintaining reliable Terraform clusters. When you implement proper reliability measures, you ensure high availability, fault tolerance, and consistent performance of your Terraform infrastructure.
+
+The following sections cover architecture, monitoring, resource management, and recovery.
+
+
+## Architecture
+
+The following architecture is Terraform Enterprise architecture for Replicated environments. Terraform Enterprise is now deployed with containers, as described in the [Terraform Enterprise deployment overview](/terraform/enterprise/deploy).
+
+
+
+The discontinued clustered version of Terraform Enterprise is no longer supported, and we strongly advise all customers to move to an Active/Active installation. Please contact your support representative if you need assistance.
+
+
+
+- Follow [cloud-specific reference architectures](/terraform/enterprise/deploy/replicated/architecture/reference-architecture) for Terraform Enterprise deployed to Replicated.
+
+## Monitoring
+
+Monitor your Terraform Enterprise environment to collect telemetry data, view performance, perform audits, and measure infrastructure usage. You can use this data to proactively catch issues and reliably run Terraform Enterprise.
+
+- [Enable logs and metrics in Terraform Enterprise](/terraform/enterprise/deploy/replicated/monitoring/logging) so that you can monitor your non-Replicated deployment.
+
+- [Monitor the underlying infrastructure](/well-architected-framework/reliability/reliability-deploy-application-monitoring-components) that Terraform runs on.
+
+## Recovery
+
+Recover Terraform Enterprise in the case of cluster degradation through regular backups.
+
+- Use the [Terraform Enterprise API](/terraform/enterprise/deploy/manage/backup-restore) to back up all the data stored in a Terraform Enterprise installation, including the blob storage and the PostgreSQL database.
+
+- Configure your [Terraform Enterprise deployment to connect to a PostgreSQL database replica](/terraform/enterprise/deploy/manage/failover) to ensure continuous database availability.
+
+## Resilience
+
+Run a resilient Terraform Enterprise cluster to avoid application downtime.
+
+- [Perform diagnostics](/terraform/enterprise/deploy/troubleshoot/perform-diagnostics) on your Terraform Enterprise deployment.
+- [Troubleshoot common errors](/terraform/enterprise/deploy/troubleshoot/error-messages) Terraform Enterprise may report if you misconfigure your deployment, and learn how to resolve them.
+
+
+## Next steps
+
+In this document, you learned about the HashiCorp resources for implementing and running a reliable Terraform cluster. The following are implementation guides on the other HashiCorp products.
+
+- [Vault implementation resources](/well-architected-framework/ir-vault)
+- [Nomad implementation resources](/well-architected-framework/ir-nomad)
+- [Packer implementation resources](/well-architected-framework/ir-packer)
diff --git a/content/well-architected-framework/docs/docs/implementation-resources/vault-reliability.mdx b/content/well-architected-framework/docs/docs/implementation-resources/vault-reliability.mdx
new file mode 100644
index 000000000..eb14a7eb0
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/implementation-resources/vault-reliability.mdx
@@ -0,0 +1,67 @@
+---
+page_title: Run a reliable Vault cluster
+description: Learn how to run a reliable Vault cluster.
+---
+
+# Run a reliable Vault cluster
+
+This document outlines implementation resources for maintaining a reliable Vault cluster. When you implement proper reliability measures, you ensure high availability, fault tolerance, and consistent performance of your Vault infrastructure.
+
+The following sections cover architecture, monitoring, resource management, recovery, and resilience.
+
+## Architecture
+
+Learn about Vault Community Edition and Enterprise architecture and best practices for building reliable Vault clusters.
+
+- Vault [reference architecture](/vault/docs/internals/architecture).
+- Implement a robust [Vault Enterprise cluster](/vault/docs/enterprise/cluster-design).
+- Follow [recommended patterns](/vault/docs/internals/recommended-patterns) to keep your Vault cluster operating reliably.
+
+## Monitoring
+
+Monitor Vault to collect telemetry data to view performance, audits, and infrastructure usage to ensure Vault is reliable.
+
+- [Telemetry metrics overview](/vault/docs/internals/telemetry/metrics) to learn about three types of telemetry metrics: counter, gauge, and summary.
+- [Monitor common Vault metrics](/vault/docs/internals/telemetry/key-metrics) such as core, usage, storage backend, audit, and resource.
+- Use the [Vault usage metrics dashboard](/vault/docs/concepts/client-count/usage-metrics) in the Vault UI to filter usage metrics by namespace or auth methods.
+- [Enable Vault telemetry gathering](/vault/docs/internals/telemetry/enable-telemetry) to collect [telemetry data](/vault/docs/internals/telemetry) from your Vault cluster.
+- Use [audit devices](/vault/docs/audit) to collect a detailed log of all requests to Vault and their responses.
+- [Monitor the underlying infrastructure](/well-architected-framework/reliability/reliability-deploy-application-monitoring-components) Vault runs on.
+
+## Resource management
+
+Efficiently manage your Vault infrastructure, scaling, and performance.
+
+- Enforce resource limits with [Vault resource quotas](/vault/docs/concepts/resource-quotas) and [lease count quotas](/vault/docs/enterprise/lease-count-quotas).
+- [Prevent lease explosions](/vault/docs/configuration/prevent-lease-explosions) in your Vault cluster.
+- [Benchmark and measure Vault performance](/vault/tutorials/operations/benchmark-vault) in environments that resemble production use cases to produce realistic results.
+- Use [Vault Enterprise Adaptive overload protection](/vault/docs/concepts/adaptive-overload-protection) to prevent client requests from overwhelming different server resources.
+
+## Recovery
+
+Recover Vault in the case of cluster degradation through the use of regular backups.
+
+- Use the [`-recovery` flag](/vault/docs/concepts/recovery-mode) to bring Vault up in recovery mode.
+- Configure [Vault Enterprise to take regular backups](/vault/docs/enterprise/automated-integrated-storage-snapshots).
+- [Vault Enterprise performance replication](/vault/docs/enterprise/replication) provides consistency, scalability, and highly-available disaster recovery.
+
+
+## Resilience
+
+Run a resilient Vault cluster to avoid application downtime.
+
+- Run Vault in high availability (HA) mode to protect against outages by running multiple Vault servers.
+ - [High availability](/vault/docs/internals/high-availability)
+ - [High availability mode (HA)](/vault/docs/concepts/ha)
+- Run [performance standby nodes](/vault/docs/enterprise/performance-standby).
+- Use [integrated storage](/vault/docs/concepts/integrated-storage) for durable storage.
+- Run [Vault Enterprise redundancy zones](/vault/docs/enterprise/redundancy-zones) to increase read scaling and resiliency.
+- Use [Vault Enterprise multi-datacenter replication](/vault/docs/internals/replication) for high availability and scalability through a primary/secondary (1:N) asynchronous model.
+
+## Next steps
+
+In this document, you learned about the HashiCorp resources for implementing and running a reliable Vault cluster. The following are implementation guides on the other HashiCorp products.
+
+- [Nomad implementation resources](/well-architected-framework/ir-nomad)
+- [Packer implementation resources](/well-architected-framework/ir-packer)
+- [Terraform implementation resources](/well-architected-framework/ir-terraform)
diff --git a/content/well-architected-framework/docs/docs/index.mdx b/content/well-architected-framework/docs/docs/index.mdx
new file mode 100644
index 000000000..5bfbd6ca4
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/index.mdx
@@ -0,0 +1,17 @@
+---
+layout: docs
+page_title: What is a well-architected framework?
+description: HashiCorp Well-Architected Framework provides best practice guidance for organizations.
+---
+
+
+
+# What is HashiCorp's Well-Architected Framework?
+
+HashiCorp Well-Architected Framework provides best practice guidance for organizations. Use it to optimize your production HashiCorp deployments while also meeting your specific architectural needs. The well-architected framework starts with the cloud operating model and moves toward cloud migration enablement.
+
+The operational excellence pillar recommends strategies to enable your organization to build products quickly and efficiently; including shipping changes, updates, and upgrades. These strategies will help teams in your organization to collaborate with each other without delays or friction, even in failure scenarios. They include recommendations for both team and infrastructure architecture.
+
+The reliability pillar recommends strategies that help prevent disruptions from a single point of failure; ensuring high availability and business continuity of your mission-critical applications and infrastructure.
+
+The security pillar defines a zero trust architecture approach and best practices to protect your applications, secure your networks, manage sensitive data, manage identity and access, and build security controls.
diff --git a/content/well-architected-framework/docs/docs/optimize-resources/introduction.mdx b/content/well-architected-framework/docs/docs/optimize-resources/introduction.mdx
new file mode 100644
index 000000000..0eec79418
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/optimize-resources/introduction.mdx
@@ -0,0 +1,6 @@
+---
+page_title: intro
+description: Learn how to automate and define processes for your infrastructure.
+---
+
+Optimize resources across your HashiCorp deployments to reduce cost and improve efficiency.
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/secure-systems/appendix.mdx b/content/well-architected-framework/docs/docs/secure-systems/appendix.mdx
new file mode 100644
index 000000000..77e90e723
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/secure-systems/appendix.mdx
@@ -0,0 +1,29 @@
+---
+page_title: Appendix
+description: Learn how to automate and define processes for your infrastructure.
+---
+
+# Appendix
+
+## HCP-S-01: Don't hard-code passwords or secrets in your code
+Hard-coding passwords or secrets in your code can lead to security vulnerabilities. Instead, use a secure secrets management solution like [HashiCorp Vault](/vault) to store and manage sensitive information. This allows you to retrieve secrets dynamically at runtime without exposing them in your codebase.
+
+HashiCorp resources:
+- [Vault documentation](/vault/docs)
+- [Vault tutorials](/vault/tutorials)
+
+External resources:
+- [CWE-259: Use of Hard-coded Password](https://cwe.mitre.org/data/definitions/259.html)
+- [CWE-798: Use of Hard-coded Credentials](https://cwe.mitre.org/data/definitions/798.html)
+
+
+## HCP-S-02: Rotate secrets and keys regularly
+Regularly rotating secrets and keys is essential for maintaining security. Use Vault's dynamic secrets feature to automatically generate and rotate credentials for your applications. This reduces the risk of credential leakage and ensures that your systems remain secure over time.
+
+HashiCorp resources:
+- [Vault dynamic secrets](/vault/docs/secrets/dynamic)
+- [Vault key rotation](/vault/docs/concepts/rotation)
+
+External resources:
+- [NIST SP 800-57: Recommendation for Key Management](https://csrc.nist.gov/publications/detail/sp/800-57/part-1/rev-5/final)
+- [OWASP Cheat Sheet: Key Management Cheat Sheet](https://cheatsheetseries.owasp.org/cheatsheets/Key_Management_Cheat_Sheet.html)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/secure-systems/introduction.mdx b/content/well-architected-framework/docs/docs/secure-systems/introduction.mdx
new file mode 100644
index 000000000..72f664eaf
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/secure-systems/introduction.mdx
@@ -0,0 +1,6 @@
+---
+page_title: intro
+description: Learn how to automate and define processes for your infrastructure.
+---
+
+secure systems
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/secure-systems/prevent-lateral-movement.mdx b/content/well-architected-framework/docs/docs/secure-systems/prevent-lateral-movement.mdx
new file mode 100644
index 000000000..a7ebc9951
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/secure-systems/prevent-lateral-movement.mdx
@@ -0,0 +1,102 @@
+---
+page_title: Prevent lateral movement
+description: Learn how to automate and define processes for your infrastructure.
+---
+
+# Prevent lateral movement
+
+In this article, you learn about one of the foundational approaches to safeguard and protect against
+“bad actors” following the well documented [MITRE ATT&CK pattern](https://attack.mitre.org/docs/attack_matrix_poster_2021_june.pdf) (lateral movement).
+
+Lateral movement is the act of moving through the environment from a compromised resource into
+an uncompromised one by exploiting the assumption that the former is secure.
+The following diagram shows the lateral movement techniques attackers commonly use to compromise systems through unauthorized means.
+
+
+
+
+
+
+
+## Lateral movement techniques
+
+- Exploitation of remote services
+- Internal spear phishing
+- Lateral tool transfer
+- Remote service session hijacking
+- Remote services
+- Replication through removable media
+- Software deployment tools
+- Taint shared content
+- Use alternate authentication material
+
+With all of these techniques available to anyone, the threat of an attack has never been more real.
+These techniques are highly available and easily reproducible patterns.
+This article will inform you on how to protect against lateral movement with a simple, scalable solution that
+runs on any runtime or platform and stops lateral movement through bi-directional, or mutual, transport layer security (mTLS).
+
+## The castle and moat are gone; authorize and authenticate every room
+
+In the traditional datacenter approach to network security, the “castle” and “moat” keep everything inside safe.
+Imagine your services as rooms in a castle. Just because you make it into the castle does not mean you should have
+free rein to waltz around like you own the place. The locks on the door and the quartermaster who issues the keys are
+two essential parts of keeping each room safe.
+This approach is what authentication and authorization do with mTLS, except this all happens in the digital world at an
+exponentially faster pace. In the cloud, dynamic or ephemeral services can come up and down very fast and need to maintain
+connections to provide a data mesh to users or machines. Applications in modern CI/CD workflows are being delivered by
+deploying code thousands of times a day on many different runtimes and platforms.
+The complexity can quickly become unmanageable, unless you can abstract some of this complexity away from developers and
+limit the cognitive load while maintaining security.
+
+## Service mesh 101
+
+
+
+
+
+
+
+With the rising use of microservices, Kubernetes, public cloud, and hybrid computing, Site Reliability Engineers and
+DevOps engineers are encouraged to avoid hardcoding values into application code.
+Additionally, they have to ensure secure, resilient, and performant applications while reducing complexity at scale.
+
+HashiCorp's [service mesh](/consul/docs/use-case/service-mesh) solution, [Consul](/consul/), enables practitioners to achieve this today by leveraging
+mTLS between microservices in the mesh. Furthermore, certificates, keys, and other items in the mesh also need
+to be secured and managed. [HashiCorp Vault](/vault) can help practitioners simplify these workloads and centralize these secrets.
+Vault's Secrets engines provide infrastructure automation and are essential to reduce cognitive load for the teams
+trying to manage secrets.
+Luckily there are secrets management platforms today that are able to automate secrets rotation, generation, and protection,
+delivering a unique capability to service mesh: The ability to manage the secrets within a service mesh.
+An example of some of these secrets in a mesh are:
+- Access control list (ACL) bootstrap token
+- ACL partition token
+- ACL replication token
+- Enterprise license
+- Gossip encryption key
+- Snapshot agent config
+- Server TLS credentials
+- Service mesh client TLS credentials
+
+Centralizing secrets across your environments is essential to stop a lateral attack, among other vulnerabilities.
+Identity is the new perimeter and identity is a secret. A service mesh can contribute to the problem of
+[secret sprawl](https://www.hashicorp.com/resources/what-is-secret-sprawl-why-is-it-harmful) if not properly addressed. If you're using a service mesh today you should consider the level of effort to
+store the secrets associated and learn how HashiCorp solutions can help ease the cognitive load while defending against
+emerging threats. Follow the
+[Vault as secrets management for Consul](/consul/tutorials/kubernetes/kubernetes-vault-consul-secrets-management) to learn how
+to start defending against lateral movement.
+
+
+
+
+
+
+
+## How HashiCorp can help you prevent lateral movement with HashiCorp Vault & Consul
+
+HashiCorp Vault (a secrets management platform) and HashiCorp Consul (a service mesh) are two good examples of integrating
+tools that can prevent lateral movement based on the MITRE ATT&CK Framework techniques.
+You can test this out and learn more at [HashiCorp Developer](/consul/tutorials/kubernetes/kubernetes-vault-consul-secrets-management).
+
+## Tutorials
+
+- [Vault as secrets management for Consul](/consul/tutorials/kubernetes/kubernetes-vault-consul-secrets-management)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/secure-systems/protect-data/data-at-rest.mdx b/content/well-architected-framework/docs/docs/secure-systems/protect-data/data-at-rest.mdx
new file mode 100644
index 000000000..c5052aff0
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/secure-systems/protect-data/data-at-rest.mdx
@@ -0,0 +1,101 @@
+---
+page_title: Protect data at-rest
+description: Learn how to protect your sensitive data.
+---
+
+# Protect data at rest
+
+Data at rest represents any data you maintain in non-volatile storage in your environment.
+Encrypting data at rest, and implementing secure access to your data are two ways you can protect
+your applications from security threats.
+
+### Encrypt data with Vault
+
+Vault uses a security barrier for all requests made to its API endpoints.
+This security barrier automatically encrypts all data leaving Vault using a 256-bit Advanced Encryption Standard (AES) cipher in the Galois Counter Mode (GCM) with 96-bit nonces.
+Vault's barrier encrypts your data, and Vault stores only encrypted data regardless of configured storage type.
+Whenever you use a Vault secrets engine, such as the [Key/Value (KV) secrets engine](/vault/docs/secrets/kv), you also gain the benefits of Vault's cryptographic barrier. You (or your application) must authenticate with Vault to receive a token with attached policies that authorize access to data stored in a secrets engine. You shouldn't store large volumes of secrets in Vault. Instead, you should store the secrets in a database, encrypt the database, and store the encryption key in Vault.
+
+For example, when working with a Microsoft SQL Server using Transparent Data Encryption (TDE), your database already encrypts data using a Data Encryption Key (DEK). Rather than moving all that data to Vault, you should store the Key Encryption Key (KEK) in Vault's KV secrets engine. This KEK encrypts the DEK, which in turn encrypts your database content. This approach leverages Vault's strong security features for the most sensitive component (the encryption key) while enabling your database to efficiently manage the encrypted data.
+
+The following diagram shows how you can store an encryption key in Vault, and use that key to encrypt your database.
+
+
+
+
+
+
+
+When you control access to data, you gain another layer of data protection.
+Vault can secure access to your external data at rest through dynamic credentials.
+These dynamic credentials have a lifecycle attached to them, and Vault automatically revokes them after a predefined period of time.
+We recommend using dynamic secrets when accessing your external data.
+
+For example, you can use Vault to issue your CI/CD pipeline dynamic credentials to an external service,
+such as a PostgreSQL database. Dynamic secrets allow your CI/CD pipelines to access your data at rest,
+and then once the pipeline finishes, Vault revokes the credentials.
+The next time your pipeline runs, Vault issues your pipeline new credentials.
+
+HashiCorp resources:
+- Learn to use [Vault dynamic secrets](/vault/tutorials/get-started/understand-static-dynamic-secrets)
+- Learn to use [versioned key/value secrets engine](/vault/tutorials/secrets-management/versioned-kv)
+- Read how to [retrieve CI/CD secrets from Vault](/well-architected-framework/security/security-cicd-vault).
+- Read the [Vault's Key/Value (KV) secrets engine](/vault/docs/secrets/kv) documentation
+
+
+External resources:
+- [Advanced Encryption Standard](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard) and [Galois Counter Mode](https://en.wikipedia.org/wiki/Galois/Counter_Mode)
+- [Enabling transparent data encryption for Microsoft SQL with Vault](https://www.hashicorp.com/blog/enabling-transparent-data-encryption-for-microsoft-sql-with-vault)
+- [Why you should use ephemeral credentials](https://www.liatrio.com/blog/why-you-should-be-using-ephemeral-credentials)
+
+
+### Enforce encryption with Terraform
+
+You should protect sensitive data by enforcing encryption standards with infrastructure-as-code (IaC).
+
+Terraform can help you secure your data at rest by deploying infrastructure from code that specifies
+resource and data encryption, along with access control policies.
+
+As an example of using Terraform to create infrastructure that securely stores data, consider
+enabling server-side encryption by default in an AWS S3 bucket.
+Terraform can [create a KMS key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key)
+using the `aws_kms_key` resource. It can then create an S3 bucket,
+[enable default server side encryption](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket#enable-default-server-side-encryption)
+for the S3 bucket, and then use the KMS key to encrypt objects.
+
+The following example creates a KMS key and enforces S3 object encryption server-side:
+
+```hcl
+resource "aws_kms_key" "mykey" {
+ description = "This key is used to encrypt bucket objects"
+ deletion_window_in_days = 10
+}
+
+resource "aws_s3_bucket" "mybucket" {
+ bucket = "mybucket"
+}
+
+resource "aws_s3_bucket_server_side_encryption_configuration" "example" {
+ bucket = aws_s3_bucket.mybucket.id
+
+ rule {
+ apply_server_side_encryption_by_default {
+ kms_master_key_id = aws_kms_key.mykey.arn
+ sse_algorithm = "aws:kms"
+ }
+ }
+}
+```
+
+The [AWS S3 module](https://github.com/terraform-aws-modules/terraform-aws-s3-bucket/blob/master/examples/complete/main.tf#L28)
+provides Terraform code that creates a KMS key and
+[encrypts objects](https://github.com/terraform-aws-modules/terraform-aws-s3-bucket/blob/master/examples/complete/main.tf#L189)
+stored in the S3 bucket with the KMS key. You can use this module to create and secure your S3 buckets.
+
+You can enforce data at rest encryption with Terraform in other clouds such as GCP, Azure, and on-prem.
+
+HashiCorp resources:
+
+- [Encrypting data with Transform secrets engine](/vault/tutorials/adp/transform-code-example)
+- [Transform sensitive data with Vault](/vault/tutorials/adp/transform)
+- View our tutorial library on [data encryption](/vault/tutorials/encryption-as-a-service)
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/secure-systems/protect-data/data-in-transit.mdx b/content/well-architected-framework/docs/docs/secure-systems/protect-data/data-in-transit.mdx
new file mode 100644
index 000000000..b2cbd8141
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/secure-systems/protect-data/data-in-transit.mdx
@@ -0,0 +1,91 @@
+---
+page_title: Protect data in-transit
+description: Learn how to protect your sensitive data.
+---
+
+# Protect data in-transit
+
+Data in transit is any data moving between systems, such as passwords, secrets, and keys.
+In-transit data includes data moving between resources within your organization, and incoming
+and outgoing data with services outside your organization. By protecting your data in transit,
+you protect the confidentiality and integrity of the data within your organization.
+
+### TLS for client to server communication
+
+Human client-to-machine communication is the first hop of data in transit.
+TLS/SSL certificates are used to encrypt such communication - in most cases via browsers - using HTTPS instead of HTTP.
+TLS can also wrap FTP (FTPS, not to be confused with SFTP, which uses the SSH protocol), IMAP (IMAPS), POP3 (POP3S),
+and SMTP (SMTPS), among others.
+
+HTTP is dangerous because someone (man-in-the-middle) can intercept the traffic and insert malicious code before forwarding
+it to the user's browser. The Transport Layer Security (TLS) protocol solves this problem by allowing the client to verify the identity of the server and allows the server to verify the identity of the client. You should use the latest TLS version (v1.3) because it provides stronger security through improved encryption algorithms and patches known vulnerabilities. It also offers better performance with faster connection times and enhanced privacy protections.
+
+Protect yourself by verifying that your browser supports TLS v1.3.
+Additionally, you can identify whether a site supports HTTP Strict Transport Security (HSTS)
+to protect against man-in-the-middle attacks using the Qualys SSL Server Test.
+Most web browsers show if a website uses TLS encryption, usually with a lock icon in the address bar.
+
+External resource:
+- Verify your browser [supports TLS v1.3](https://caniuse.com/tls1-3)
+- [View Qualys SSL Server Test on hashicorp.com](https://www.ssllabs.com/analyze.html?d=hashicorp.com&latest)
+
+
+### Consul for universal networking
+
+Unencrypted cross-application communications are susceptible to man-in-the-middle attacks.
+An application can protect itself against malicious activities by requiring mTLS (mutual TLS) on both ends
+of the application to application communications.
+
+HashiCorp Consul automatically enables mTLS for all communication between application
+services (machine-to-machine). Even legacy apps can use mTLS through local Consul proxies that intercept network traffic
+as part of a service mesh. A service mesh architecture lets Consul enforce mTLS across clouds and platforms.
+Consul automatically generates signed certificates, and lets you rapidly and comprehensively upgrade TLS versions
+and cipher suites in the future. This process helps resolve the
+typical slow process of updating the TLS version in your application.
+
+Consul automatically encrypts communications within the service mesh with mTLS. You should also secure outside traffic entering the service mesh.
+Two common entry points for traffic into the Consul Service mesh are the Ingress Gateway and the API Gateway.
+To secure inbound traffic to these gateways, you can enable
+TLS on ingress gateways,
+and enable TLS on the API gateway listeners.
+
+HashiCorp resources:
+- [What is Consul?](/consul/docs/intro)
+- [Update Consul agents to securely communicate with TLS](/consul/tutorials/archive/tls-encryption-secure-existing-datacenter)
+- [Enable TLS on ingress gateways](/consul/docs/north-south/ingress-gateway#custom-tls-certificates-via-secret-discovery-service-sds)
+- [Enable TLS on API gateway listeners](/consul/docs/api-gateway/configuration/gateway#listeners-tls)
+
+
+### Vault for securing specific types of content
+
+Encrypting data sent across the public network is a common practice to protect highly sensitive data.
+However, managing the encryption key introduces operational overhead.
+An organization may require a specific type of encryption key.
+Vault's Transit secrets engine supports a number of key
+types to encrypt and decrypt your in-transit data. The Transit secrets engine can also manage the encryption key lifecycle to relieve the operational burden.
+
+The Transit secrets engine handles cryptographic functions on in-transit data.
+Vault doesn't store any data sent to the Transit secrets engine. You can think of the Transit secrets engine as providing "cryptography as a service" or
+"encryption as a service". The Transit secrets engine can sign and verify data,
+generate hashes and HMACs of data, and act as a source of random bytes.
+
+For more advanced use cases, like encoding credit card numbers, data transformation and tokenization
+are more desirable data protection methods. Vault's Transform secrets engine provides data encryption
+service similar to the Transit secrets engine. The key difference is that the users can specify the
+format of the resulting ciphertext using the Transform secrets engine's format-preserving encryption (FPE) feature.
+
+In addition to FPE, the Transform secrets engine provides data tokenization capability.
+Refer to the [Vault Tokenization](#tokenize-critical-data) section to learn how the Transform secrets
+engine tokenizes data for secure in-transit data transmission.
+
+
+
+ Transform secrets engine is a Vault Enterprise feature.
+
+
+
+HashiCorp resources:
+- [Vault's Transit secrets engine](/vault/docs/secrets/transit)
+- [Encryption as a service: transit secrets engine](/vault/tutorials/encryption-as-a-service/eaas-transit)
+- [Data encryption](/vault/tutorials/encryption-as-a-service)
+- [Transform secrets engine](/vault/tutorials/adp/transform)
diff --git a/content/well-architected-framework/docs/docs/secure-systems/protect-data/sensitive-data.mdx b/content/well-architected-framework/docs/docs/secure-systems/protect-data/sensitive-data.mdx
new file mode 100644
index 000000000..6378535c8
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/secure-systems/protect-data/sensitive-data.mdx
@@ -0,0 +1,8 @@
+---
+page_title: Protect sensitive data
+description: Learn how to protect your sensitive data.
+---
+
+# Protect sensitive data
+
+The shift from on-premises data centers to cloud infrastructure requires new secrets management techniques for the cloud's dynamic environments, applications, machines, and user credentials. Securing infrastructure, data, and access across clouds requires careful planning. You must identify and categorize your organization's data based on its sensitivity to decide how to secure them. You must apply different practices to protect data in transit and at rest.
\ No newline at end of file
diff --git a/content/well-architected-framework/docs/docs/secure-systems/protect-data/tokenize-data.mdx b/content/well-architected-framework/docs/docs/secure-systems/protect-data/tokenize-data.mdx
new file mode 100644
index 000000000..c3108d5f1
--- /dev/null
+++ b/content/well-architected-framework/docs/docs/secure-systems/protect-data/tokenize-data.mdx
@@ -0,0 +1,33 @@
+---
+page_title: Tokenize data
+description: Learn how to tokenize critical data.
+---
+
+# Tokenize data
+
+Tokenization converts sensitive data into nonsensitive data called tokens.
+Tokens are helpful when sensitive data is being sent out remotely, such as client authentication
+like GitHub login authentication, credit card numbers, banking credentials,
+or any other systems which require external authentication or data exchange.
+
+You can use HashiCorp Vault to create tokens to secure data.
+Vault Transform secrets engine
+can tokenize data to replace highly sensitive data, like credit card numbers, with unique values (tokens)
+that are unrelated to the original value in any algorithmic sense. Therefore,
+the tokens do not risk exposing the critical data, satisfying the Payment Card Industry Data Security Standard
+(PCI-DSS) guidance.
+
+The following diagram shows how Vault can take sensitive data, such as a customer's credit card number,
+encrypt the value, and allow the application to use that credit card securely.
+
+
+
+
+
+
+
+
+HashiCorp resources:
+-
+- [Tokenize data with Transform secrets engine](/vault/tutorials/adp/tokenization)
+- [Vault Transform secrets engine](/vault/docs/secrets/transform)
\ No newline at end of file
diff --git a/content/well-architected-framework/img/docs/well-architected/Circleci-icon-logo.svg.png b/content/well-architected-framework/img/docs/well-architected/Circleci-icon-logo.svg.png
new file mode 100644
index 000000000..3755b01bb
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/Circleci-icon-logo.svg.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/anatomy-cyber-attack-1.png b/content/well-architected-framework/img/docs/well-architected/anatomy-cyber-attack-1.png
new file mode 100644
index 000000000..f317a88de
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/anatomy-cyber-attack-1.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/boundary-components-min.png b/content/well-architected-framework/img/docs/well-architected/boundary-components-min.png
new file mode 100644
index 000000000..3ae25da4c
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/boundary-components-min.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/cloud-adoption-vault-workflow.png b/content/well-architected-framework/img/docs/well-architected/cloud-adoption-vault-workflow.png
new file mode 100644
index 000000000..92f2613fe
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/cloud-adoption-vault-workflow.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/com-consul.png b/content/well-architected-framework/img/docs/well-architected/com-consul.png
new file mode 100644
index 000000000..c086e4269
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/com-consul.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/com-enabling-stackup.png b/content/well-architected-framework/img/docs/well-architected/com-enabling-stackup.png
new file mode 100644
index 000000000..12e15228b
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/com-enabling-stackup.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/com-hashicorp-stack-app-delivery.png b/content/well-architected-framework/img/docs/well-architected/com-hashicorp-stack-app-delivery.png
new file mode 100644
index 000000000..2d2aedcb5
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/com-hashicorp-stack-app-delivery.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/com-security-intro.png b/content/well-architected-framework/img/docs/well-architected/com-security-intro.png
new file mode 100644
index 000000000..653d7f488
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/com-security-intro.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/com-static-dynamic-run-connect-secure-provision.png b/content/well-architected-framework/img/docs/well-architected/com-static-dynamic-run-connect-secure-provision.png
new file mode 100644
index 000000000..a08aef76a
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/com-static-dynamic-run-connect-secure-provision.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/commaturity2020x.png b/content/well-architected-framework/img/docs/well-architected/commaturity2020x.png
new file mode 100644
index 000000000..f7e40ea12
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/commaturity2020x.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/commulti-cloud-transition-2x.png b/content/well-architected-framework/img/docs/well-architected/commulti-cloud-transition-2x.png
new file mode 100644
index 000000000..6ac1dd4ba
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/commulti-cloud-transition-2x.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-authenticate-tls-dark.png b/content/well-architected-framework/img/docs/well-architected/diagram-authenticate-tls-dark.png
new file mode 100644
index 000000000..85deda697
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-authenticate-tls-dark.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-authenticate-tls.png b/content/well-architected-framework/img/docs/well-architected/diagram-authenticate-tls.png
new file mode 100644
index 000000000..7bf11f61e
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-authenticate-tls.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-pki-ca-dark.png b/content/well-architected-framework/img/docs/well-architected/diagram-pki-ca-dark.png
new file mode 100644
index 000000000..178918fdf
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-pki-ca-dark.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-pki-ca.png b/content/well-architected-framework/img/docs/well-architected/diagram-pki-ca.png
new file mode 100644
index 000000000..17fe0e67e
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-pki-ca.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-secure-cluster-tls-consul-dark.png b/content/well-architected-framework/img/docs/well-architected/diagram-secure-cluster-tls-consul-dark.png
new file mode 100644
index 000000000..501bdb448
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-secure-cluster-tls-consul-dark.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-secure-cluster-tls-consul.png b/content/well-architected-framework/img/docs/well-architected/diagram-secure-cluster-tls-consul.png
new file mode 100644
index 000000000..4891507e0
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-secure-cluster-tls-consul.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-secure-cluster-tls-dark.png b/content/well-architected-framework/img/docs/well-architected/diagram-secure-cluster-tls-dark.png
new file mode 100644
index 000000000..a48b576c2
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-secure-cluster-tls-dark.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-secure-cluster-tls.png b/content/well-architected-framework/img/docs/well-architected/diagram-secure-cluster-tls.png
new file mode 100644
index 000000000..d07886510
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-secure-cluster-tls.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-secure-consul-tls-dark.png b/content/well-architected-framework/img/docs/well-architected/diagram-secure-consul-tls-dark.png
new file mode 100644
index 000000000..e5c94e651
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-secure-consul-tls-dark.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-secure-consul-tls.png b/content/well-architected-framework/img/docs/well-architected/diagram-secure-consul-tls.png
new file mode 100644
index 000000000..b1f4f1bb1
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-secure-consul-tls.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-secure-nomad-tls-dark.png b/content/well-architected-framework/img/docs/well-architected/diagram-secure-nomad-tls-dark.png
new file mode 100644
index 000000000..74b9579ef
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-secure-nomad-tls-dark.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-secure-nomad-tls.png b/content/well-architected-framework/img/docs/well-architected/diagram-secure-nomad-tls.png
new file mode 100644
index 000000000..4daae4752
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-secure-nomad-tls.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-secure-vault-tls-dark.png b/content/well-architected-framework/img/docs/well-architected/diagram-secure-vault-tls-dark.png
new file mode 100644
index 000000000..5a6b21a32
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-secure-vault-tls-dark.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-secure-vault-tls.png b/content/well-architected-framework/img/docs/well-architected/diagram-secure-vault-tls.png
new file mode 100644
index 000000000..3e56ee5fa
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-secure-vault-tls.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-terraform-tls-provider-dark.png b/content/well-architected-framework/img/docs/well-architected/diagram-terraform-tls-provider-dark.png
new file mode 100644
index 000000000..538511799
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-terraform-tls-provider-dark.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/diagram-terraform-tls-provider.png b/content/well-architected-framework/img/docs/well-architected/diagram-terraform-tls-provider.png
new file mode 100644
index 000000000..efe6a5b25
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/diagram-terraform-tls-provider.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/gitlab-vault-workflow.png b/content/well-architected-framework/img/docs/well-architected/gitlab-vault-workflow.png
new file mode 100644
index 000000000..e13caed59
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/gitlab-vault-workflow.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/hcp-tf-github-actions-workflow.png b/content/well-architected-framework/img/docs/well-architected/hcp-tf-github-actions-workflow.png
new file mode 100644
index 000000000..a458d6bb4
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/hcp-tf-github-actions-workflow.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/secret-sprawl-1.png b/content/well-architected-framework/img/docs/well-architected/secret-sprawl-1.png
new file mode 100644
index 000000000..a0e6bf010
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/secret-sprawl-1.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/sentinel-waf.png b/content/well-architected-framework/img/docs/well-architected/sentinel-waf.png
new file mode 100644
index 000000000..9c361878c
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/sentinel-waf.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/service-mesh-1.png b/content/well-architected-framework/img/docs/well-architected/service-mesh-1.png
new file mode 100644
index 000000000..a2201cf2d
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/service-mesh-1.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/static-to-dynamic.png b/content/well-architected-framework/img/docs/well-architected/static-to-dynamic.png
new file mode 100644
index 000000000..3e6090e47
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/static-to-dynamic.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/tf-vault-ns-1.png b/content/well-architected-framework/img/docs/well-architected/tf-vault-ns-1.png
new file mode 100644
index 000000000..3fa80ee6b
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/tf-vault-ns-1.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/tf-vault-ns-2.png b/content/well-architected-framework/img/docs/well-architected/tf-vault-ns-2.png
new file mode 100644
index 000000000..681d3ae81
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/tf-vault-ns-2.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/traditional_tokenization.png b/content/well-architected-framework/img/docs/well-architected/traditional_tokenization.png
new file mode 100644
index 000000000..d1d3a0fe4
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/traditional_tokenization.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/vault-cicd-1.png b/content/well-architected-framework/img/docs/well-architected/vault-cicd-1.png
new file mode 100644
index 000000000..121bb1066
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/vault-cicd-1.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/vault-encoded-text.jpg b/content/well-architected-framework/img/docs/well-architected/vault-encoded-text.jpg
new file mode 100644
index 000000000..12f764285
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/vault-encoded-text.jpg differ
diff --git a/content/well-architected-framework/img/docs/well-architected/vault-identity-triangle-diagram.png b/content/well-architected-framework/img/docs/well-architected/vault-identity-triangle-diagram.png
new file mode 100644
index 000000000..f66af99b4
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/vault-identity-triangle-diagram.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/vault-ms-sql-1.png b/content/well-architected-framework/img/docs/well-architected/vault-ms-sql-1.png
new file mode 100644
index 000000000..e9651df24
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/vault-ms-sql-1.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/vault-tokenization-4.png b/content/well-architected-framework/img/docs/well-architected/vault-tokenization-4.png
new file mode 100644
index 000000000..e3da28238
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/vault-tokenization-4.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/vault-tokenization-transformation-1.png b/content/well-architected-framework/img/docs/well-architected/vault-tokenization-transformation-1.png
new file mode 100644
index 000000000..ee1840350
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/vault-tokenization-transformation-1.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/vault-transit-secrets-engine-1.png b/content/well-architected-framework/img/docs/well-architected/vault-transit-secrets-engine-1.png
new file mode 100644
index 000000000..53d3eed35
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/vault-transit-secrets-engine-1.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/workspace-net-infra-combined.png b/content/well-architected-framework/img/docs/well-architected/workspace-net-infra-combined.png
new file mode 100644
index 000000000..de0d770d5
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/workspace-net-infra-combined.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/workspace-net-infra-split.png b/content/well-architected-framework/img/docs/well-architected/workspace-net-infra-split.png
new file mode 100644
index 000000000..33bb2d03f
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/workspace-net-infra-split.png differ
diff --git a/content/well-architected-framework/img/docs/well-architected/zero-trust-components.png b/content/well-architected-framework/img/docs/well-architected/zero-trust-components.png
new file mode 100644
index 000000000..5f1e71501
Binary files /dev/null and b/content/well-architected-framework/img/docs/well-architected/zero-trust-components.png differ
diff --git a/content/well-architected-framework/redirects.jsonc b/content/well-architected-framework/redirects.jsonc
new file mode 100644
index 000000000..4ef5823bf
--- /dev/null
+++ b/content/well-architected-framework/redirects.jsonc
@@ -0,0 +1,10 @@
+/**
+ * Redirects in this file are intended to be for documentation content only. The redirects will be applied to developer.hashicorp.com.
+ */
+[
+ // {
+ // "source": "",
+ // "destination": "",
+ // "permanent": true
+ // }
+]
\ No newline at end of file
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 0a73c5e1c..aa7ce1ef2 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -3,7 +3,7 @@
services:
dev-portal:
- image: 'hashicorp/dev-portal:latest'
+ image: 'dev-portal:waf-docs'
container_name: dev-portal
ports:
- ${DEV_PORTAL_PORT}:${DEV_PORTAL_PORT}