diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ee179f1f92..427e40c039 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -380,6 +380,7 @@ jobs: arch: ${{ matrix.arch }} tags: | docker.io/hashicorp/${{ env.repo }}:${{ env.version }} + docker.io/hashicorp/${{ env.repo }}:${{ env.version }}_${{ github.sha }} public.ecr.aws/hashicorp/${{ env.repo }}:${{ env.version }} # Per-commit dev images follow the naming convention MAJOR.MINOR-dev # And MAJOR.MINOR-dev-$COMMITSHA diff --git a/.go-version b/.go-version index 49e0a31d49..ac1df3fce3 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.23.1 +1.23.3 diff --git a/.release/security-scan.hcl b/.release/security-scan.hcl index da2c1a239d..afe49295c3 100644 --- a/.release/security-scan.hcl +++ b/.release/security-scan.hcl @@ -5,6 +5,13 @@ container { dependencies = true alpine_secdb = true secrets = false + + triage { + suppress { + // Suppress wget vulnerability + vulnerabilities = ["CVE-2024-10524"] + } + } } binary { diff --git a/CHANGELOG.md b/CHANGELOG.md index 58b6446307..9a1ea9fadf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,23 @@ Canonical reference for changes, improvements, and bugfixes for Boundary. +## 0.18.1 (2024/11/21) +### New and Improved + +* Delete terminated sessions in batches to avoid long running jobs. + ([PR](https://github.com/hashicorp/boundary/pull/5201)) + +### Bug fixes + +* Fix an issue where users would lose access to managed groups if + there are more than 10,000 managed groups in the auth method used. + ([PR](https://github.com/hashicorp/boundary/pull/5242)) +* Fix an issue where only the first 10,000 members of a managed group + are returned when getting the managed group, and a similar issue where + only the first 10,000 managed groups an account is part of is included + when getting the account. 
+ ([PR](https://github.com/hashicorp/boundary/pull/5245)) + ## 0.18.0 (2024/10/01) ### New and Improved diff --git a/CODEOWNERS b/CODEOWNERS index 44fba948e1..d896efc591 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -2,11 +2,6 @@ # the repo, unless a later match takes precedence. @hashicorp/boundary -# release configuration +# web presence and education -/.release/ @hashicorp/github-secure-boundary -/.github/workflows/build.yml @hashicorp/github-secure-boundary - -# education - -/website/content/ @hashicorp/boundary-education-approvers +/website/ @hashicorp/boundary-education-approvers @hashicorp/web-presence @hashicorp/boundary \ No newline at end of file diff --git a/api/go.mod b/api/go.mod index ce20c34df6..5859d115f3 100644 --- a/api/go.mod +++ b/api/go.mod @@ -1,12 +1,12 @@ module github.com/hashicorp/boundary/api -go 1.23.1 +go 1.23.3 require ( github.com/hashicorp/boundary/sdk v0.0.48 github.com/hashicorp/go-cleanhttp v0.5.2 - github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 - github.com/hashicorp/go-retryablehttp v0.7.4 + github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 + github.com/hashicorp/go-retryablehttp v0.7.7 github.com/hashicorp/go-rootcerts v1.0.2 github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 @@ -38,7 +38,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect golang.org/x/crypto v0.18.0 // indirect - golang.org/x/sys v0.16.0 // indirect + golang.org/x/sys v0.20.0 // indirect google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/api/go.sum b/api/go.sum index 4b7b62d724..ccb30a5010 100644 --- a/api/go.sum +++ b/api/go.sum @@ -12,8 +12,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho= github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= @@ -36,19 +36,18 @@ github.com/hashicorp/eventlogger/filters/encrypt v0.1.8-0.20231025104552-802587e github.com/hashicorp/eventlogger/filters/encrypt v0.1.8-0.20231025104552-802587e608f0/go.mod h1:tMywUTIvdB/FXhwm6HMTt61C8/eODY6gitCHhXtyojg= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 h1:jrnDfQm2hCQ0/hEselgqzV4fK16gpZoY0OWGZpVPNHM= github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5/go.mod h1:psh1qKep5ukvuNobFY/hCybuudlkkACpmazOsCgX5Rg= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 h1:1ZuhfnZgRnLK8S0KovJkoTCRIQId5pv3sDR7pG5VQBw= 
-github.com/hashicorp/go-kms-wrapping/v2 v2.0.14/go.mod h1:0dWtzl2ilqKpavgM3id/kFK9L3tjo6fS4OhbVPSYpnQ= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 h1:WZeXfD26QMWYC35at25KgE021SF9L3u9UMHK8fJAdV0= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16/go.mod h1:ZiKZctjRTLEppuRwrttWkp71VYMbTTCkazK4xT7U/NQ= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= -github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= -github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= @@ -96,8 +95,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty 
v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/cli v1.1.5 h1:OxRIeJXpAMztws/XHlN2vu6imG5Dpq+j61AzAX5fLng= github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= @@ -131,7 +130,6 @@ github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFR github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= @@ -163,8 +161,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= diff --git 
a/enos/modules/aws_boundary/variables.tf b/enos/modules/aws_boundary/variables.tf index abeaf87080..1509dd3723 100644 --- a/enos/modules/aws_boundary/variables.tf +++ b/enos/modules/aws_boundary/variables.tf @@ -25,7 +25,7 @@ variable "worker_count" { variable "worker_instance_type" { description = "EC2 Instance type" type = string - default = "t2.micro" + default = "t2.small" } variable "worker_type_tags" { @@ -72,7 +72,7 @@ variable "controller_count" { variable "controller_instance_type" { description = "EC2 Instance type" type = string - default = "t2.micro" + default = "t2.small" } variable "controller_ebs_iops" { diff --git a/enos/modules/aws_worker/main.tf b/enos/modules/aws_worker/main.tf index 67b8a8d8d8..1d1ce692dc 100644 --- a/enos/modules/aws_worker/main.tf +++ b/enos/modules/aws_worker/main.tf @@ -10,6 +10,7 @@ terraform { } data "enos_environment" "current" {} +data "aws_caller_identity" "current" {} locals { selected_az = data.aws_availability_zones.available.names[random_integer.az.result] @@ -144,7 +145,7 @@ resource "aws_instance" "worker" { tags = merge( local.common_tags, { - Name = "${var.name_prefix}-boundary-worker", + Name = "${var.name_prefix}-boundary-worker-${split(":", data.aws_caller_identity.current.user_id)[1]}", }, ) } diff --git a/enos/modules/aws_worker/variables.tf b/enos/modules/aws_worker/variables.tf index dcda83f665..2b3dfbe426 100644 --- a/enos/modules/aws_worker/variables.tf +++ b/enos/modules/aws_worker/variables.tf @@ -31,7 +31,7 @@ variable "ubuntu_ami_id" { variable "worker_instance_type" { description = "The EC2 Instance type to be used for the worker's node" type = string - default = "t2.micro" + default = "t2.small" } variable "ssh_aws_keypair" { diff --git a/enos/modules/docker_openssh_server_ca_key/custom-cont-init.d/00-trust-user-ca b/enos/modules/docker_openssh_server_ca_key/custom-cont-init.d/00-trust-user-ca index 00cd08e724..de348a417a 100644 --- 
a/enos/modules/docker_openssh_server_ca_key/custom-cont-init.d/00-trust-user-ca +++ b/enos/modules/docker_openssh_server_ca_key/custom-cont-init.d/00-trust-user-ca @@ -2,13 +2,13 @@ # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: BUSL-1.1 -cp /ca/ca-key.pub /etc/ssh/ca-key.pub -chown 1000:1000 /etc/ssh/ca-key.pub -chmod 644 /etc/ssh/ca-key.pub -echo TrustedUserCAKeys /etc/ssh/ca-key.pub >> /etc/ssh/sshd_config -echo PermitTTY yes >> /etc/ssh/sshd_config -sed -i 's/X11Forwarding no/X11Forwarding yes/' /etc/ssh/sshd_config -echo "X11UseLocalhost no" >> /etc/ssh/sshd_config +cp /ca/ca-key.pub /config/sshd/ca-key.pub +chown 1000:1000 /config/sshd/ca-key.pub +chmod 644 /config/sshd/ca-key.pub +echo TrustedUserCAKeys /config/sshd/ca-key.pub >> /config/sshd/sshd_config +echo PermitTTY yes >> /config/sshd/sshd_config +sed -i 's/X11Forwarding no/X11Forwarding yes/' /config/sshd/sshd_config +echo "X11UseLocalhost no" >> /config/sshd/sshd_config apk update apk add xterm util-linux dbus ttf-freefont xauth firefox diff --git a/enos/modules/docker_openssh_server_ca_key/custom-cont-init.d/01-allow-tcp-forwarding b/enos/modules/docker_openssh_server_ca_key/custom-cont-init.d/01-allow-tcp-forwarding new file mode 100644 index 0000000000..b5f589da23 --- /dev/null +++ b/enos/modules/docker_openssh_server_ca_key/custom-cont-init.d/01-allow-tcp-forwarding @@ -0,0 +1,5 @@ +#!/usr/bin/with-contenv bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +sed -i 's/AllowTcpForwarding no/AllowTcpForwarding yes/' /config/sshd/sshd_config diff --git a/enos/modules/docker_openssh_server_ca_key/main.tf b/enos/modules/docker_openssh_server_ca_key/main.tf index c6cd4bab6c..cf1441aefe 100644 --- a/enos/modules/docker_openssh_server_ca_key/main.tf +++ b/enos/modules/docker_openssh_server_ca_key/main.tf @@ -61,9 +61,14 @@ locals { ca_public_key = data.tls_public_key.ca_key.public_key_openssh } +data "docker_registry_image" "openssh" { + name = var.image_name +} + resource "docker_image" "openssh_server" { - name = var.image_name - keep_locally = true + name = var.image_name + keep_locally = true + pull_triggers = [data.docker_registry_image.openssh.sha256_digest] } resource "docker_container" "openssh_server" { @@ -75,6 +80,7 @@ resource "docker_container" "openssh_server" { "TZ=US/Eastern", "USER_NAME=${var.target_user}", "PUBLIC_KEY=${local.ssh_public_key}", + "SUDO_ACCESS=true", ] network_mode = "bridge" dynamic "networks_advanced" { diff --git a/go.mod b/go.mod index 4a46436146..a32559cce3 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/boundary -go 1.23.1 +go 1.23.3 replace github.com/hashicorp/boundary/api => ./api @@ -91,7 +91,7 @@ require ( github.com/golang/protobuf v1.5.3 github.com/hashicorp/cap/ldap v0.0.0-20240206183135-ed8f24513744 github.com/hashicorp/dbassert v0.0.0-20231012105025-1bc1bd88e22b - github.com/hashicorp/go-kms-wrapping/extras/kms/v2 v2.0.0-20231219183231-6bac757bb482 + github.com/hashicorp/go-kms-wrapping/extras/kms/v2 v2.0.0-20241126174344-f3b1a41a15fd github.com/hashicorp/go-rate v0.0.0-20231204194614-cc8d401f70ab github.com/hashicorp/go-version v1.6.0 github.com/hashicorp/nodeenrollment v0.2.13 diff --git a/go.sum b/go.sum index 828d0d4a71..639ad523fa 100644 --- a/go.sum +++ b/go.sum @@ -205,8 +205,8 @@ github.com/hashicorp/go-dbw v0.1.5-0.20240909162114-6cee92b3da36 h1:rPD+2QPhCLq8 github.com/hashicorp/go-dbw 
v0.1.5-0.20240909162114-6cee92b3da36/go.mod h1:/YHbfK7mgG9k09aB74Imw3fEOwno0eTtlFTTYGZ7SFk= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-kms-wrapping/extras/kms/v2 v2.0.0-20231219183231-6bac757bb482 h1:1DqTnLaNk658AEenlF4PNGYd9b1hXE/+0jSOBIGOAms= -github.com/hashicorp/go-kms-wrapping/extras/kms/v2 v2.0.0-20231219183231-6bac757bb482/go.mod h1:323uN1BJ6bc9F1U6DPvgmLTVlBlMMnOIRrzCd5ZDee0= +github.com/hashicorp/go-kms-wrapping/extras/kms/v2 v2.0.0-20241126174344-f3b1a41a15fd h1:CmPn4FXkYbPgmIqAKU970nXOEWW0u2RYZ7NnB6f7jkQ= +github.com/hashicorp/go-kms-wrapping/extras/kms/v2 v2.0.0-20241126174344-f3b1a41a15fd/go.mod h1:8G70jr/DzTk81B2Z+bXnvqWHwPq6GkoRWagyZsbX0U0= github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7 h1:gM4OwbF16Cmfxt2QMkoGMQbRTfYFZLvDMPgU3rM3KIo= github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7/go.mod h1:7ZMHVluyqgHgEuTADeDzFNWoA9mnyPfdiK8Tk2Bct1c= github.com/hashicorp/go-kms-wrapping/v2 v2.0.17-0.20240313190905-91d44aa8e360 h1:AgzTis5Y2hKvmluFZH7V6+evaB1LoKT1KKjXysywyRI= diff --git a/internal/auth/oidc/repository_managed_group_members.go b/internal/auth/oidc/repository_managed_group_members.go index 96e04b60b9..6c65413196 100644 --- a/internal/auth/oidc/repository_managed_group_members.go +++ b/internal/auth/oidc/repository_managed_group_members.go @@ -111,7 +111,7 @@ func (r *Repository) SetManagedGroupMemberships(ctx context.Context, am *AuthMet msgs = append(msgs, &mgOplogMsg) } - currentMemberships, err = r.ListManagedGroupMembershipsByMember(ctx, acct.PublicId, WithReader(reader)) + currentMemberships, err = r.ListManagedGroupMembershipsByMember(ctx, acct.PublicId, WithReader(reader), WithLimit(-1)) if err != nil { return errors.Wrap(ctx, err, op, errors.WithMsg("unable to retrieve current managed group memberships before deletion")) } @@ -181,7 +181,7 @@ func (r *Repository) 
SetManagedGroupMemberships(ctx context.Context, am *AuthMet } } - currentMemberships, err = r.ListManagedGroupMembershipsByMember(ctx, acct.PublicId, WithReader(reader)) + currentMemberships, err = r.ListManagedGroupMembershipsByMember(ctx, acct.PublicId, WithReader(reader), WithLimit(-1)) if err != nil { return errors.Wrap(ctx, err, op, errors.WithMsg("unable to retrieve current managed group memberships after set")) } diff --git a/internal/auth/oidc/service_callback.go b/internal/auth/oidc/service_callback.go index 505c252ae9..4bd62b07d3 100644 --- a/internal/auth/oidc/service_callback.go +++ b/internal/auth/oidc/service_callback.go @@ -193,7 +193,7 @@ func Callback( } // Get the set of all managed groups so we can filter - mgs, _, err := r.ListManagedGroups(ctx, am.GetPublicId()) + mgs, _, err := r.ListManagedGroups(ctx, am.GetPublicId(), WithLimit(-1)) if err != nil { return "", errors.Wrap(ctx, err, op) } diff --git a/internal/auth/oidc/service_callback_test.go b/internal/auth/oidc/service_callback_test.go index e5b088aea0..f9a3219952 100644 --- a/internal/auth/oidc/service_callback_test.go +++ b/internal/auth/oidc/service_callback_test.go @@ -675,7 +675,8 @@ func Test_ManagedGroupFiltering(t *testing.T) { return iam.NewRepository(ctx, rw, rw, kmsCache) } repoFn := func() (*Repository, error) { - return NewRepository(ctx, rw, rw, kmsCache) + // Set a low limit to test that the managed group listing overrides the limit + return NewRepository(ctx, rw, rw, kmsCache, WithLimit(1)) } atRepoFn := func() (*authtoken.Repository, error) { return authtoken.NewRepository(ctx, rw, rw, kmsCache) @@ -819,7 +820,7 @@ func Test_ManagedGroupFiltering(t *testing.T) { tp.SetExpectedState(state) // Set the filters on the MGs for this test. First we need to get the current versions. 
- currMgs, ttime, err := repo.ListManagedGroups(ctx, testAuthMethod.PublicId) + currMgs, ttime, err := repo.ListManagedGroups(ctx, testAuthMethod.PublicId, WithLimit(-1)) require.NoError(err) // Transaction timestamp should be within ~10 seconds of now assert.True(time.Now().Before(ttime.Add(10 * time.Second))) @@ -860,7 +861,7 @@ func Test_ManagedGroupFiltering(t *testing.T) { assert.Contains(key.(map[string]any)["payload"], "auth_token_end") } // Ensure that we get the expected groups - memberships, err := repo.ListManagedGroupMembershipsByMember(ctx, account.PublicId) + memberships, err := repo.ListManagedGroupMembershipsByMember(ctx, account.PublicId, WithLimit(-1)) require.NoError(err) assert.Equal(len(tt.matchingMgs), len(memberships)) var matchingIds []string diff --git a/internal/census/census_job.go b/internal/census/census_job.go index dcca071189..1c116cdc54 100644 --- a/internal/census/census_job.go +++ b/internal/census/census_job.go @@ -54,7 +54,7 @@ func (c *censusJob) Status() scheduler.JobStatus { // Run performs the required work depending on the implementation. // The context is used to notify the job that it should exit early. 
-func (c *censusJob) Run(ctx context.Context) error { +func (c *censusJob) Run(ctx context.Context, _ time.Duration) error { err := RunFn(ctx, c) return err } diff --git a/internal/clientcache/cmd/cache/wrapper_register.go b/internal/clientcache/cmd/cache/wrapper_register.go index 9831a60fbf..08e898bb75 100644 --- a/internal/clientcache/cmd/cache/wrapper_register.go +++ b/internal/clientcache/cmd/cache/wrapper_register.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/boundary/internal/clientcache/internal/daemon" "github.com/hashicorp/boundary/internal/cmd/base" "github.com/hashicorp/boundary/internal/cmd/wrapper" + "github.com/hashicorp/boundary/version" "github.com/mitchellh/cli" ) @@ -33,28 +34,50 @@ func hook(ctx context.Context, baseCmd *base.Command, token string) { if baseCmd.FlagSkipCacheDaemon { return } - if startDaemon(ctx, baseCmd) { - addTokenToCache(ctx, baseCmd, token) + started, err := startDaemon(ctx, baseCmd) + if err != nil { + // Failed to start the daemon, but we don't need to tell the user + // since the function already did + return } + if !started { + // If we didn't have to start it, check that the version of the cache + // is current or newer than the CLI. + // We don't care if the cache is newer than the CLI, since we don't + // want to kill a cache started by a newer version of the CLI. + if !cacheVersionIsCurrentOrNewer(ctx, baseCmd) { + // If the cache is older than the current version, restart it + // Ignore errors stopping the daemon since it might have been stopped since + // we last tried to start the daemon. + _ = stopDaemon(ctx, baseCmd) + _, err = startDaemon(ctx, baseCmd) + if err != nil { + return + } + } + } + + // Cache successfully started and version verified, add the token to the cache + addTokenToCache(ctx, baseCmd, token) } // startDaemon attempts to start a daemon and returns true if we have attempted to start // the daemon and either it was successful or it was already running. 
-func startDaemon(ctx context.Context, baseCmd *base.Command) bool { +func startDaemon(ctx context.Context, baseCmd *base.Command) (started bool, _ error) { // Ignore errors related to checking if the process is already running since // this can fall back to running the process. if dotPath, err := DefaultDotDirectory(ctx); err == nil { pidPath := filepath.Join(dotPath, pidFileName) if running, _ := pidFileInUse(ctx, pidPath); running != nil { // return true since it is already running, no need to run it again. - return true + return false, nil } } cmdName, err := os.Executable() if err != nil { baseCmd.UI.Error(fmt.Sprintf("unable to find boundary binary for cache startup: %s", err.Error())) - return false + return false, err } var stdErr bytes.Buffer @@ -64,8 +87,41 @@ func startDaemon(ctx context.Context, baseCmd *base.Command) bool { // We use Run here instead of Start because the command spawns off a subprocess and returns. // We do not want to send the request to add a persona to the cache until we know the daemon // has started up. 
- err = cmd.Run() - return err == nil || strings.Contains(stdErr.String(), "already running") + if err := cmd.Run(); err != nil { + baseCmd.UI.Error(fmt.Sprintf("unable to start cache: %s", err.Error())) + return false, err + } + return !strings.Contains(stdErr.String(), "already running"), nil +} + +// stopDaemon makes a best effort attempt at stopping the cache daemon, if it is running +func stopDaemon(ctx context.Context, baseCmd *base.Command) error { + dotPath, err := DefaultDotDirectory(ctx) + if err != nil { + baseCmd.UI.Error(fmt.Sprintf("cannot find daemon directory: %s", err.Error())) + return err + } + pidPath := filepath.Join(dotPath, pidFileName) + running, err := pidFileInUse(ctx, pidPath) + if err != nil { + baseCmd.UI.Error(fmt.Sprintf("PID file in use: %s", err.Error())) + return err + } + if running == nil { + return nil + } + + cmdName, err := os.Executable() + if err != nil { + baseCmd.UI.Error(fmt.Sprintf("unable to find boundary binary for cache startup: %s", err.Error())) + return err + } + cmd := exec.Command(cmdName, "cache", "stop") + if err := cmd.Run(); err != nil { + baseCmd.UI.Error(fmt.Sprintf("unable to stop cache: %s", err.Error())) + return err + } + return nil } // silentUi should not be used in situations where the UI is expected to be @@ -108,6 +164,27 @@ func addTokenToCache(ctx context.Context, baseCmd *base.Command, token string) b return err == nil && apiErr == nil } +// cacheVersionIsCurrentOrNewer requests the version of the cache from the +// daemon, then compares it to the version of the CLI. If the cache version is +// greater than or equal to the CLI, it returns true. In all other cases, including +// errors, it returns false. 
+func cacheVersionIsCurrentOrNewer(ctx context.Context, baseCmd *base.Command) bool { + com := StatusCommand{Command: base.NewCommand(baseCmd.UI)} + // We do not want to print errors out from our background interactions with + // the daemon so use the silentUi to toss out anything that shouldn't be used + _, result, apiErr, err := com.Status(ctx) + if err != nil || apiErr != nil { + return false + } + cacheVersion := version.FromVersionString(result.Version) + if cacheVersion == nil { + return false + } + cliVersion := version.Get() + + return cacheVersion.Semver().GreaterThanOrEqual(cliVersion.Semver()) +} + // waitForDaemon continually looks for the unix socket until it is found or the // provided context is done. It returns an error if the unix socket is not found // before the context is done. diff --git a/internal/credential/vault/jobs.go b/internal/credential/vault/jobs.go index d7e23d9c8c..0013d09d8f 100644 --- a/internal/credential/vault/jobs.go +++ b/internal/credential/vault/jobs.go @@ -130,7 +130,7 @@ func (r *TokenRenewalJob) Status() scheduler.JobStatus { // Run queries the vault credential repo for tokens that need to be renewed, it then creates // a vault client and renews each token. Can not be run in parallel, if Run is invoked while // already running an error with code JobAlreadyRunning will be returned. -func (r *TokenRenewalJob) Run(ctx context.Context) error { +func (r *TokenRenewalJob) Run(ctx context.Context, _ time.Duration) error { const op = "vault.(TokenRenewalJob).Run" if !r.running.CompareAndSwap(r.running.Load(), true) { return errors.New(ctx, errors.JobAlreadyRunning, op, "job already running") @@ -356,7 +356,7 @@ func (r *TokenRevocationJob) Status() scheduler.JobStatus { // Run queries the vault credential repo for tokens that need to be revoked, it then creates // a vault client and revokes each token. Can not be run in parallel, if Run is invoked while // already running an error with code JobAlreadyRunning will be returned. 
-func (r *TokenRevocationJob) Run(ctx context.Context) error { +func (r *TokenRevocationJob) Run(ctx context.Context, _ time.Duration) error { const op = "vault.(TokenRevocationJob).Run" if !r.running.CompareAndSwap(r.running.Load(), true) { return errors.New(ctx, errors.JobAlreadyRunning, op, "job already running") @@ -521,7 +521,7 @@ func (r *CredentialRenewalJob) Status() scheduler.JobStatus { // Run queries the vault credential repo for credentials that need to be renewed, it then creates // a vault client and renews each credential. Can not be run in parallel, if Run is invoked while // already running an error with code JobAlreadyRunning will be returned. -func (r *CredentialRenewalJob) Run(ctx context.Context) error { +func (r *CredentialRenewalJob) Run(ctx context.Context, _ time.Duration) error { const op = "vault.(CredentialRenewalJob).Run" if !r.running.CompareAndSwap(r.running.Load(), true) { return errors.New(ctx, errors.JobAlreadyRunning, op, "job already running") @@ -696,7 +696,7 @@ func (r *CredentialRevocationJob) Status() scheduler.JobStatus { // Run queries the vault credential repo for credentials that need to be revoked, it then creates // a vault client and revokes each credential. Can not be run in parallel, if Run is invoked while // already running an error with code JobAlreadyRunning will be returned. -func (r *CredentialRevocationJob) Run(ctx context.Context) error { +func (r *CredentialRevocationJob) Run(ctx context.Context, _ time.Duration) error { const op = "vault.(CredentialRevocationJob).Run" if !r.running.CompareAndSwap(r.running.Load(), true) { return errors.New(ctx, errors.JobAlreadyRunning, op, "job already running") @@ -847,7 +847,7 @@ func (r *CredentialStoreCleanupJob) Status() scheduler.JobStatus { // Run deletes all vault credential stores in the repo that have been soft deleted. // Can not be run in parallel, if Run is invoked while already running an error with code // JobAlreadyRunning will be returned. 
-func (r *CredentialStoreCleanupJob) Run(ctx context.Context) error { +func (r *CredentialStoreCleanupJob) Run(ctx context.Context, _ time.Duration) error { const op = "vault.(CredentialStoreCleanupJob).Run" if !r.running.CompareAndSwap(r.running.Load(), true) { return errors.New(ctx, errors.JobAlreadyRunning, op, "job already running") @@ -947,7 +947,7 @@ func (r *CredentialCleanupJob) Status() scheduler.JobStatus { // Run deletes all Vault credential in the repo that have a null session_id and are not active. // Can not be run in parallel, if Run is invoked while already running an error with code // JobAlreadyRunning will be returned. -func (r *CredentialCleanupJob) Run(ctx context.Context) error { +func (r *CredentialCleanupJob) Run(ctx context.Context, _ time.Duration) error { const op = "vault.(CredentialCleanupJob).Run" if !r.running.CompareAndSwap(r.running.Load(), true) { return errors.New(ctx, errors.JobAlreadyRunning, op, "job already running") diff --git a/internal/credential/vault/jobs_test.go b/internal/credential/vault/jobs_test.go index 39177b2677..12b029a251 100644 --- a/internal/credential/vault/jobs_test.go +++ b/internal/credential/vault/jobs_test.go @@ -312,7 +312,7 @@ func TestTokenRenewalJob_RunLimits(t *testing.T) { r, err := newTokenRenewalJob(ctx, rw, rw, kmsCache, tt.opts...) 
require.NoError(err) - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(tt.wantLen, r.numTokens) @@ -352,7 +352,7 @@ func TestTokenRenewalJob_Run(t *testing.T) { cs, err := repo.CreateCredentialStore(ctx, in) require.NoError(err) - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) // No tokens should have been renewed since token expiration is 24 hours by default assert.Equal(0, r.numProcessed) @@ -391,7 +391,7 @@ func TestTokenRenewalJob_Run(t *testing.T) { require.NoError(err) // Run token renewal again - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) // Current and maintaining token should have been processed assert.Equal(2, r.numProcessed) @@ -468,7 +468,7 @@ func TestTokenRenewalJob_RunExpired(t *testing.T) { time.Sleep(time.Second * 2) // Token should have expired in vault, run should now expire in repo - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(1, r.numTokens) @@ -790,7 +790,7 @@ func TestTokenRevocationJob_RunLimits(t *testing.T) { r, err := newTokenRevocationJob(ctx, rw, rw, kmsCache, tt.opts...) 
require.NoError(err) - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(tt.wantLen, r.numTokens) @@ -836,7 +836,7 @@ func TestTokenRevocationJob_Run(t *testing.T) { require.NoError(err) // No tokens should have been revoked since only the current token exists - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(0, r.numProcessed) @@ -885,7 +885,7 @@ func TestTokenRevocationJob_Run(t *testing.T) { // Running should revoke noCredsToken and the revokeToken even though it has active // credentials it has been marked for revocation - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(2, r.numProcessed) @@ -922,7 +922,7 @@ func TestTokenRevocationJob_Run(t *testing.T) { assert.NoError(err) // Running again should now revoke the credsToken - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(1, r.numProcessed) @@ -934,7 +934,7 @@ func TestTokenRevocationJob_Run(t *testing.T) { require.NoError(rw.LookupWhere(ctx, &repoToken, "token_hmac = ?", []any{credsToken.TokenHmac})) assert.Equal(string(RevokedToken), repoToken.Status) - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) // With only the current token remaining no tokens should be revoked assert.Equal(0, r.numProcessed) @@ -1129,7 +1129,7 @@ func TestCredentialRenewalJob_RunLimits(t *testing.T) { r, err := newCredentialRenewalJob(ctx, rw, rw, kmsCache, tt.opts...) 
require.NoError(err) - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(tt.wantLen, r.numCreds) @@ -1196,7 +1196,7 @@ func TestCredentialRenewalJob_Run(t *testing.T) { credRenewal, err := newCredentialRenewalJob(ctx, rw, rw, kmsCache) require.NoError(err) - err = credRenewal.Run(ctx) + err = credRenewal.Run(ctx, 0) require.NoError(err) // No credentials should have been renewed assert.Equal(0, credRenewal.numCreds) @@ -1213,7 +1213,7 @@ func TestCredentialRenewalJob_Run(t *testing.T) { // Sleep to move clock time.Sleep(2 * time.Second) - err = credRenewal.Run(ctx) + err = credRenewal.Run(ctx, 0) require.NoError(err) // The active credential should have been renewed assert.Equal(1, credRenewal.numCreds) @@ -1319,7 +1319,7 @@ func TestCredentialRenewalJob_RunExpired(t *testing.T) { require.NoError(rw.LookupById(ctx, lookupCred)) assert.Equal(string(ActiveCredential), lookupCred.Status) - err = credRenewal.Run(ctx) + err = credRenewal.Run(ctx, 0) require.NoError(err) // The active credential should have been processed assert.Equal(1, credRenewal.numCreds) @@ -1678,7 +1678,7 @@ func TestCredentialRevocationJob_RunLimits(t *testing.T) { r, err := newCredentialRevocationJob(ctx, rw, rw, kmsCache, tt.opts...) 
require.NoError(err) - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(tt.wantLen, r.numCreds) @@ -1745,7 +1745,7 @@ func TestCredentialRevocationJob_Run(t *testing.T) { r, err := newCredentialRevocationJob(ctx, rw, rw, kmsCache) require.NoError(err) - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) // No credentials should have been revoked assert.Equal(0, r.numCreds) @@ -1764,7 +1764,7 @@ func TestCredentialRevocationJob_Run(t *testing.T) { // Verify revokeCred is valid in testDb assert.NoError(testDb.ValidateCredential(t, revokeSecret)) - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) // The revoke credential should have been revoked assert.Equal(1, r.numCreds) @@ -1842,7 +1842,7 @@ func TestCredentialRevocationJob_RunDeleted(t *testing.T) { secret, cred := testVaultCred(t, conn, v, cl, sess, repoToken, ActiveCredential, 5*time.Hour) - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) // No credentials should have been revoked as expiration is 5 hours from now assert.Equal(0, r.numCreds) @@ -1852,7 +1852,7 @@ func TestCredentialRevocationJob_RunDeleted(t *testing.T) { require.NoError(err) assert.Equal(1, count) - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) // No credentials should have been revoked assert.Equal(0, r.numCreds) @@ -1879,7 +1879,7 @@ func TestCredentialRevocationJob_RunDeleted(t *testing.T) { assert.Empty(lookupCred.SessionId) assert.Equal(string(RevokeCredential), lookupCred.Status) - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) // The revoke credential should have been revoked assert.Equal(1, r.numCreds) @@ -2028,7 +2028,7 @@ func TestCredentialStoreCleanupJob_Run(t *testing.T) { require.NoError(err) // No credential stores should have been cleaned up - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(0, r.numStores) @@ -2057,7 +2057,7 @@ func TestCredentialStoreCleanupJob_Run(t *testing.T) { assert.Equal(string(RevokeToken), 
repoToken.Status) // Both soft deleted credential stores should not be cleaned up yet - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(0, r.numStores) @@ -2067,7 +2067,7 @@ func TestCredentialStoreCleanupJob_Run(t *testing.T) { assert.Equal(1, count) // cs1 should be deleted - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(1, r.numStores) @@ -2095,7 +2095,7 @@ func TestCredentialStoreCleanupJob_Run(t *testing.T) { assert.Equal(1, count) // cs2 still has a second token not yet revoked/expired - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(0, r.numStores) @@ -2112,7 +2112,7 @@ func TestCredentialStoreCleanupJob_Run(t *testing.T) { assert.Equal(1, count) // With no un-expired or un-revoked tokens cs2 should now be deleted - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(1, r.numStores) @@ -2289,7 +2289,7 @@ func TestCredentialCleanupJob_Run(t *testing.T) { _, sess2Cred := testVaultCred(t, conn, v, cl, sess2, repoToken, ActiveCredential, 5*time.Hour) // No credentials should be cleaned up - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(0, r.numCreds) @@ -2299,7 +2299,7 @@ func TestCredentialCleanupJob_Run(t *testing.T) { assert.Equal(1, count) // Credentials are still in the revoke state so none should be deleted yet - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(0, r.numCreds) @@ -2324,7 +2324,7 @@ func TestCredentialCleanupJob_Run(t *testing.T) { assert.Equal(1, count) // Only the three credentials associated with the deleted session should be deleted - err = r.Run(ctx) + err = r.Run(ctx, 0) require.NoError(err) assert.Equal(3, r.numCreds) diff --git a/internal/daemon/cluster/handlers/worker_service.go b/internal/daemon/cluster/handlers/worker_service.go index 2e3209f2a7..1b57f6ee63 100644 --- a/internal/daemon/cluster/handlers/worker_service.go +++ b/internal/daemon/cluster/handlers/worker_service.go @@ 
-193,12 +193,12 @@ func (ws *workerServiceServer) Status(ctx context.Context, req *pbs.StatusReques authorizedDownstreams := &pbs.AuthorizedDownstreamWorkerList{} if len(req.GetConnectedWorkerPublicIds()) > 0 { - knownConnectedWorkers, err := serverRepo.ListWorkers(ctx, []string{scope.Global.String()}, server.WithWorkerPool(req.GetConnectedWorkerPublicIds()), server.WithLiveness(-1)) + knownConnectedWorkers, err := serverRepo.VerifyKnownWorkers(ctx, req.GetConnectedWorkerPublicIds()) if err != nil { event.WriteError(ctx, op, err, event.WithInfoMsg("error getting known connected worker ids")) return &pbs.StatusResponse{}, status.Errorf(codes.Internal, "Error getting known connected worker ids: %v", err) } - authorizedDownstreams.WorkerPublicIds = server.WorkerList(knownConnectedWorkers).PublicIds() + authorizedDownstreams.WorkerPublicIds = knownConnectedWorkers } if len(req.GetConnectedUnmappedWorkerKeyIdentifiers()) > 0 { diff --git a/internal/daemon/controller/handlers/accounts/account_service.go b/internal/daemon/controller/handlers/accounts/account_service.go index 18e2c5932d..a132fc3a79 100644 --- a/internal/daemon/controller/handlers/accounts/account_service.go +++ b/internal/daemon/controller/handlers/accounts/account_service.go @@ -606,7 +606,7 @@ func (s Service) getFromRepo(ctx context.Context, id string) (auth.Account, []st } return nil, nil, err } - mgs, err := repo.ListManagedGroupMembershipsByMember(ctx, a.GetPublicId()) + mgs, err := repo.ListManagedGroupMembershipsByMember(ctx, a.GetPublicId(), oidc.WithLimit(-1)) if err != nil { return nil, nil, err } @@ -629,7 +629,7 @@ func (s Service) getFromRepo(ctx context.Context, id string) (auth.Account, []st } return nil, nil, err } - mgs, err := repo.ListManagedGroupMembershipsByMember(ctx, a.GetPublicId()) + mgs, err := repo.ListManagedGroupMembershipsByMember(ctx, a.GetPublicId(), ldap.WithLimit(ctx, -1)) if err != nil { return nil, nil, err } diff --git 
a/internal/daemon/controller/handlers/accounts/account_service_test.go b/internal/daemon/controller/handlers/accounts/account_service_test.go index 5e33ea3741..7a5e549d2f 100644 --- a/internal/daemon/controller/handlers/accounts/account_service_test.go +++ b/internal/daemon/controller/handlers/accounts/account_service_test.go @@ -134,13 +134,15 @@ func TestGet(t *testing.T) { return password.NewRepository(ctx, rw, rw, kmsCache) } oidcRepoFn := func() (*oidc.Repository, error) { - return oidc.NewRepository(ctx, rw, rw, kmsCache) + // Use a small limit to test that membership lookup is explicitly unlimited + return oidc.NewRepository(ctx, rw, rw, kmsCache, oidc.WithLimit(1)) } iamRepoFn := func() (*iam.Repository, error) { return iam.NewRepository(ctx, rw, rw, kmsCache) } ldapRepoFn := func() (*ldap.Repository, error) { - return ldap.NewRepository(ctx, rw, rw, kmsCache) + // Use a small limit to test that membership lookup is explicitly unlimited + return ldap.NewRepository(ctx, rw, rw, kmsCache, ldap.WithLimit(ctx, 1)) } s, err := accounts.NewService(ctx, pwRepoFn, oidcRepoFn, ldapRepoFn, 1000) @@ -175,9 +177,10 @@ func TestGet(t *testing.T) { oidc.WithApiUrl(oidc.TestConvertToUrls(t, "https://www.alice.com/callback")[0]), ) oidcA := oidc.TestAccount(t, conn, oidcAm, "test-subject") - // Create a managed group that will always match, so we can test that it is + // Create some managed groups that will always match, so we can test that it is // returned in results mg := oidc.TestManagedGroup(t, conn, oidcAm, `"/token/sub" matches ".*"`) + mg2 := oidc.TestManagedGroup(t, conn, oidcAm, `"/token/sub" matches ".*"`) oidcWireAccount := pb.Account{ Id: oidcA.GetPublicId(), AuthMethodId: oidcA.GetAuthMethodId(), @@ -193,7 +196,7 @@ func TestGet(t *testing.T) { }, }, AuthorizedActions: oidcAuthorizedActions, - ManagedGroupIds: []string{mg.GetPublicId()}, + ManagedGroupIds: []string{mg.GetPublicId(), mg2.GetPublicId()}, } ldapAm := ldap.TestAuthMethod(t, conn, databaseWrapper, 
org.PublicId, []string{"ldaps://ldap1"}) @@ -204,6 +207,7 @@ func TestGet(t *testing.T) { ldap.WithDn(ctx, "test-dn"), ) ldapMg := ldap.TestManagedGroup(t, conn, ldapAm, []string{"admin"}) + ldapMg2 := ldap.TestManagedGroup(t, conn, ldapAm, []string{"admin"}) ldapWireAccount := pb.Account{ Id: ldapAcct.GetPublicId(), AuthMethodId: ldapAm.GetPublicId(), @@ -222,7 +226,7 @@ func TestGet(t *testing.T) { }, Type: ldap.Subtype.String(), AuthorizedActions: ldapAuthorizedActions, - ManagedGroupIds: []string{ldapMg.GetPublicId()}, + ManagedGroupIds: []string{ldapMg.GetPublicId(), ldapMg2.GetPublicId()}, } cases := []struct { @@ -289,12 +293,14 @@ func TestGet(t *testing.T) { if globals.ResourceInfoFromPrefix(tc.req.Id).Subtype == oidc.Subtype { // Set up managed groups before getting. First get the current - // managed group to make sure we have the right version. + // managed groups to make sure we have the right version. oidcRepo, err := oidcRepoFn() require.NoError(err) currMg, err := oidcRepo.LookupManagedGroup(ctx, mg.GetPublicId()) require.NoError(err) - _, _, err = oidcRepo.SetManagedGroupMemberships(ctx, oidcAm, oidcA, []*oidc.ManagedGroup{currMg}) + currMg2, err := oidcRepo.LookupManagedGroup(ctx, mg2.GetPublicId()) + require.NoError(err) + _, _, err = oidcRepo.SetManagedGroupMemberships(ctx, oidcAm, oidcA, []*oidc.ManagedGroup{currMg, currMg2}) require.NoError(err) } diff --git a/internal/daemon/controller/handlers/managed_groups/managed_group_service.go b/internal/daemon/controller/handlers/managed_groups/managed_group_service.go index dd7c780917..d8c03480d5 100644 --- a/internal/daemon/controller/handlers/managed_groups/managed_group_service.go +++ b/internal/daemon/controller/handlers/managed_groups/managed_group_service.go @@ -446,7 +446,7 @@ func (s Service) getFromRepo(ctx context.Context, id string) (auth.ManagedGroup, } return nil, nil, err } - ids, err := repo.ListManagedGroupMembershipsByGroup(ctx, mg.GetPublicId()) + ids, err := 
repo.ListManagedGroupMembershipsByGroup(ctx, mg.GetPublicId(), oidc.WithLimit(-1)) if err != nil { return nil, nil, err } @@ -469,7 +469,7 @@ func (s Service) getFromRepo(ctx context.Context, id string) (auth.ManagedGroup, } return nil, nil, err } - ids, err := repo.ListManagedGroupMembershipsByGroup(ctx, mg.GetPublicId()) + ids, err := repo.ListManagedGroupMembershipsByGroup(ctx, mg.GetPublicId(), ldap.WithLimit(ctx, -1)) if err != nil { return nil, nil, err } diff --git a/internal/daemon/controller/handlers/managed_groups/managed_group_service_test.go b/internal/daemon/controller/handlers/managed_groups/managed_group_service_test.go index 8c83d142f4..14762802b9 100644 --- a/internal/daemon/controller/handlers/managed_groups/managed_group_service_test.go +++ b/internal/daemon/controller/handlers/managed_groups/managed_group_service_test.go @@ -118,13 +118,15 @@ func TestGet(t *testing.T) { wrap := db.TestWrapper(t) kmsCache := kms.TestKms(t, conn, wrap) oidcRepoFn := func() (*oidc.Repository, error) { - return oidc.NewRepository(ctx, rw, rw, kmsCache) + // Use a small limit to test that membership lookup is explicitly unlimited + return oidc.NewRepository(ctx, rw, rw, kmsCache, oidc.WithLimit(1)) } iamRepoFn := func() (*iam.Repository, error) { return iam.NewRepository(ctx, rw, rw, kmsCache) } ldapRepoFn := func() (*ldap.Repository, error) { - return ldap.NewRepository(ctx, rw, rw, kmsCache) + // Use a small limit to test that membership lookup is explicitly unlimited + return ldap.NewRepository(ctx, rw, rw, kmsCache, ldap.WithLimit(ctx, 1)) } s, err := managed_groups.NewService(ctx, oidcRepoFn, ldapRepoFn, 1000) @@ -142,6 +144,7 @@ func TestGet(t *testing.T) { oidc.WithApiUrl(oidc.TestConvertToUrls(t, "https://www.alice.com/callback")[0]), ) oidcA := oidc.TestAccount(t, conn, oidcAm, "test-subject") + oidcB := oidc.TestAccount(t, conn, oidcAm, "test-subject-2") omg := oidc.TestManagedGroup(t, conn, oidcAm, oidc.TestFakeManagedGroupFilter) // Set up managed group 
before getting. First get the current @@ -153,6 +156,10 @@ func TestGet(t *testing.T) { require.NoError(t, err) _, _, err = oidcRepo.SetManagedGroupMemberships(ctx, oidcAm, oidcA, []*oidc.ManagedGroup{currMg}) require.NoError(t, err) + currMg, err = oidcRepo.LookupManagedGroup(ctx, omg.GetPublicId()) + require.NoError(t, err) + _, _, err = oidcRepo.SetManagedGroupMemberships(ctx, oidcAm, oidcB, []*oidc.ManagedGroup{currMg}) + require.NoError(t, err) // Fetch the group once more to get the updated time currMg, err = oidcRepo.LookupManagedGroup(ctx, omg.GetPublicId()) require.NoError(t, err) @@ -171,11 +178,12 @@ func TestGet(t *testing.T) { }, }, AuthorizedActions: oidcAuthorizedActions, - MemberIds: []string{oidcA.GetPublicId()}, + MemberIds: []string{oidcA.GetPublicId(), oidcB.GetPublicId()}, } ldapAm := ldap.TestAuthMethod(t, conn, databaseWrapper, org.PublicId, []string{"ldaps://ldap1"}) ldapAcct := ldap.TestAccount(t, conn, ldapAm, "test-login-name", ldap.WithMemberOfGroups(ctx, "admin")) + ldapAcct2 := ldap.TestAccount(t, conn, ldapAm, "test-login-name-2", ldap.WithMemberOfGroups(ctx, "admin")) ldapMg := ldap.TestManagedGroup(t, conn, ldapAm, []string{"admin"}) ldapWireManagedGroup := pb.ManagedGroup{ Id: ldapMg.GetPublicId(), @@ -191,7 +199,7 @@ func TestGet(t *testing.T) { }, }, AuthorizedActions: ldapAuthorizedActions, - MemberIds: []string{ldapAcct.GetPublicId()}, + MemberIds: []string{ldapAcct.GetPublicId(), ldapAcct2.GetPublicId()}, } cases := []struct { diff --git a/internal/daemon/controller/tickers.go b/internal/daemon/controller/tickers.go index ebf5314d16..d1ac967840 100644 --- a/internal/daemon/controller/tickers.go +++ b/internal/daemon/controller/tickers.go @@ -12,9 +12,7 @@ import ( "github.com/hashicorp/boundary/internal/daemon/cluster" "github.com/hashicorp/boundary/internal/errors" "github.com/hashicorp/boundary/internal/event" - "github.com/hashicorp/boundary/internal/server" "github.com/hashicorp/boundary/internal/server/store" - 
"github.com/hashicorp/boundary/internal/types/scope" ) // In the future we could make this configurable @@ -198,12 +196,12 @@ func (c *Controller) startWorkerConnectionMaintenanceTicking(cancelCtx context.C event.WriteError(cancelCtx, op, err, event.WithInfoMsg("error fetching server repository for cluster connection maintenance")) break } - knownWorker, err := serverRepo.ListWorkers(cancelCtx, []string{scope.Global.String()}, server.WithWorkerPool(connectionState.WorkerIds()), server.WithLiveness(-1)) + knownWorkers, err := serverRepo.VerifyKnownWorkers(cancelCtx, connectionState.WorkerIds()) if err != nil { event.WriteError(cancelCtx, op, err, event.WithInfoMsg("couldn't get known workers from repo")) break } - connectionState.DisconnectMissingWorkers(server.WorkerList(knownWorker).PublicIds()) + connectionState.DisconnectMissingWorkers(knownWorkers) } if len(connectionState.UnmappedKeyIds()) > 0 { diff --git a/internal/db/schema/migrations/oss/postgres/92/04_add_column_to_job_run.up.sql b/internal/db/schema/migrations/oss/postgres/92/04_add_column_to_job_run.up.sql new file mode 100644 index 0000000000..9a077cc025 --- /dev/null +++ b/internal/db/schema/migrations/oss/postgres/92/04_add_column_to_job_run.up.sql @@ -0,0 +1,11 @@ +-- Copyright (c) HashiCorp, Inc. +-- SPDX-License-Identifier: BUSL-1.1 + +begin; + + alter table job_run + add column retries_count int not null default 0 + constraint retries_count_can_not_be_negative + check(retries_count >= 0); + +commit; diff --git a/internal/db/schema/migrations/oss/postgres/92/05_delete_terminated_session_job.up.sql b/internal/db/schema/migrations/oss/postgres/92/05_delete_terminated_session_job.up.sql new file mode 100644 index 0000000000..9a07fd1622 --- /dev/null +++ b/internal/db/schema/migrations/oss/postgres/92/05_delete_terminated_session_job.up.sql @@ -0,0 +1,34 @@ +-- Copyright (c) HashiCorp, Inc. 
+-- SPDX-License-Identifier: BUSL-1.1 + +begin; + + create table session_delete_terminated_job ( + batch_size int not null + constraint batch_size_must_be_greater_than_0 + check(batch_size > 0), + create_time wt_timestamp, + update_time wt_timestamp + ); + comment on table session_delete_terminated_job is + 'session_delete_terminated_job is a single row table that contains settings for the delete terminated sessions job.'; + + -- this index ensures that there will only ever be one row in the + -- table. see: + -- https://www.postgresql.org/docs/current/indexes-expressional.html + create unique index session_delete_terminated_job_one_row + on session_delete_terminated_job((batch_size is not null)); + + create trigger immutable_columns before update on session_delete_terminated_job + for each row execute procedure immutable_columns('create_time'); + + create trigger default_create_time_column before insert on session_delete_terminated_job + for each row execute procedure default_create_time(); + + create trigger update_time_column before update on session_delete_terminated_job + for each row execute procedure update_time_column(); + + insert into session_delete_terminated_job(batch_size) values(5000); + +commit; + diff --git a/internal/event/eventer.go b/internal/event/eventer.go index 11a0d5b6cb..ae33fe3514 100644 --- a/internal/event/eventer.go +++ b/internal/event/eventer.go @@ -794,14 +794,18 @@ func (e *Eventer) ReleaseGate() error { if qe == nil { continue // we may have already sent this but gotten errors later } + ctx, cancel := newSendCtx(qe.ctx) + if cancel != nil { + defer cancel() + } var queuedOp string switch t := qe.event.(type) { case *sysEvent: queuedOp = "system" - writeErr = e.writeSysEvent(qe.ctx, t, WithNoGateLocking(true)) + writeErr = e.writeSysEvent(ctx, t, WithNoGateLocking(true)) case *err: queuedOp = "error" - writeErr = e.writeError(qe.ctx, t, WithNoGateLocking(true)) + writeErr = e.writeError(ctx, t, WithNoGateLocking(true)) default: // 
Have no idea what this is and shouldn't have gotten in here to // begin with, so just continue, and log it diff --git a/internal/event/eventer_gate_test.go b/internal/event/eventer_gate_test.go index 2e821cdf4c..450eac3458 100644 --- a/internal/event/eventer_gate_test.go +++ b/internal/event/eventer_gate_test.go @@ -159,3 +159,48 @@ func TestEventer_Gating(t *testing.T) { }) } } + +func TestReleaseGate_NoError_CanceledContext(t *testing.T) { + require := require.New(t) + + buffer := new(bytes.Buffer) + eventerConfig := EventerConfig{ + AuditEnabled: true, + ObservationsEnabled: true, + SysEventsEnabled: true, + Sinks: []*SinkConfig{ + { + Name: "test-sink", + EventTypes: []Type{EveryType}, + Format: TextHclogSinkFormat, + Type: WriterSink, + WriterConfig: &WriterSinkTypeConfig{ + Writer: buffer, + }, + }, + }, + } + testLock := &sync.Mutex{} + testLogger := testLogger(t, testLock) + + eventer, err := NewEventer( + testLogger, + testLock, + "TestEventer_Gating", + eventerConfig, + WithGating(true), + ) + require.NoError(err) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + ctx, err = NewEventerContext(ctx, eventer) + require.NoError(err) + + WriteError(ctx, "error-1", fmt.Errorf("error-1")) + _ = WriteObservation(ctx, "observation-1", WithId("observation-1"), WithHeader("foo", "bar")) + + cancel() + + require.NoError(eventer.ReleaseGate()) +} diff --git a/internal/gen/controller.swagger.json b/internal/gen/controller.swagger.json index fc7d3fe827..d9cfdc6d25 100644 --- a/internal/gen/controller.swagger.json +++ b/internal/gen/controller.swagger.json @@ -3,7 +3,7 @@ "info": { "title": "Boundary controller HTTP API", "description": "Welcome to the Boundary controller HTTP API documentation. This page provides a reference guide for using the Boundary controller API, a JSON-based HTTP API. The API implements commonly seen HTTP API patterns for status codes, paths, and errors. 
See the [API overview](https://developer.hashicorp.com/boundary/docs/api-clients/api) for more information.\n\nBefore you read this page, it is useful to understand Boundary's [domain model](https://developer.hashicorp.com/boundary/docs/concepts/domain-model) and to be aware of the terminology used here. To get started, search for the service you want to interact with in the sidebar to the left. Each resource in Boundary, such as accounts and credential stores, has its own service. Each service contains all the API endpoints for the resource.\n## Status codes\n- `2XX`: Boundary returns a code between `200` and `299` on success. Generally this is `200`, but implementations should be prepared to accept any `2XX` status code as indicating success. If a call returns a `2XX` code that is not `200`, it follows well-understood semantics for those status codes.\n- `400`: Boundary returns `400` when a command cannot be completed due to invalid user input, except for a properly-formatted identifier that does not map to an existing resource, which returns a `404` as discussed below.\n- `401`: Boundary returns `401` if no authentication token is provided or if the provided token is invalid. A valid token that simply does not have permission for a resource returns a `403` instead. A token that is invalid or missing, but where the anonymous user (`u_anon`) is able to successfully perform the action, will not return a `401` but instead will return the result of the action.\n- `403`: Boundary returns `403` if a provided token was valid but does not have the grants required to perform the requested action.\n- `404`: Boundary returns `404` if a resource cannot be found. Note that this happens _prior_ to authentication/authorization checking in nearly all cases as the resource information (such as its scope, available actions, etc.) is a required part of that check. As a result, an action against a resource that does not exist returns a `404` instead of a `401` or `403`. 
While this could be considered an information leak, since IDs are randomly generated and this only discloses whether an ID is valid, it's tolerable as it allows for far simpler and more robust client implementation.\n- `405`: Boundary returns a `405` to indicate that the method (HTTP verb or custom action) is not implemented for the given resource.\n- `429`: Boundary returns a `429` if any of the API rate limit quotas have been exhausted for the resource and action. It includes the `Retry-After` header so that the client knows how long to wait before making a new request.\n- `500`: Boundary returns `500` if an error occurred that is not (directly) tied to invalid user input. If a `500` is generated, information about the error is logged to Boundary's server log but is not generally provided to the client.\n- `503`: Boundary returns a `503` if it is unable to store a quota due to the API rate limit being exceeded. It includes the `Retry-After` header so that the client knows how long to wait before making a new request.\n## List pagination\nBoundary uses [API pagination](https://developer.hashicorp.com/boundary/docs/api-clients/api/pagination) to support searching and filtering large lists of results efficiently.", - "version": "0.18.0", + "version": "0.18.2", "contact": { "name": "HashiCorp Boundary", "url": "https://www.boundaryproject.io/" diff --git a/internal/host/plugin/job_orphaned_host_cleanup.go b/internal/host/plugin/job_orphaned_host_cleanup.go index 1206d286e2..d5cf543ff2 100644 --- a/internal/host/plugin/job_orphaned_host_cleanup.go +++ b/internal/host/plugin/job_orphaned_host_cleanup.go @@ -66,7 +66,7 @@ func (r *OrphanedHostCleanupJob) Status() scheduler.JobStatus { // deletes those hosts. Can not be run in parallel, if // Run is invoked while already running an error with code JobAlreadyRunning // will be returned. 
-func (r *OrphanedHostCleanupJob) Run(ctx context.Context) error { +func (r *OrphanedHostCleanupJob) Run(ctx context.Context, _ time.Duration) error { const op = "plugin.(OrphanedHostCleanupJob).Run" if !r.running.CompareAndSwap(r.running.Load(), true) { return errors.New(ctx, errors.JobAlreadyRunning, op, "job already running") diff --git a/internal/host/plugin/job_orphaned_host_cleanup_test.go b/internal/host/plugin/job_orphaned_host_cleanup_test.go index cc22c4afef..da17ae1f57 100644 --- a/internal/host/plugin/job_orphaned_host_cleanup_test.go +++ b/internal/host/plugin/job_orphaned_host_cleanup_test.go @@ -118,7 +118,7 @@ func TestOrphanedHostCleanupJob_Run(t *testing.T) { err = sche.RegisterJob(context.Background(), r) require.NoError(err) - err = r.Run(context.Background()) + err = r.Run(context.Background(), 0) require.NoError(err) // No sets should have been synced. assert.Equal(0, r.numProcessed) @@ -134,7 +134,7 @@ func TestOrphanedHostCleanupJob_Run(t *testing.T) { TestHost(t, conn, cat.GetPublicId(), "host2") // Run sync again with the newly created set - err = r.Run(context.Background()) + err = r.Run(context.Background(), 0) require.NoError(err) // The single existing set should have been processed assert.Equal(1, r.numHosts) @@ -148,14 +148,14 @@ func TestOrphanedHostCleanupJob_Run(t *testing.T) { TestHost(t, conn, cat.GetPublicId(), "5") // Run sync again with the freshly synced set - err = r.Run(context.Background()) + err = r.Run(context.Background(), 0) require.NoError(err) // The single existing set should have been processed assert.Equal(5, r.numHosts) assert.Equal(5, r.numProcessed) // Run sync again with the freshly synced set - err = r.Run(context.Background()) + err = r.Run(context.Background(), 0) require.NoError(err) // The single existing set should have been processed assert.Equal(0, r.numHosts) diff --git a/internal/host/plugin/job_set_sync.go b/internal/host/plugin/job_set_sync.go index b3f537a9ba..29a0662366 100644 --- 
a/internal/host/plugin/job_set_sync.go +++ b/internal/host/plugin/job_set_sync.go @@ -85,7 +85,7 @@ func (r *SetSyncJob) Status() scheduler.JobStatus { // creates a plugin client and syncs each set. Can not be run in parallel, if // Run is invoked while already running an error with code JobAlreadyRunning // will be returned. -func (r *SetSyncJob) Run(ctx context.Context) error { +func (r *SetSyncJob) Run(ctx context.Context, _ time.Duration) error { const op = "plugin.(SetSyncJob).Run" if !r.running.CompareAndSwap(r.running.Load(), true) { return errors.New(ctx, errors.JobAlreadyRunning, op, "job already running") diff --git a/internal/host/plugin/job_set_sync_test.go b/internal/host/plugin/job_set_sync_test.go index e838d8b21f..870cd3790a 100644 --- a/internal/host/plugin/job_set_sync_test.go +++ b/internal/host/plugin/job_set_sync_test.go @@ -156,7 +156,7 @@ func TestSetSyncJob_Run(t *testing.T) { err = sche.RegisterJob(context.Background(), r) require.NoError(err) - err = r.Run(context.Background()) + err = r.Run(context.Background(), 0) require.NoError(err) // No sets should have been synced. 
assert.Equal(0, r.numProcessed) @@ -175,7 +175,7 @@ func TestSetSyncJob_Run(t *testing.T) { TestSetMembers(t, conn, setToRemoveHosts.GetPublicId(), []*Host{hostToRemove}) // Run sync again with the newly created set - err = r.Run(context.Background()) + err = r.Run(context.Background(), 0) require.NoError(err) hsa := &hostSetAgg{PublicId: setToRemoveHosts.GetPublicId()} @@ -217,7 +217,7 @@ func TestSetSyncJob_Run(t *testing.T) { assert.Less(hsa.LastSyncTime.AsTime().UnixNano(), hsa.CreateTime.AsTime().UnixNano()) // Run sync again with the newly created set - err = r.Run(context.Background()) + err = r.Run(context.Background(), 0) require.NoError(err) // The single existing set should have been processed assert.Equal(1, r.numSets) @@ -243,7 +243,7 @@ func TestSetSyncJob_Run(t *testing.T) { firstSyncTime := hsa.LastSyncTime // Run sync again with the freshly synced set - err = r.Run(context.Background()) + err = r.Run(context.Background(), 0) require.NoError(err) assert.Equal(0, r.numSets) assert.Equal(0, r.numProcessed) @@ -258,7 +258,7 @@ func TestSetSyncJob_Run(t *testing.T) { assert.True(hs.NeedSync) // Run sync again with the set needing update - err = r.Run(context.Background()) + err = r.Run(context.Background(), 0) require.NoError(err) // The single existing set should have been processed assert.Equal(1, r.numSets) @@ -273,7 +273,7 @@ func TestSetSyncJob_Run(t *testing.T) { // Run sync with a new second set _ = TestSet(t, conn, kmsCache, sched, cat, plgm) - require.NoError(r.Run(context.Background())) + require.NoError(r.Run(context.Background(), 0)) assert.Equal(1, r.numSets) assert.Equal(1, r.numProcessed) @@ -398,7 +398,7 @@ func TestSetSyncJob_Run(t *testing.T) { assert.Equal(1, count) // Run job - err = r.Run(context.Background()) + err = r.Run(context.Background(), 0) require.NoError(err) // Validate results diff --git a/internal/host/plugin/repository_host_catalog_test.go b/internal/host/plugin/repository_host_catalog_test.go index 
77e50e4c25..0a29f9adcd 100644 --- a/internal/host/plugin/repository_host_catalog_test.go +++ b/internal/host/plugin/repository_host_catalog_test.go @@ -1547,7 +1547,7 @@ func (j *testSyncJob) Status() scheduler.JobStatus { } } -func (j *testSyncJob) Run(_ context.Context) error { return nil } +func (j *testSyncJob) Run(_ context.Context, _ time.Duration) error { return nil } func (j *testSyncJob) NextRunIn(_ context.Context) (time.Duration, error) { return setSyncJobRunInterval, nil } diff --git a/internal/host/plugin/testing.go b/internal/host/plugin/testing.go index d4f1b41b87..8a62f1e2f0 100644 --- a/internal/host/plugin/testing.go +++ b/internal/host/plugin/testing.go @@ -209,7 +209,7 @@ func TestRunSetSync(t testing.TB, conn *db.DB, kmsCache *kms.Kms, plgm map[strin j, err := newSetSyncJob(ctx, rw, rw, kmsCache, plgm) require.NoError(t, err) - require.NoError(t, j.Run(ctx)) + require.NoError(t, j.Run(ctx, 0)) } func testGetDnsName(t testing.TB) string { diff --git a/internal/kms/job/data_key_version_destruction_monitor_job.go b/internal/kms/job/data_key_version_destruction_monitor_job.go index 91be9ade80..b09d6cae5a 100644 --- a/internal/kms/job/data_key_version_destruction_monitor_job.go +++ b/internal/kms/job/data_key_version_destruction_monitor_job.go @@ -35,7 +35,7 @@ func (r dataKeyVersionDestructionMonitorJob) Status() scheduler.JobStatus { // Run performs the required work depending on the implementation. // The context is used to notify the job that it should exit early. 
-func (r *dataKeyVersionDestructionMonitorJob) Run(ctx context.Context) error { +func (r *dataKeyVersionDestructionMonitorJob) Run(ctx context.Context, _ time.Duration) error { const op = "kmsjob.(dataKeyVersionDestructionMonitorJob).Run" if err := r.kmsRepo.MonitorDataKeyVersionDestruction(ctx); err != nil { diff --git a/internal/kms/job/table_rewrapping_job.go b/internal/kms/job/table_rewrapping_job.go index 116bd167c6..96962d499c 100644 --- a/internal/kms/job/table_rewrapping_job.go +++ b/internal/kms/job/table_rewrapping_job.go @@ -41,7 +41,7 @@ func (r tableRewrappingJob) Status() scheduler.JobStatus { // Run performs the required work depending on the implementation. // The context is used to notify the job that it should exit early. -func (r *tableRewrappingJob) Run(ctx context.Context) error { +func (r *tableRewrappingJob) Run(ctx context.Context, _ time.Duration) error { const op = "kmsjob.(tableRewrappingJob).Run" if err := r.kmsRepo.MonitorTableRewrappingRuns(ctx, r.tableName); err != nil { diff --git a/internal/pagination/purge/purge_job.go b/internal/pagination/purge/purge_job.go index 89e3e7e328..3f91f41ccb 100644 --- a/internal/pagination/purge/purge_job.go +++ b/internal/pagination/purge/purge_job.go @@ -43,7 +43,7 @@ func (c *purgeJob) Status() scheduler.JobStatus { // Run performs the required work depending on the implementation. // The context is used to notify the job that it should exit early. 
-func (c *purgeJob) Run(ctx context.Context) error { +func (c *purgeJob) Run(ctx context.Context, _ time.Duration) error { const op = "purge.(purgeJob).Run" _, err := c.w.Exec(ctx, c.query, nil) if err != nil { diff --git a/internal/pagination/purge/purge_test.go b/internal/pagination/purge/purge_test.go index 6ef25ba4fb..4ba7ade8cd 100644 --- a/internal/pagination/purge/purge_test.go +++ b/internal/pagination/purge/purge_test.go @@ -54,7 +54,7 @@ func TestPurgeTables(t *testing.T) { query: query, } - err = sJob.Run(ctx) + err = sJob.Run(ctx, 0) require.NoError(t, err) var count int diff --git a/internal/proto/controller/storage/job/store/v1/job.proto b/internal/proto/controller/storage/job/store/v1/job.proto index f3380740d8..949c249270 100644 --- a/internal/proto/controller/storage/job/store/v1/job.proto +++ b/internal/proto/controller/storage/job/store/v1/job.proto @@ -67,6 +67,11 @@ message JobRun { // @inject_tag: `gorm:"default:0"` uint32 total_count = 9; + // retries_count is set during an update to indicate the number of times + // a job has retried work. + // @inject_tag: `gorm:"default:0"` + uint32 retries_count = 12; + // status of the job run (running, completed, failed or interrupted). // @inject_tag: `gorm:"not_null"` string status = 10; diff --git a/internal/recording/delete_session_recording_job.go b/internal/recording/delete_session_recording_job.go index f77670d022..d5b3271dc0 100644 --- a/internal/recording/delete_session_recording_job.go +++ b/internal/recording/delete_session_recording_job.go @@ -33,7 +33,7 @@ func (dsr *deleteSessionRecordingJob) Status() scheduler.JobStatus { return sche // Run performs the required work depending on the implementation. // The context is used to notify the job that it should exit early. 
-func (dsr *deleteSessionRecordingJob) Run(_ context.Context) error { return nil } +func (dsr *deleteSessionRecordingJob) Run(_ context.Context, _ time.Duration) error { return nil } // NextRunIn returns the duration until the next job run should be scheduled. // Delete Session Recording will run every hour unless we know there are more to delete, diff --git a/internal/scheduler/additional_verification_test.go b/internal/scheduler/additional_verification_test.go index a62b919b05..cacab68ea1 100644 --- a/internal/scheduler/additional_verification_test.go +++ b/internal/scheduler/additional_verification_test.go @@ -40,7 +40,7 @@ func TestSchedulerWorkflow(t *testing.T) { job1Ch := make(chan error) job1Ready := make(chan struct{}) testDone := make(chan struct{}) - fn1 := func(_ context.Context) error { + fn1 := func(_ context.Context, _ time.Duration) error { select { case <-testDone: return nil @@ -54,7 +54,7 @@ func TestSchedulerWorkflow(t *testing.T) { job2Ch := make(chan error) job2Ready := make(chan struct{}) - fn2 := func(_ context.Context) error { + fn2 := func(_ context.Context, _ time.Duration) error { select { case <-testDone: return nil @@ -274,7 +274,7 @@ func TestSchedulerJobProgress(t *testing.T) { jobReady := make(chan struct{}) done := make(chan struct{}) - fn := func(ctx context.Context) error { + fn := func(ctx context.Context, _ time.Duration) error { select { case <-done: return nil @@ -317,7 +317,7 @@ func TestSchedulerJobProgress(t *testing.T) { <-statusRequest // Send progress to monitor loop to persist - jobStatus <- JobStatus{Total: 10, Completed: 0} + jobStatus <- JobStatus{Total: 10, Completed: 0, Retries: 1} // Wait for scheduler to query for job status before verifying previous results <-statusRequest @@ -329,6 +329,7 @@ func TestSchedulerJobProgress(t *testing.T) { assert.Equal(string(job.Running), run.Status) assert.Equal(uint32(10), run.TotalCount) assert.Equal(uint32(0), run.CompletedCount) + assert.Equal(uint32(1), run.RetriesCount) 
// Send progress to monitor loop to persist jobStatus <- JobStatus{Total: 20, Completed: 10} @@ -384,7 +385,7 @@ func TestSchedulerMonitorLoop(t *testing.T) { jobReady := make(chan struct{}) jobDone := make(chan struct{}) testDone := make(chan struct{}) - fn := func(ctx context.Context) error { + fn := func(ctx context.Context, _ time.Duration) error { select { case <-testDone: return nil @@ -450,7 +451,7 @@ func TestSchedulerFinalStatusUpdate(t *testing.T) { jobReady := make(chan struct{}) jobErr := make(chan error) testDone := make(chan struct{}) - fn := func(_ context.Context) error { + fn := func(_ context.Context, _ time.Duration) error { select { case <-testDone: return nil @@ -542,7 +543,7 @@ func TestSchedulerRunNow(t *testing.T) { jobCh := make(chan struct{}) jobReady := make(chan struct{}) testDone := make(chan struct{}) - fn := func(_ context.Context) error { + fn := func(_ context.Context, _ time.Duration) error { select { case <-testDone: return nil diff --git a/internal/scheduler/batch/batch.go b/internal/scheduler/batch/batch.go new file mode 100644 index 0000000000..71d0ef128a --- /dev/null +++ b/internal/scheduler/batch/batch.go @@ -0,0 +1,400 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +/* +Package batch implements a batch processor for jobs that update or delete +multiple rows in the database using a single SQL UPDATE or DELETE +statement. + +It defines a type, [Batch], which is used by a job to execute a SQL +statement in batches. SQL commands are executed in batches by providing an +[Exec] function that executes a SQL statement, which must contain a +parameterized LIMIT clause, and returns the number of rows affected by the +query or an error if the query failed. Batch adjusts the batch size in an +effort to get the query execution time close to the [Config.Target] +duration. When the batch size is adjusted, [Store] is called. 
Jobs using +the batch processor should persist this value and use it as the starting +batch size in subsequent calls to Batch. + +See session.deleteTerminatedJob for an example of how to use the batch +processor. + +# SQL + +The SQL LIMIT clause can only be used in query statements. It cannot be +used directly in a DELETE or UPDATE statement. Instead, a subquery or CTE +must be used to limit the number of rows affected by the query. + +Here is an example SQL DELETE statement that uses a subquery with a LIMIT +clause: + + delete + from alias_target_deleted + where public_id in ( + select public_id + from alias_target_deleted + where delete_time < @delete_time + limit @batch_size + ); +*/ +package batch + +import ( + "context" + "math/rand/v2" + "sync" + "time" + + "github.com/hashicorp/boundary/internal/errors" + "github.com/hashicorp/boundary/internal/scheduler" +) + +const ( + DefaultStatusThreshold = 5 * time.Minute + DefaultTarget = 1 * time.Second +) + +const ( + DefaultSize = 5000 + DefaultMin = 10 + DefaultMax = 10000 +) + +// Exec is the function type used for executing the batch query. An Exec +// function must return the number of rows affected by the query or an +// error if the query failed. +type Exec func(ctx context.Context, batchSize int) (rowCount int, err error) + +// Store is the function type used for storing the batch size in the +// database. A Store function must return an error if the store failed. +type Store func(ctx context.Context, batchSize int) error + +// Config is a structure used to configure a [Batch]. +type Config struct { + // Size is the initial batch size. + // + // If Size is less than 1, the initial batch size will be set to + // DefaultSize. + // + // If Size is less than Min, the initial batch size will be set to Min. + // + // If Size is greater than Max, the initial batch size will be set to + // Max. + Size int + + // Min and Max are the minimum and maximum batch sizes allowed. 
The + // batch size will be clamped to the range [Min, Max]. + // + // If Min is less than 1, it will be set to DefaultMin. + // + // If Max is less than or equal to Min, it will be set to DefaultMax. + Min int + Max int + + // TotalToComplete is the total number of rows to be processed by the + // job. This is reported in the JobStatus returned by the Status + // method. + TotalToComplete int + + // StatusThreshold is the amount of time the job has to return a + // JobStatus with values different from the previous JobStatus. If a + // call to Exec approaches this threshold, Batch will interrupt the + // call to Exec, reduce the batch size, and call Exec again. It will + // also increment the Retries value reported in the Status method. + // + // If StatusThreshold is less than or equal to zero, it will be set to + // DefaultStatusThreshold. + StatusThreshold time.Duration + + // Target is the target duration for the query to run in. The batch + // size will be adjusted to keep the query duration within the target + // range. + // + // If Target is less than or equal to zero, it will be set to + // DefaultTarget. + // + // If Target is greater than or equal to StatusThreshold, it will be + // set to StatusThreshold - 5ms. + Target time.Duration + + // Exec is called to execute the query. Exec is called by the Run + // method in a loop until the row count returned by Exec is less than + // the current batch size or Exec returns an error other than + // context.DeadlineExceeded. The configuration must supply this + // callback for batch to succeed. + // + // If Exec returns a context.DeadlineExceeded error, the batch size + // will be reduced, the Retries value reported in the Status method + // will be incremented, and then Exec will be called again. + // + // If Exec returns an error other than context.DeadlineExceeded, the + // batch size will not be changed and the Run method will exit + // returning the error. 
+ Exec Exec + + // Store, if not nil, is called when the batch size has changed and the + // new batch size should be stored in the database. If Store returns an + // error, the Run method will exit and return with the error. + Store Store +} + +func (c *Config) size() int { + switch { + case c.Size < 1: + return DefaultSize + case c.Size < c.min(): + return c.min() + case c.Size > c.max(): + return c.max() + } + return c.Size +} + +func (c *Config) min() int { + if c.Min < 1 { + return DefaultMin + } + return c.Min +} + +func (c *Config) max() int { + if c.Max <= c.Min { + return DefaultMax + } + return c.Max +} + +// Arbitrary constants +const ( + statusThresholdBuffer = 250 * time.Millisecond + + // both ranges are a percentage of the target duration + lowerRange = 10 + upperRange = 10 +) + +func (c *Config) statusThreshold() time.Duration { + if c.StatusThreshold <= 0 { + return DefaultStatusThreshold - statusThresholdBuffer + } + return c.StatusThreshold - statusThresholdBuffer +} + +func (c *Config) target() time.Duration { + switch { + case c.Target <= 0: + return DefaultTarget + case c.Target >= c.statusThreshold(): + return c.statusThreshold() + } + return c.Target +} + +func (c *Config) targetRange() (lower time.Duration, upper time.Duration) { + target := c.target() + return target - (target / lowerRange), target + (target / upperRange) +} + +func (c *Config) store() Store { + if c.Store == nil { + return func(_ context.Context, _ int) error { return nil } + } + return c.Store +} + +func (c *Config) clone() *Config { + if c == nil { + return nil + } + return &Config{ + Size: c.Size, + Min: c.Min, + Max: c.Max, + TotalToComplete: c.TotalToComplete, + StatusThreshold: c.StatusThreshold, + Target: c.Target, + Exec: c.Exec, + Store: c.Store, + } +} + +// Batch is a batch job processor for SQL jobs that update or delete +// multiple rows in the database using a single SQL UPDATE or DELETE +// statement. 
+type Batch struct { + c *Config + + slowExecutions int + fastExecutions int + + mu sync.Mutex + retries int + totalCompleted int +} + +// New creates a [Batch] that uses the given configuration to execute a SQL +// job in batches. An error is returned if c contains a nil Exec. +func New(ctx context.Context, c *Config) (*Batch, error) { + const op = "batch.New" + switch { + case c == nil: + return nil, errors.New(ctx, errors.InvalidParameter, op, "nil Config") + case c.Exec == nil: + return nil, errors.New(ctx, errors.InvalidParameter, op, "nil Exec") + } + return &Batch{ + c: c.clone(), + }, nil +} + +// Status reports the job’s current status. +func (b *Batch) Status() scheduler.JobStatus { + b.mu.Lock() + defer b.mu.Unlock() + return scheduler.JobStatus{ + Completed: b.totalCompleted, + Total: b.c.TotalToComplete, + Retries: b.retries, + } +} + +// Run runs the batch processor. It calls the [Exec] function in a loop +// until the row count returned by Exec is less than the current batch size +// or Exec returns an error other than context.DeadlineExceeded. +// +// Each call to Run resets the values returned in [Batch.Status]. 
+func (b *Batch) Run(ctx context.Context) error { + const op = "batch.Run" + b.reset() + + for { + count, runDuration, err := b.run(ctx) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + if err := b.timedOut(ctx); err != nil { + return errors.Wrap(ctx, err, op) + } + continue + } + return errors.Wrap(ctx, err, op) + } + + b.successful(count) + + // batch is not complete + if count == b.c.size() { + if err := b.adjustSize(ctx, runDuration); err != nil { + return errors.Wrap(ctx, err, op) + } + continue + } + + // batch is complete + return nil + } +} + +func (b *Batch) run(ctx context.Context) (int, time.Duration, error) { + queryCtx, cancel := context.WithTimeout(ctx, b.c.statusThreshold()) + defer cancel() + start := time.Now() + n, err := b.c.Exec(queryCtx, b.c.size()) + return n, time.Since(start), err +} + +func (b *Batch) reset() { + b.mu.Lock() + b.retries = 0 + b.totalCompleted = 0 + b.mu.Unlock() + b.fastExecutions = 0 + b.slowExecutions = 0 +} + +func (b *Batch) timedOut(ctx context.Context) error { + b.mu.Lock() + b.retries++ + b.mu.Unlock() + return b.c.exponentialDecrease(ctx, b.retries) +} + +func (b *Batch) successful(rowCount int) { + b.mu.Lock() + b.totalCompleted += rowCount + b.mu.Unlock() +} + +func (b *Batch) adjustSize(ctx context.Context, runDuration time.Duration) error { + lower, upper := b.c.targetRange() + + switch { + case runDuration < lower: // too fast + // increase the batch size to go slower + b.fastExecutions++ + b.slowExecutions = 0 + return b.c.linearIncrease(ctx, b.fastExecutions) + case runDuration > upper: // too slow + // decrease the batch size to go faster + b.slowExecutions++ + b.fastExecutions = 0 + return b.c.linearDecrease(ctx, b.slowExecutions) + } + + // within target range so reset the counters + b.fastExecutions = 0 + b.slowExecutions = 0 + return nil +} + +func (c *Config) exponentialDecrease(ctx context.Context, attempt int) error { + if attempt < 1 { + attempt = 1 + } + newSize := (c.size() / (1 
<< uint(attempt))) - c.jitter() + return c.setSize(ctx, newSize) +} + +func (c *Config) linearIncrease(ctx context.Context, attempt int) error { + if attempt < 1 { + attempt = 1 + } + newSize := c.size() + (c.size() / 10 * attempt) + c.jitter() + return c.setSize(ctx, newSize) +} + +func (c *Config) linearDecrease(ctx context.Context, attempt int) error { + if attempt < 1 { + attempt = 1 + } + newSize := c.size() - (c.size() / 10 * attempt) - c.jitter() + return c.setSize(ctx, newSize) +} + +// jitter returns a random number between 0 and 10% of the current batch +// size. +func (c *Config) jitter() int { + return rand.N(c.size() / 10) +} + +// setSize sets the batch size to newSize and calls Store if newSize is +// different from the current size. If newSize is less than Min, the batch +// size will be set to Min. If newSize is greater than Max, the batch size +// will be set to Max. If Store returns an error, it will be returned by +// setSize. +func (c *Config) setSize(ctx context.Context, newSize int) error { + currentSize := c.Size + if newSize == currentSize { + return nil + } + switch { + case newSize < c.min(): + newSize = c.min() + case newSize > c.max(): + newSize = c.max() + } + if newSize != currentSize { + c.Size = newSize + return c.store()(ctx, newSize) + } + return nil +} diff --git a/internal/scheduler/batch/batch_test.go b/internal/scheduler/batch/batch_test.go new file mode 100644 index 0000000000..b82352b6af --- /dev/null +++ b/internal/scheduler/batch/batch_test.go @@ -0,0 +1,566 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package batch + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/hashicorp/boundary/internal/scheduler" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew(t *testing.T) { + t.Parallel() + t.Run("nil-config", func(t *testing.T) { + t.Parallel() + ctx := context.Background() + got, err := New(ctx, nil) + require.Error(t, err) + assert.Nil(t, got) + }) + t.Run("nil-Exec", func(t *testing.T) { + t.Parallel() + ctx := context.Background() + config := &Config{} + got, err := New(ctx, config) + require.Error(t, err) + assert.Nil(t, got) + }) + t.Run("minimum-config", func(t *testing.T) { + t.Parallel() + ctx := context.Background() + config := &Config{ + Exec: func(ctx context.Context, batchSize int) (int, error) { return 0, nil }, + } + got, err := New(ctx, config) + require.NoError(t, err) + assert.NotNil(t, got) + }) +} + +func TestConfig(t *testing.T) { + t.Parallel() + t.Run("default-config", func(t *testing.T) { + t.Parallel() + assert := assert.New(t) + config := &Config{ + Exec: func(ctx context.Context, batchSize int) (int, error) { return 0, nil }, + } + assert.Equal(DefaultSize, config.size()) + assert.Equal(DefaultMin, config.min()) + assert.Equal(DefaultMax, config.max()) + assert.Equal(DefaultStatusThreshold-statusThresholdBuffer, config.statusThreshold()) + assert.Equal(DefaultTarget, config.target()) + config.StatusThreshold = DefaultTarget + config.Target = DefaultTarget + want := DefaultTarget - statusThresholdBuffer + assert.Equal(want, config.target()) + }) + tests := []struct { + min, max, size int + want int + }{ + {0, 0, 0, DefaultSize}, + {0, 0, DefaultMin - 1, DefaultMin}, + {0, 0, DefaultMax + 1, DefaultMax}, + } + for _, tt := range tests { + tt := tt + t.Run(fmt.Sprintf("min-max-size_%d-%d-%d", tt.min, tt.max, tt.size), func(t *testing.T) { + t.Parallel() + assert := assert.New(t) + config := &Config{ + Min: tt.min, + Max: 
tt.max, + Size: tt.size, + } + assert.Equal(tt.want, config.size()) + }) + } +} + +type testStore struct { + called bool + batchSize int +} + +func (s *testStore) Store(ctx context.Context, batchSize int) error { + s.called = true + s.batchSize = batchSize + return nil +} + +func TestConfig_setSize(t *testing.T) { + t.Parallel() + ctx := context.Background() + config := &Config{ + Min: 5, + Max: 15, + Size: 10, + } + tests := []struct { + newSize int + wantSize int + wantCalled bool + }{ + {0, 5, true}, + {4, 5, true}, + {5, 5, true}, + {6, 6, true}, + {10, 10, false}, + {20, 15, true}, + {15, 15, true}, + } + for _, tt := range tests { + tt := tt + t.Run(fmt.Sprintf("newSize_%d", tt.newSize), func(t *testing.T) { + t.Parallel() + assert, require := assert.New(t), require.New(t) + c := config.clone() + assert.Equal(config, c) + + ts := &testStore{} + c.Store = ts.Store + + err := c.setSize(ctx, tt.newSize) + require.NoError(err) + assert.Equal(tt.wantSize, c.Size) + assert.Equal(tt.wantCalled, ts.called) + if tt.wantCalled { + assert.Equal(tt.wantSize, ts.batchSize) + } + }) + } +} + +func TestConfig_targetRange(t *testing.T) { + t.Parallel() + tests := []struct { + target time.Duration + wantUpper time.Duration + wantLower time.Duration + }{ + { + target: 1000 * time.Millisecond, + wantLower: 900 * time.Millisecond, + wantUpper: 1100 * time.Millisecond, + }, + { + target: 60 * time.Second, + wantLower: 54 * time.Second, + wantUpper: 66 * time.Second, + }, + } + for _, tt := range tests { + tt := tt + t.Run(fmt.Sprintf("target_%d", tt.target), func(t *testing.T) { + t.Parallel() + assert := assert.New(t) + config := &Config{ + Target: tt.target, + } + assert.Equal(tt.target, config.target()) + lower, upper := config.targetRange() + assert.Equal(tt.wantLower, lower) + assert.Equal(tt.wantUpper, upper) + }) + } +} + +func TestConfig_exponentialDecrease(t *testing.T) { + t.Parallel() + tests := []struct { + batchSize int + attempt int + expected int + }{ + {10, 0, 
DefaultMin}, + {10, 1, DefaultMin}, + {10, 2, DefaultMin}, + {9, 1, DefaultMin}, + {9, 2, DefaultMin}, + {1000, 0, 500}, + {1000, 1, 500}, + {1000, 2, 250}, + {1000, 3, 125}, + {1000, 4, 62}, + {1000, 5, 31}, + {1000, 6, 15}, + {1000, 7, DefaultMin}, + {500, 1, 250}, + {500, 2, 125}, + } + for _, tt := range tests { + tt := tt + t.Run(fmt.Sprintf("batchSize=%d/attempt=%d", tt.batchSize, tt.attempt), func(t *testing.T) { + assert, require := assert.New(t), require.New(t) + ctx := context.Background() + c := &Config{ + Size: tt.batchSize, + } + delta := c.size() / 10 + err := c.exponentialDecrease(ctx, tt.attempt) + require.NoError(err) + assert.InDelta(tt.expected, c.Size, float64(delta)) + }) + } +} + +func Test_linearDecrease(t *testing.T) { + t.Parallel() + tests := []struct { + batchSize int + attempt int + expected int + }{ + {10, 0, DefaultMin}, + {10, 1, DefaultMin}, + {10, 2, DefaultMin}, + {1000, 0, 900}, + {1000, 1, 900}, + {1000, 2, 800}, + {1000, 3, 700}, + {1000, 4, 600}, + {1000, 5, 500}, + {1000, 6, 400}, + {1000, 7, 300}, + {1000, 10, DefaultMin}, + {1000, 11, DefaultMin}, + {500, 1, 450}, + {500, 2, 400}, + {100, 1, 90}, + {100, 2, 80}, + } + for _, tt := range tests { + tt := tt + t.Run(fmt.Sprintf("batchSize=%d/attempt=%d", tt.batchSize, tt.attempt), func(t *testing.T) { + t.Parallel() + assert, require := assert.New(t), require.New(t) + ctx := context.Background() + c := &Config{ + Size: tt.batchSize, + } + delta := c.size() / 10 + err := c.linearDecrease(ctx, tt.attempt) + require.NoError(err) + assert.InDelta(tt.expected, c.Size, float64(delta)) + }) + } +} + +func Test_linearIncrease(t *testing.T) { + t.Parallel() + tests := []struct { + batchSize int + attempt int + expected int + }{ + {10000, 0, DefaultMax}, + {10000, 1, DefaultMax}, + {10000, 2, DefaultMax}, + {1000, 0, 1100}, + {1000, 1, 1100}, + {1000, 2, 1200}, + {1000, 3, 1300}, + {1000, 4, 1400}, + {1000, 5, 1500}, + {1000, 6, 1600}, + {1000, 7, 1700}, + {500, 1, 550}, + {500, 2, 600}, 
+ {100, 1, 110}, + {100, 2, 120}, + {10, 1, 11}, + {10, 2, 12}, + } + for _, tt := range tests { + tt := tt + t.Run(fmt.Sprintf("batchSize=%d/attempt=%d", tt.batchSize, tt.attempt), func(t *testing.T) { + t.Parallel() + assert, require := assert.New(t), require.New(t) + ctx := context.Background() + c := &Config{ + Size: tt.batchSize, + } + delta := c.size() / 10 + err := c.linearIncrease(ctx, tt.attempt) + require.NoError(err) + assert.InDelta(tt.expected, c.Size, float64(delta)) + }) + } +} + +type recorder struct { + execBatchSize int + storeBatchSize int + status scheduler.JobStatus +} + +func (r *recorder) setup(c *Config) { + c.Store = r.Store +} + +func (r *recorder) Store(ctx context.Context, batchSize int) error { + r.storeBatchSize = batchSize + return nil +} + +func (r *recorder) Exec(ctx context.Context, batchSize int) (int, error) { + r.execBatchSize = batchSize + return 0, nil +} + +type testRun struct { + ret func(context.Context, int, *Config) (int, error) + chk func(*testing.T, *recorder) + rec *recorder +} + +func (tr *testRun) validate(t *testing.T) { + if tr.chk != nil { + tr.chk(t, tr.rec) + } +} + +func (tr *testRun) recorder(cf *Config) *recorder { + if tr.rec == nil { + tr.rec = &recorder{} + tr.rec.setup(cf) + } + return tr.rec +} + +type testRunner struct { + conf *Config + b *Batch + runs []*testRun + t *testing.T + call int +} + +func newTestRunner(t *testing.T, conf *Config, b *Batch) *testRunner { + tr := &testRunner{ + t: t, + conf: conf, + b: b, + } + conf.Exec = tr.Exec + return tr +} + +func (tr *testRunner) Exec(ctx context.Context, batchSize int) (int, error) { + if tr.call > 0 { + prevRun := tr.runs[tr.call-1] + prevRun.rec.status = tr.b.Status() + prevRun.validate(tr.t) + } + run := tr.runs[tr.call] + rec := run.recorder(tr.conf) + if _, err := rec.Exec(ctx, batchSize); err != nil { + return 0, err + } + tr.call++ + return run.ret(ctx, batchSize, tr.conf) +} + +func TestRun(t *testing.T) { + const testStatusTotal = 10 + + 
t.Parallel() + assertStoreCalled := func() func(*testing.T, *recorder) { + const op = "assertStoreCalled" + return func(t *testing.T, r *recorder) { + assert.Positive(t, r.storeBatchSize, op) + } + } + assertStoreNotCalled := func() func(*testing.T, *recorder) { + const op = "assertStoreNotCalled" + return func(t *testing.T, r *recorder) { + t.Helper() + assert.Zero(t, r.storeBatchSize, op) + } + } + assertRetryCalled := func() func(*testing.T, *recorder) { + const op = "assertRetryCalled" + return func(t *testing.T, r *recorder) { + assert.Positive(t, r.status.Retries, op) + } + } + assertRetryNotCalled := func() func(*testing.T, *recorder) { + const op = "assertRetryNotCalled" + return func(t *testing.T, r *recorder) { + assert.Zero(t, r.status.Retries, op) + } + } + assertCompletedCalled := func() func(*testing.T, *recorder) { + const op = "assertCompletedCalled" + return func(t *testing.T, r *recorder) { + assert.Positive(t, r.status.Completed, op) + } + } + assertCompletedNotCalled := func() func(*testing.T, *recorder) { + const op = "assertCompletedNotCalled" + return func(t *testing.T, r *recorder) { + assert.Zero(t, r.status.Completed, op) + } + } + assertStatusTotal := func() func(*testing.T, *recorder) { + const op = "assertStatusTotal" + return func(t *testing.T, r *recorder) { + assert.Equal(t, testStatusTotal, r.status.Total, op) + } + } + combine := func(fns ...func(*testing.T, *recorder)) func(*testing.T, *recorder) { + return func(t *testing.T, r *recorder) { + for _, fn := range fns { + fn(t, r) + } + } + } + + execLessThanBatch := func() func(context.Context, int, *Config) (int, error) { + return func(ctx context.Context, batchSize int, c *Config) (int, error) { + return batchSize - 1, nil + } + } + execTimeout := func() func(context.Context, int, *Config) (int, error) { + return func(ctx context.Context, batchSize int, c *Config) (int, error) { + select { + case <-ctx.Done(): + return 0, ctx.Err() + } + } + } + execError := func() 
func(context.Context, int, *Config) (int, error) { + return func(ctx context.Context, batchSize int, c *Config) (int, error) { + return 0, errors.New("fake error") + } + } + execSlow := func() func(context.Context, int, *Config) (int, error) { + return func(ctx context.Context, batchSize int, c *Config) (int, error) { + _, upper := c.targetRange() + time.Sleep(upper + (2 * time.Millisecond)) + return batchSize, nil + } + } + execTargetRange := func() func(context.Context, int, *Config) (int, error) { + return func(ctx context.Context, batchSize int, c *Config) (int, error) { + time.Sleep(c.Target) + return batchSize, nil + } + } + + runMap := map[string]*testRun{ + "execLessThanBatch": { + ret: execLessThanBatch(), + chk: combine(assertStatusTotal(), assertStoreNotCalled(), assertRetryNotCalled(), assertCompletedCalled()), + }, + "execTimeout": { + ret: execTimeout(), + chk: combine(assertStatusTotal(), assertStoreCalled(), assertRetryCalled(), assertCompletedNotCalled()), + }, + "execError": { + ret: execError(), + chk: combine(assertStatusTotal(), assertStoreNotCalled(), assertRetryNotCalled(), assertCompletedNotCalled()), + }, + "execSlow": { + ret: execSlow(), + chk: combine(assertStatusTotal(), assertStoreCalled(), assertRetryNotCalled(), assertCompletedCalled()), + }, + "execTargetRange": { + ret: execTargetRange(), + chk: combine(assertStatusTotal(), assertStoreNotCalled(), assertRetryNotCalled(), assertCompletedCalled()), + }, + } + + tests := []struct { + name string + conf *Config + runs []*testRun + wantErr bool + }{ + { + name: "normal", + conf: &Config{ + TotalToComplete: testStatusTotal, + }, + runs: []*testRun{runMap["execLessThanBatch"]}, + }, + { + name: "error", + conf: &Config{ + TotalToComplete: testStatusTotal, + }, + runs: []*testRun{runMap["execError"]}, + wantErr: true, + }, + { + name: "timeout-normal", + conf: &Config{ + TotalToComplete: testStatusTotal, + StatusThreshold: 5 * time.Millisecond, + }, + runs: 
[]*testRun{runMap["execTimeout"], runMap["execLessThanBatch"]}, + }, + { + name: "slow-normal", + conf: &Config{ + TotalToComplete: testStatusTotal, + StatusThreshold: 5 * time.Millisecond, + }, + runs: []*testRun{runMap["execSlow"], runMap["execLessThanBatch"]}, + }, + { + name: "target-normal", + conf: &Config{ + TotalToComplete: testStatusTotal, + Target: 1 * time.Second, + }, + runs: []*testRun{runMap["execTargetRange"], runMap["execLessThanBatch"]}, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + assert := assert.New(t) + ctx := context.Background() + b := &Batch{ + c: tt.conf, + } + tr := newTestRunner(t, tt.conf, b) + tr.runs = tt.runs + + if err := b.Run(ctx); tt.wantErr { + assert.Error(err) + } else { + assert.NoError(err) + } + }) + } +} + +func TestBatch_batchCompleted(t *testing.T) { + t.Parallel() + assert, require := assert.New(t), require.New(t) + ctx := context.Background() + config := &Config{ + Exec: func(ctx context.Context, batchSize int) (int, error) { return 0, nil }, + Store: func(ctx context.Context, batchSize int) error { + fmt.Println("batchSize: ", batchSize) + return nil + }, + } + b, err := New(ctx, config) + require.NoError(err) + assert.NotNil(b) + + err = b.adjustSize(ctx, 10) + require.NoError(err) +} diff --git a/internal/scheduler/cleaner/cleaner_job.go b/internal/scheduler/cleaner/cleaner_job.go index 402054abee..a7fa39abd9 100644 --- a/internal/scheduler/cleaner/cleaner_job.go +++ b/internal/scheduler/cleaner/cleaner_job.go @@ -29,7 +29,7 @@ func (c *cleanerJob) Status() scheduler.JobStatus { // Run performs the required work depending on the implementation. // The context is used to notify the job that it should exit early. 
-func (c *cleanerJob) Run(ctx context.Context) error { +func (c *cleanerJob) Run(ctx context.Context, _ time.Duration) error { const op = "cleaner.(cleanerJob).Run" if _, err := c.w.Exec(ctx, "delete from job_run where status='completed'", nil); err != nil { diff --git a/internal/scheduler/job.go b/internal/scheduler/job.go index 0c5d0bd0e1..bbb2baff13 100644 --- a/internal/scheduler/job.go +++ b/internal/scheduler/job.go @@ -11,20 +11,41 @@ import ( ) // Job defines an interface for jobs that can be invoked by the scheduler. +// Multiple goroutines may invoke methods on a Job simultaneously. type Job interface { - // Status reports the job’s current status. The status is periodically persisted by - // the scheduler when a job is running, and will be used to verify a job is making progress. + // Status reports the job’s current status. It is called periodically + // while the Run method is running and immediately after the Run method + // completes. + // + // The scheduler uses the values in the returned JobStatus to verify + // that progress is being made by the job. The scheduler will interrupt + // the job if the values returned by Status do not change over a + // configurable amount of time. + // + // See scheduler.WithInterruptThreshold for more information. Status() JobStatus - // Run performs the required work depending on the implementation. - // The context is used to notify the job that it should exit early. - Run(ctx context.Context) error + // Run starts the specified job and waits for it to complete. + // + // The context parameter is used to notify the job that it should exit. + // + // The statusThreshold parameter is the amount of time the job has to + // return a JobStatus with values different from the previous + // JobStatus. Each time a JobStatus with values different from the + // previous JobStatus is returned, the timer for the threshold + // restarts. If the threshold is reached, the context is canceled. 
+ // + // If the returned error is not nil, the job will be scheduled to run + // again immediately and the returned error will be logged. + Run(ctx context.Context, statusThreshold time.Duration) error - // NextRunIn returns the duration until the next job run should be scheduled. This - // method is invoked after a run has successfully completed and the next run time - // is being persisted by the scheduler. If an error is returned, the error will be logged - // but the duration returned will still be used in scheduling. If a zero duration is returned + // NextRunIn returns the duration the job scheduler should wait before + // running the job again. It is only called after the Run method has + // completed and returned a nil error. If a zero duration is returned // the job will be scheduled to run again immediately. + // + // If an error is returned, the error will be logged but the returned + // duration will still be used in scheduling. NextRunIn(context.Context) (time.Duration, error) // Name is the unique name of the job. @@ -34,12 +55,25 @@ type Job interface { Description() string } -// JobStatus defines the struct that must be returned by the Job.Status() method. +// A JobStatus represents the status of a job. +// Completed and Total are used to indicate job progress. +// The Completed value cannot be greater than the Total value +// and both values must be greater than or equal to zero. +// +// Retries is used to indicate how many times the job has retried +// accomplishing work. The value must be greater than or equal to zero. +// +// The scheduler uses the values in the JobStatus to verify that +// progress is being made by the job. The scheduler will interrupt the job +// if the values returned by Status do not change over a configurable +// amount of time. 
type JobStatus struct { - // Completed and Total are used to indicate job progress, - // each job implementation will determine the definition of - // progress by calculating both Completed and Total. - Completed, Total int + // The job's work items + Completed int // number of items processed + Total int // total number of items to be processed + + // The job's liveliness + Retries int // number of times the job has retried work } func validateJob(ctx context.Context, j Job) error { diff --git a/internal/scheduler/job/additional_verification_test.go b/internal/scheduler/job/additional_verification_test.go index 4e221b14fe..0e9c351fd2 100644 --- a/internal/scheduler/job/additional_verification_test.go +++ b/internal/scheduler/job/additional_verification_test.go @@ -44,7 +44,7 @@ func TestJobWorkflow(t *testing.T) { assert.NotEmpty(run.PrivateId) assert.Equal(job.Name, run.JobName) - run, err = repo.UpdateProgress(ctx, run.PrivateId, 100, 110) + run, err = repo.UpdateProgress(ctx, run.PrivateId, 100, 110, 0) require.NoError(err) assert.Equal(uint32(100), run.CompletedCount) assert.Equal(uint32(110), run.TotalCount) @@ -54,7 +54,7 @@ func TestJobWorkflow(t *testing.T) { require.NoError(err) assert.Nil(newRuns) - run, err = repo.CompleteRun(ctx, run.PrivateId, time.Hour, 0, 0) + run, err = repo.CompleteRun(ctx, run.PrivateId, time.Hour, 0, 0, 0) require.NoError(err) assert.Equal(Completed.string(), run.Status) @@ -85,7 +85,7 @@ func TestJobWorkflow(t *testing.T) { require.NoError(err) assert.Nil(newRuns) - newRun, err = repo.FailRun(ctx, newRun.PrivateId, 0, 0) + newRun, err = repo.FailRun(ctx, newRun.PrivateId, 0, 0, 0) require.NoError(err) assert.Equal(Failed.string(), newRun.Status) diff --git a/internal/scheduler/job/query.go b/internal/scheduler/job/query.go index dbb8be5fb9..41bd0c3185 100644 --- a/internal/scheduler/job/query.go +++ b/internal/scheduler/job/query.go @@ -65,7 +65,8 @@ const updateProgressQuery = ` job_run set completed_count = ?, - total_count = 
? + total_count = ?, + retries_count = ? where private_id = ? and status = 'running' @@ -78,6 +79,7 @@ const completeRunQuery = ` set completed_count = ?, total_count = ?, + retries_count = ?, status = 'completed', end_time = current_timestamp where @@ -92,6 +94,7 @@ const failRunQuery = ` set completed_count = ?, total_count = ?, + retries_count = ?, status = 'failed', end_time = current_timestamp where diff --git a/internal/scheduler/job/repository_run.go b/internal/scheduler/job/repository_run.go index d0f54e7127..e31044e995 100644 --- a/internal/scheduler/job/repository_run.go +++ b/internal/scheduler/job/repository_run.go @@ -71,7 +71,7 @@ func (r *Repository) RunJobs(ctx context.Context, serverId string, opt ...Option // Once a run has been persisted with a final run status (completed, failed or interrupted), // any future UpdateProgress attempts will return an error with Code errors.InvalidJobRunState. // All options are ignored. -func (r *Repository) UpdateProgress(ctx context.Context, runId string, completed, total int, _ ...Option) (*Run, error) { +func (r *Repository) UpdateProgress(ctx context.Context, runId string, completed, total, retries int, _ ...Option) (*Run, error) { const op = "job.(Repository).UpdateProgress" if runId == "" { return nil, errors.New(ctx, errors.InvalidParameter, op, "missing run id") @@ -81,7 +81,7 @@ func (r *Repository) UpdateProgress(ctx context.Context, runId string, completed run.PrivateId = runId _, err := r.writer.DoTx(ctx, db.StdRetryCnt, db.ExpBackoff{}, func(r db.Reader, w db.Writer) error { - rows, err := w.Query(ctx, updateProgressQuery, []any{completed, total, runId}) + rows, err := w.Query(ctx, updateProgressQuery, []any{completed, total, retries, runId}) if err != nil { return errors.Wrap(ctx, err, op) } @@ -134,7 +134,7 @@ func (r *Repository) UpdateProgress(ctx context.Context, runId string, completed // or interrupted), any future calls to CompleteRun will return an error with Code // 
errors.InvalidJobRunState. // All options are ignored. -func (r *Repository) CompleteRun(ctx context.Context, runId string, nextRunIn time.Duration, completed, total int, _ ...Option) (*Run, error) { +func (r *Repository) CompleteRun(ctx context.Context, runId string, nextRunIn time.Duration, completed, total, retries int, _ ...Option) (*Run, error) { const op = "job.(Repository).CompleteRun" if runId == "" { return nil, errors.New(ctx, errors.InvalidParameter, op, "missing run id") @@ -148,7 +148,7 @@ func (r *Repository) CompleteRun(ctx context.Context, runId string, nextRunIn ti // persisted by the scheduler's monitor jobs loop. // Add an on update sql trigger to protect the job_run table, once progress // values are used in the critical path. - rows, err := w.Query(ctx, completeRunQuery, []any{completed, total, runId}) + rows, err := w.Query(ctx, completeRunQuery, []any{completed, total, retries, runId}) if err != nil { return errors.Wrap(ctx, err, op) } @@ -220,7 +220,7 @@ func (r *Repository) CompleteRun(ctx context.Context, runId string, nextRunIn ti // or interrupted), any future calls to FailRun will return an error with Code // errors.InvalidJobRunState. // All options are ignored. -func (r *Repository) FailRun(ctx context.Context, runId string, completed, total int, _ ...Option) (*Run, error) { +func (r *Repository) FailRun(ctx context.Context, runId string, completed, total, retries int, _ ...Option) (*Run, error) { const op = "job.(Repository).FailRun" if runId == "" { return nil, errors.New(ctx, errors.InvalidParameter, op, "missing run id") @@ -234,7 +234,7 @@ func (r *Repository) FailRun(ctx context.Context, runId string, completed, total // persisted by the scheduler's monitor jobs loop. // Add an on update sql trigger to protect the job_run table, once progress // values are used in the critical path. 
- rows, err := w.Query(ctx, failRunQuery, []any{completed, total, runId}) + rows, err := w.Query(ctx, failRunQuery, []any{completed, total, retries, runId}) if err != nil { return errors.Wrap(ctx, err, op) } diff --git a/internal/scheduler/job/repository_run_test.go b/internal/scheduler/job/repository_run_test.go index a468535dc1..fcc415e251 100644 --- a/internal/scheduler/job/repository_run_test.go +++ b/internal/scheduler/job/repository_run_test.go @@ -216,7 +216,7 @@ func TestRepository_RunJobsOrder(t *testing.T) { assert.Equal(run.JobPluginId, firstJob.PluginId) // End first job with time between last and middle - _, err = repo.CompleteRun(ctx, run.PrivateId, -6*time.Hour, 0, 0) + _, err = repo.CompleteRun(ctx, run.PrivateId, -6*time.Hour, 0, 0, 0) require.NoError(err) runs, err = repo.RunJobs(ctx, server.PrivateId) @@ -260,7 +260,7 @@ func TestRepository_UpdateProgress(t *testing.T) { job := testJob(t, conn, "name", "description", wrapper) type args struct { - completed, total int + completed, total, retries int } tests := []struct { @@ -342,10 +342,12 @@ func TestRepository_UpdateProgress(t *testing.T) { }, }, args: args{ - total: 10, + total: 10, + retries: 1, }, want: args{ - total: 10, + total: 10, + retries: 1, }, }, { @@ -381,10 +383,12 @@ func TestRepository_UpdateProgress(t *testing.T) { args: args{ completed: 10, total: 20, + retries: 1, }, want: args{ completed: 10, total: 20, + retries: 1, }, }, { @@ -422,7 +426,7 @@ func TestRepository_UpdateProgress(t *testing.T) { privateId = tt.orig.PrivateId } - got, err := repo.UpdateProgress(ctx, privateId, tt.args.completed, tt.args.total) + got, err := repo.UpdateProgress(ctx, privateId, tt.args.completed, tt.args.total, tt.args.retries) if tt.wantErr { require.Error(err) assert.Truef(errors.Match(errors.T(tt.wantErrCode), err), "Unexpected error %s", err) @@ -455,7 +459,7 @@ func TestRepository_UpdateProgress(t *testing.T) { require.NoError(err) require.NotNil(repo) - got, err := repo.UpdateProgress(ctx, 
"fake-run-id", 0, 0) + got, err := repo.UpdateProgress(ctx, "fake-run-id", 0, 0, 0) require.Error(err) require.Nil(got) assert.Truef(errors.Match(errors.T(errors.RecordNotFound), err), "Unexpected error %s", err) @@ -476,7 +480,7 @@ func TestRepository_CompleteRun(t *testing.T) { job := testJob(t, conn, "name", "description", wrapper) type args struct { - completed, total int + completed, total, retries int } tests := []struct { name string @@ -557,7 +561,7 @@ func TestRepository_CompleteRun(t *testing.T) { Status: Running.string(), }, }, - args: args{completed: 10, total: 20}, + args: args{completed: 10, total: 20, retries: 1}, }, } @@ -577,7 +581,7 @@ func TestRepository_CompleteRun(t *testing.T) { privateId = tt.orig.PrivateId } - got, err := repo.CompleteRun(ctx, privateId, tt.nextRunIn, tt.args.completed, tt.args.total) + got, err := repo.CompleteRun(ctx, privateId, tt.nextRunIn, tt.args.completed, tt.args.total, tt.args.retries) if tt.wantErr { require.Error(err) assert.Truef(errors.Match(errors.T(tt.wantErrCode), err), "Unexpected error %s", err) @@ -597,6 +601,7 @@ func TestRepository_CompleteRun(t *testing.T) { assert.Equal(Completed.string(), got.Status) assert.Equal(tt.args.completed, int(got.CompletedCount)) assert.Equal(tt.args.total, int(got.TotalCount)) + assert.Equal(tt.args.retries, int(got.RetriesCount)) updatedJob, err := repo.LookupJob(ctx, tt.orig.JobName) assert.NoError(err) @@ -621,7 +626,7 @@ func TestRepository_CompleteRun(t *testing.T) { require.NoError(err) require.NotNil(repo) - got, err := repo.CompleteRun(ctx, "fake-run-id", time.Hour, 0, 0) + got, err := repo.CompleteRun(ctx, "fake-run-id", time.Hour, 0, 0, 0) require.Error(err) require.Nil(got) assert.Truef(errors.Match(errors.T(errors.RecordNotFound), err), "Unexpected error %s", err) @@ -642,7 +647,7 @@ func TestRepository_FailRun(t *testing.T) { job := testJob(t, conn, "name", "description", wrapper) type args struct { - completed, total int + completed, total, retries int } tests 
:= []struct { name string @@ -721,7 +726,7 @@ func TestRepository_FailRun(t *testing.T) { Status: Running.string(), }, }, - args: args{completed: 10, total: 20}, + args: args{completed: 10, total: 20, retries: 5}, }, } @@ -741,7 +746,7 @@ func TestRepository_FailRun(t *testing.T) { privateId = tt.orig.PrivateId } - got, err := repo.FailRun(ctx, privateId, tt.args.completed, tt.args.total) + got, err := repo.FailRun(ctx, privateId, tt.args.completed, tt.args.total, tt.args.retries) if tt.wantErr { require.Error(err) assert.Truef(errors.Match(errors.T(tt.wantErrCode), err), "Unexpected error %s", err) @@ -761,6 +766,7 @@ func TestRepository_FailRun(t *testing.T) { assert.Equal(Failed.string(), got.Status) assert.Equal(tt.args.completed, int(got.CompletedCount)) assert.Equal(tt.args.total, int(got.TotalCount)) + assert.Equal(tt.args.retries, int(got.RetriesCount)) // Delete job run so it does not clash with future runs _, err = repo.deleteRun(context.Background(), privateId) @@ -774,7 +780,7 @@ func TestRepository_FailRun(t *testing.T) { require.NoError(err) require.NotNil(repo) - got, err := repo.FailRun(ctx, "fake-run-id", 0, 0) + got, err := repo.FailRun(ctx, "fake-run-id", 0, 0, 0) require.Error(err) require.Nil(got) assert.Truef(errors.Match(errors.T(errors.RecordNotFound), err), "Unexpected error %s", err) diff --git a/internal/scheduler/job/store/job.pb.go b/internal/scheduler/job/store/job.pb.go index 7bc6229cf0..c6d4020a84 100644 --- a/internal/scheduler/job/store/job.pb.go +++ b/internal/scheduler/job/store/job.pb.go @@ -134,6 +134,10 @@ type JobRun struct { // total_count is set during an update to indicate the progress goal. // @inject_tag: `gorm:"default:0"` TotalCount uint32 `protobuf:"varint,9,opt,name=total_count,json=totalCount,proto3" json:"total_count,omitempty" gorm:"default:0"` + // retries_count is set during an update to indicate the number of times + // a job has retried work. 
+ // @inject_tag: `gorm:"default:0"` + RetriesCount uint32 `protobuf:"varint,12,opt,name=retries_count,json=retriesCount,proto3" json:"retries_count,omitempty" gorm:"default:0"` // status of the job run (running, completed, failed or interrupted). // @inject_tag: `gorm:"not_null"` Status string `protobuf:"bytes,10,opt,name=status,proto3" json:"status,omitempty" gorm:"not_null"` @@ -230,6 +234,13 @@ func (x *JobRun) GetTotalCount() uint32 { return 0 } +func (x *JobRun) GetRetriesCount() uint32 { + if x != nil { + return x.RetriesCount + } + return 0 +} + func (x *JobRun) GetStatus() string { if x != nil { return x.Status @@ -266,7 +277,7 @@ var file_controller_storage_job_store_v1_job_proto_rawDesc = []byte{ 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x6e, 0x65, 0x78, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x52, - 0x75, 0x6e, 0x22, 0xd9, 0x03, 0x0a, 0x06, 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x12, 0x1d, 0x0a, + 0x75, 0x6e, 0x22, 0xfe, 0x03, 0x0a, 0x06, 0x4a, 0x6f, 0x62, 0x52, 0x75, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x0d, 0x6a, 0x6f, 0x62, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, @@ -291,16 +302,18 @@ var file_controller_storage_job_store_v1_job_proto_rawDesc = []byte{ 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, - 0x6c, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x0a, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, - 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, - 0x72, 0x49, 0x64, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x42, 0x42, - 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x2f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x72, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3b, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6c, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x72, + 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x62, 0x6f, 0x75, 0x6e, + 0x64, 0x61, 0x72, 0x79, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x73, 0x63, + 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff 
--git a/internal/scheduler/scheduler.go b/internal/scheduler/scheduler.go index 50ad3e82b3..b71f77b93d 100644 --- a/internal/scheduler/scheduler.go +++ b/internal/scheduler/scheduler.go @@ -228,7 +228,7 @@ func (s *Scheduler) schedule(ctx context.Context, wg *sync.WaitGroup) { err := s.runJob(ctx, wg, r) if err != nil { event.WriteError(ctx, op, err, event.WithInfoMsg("error starting job")) - if _, inner := repo.FailRun(ctx, r.PrivateId, 0, 0); inner != nil { + if _, inner := repo.FailRun(ctx, r.PrivateId, 0, 0, 0); inner != nil { event.WriteError(ctx, op, inner, event.WithInfoMsg("error updating failed job run")) } } @@ -260,7 +260,7 @@ func (s *Scheduler) runJob(ctx context.Context, wg *sync.WaitGroup, r *job.Run) go func() { defer rj.cancelCtx() defer wg.Done() - runErr := j.Run(jobContext) + runErr := j.Run(jobContext, s.interruptThreshold) // Get final status report to update run progress with status := j.Status() @@ -273,10 +273,10 @@ func (s *Scheduler) runJob(ctx context.Context, wg *sync.WaitGroup, r *job.Run) if inner != nil { event.WriteError(ctx, op, inner, event.WithInfoMsg("error getting next run time", "name", j.Name())) } - _, updateErr = repo.CompleteRun(ctx, r.PrivateId, nextRun, status.Completed, status.Total) + _, updateErr = repo.CompleteRun(ctx, r.PrivateId, nextRun, status.Completed, status.Total, status.Retries) default: event.WriteError(ctx, op, runErr, event.WithInfoMsg("job run failed", "run id", r.PrivateId, "name", j.Name())) - _, updateErr = repo.FailRun(ctx, r.PrivateId, status.Completed, status.Total) + _, updateErr = repo.FailRun(ctx, r.PrivateId, status.Completed, status.Total, status.Retries) } if updateErr != nil { @@ -333,7 +333,7 @@ func (s *Scheduler) updateRunningJobProgress(ctx context.Context, j *runningJob) return fmt.Errorf("error creating job repo %w", err) } status := j.status() - _, err = repo.UpdateProgress(ctx, j.runId, status.Completed, status.Total) + _, err = repo.UpdateProgress(ctx, j.runId, status.Completed, 
status.Total, status.Retries) if errors.Match(errors.T(errors.InvalidJobRunState), err) { // Job has been persisted with a final run status, cancel job context to trigger early exit. j.cancelCtx() diff --git a/internal/scheduler/testing.go b/internal/scheduler/testing.go index 71fafbcce2..e15d5f1066 100644 --- a/internal/scheduler/testing.go +++ b/internal/scheduler/testing.go @@ -55,10 +55,10 @@ func TestScheduler(t testing.TB, conn *db.DB, wrapper wrapping.Wrapper, opt ...O return s } -func testJobFn() (func(ctx context.Context) error, chan struct{}, chan struct{}) { +func testJobFn() (func(ctx context.Context, _ time.Duration) error, chan struct{}, chan struct{}) { jobReady := make(chan struct{}) jobDone := make(chan struct{}) - fn := func(ctx context.Context) error { + fn := func(ctx context.Context, _ time.Duration) error { jobReady <- struct{}{} // Block until context is canceled @@ -73,7 +73,7 @@ func testJobFn() (func(ctx context.Context) error, chan struct{}, chan struct{}) type testJob struct { nextRunIn time.Duration name, description string - fn func(context.Context) error + fn func(context.Context, time.Duration) error statusFn func() JobStatus } @@ -84,8 +84,8 @@ func (j testJob) Status() JobStatus { return j.statusFn() } -func (j testJob) Run(ctx context.Context) error { - return j.fn(ctx) +func (j testJob) Run(ctx context.Context, statusThreshold time.Duration) error { + return j.fn(ctx, statusThreshold) } func (j testJob) NextRunIn(_ context.Context) (time.Duration, error) { diff --git a/internal/server/job/rotate_roots_job.go b/internal/server/job/rotate_roots_job.go index d0d4d23774..26a602dcd0 100644 --- a/internal/server/job/rotate_roots_job.go +++ b/internal/server/job/rotate_roots_job.go @@ -78,7 +78,7 @@ func (r *rotateRootsJob) Status() scheduler.JobStatus { } // Run executes the job by calling the rotateRoots domain function -func (r *rotateRootsJob) Run(ctx context.Context) error { +func (r *rotateRootsJob) Run(ctx context.Context, _ 
time.Duration) error { const op = "server.(rotateRootsJob).Run" _, err := server.RotateRoots(ctx, r.workerAuthRepo, nodeenrollment.WithCertificateLifetime(r.certificateLifetime)) diff --git a/internal/server/job/rotate_roots_job_test.go b/internal/server/job/rotate_roots_job_test.go index cbf2154078..eb9df01ca4 100644 --- a/internal/server/job/rotate_roots_job_test.go +++ b/internal/server/job/rotate_roots_job_test.go @@ -105,7 +105,7 @@ func TestRotateRootsJob(t *testing.T) { assert.Equal(time.Hour, nextRun) // Run job and ensure rotation was performed - err = got.Run(ctx) + err = got.Run(ctx, 0) require.NoError(err) require.Equal(1, got.totalRotates) rootIds, err = workerAuthRepo.List(ctx, (*types.RootCertificate)(nil)) @@ -127,6 +127,6 @@ func TestRotateRootsJobFailure(t *testing.T) { got, err := newRotateRootsJob(ctx, &db.Db{}, &db.Db{}, kmsCache) require.NoError(err) - err = got.Run(ctx) + err = got.Run(ctx, 0) require.Error(err) } diff --git a/internal/server/job/upsert_worker_storage_bucket_job.go b/internal/server/job/upsert_worker_storage_bucket_job.go index efc8a293f7..11ec752b52 100644 --- a/internal/server/job/upsert_worker_storage_bucket_job.go +++ b/internal/server/job/upsert_worker_storage_bucket_job.go @@ -35,7 +35,7 @@ func (usb *upsertWorkerStorageBucketJob) Status() scheduler.JobStatus { return s // Run performs the required work depending on the implementation. // The context is used to notify the job that it should exit early. -func (usb *upsertWorkerStorageBucketJob) Run(_ context.Context) error { return nil } +func (usb *upsertWorkerStorageBucketJob) Run(ctx context.Context, _ time.Duration) error { return nil } // NextRunIn returns the duration until the next job run should be scheduled. 
// Upsert Worker Storage Bucket will run every 24 hours unless we know there are diff --git a/internal/server/query.go b/internal/server/query.go index d78953b0e5..54a6a6be0e 100644 --- a/internal/server/query.go +++ b/internal/server/query.go @@ -73,6 +73,12 @@ const ( where worker_key_identifier = @worker_key_identifier ` + verifyKnownWorkersQuery = ` + select public_id + from server_worker + where public_id in (?); + ` + getWorkerAuthsByWorkerIdQuery = ` select * from worker_auth_authorized diff --git a/internal/server/repository_worker.go b/internal/server/repository_worker.go index 576313fb6e..f3e95b161c 100644 --- a/internal/server/repository_worker.go +++ b/internal/server/repository_worker.go @@ -431,6 +431,40 @@ func (r *Repository) UpsertWorkerStatus(ctx context.Context, worker *Worker, opt return ret, nil } +// VerifyKnownWorkers checks that the passed worker IDs are found in the repository and returns +// the public IDs of the workers that are found. +func (r *Repository) VerifyKnownWorkers(ctx context.Context, ids []string) ([]string, error) { + const op = "server.(Repository).VerifyKnownWorkers" + + if len(ids) == 0 { + return nil, nil + } + + rows, err := r.reader.Query(ctx, verifyKnownWorkersQuery, []any{ids}) + if err != nil { + return nil, errors.Wrap(ctx, err, op) + } + defer rows.Close() + + type rowsResult struct { + PublicId string + } + var ret []string + for rows.Next() { + var result rowsResult + err = r.reader.ScanRows(ctx, rows, &result) + if err != nil { + return nil, errors.Wrap(ctx, err, op) + } + ret = append(ret, result.PublicId) + } + if err := rows.Err(); err != nil { + return nil, errors.Wrap(ctx, err, op) + } + + return ret, nil +} + // setWorkerTags removes all existing tags from the same source and worker id // and creates new ones based on the ones provided. This function should be // called from inside a db transaction. 
diff --git a/internal/server/repository_worker_test.go b/internal/server/repository_worker_test.go index 1295535167..67f8f580d1 100644 --- a/internal/server/repository_worker_test.go +++ b/internal/server/repository_worker_test.go @@ -567,6 +567,63 @@ func TestUpsertWorkerStatus(t *testing.T) { }) } +func TestVerifyKnownWorkers(t *testing.T) { + ctx := context.Background() + conn, _ := db.TestSetup(t, "postgres") + rw := db.New(conn) + wrapper := db.TestWrapper(t) + kmsCache := kms.TestKms(t, conn, wrapper) + require.NoError(t, kmsCache.CreateKeys(context.Background(), scope.Global.String(), kms.WithRandomReader(rand.Reader))) + repo, err := server.NewRepository(ctx, rw, rw, kmsCache) + require.NoError(t, err) + + workerIds := make([]string, 0, 10) + // Seed the repo with workers + for i := 0; i < 10; i++ { + w := server.TestPkiWorker(t, conn, wrapper) + workerIds = append(workerIds, w.GetPublicId()) + } + + tests := []struct { + name string + testIds []string + wantCnt int + }{ + { + name: "empty-list", + testIds: []string{}, + wantCnt: 0, + }, + { + name: "full-list", + testIds: workerIds, + wantCnt: 10, + }, + { + name: "bogus-list", + testIds: []string{"w_bogus1", "w_bogus2"}, + wantCnt: 0, + }, + { + name: "partial-bogus-list", + testIds: []string{"w_bogus1", "w_bogus2", workerIds[0], workerIds[1]}, + wantCnt: 2, + }, + { + name: "partial-list", + testIds: workerIds[:5], + wantCnt: 5, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ids, err := repo.VerifyKnownWorkers(ctx, tt.testIds) + require.NoError(t, err) + require.Equal(t, tt.wantCnt, len(ids)) + }) + } +} + func TestTagUpdatingListing(t *testing.T) { ctx := context.Background() require := require.New(t) diff --git a/internal/session/job_delete_terminated_sessions.go b/internal/session/job_delete_terminated_sessions.go index e43e9f8018..e0f9c4cda7 100644 --- a/internal/session/job_delete_terminated_sessions.go +++ b/internal/session/job_delete_terminated_sessions.go @@ -5,10 
+5,12 @@ package session import ( "context" + "sync" "time" "github.com/hashicorp/boundary/internal/errors" "github.com/hashicorp/boundary/internal/scheduler" + "github.com/hashicorp/boundary/internal/scheduler/batch" ) type deleteTerminatedJob struct { @@ -18,8 +20,8 @@ type deleteTerminatedJob struct { // state for it to be deleted. threshold time.Duration - // the number of sessions deleted in the most recent run - deletedInRun int + mu sync.Mutex + batch *batch.Batch } func newDeleteTerminatedJob(ctx context.Context, repo *Repository, threshold time.Duration) (*deleteTerminatedJob, error) { @@ -38,24 +40,49 @@ func newDeleteTerminatedJob(ctx context.Context, repo *Repository, threshold tim // Status reports the job’s current status. The status is periodically persisted by // the scheduler when a job is running, and will be used to verify a job is making progress. func (d *deleteTerminatedJob) Status() scheduler.JobStatus { - return scheduler.JobStatus{ - Completed: d.deletedInRun, - Total: d.deletedInRun, + d.mu.Lock() + defer d.mu.Unlock() + if d.batch != nil { + return d.batch.Status() } + return scheduler.JobStatus{} } // Run performs the required work depending on the implementation. // The context is used to notify the job that it should exit early. 
-func (d *deleteTerminatedJob) Run(ctx context.Context) error { +func (d *deleteTerminatedJob) Run(ctx context.Context, statusThreshold time.Duration) error { const op = "session.(deleteTerminatedJob).Run" - d.deletedInRun = 0 - var err error - d.deletedInRun, err = d.repo.deleteSessionsTerminatedBefore(ctx, d.threshold) + params, err := d.repo.getDeleteJobParams(ctx, d.threshold) + switch { + case err != nil: + return errors.Wrap(ctx, err, op) + case params.TotalToDelete == 0: + return nil + } + + exec := func() batch.Exec { + return func(ctx context.Context, batchSize int) (int, error) { + return d.repo.deleteTerminatedSessionsBatch(ctx, params.WindowStartTime, batchSize) + } + } + + config := &batch.Config{ + Size: params.BatchSize, + TotalToComplete: params.TotalToDelete, + StatusThreshold: statusThreshold, + Exec: exec(), + Store: d.repo.setDeleteJobBatchSize, + } + + batch, err := batch.New(ctx, config) if err != nil { return errors.Wrap(ctx, err, op) } - return nil + d.mu.Lock() + d.batch = batch + d.mu.Unlock() + return batch.Run(ctx) } // NextRunIn returns the duration until the next job run should be scheduled. 
This diff --git a/internal/session/job_delete_terminated_sessions_test.go b/internal/session/job_delete_terminated_sessions_test.go index b8df545aef..9aa4cefb21 100644 --- a/internal/session/job_delete_terminated_sessions_test.go +++ b/internal/session/job_delete_terminated_sessions_test.go @@ -34,48 +34,13 @@ func TestDeleteTermiantedSessionsJob(t *testing.T) { threshold time.Duration expected int }{ - { - 0, - 0, - time.Nanosecond, - 0, - }, - { - 1, - 1, - time.Nanosecond, - 1, - }, - { - 1, - 1, - time.Hour, - 0, - }, - { - 10, - 10, - time.Nanosecond, - 10, - }, - { - 10, - 4, - time.Nanosecond, - 4, - }, - { - 10, - 0, - time.Nanosecond, - 0, - }, - { - 10, - 10, - time.Hour, - 0, - }, + {0, 0, time.Nanosecond, 0}, + {1, 1, time.Nanosecond, 1}, + {1, 1, time.Hour, 0}, + {10, 10, time.Nanosecond, 10}, + {10, 4, time.Nanosecond, 4}, + {10, 0, time.Nanosecond, 0}, + {10, 10, time.Hour, 0}, } for _, tc := range cases { @@ -101,9 +66,9 @@ func TestDeleteTermiantedSessionsJob(t *testing.T) { job, err := newDeleteTerminatedJob(ctx, repo, tc.threshold) require.NoError(t, err) - err = job.Run(ctx) + err = job.Run(ctx, 1*time.Second) require.NoError(t, err) - assert.Equal(t, tc.expected, job.deletedInRun) + assert.Equal(t, tc.expected, job.Status().Completed) }) } } diff --git a/internal/session/job_session_cleanup.go b/internal/session/job_session_cleanup.go index d98933c464..0ec00fd89e 100644 --- a/internal/session/job_session_cleanup.go +++ b/internal/session/job_session_cleanup.go @@ -93,7 +93,7 @@ func (j *sessionConnectionCleanupJob) Status() scheduler.JobStatus { } // Run executes the job. 
-func (j *sessionConnectionCleanupJob) Run(ctx context.Context) error { +func (j *sessionConnectionCleanupJob) Run(ctx context.Context, _ time.Duration) error { const op = "session.(sessionConnectionCleanupJob).Run" j.totalClosed = 0 diff --git a/internal/session/job_session_cleanup_test.go b/internal/session/job_session_cleanup_test.go index 3e7e8a5284..aa4169a085 100644 --- a/internal/session/job_session_cleanup_test.go +++ b/internal/session/job_session_cleanup_test.go @@ -110,7 +110,7 @@ func TestSessionConnectionCleanupJob(t *testing.T) { require.NoError(err) // Run the job. - require.NoError(job.Run(ctx)) + require.NoError(job.Run(ctx, 0)) // Assert connection state on both workers. assertConnections := func(workerId string, closed bool) { diff --git a/internal/session/query.go b/internal/session/query.go index 491c7555aa..5d512dc31c 100644 --- a/internal/session/query.go +++ b/internal/session/query.go @@ -341,17 +341,6 @@ update session_connection ) %s returning public_id; -` - deleteTerminated = ` -delete from session -using session_state -where - session.public_id = session_state.session_id -and - session_state.state = 'terminated' -and - lower(session_state.active_time_range) < wt_sub_seconds_from_now(@threshold_seconds) -; ` sessionCredentialRewrapQuery = ` select distinct @@ -457,6 +446,42 @@ values ` ) +// queries for the delete terminated sessions job +const ( + getDeleteJobParams = ` +with total (to_delete) as ( + select count(session_id) + from session_state + where session_state.state = 'terminated' + and lower(session_state.active_time_range) < wt_sub_seconds_from_now(@threshold_seconds) +), +params (batch_size) as ( + select batch_size + from session_delete_terminated_job +) +select total.to_delete as total_to_delete, + params.batch_size as batch_size, + wt_sub_seconds_from_now(@threshold_seconds) as window_start_time + from total, params; +` + setDeleteJobBatchSize = ` +update session_delete_terminated_job + set batch_size = @batch_size; +` + 
deleteTerminatedInBatch = ` +with batch (session_id) as ( + select session_id + from session_state + where state = 'terminated' + and lower(session_state.active_time_range) < @terminated_before + limit @batch_size +) +delete + from session + where public_id in (select session_id from batch); +` +) + func batchInsertSessionCredentialDynamic(creds []*DynamicCredential) (string, []any, error) { if len(creds) <= 0 { return "", nil, fmt.Errorf("empty slice of DynamicCredential, cannot build query") diff --git a/internal/session/repository_jobs.go b/internal/session/repository_jobs.go new file mode 100644 index 0000000000..250705c4f6 --- /dev/null +++ b/internal/session/repository_jobs.go @@ -0,0 +1,72 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package session + +import ( + "context" + "database/sql" + "time" + + "github.com/hashicorp/boundary/internal/db/timestamp" + "github.com/hashicorp/boundary/internal/errors" +) + +type deleteJobParams struct { + TotalToDelete int + BatchSize int + WindowStartTime *timestamp.Timestamp +} + +func (r *Repository) getDeleteJobParams(ctx context.Context, threshold time.Duration) (deleteJobParams, error) { + const op = "session.(Repository).getDeleteJobParams" + + args := []any{ + sql.Named("threshold_seconds", threshold.Seconds()), + } + rows, err := r.reader.Query(ctx, getDeleteJobParams, args) + if err != nil { + return deleteJobParams{}, errors.Wrap(ctx, err, op, errors.WithMsg("error getting parameters for delete terminated sessions job")) + } + defer rows.Close() + + var jobParams deleteJobParams + for rows.Next() { + if err := r.reader.ScanRows(ctx, rows, &jobParams); err != nil { + return deleteJobParams{}, errors.Wrap(ctx, err, op, errors.WithMsg("scan row failed")) + } + } + if err := rows.Err(); err != nil { + return deleteJobParams{}, errors.Wrap(ctx, err, op, errors.WithMsg("next row failed")) + } + return jobParams, nil +} + +func (r *Repository) setDeleteJobBatchSize(ctx context.Context, 
batchSize int) error { + const op = "session.(Repository).setDeleteJobBatchSize" + + args := []any{ + sql.Named("batch_size", batchSize), + } + + _, err := r.writer.Exec(ctx, setDeleteJobBatchSize, args) + if err != nil { + return errors.Wrap(ctx, err, op, errors.WithMsg("error setting delete job batch size")) + } + return nil +} + +func (r *Repository) deleteTerminatedSessionsBatch(ctx context.Context, terminatedBefore *timestamp.Timestamp, batchSize int) (int, error) { + const op = "session.(Repository).deleteTerminatedSessionsBatch" + + args := []any{ + sql.Named("terminated_before", terminatedBefore), + sql.Named("batch_size", batchSize), + } + + c, err := r.writer.Exec(ctx, deleteTerminatedInBatch, args) + if err != nil { + return 0, errors.Wrap(ctx, err, op, errors.WithMsg("error deleting terminated sessions")) + } + return c, nil +} diff --git a/internal/session/repository_jobs_test.go b/internal/session/repository_jobs_test.go new file mode 100644 index 0000000000..514e89f293 --- /dev/null +++ b/internal/session/repository_jobs_test.go @@ -0,0 +1,185 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package session + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/boundary/internal/db" + "github.com/hashicorp/boundary/internal/iam" + "github.com/hashicorp/boundary/internal/kms" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRepository_getDeleteJobParams(t *testing.T) { + ctx := context.Background() + conn, _ := db.TestSetup(t, "postgres") + rw := db.New(conn) + wrapper := db.TestWrapper(t) + iamRepo := iam.TestRepo(t, conn, wrapper) + kms := kms.TestKms(t, conn, wrapper) + repo, err := NewRepository(ctx, rw, rw, kms) + composedOf := TestSessionParams(t, conn, wrapper, iamRepo) + + const defaultBatchSize = 5000 + + cases := []struct { + sessionCount int + terminateCount int + threshold time.Duration + expectedBatchSize int + expectedTotalToDelete int + }{ + {0, 0, time.Nanosecond, defaultBatchSize, 0}, + {1, 1, time.Nanosecond, defaultBatchSize, 1}, + {1, 1, time.Hour, defaultBatchSize, 0}, + {10, 10, time.Nanosecond, defaultBatchSize, 10}, + {10, 4, time.Nanosecond, defaultBatchSize, 4}, + {10, 0, time.Nanosecond, defaultBatchSize, 0}, + {10, 10, time.Hour, defaultBatchSize, 0}, + } + + for _, tc := range cases { + t.Run(fmt.Sprintf("%d_%d_%s_%d", tc.sessionCount, tc.terminateCount, tc.threshold, tc.expectedBatchSize), func(t *testing.T) { + t.Cleanup(func() { + sdb, err := conn.SqlDB(ctx) + require.NoError(t, err) + _, err = sdb.Exec(`delete from session;`) + require.NoError(t, err) + }) + + for i := 0; i < tc.sessionCount; i++ { + s := TestSession(t, conn, wrapper, composedOf) + if i < tc.terminateCount { + _, err = repo.CancelSession(ctx, s.PublicId, s.Version) + require.NoError(t, err) + } + } + + c, err := repo.TerminateCompletedSessions(ctx) + require.NoError(t, err) + assert.Equal(t, tc.terminateCount, c) + + p, err := repo.getDeleteJobParams(ctx, tc.threshold) + require.NoError(t, err) + assert.Equal(t, tc.expectedBatchSize, p.BatchSize) 
+ assert.Equal(t, tc.expectedTotalToDelete, p.TotalToDelete) + }) + } +} + +func TestRepository_deleteTerminatedSessionsBatch(t *testing.T) { + ctx := context.Background() + conn, _ := db.TestSetup(t, "postgres") + rw := db.New(conn) + wrapper := db.TestWrapper(t) + iamRepo := iam.TestRepo(t, conn, wrapper) + kms := kms.TestKms(t, conn, wrapper) + repo, err := NewRepository(ctx, rw, rw, kms) + composedOf := TestSessionParams(t, conn, wrapper, iamRepo) + + cases := []struct { + sessionCount int + terminateCount int + batchSize int + expected int + }{ + {0, 0, 10, 0}, + {1, 1, 10, 1}, + {10, 10, 5, 5}, + {10, 2, 5, 2}, + {10, 0, 10, 0}, + } + + for _, tc := range cases { + t.Run(fmt.Sprintf("%d_%d_%d", tc.sessionCount, tc.terminateCount, tc.batchSize), func(t *testing.T) { + t.Cleanup(func() { + sdb, err := conn.SqlDB(ctx) + require.NoError(t, err) + _, err = sdb.Exec(`delete from session;`) + require.NoError(t, err) + }) + + // add initial group of sessions and terminate some of them + { + for i := 0; i < tc.sessionCount; i++ { + s := TestSession(t, conn, wrapper, composedOf) + if i < tc.terminateCount { + _, err = repo.CancelSession(ctx, s.PublicId, s.Version) + require.NoError(t, err) + } + } + + c, err := repo.TerminateCompletedSessions(ctx) + require.NoError(t, err) + assert.Equal(t, tc.terminateCount, c) + } + + // get the job parameters + p, err := repo.getDeleteJobParams(ctx, time.Nanosecond) + require.NoError(t, err) + assert.Equal(t, tc.terminateCount, p.TotalToDelete) + + // add more sessions to test the WindowStartTime + { + for i := 0; i < tc.sessionCount; i++ { + s := TestSession(t, conn, wrapper, composedOf) + if i < tc.terminateCount { + _, err = repo.CancelSession(ctx, s.PublicId, s.Version) + require.NoError(t, err) + } + } + + c, err := repo.TerminateCompletedSessions(ctx) + require.NoError(t, err) + assert.Equal(t, tc.terminateCount, c) + } + + c, err := repo.deleteTerminatedSessionsBatch(ctx, p.WindowStartTime, tc.batchSize) + require.NoError(t, 
err) + assert.Equal(t, tc.expected, c) + }) + } +} + +func TestRepository_setDeleteJobBatchSize(t *testing.T) { + ctx := context.Background() + conn, _ := db.TestSetup(t, "postgres") + rw := db.New(conn) + wrapper := db.TestWrapper(t) + kms := kms.TestKms(t, conn, wrapper) + repo, err := NewRepository(ctx, rw, rw, kms) + + cases := []struct { + batchSize int + expectErr bool + }{ + {-1, true}, + {0, true}, + {1, false}, + {9, false}, + {10, false}, + {10000, false}, + {10001, false}, + } + + for _, tc := range cases { + t.Run(fmt.Sprintf("%d_%t", tc.batchSize, tc.expectErr), func(t *testing.T) { + err = repo.setDeleteJobBatchSize(ctx, tc.batchSize) + if tc.expectErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + + p, err := repo.getDeleteJobParams(ctx, time.Nanosecond) + require.NoError(t, err) + assert.Equal(t, tc.batchSize, p.BatchSize) + }) + } +} diff --git a/internal/session/repository_session.go b/internal/session/repository_session.go index 864db5c26d..0b82821a66 100644 --- a/internal/session/repository_session.go +++ b/internal/session/repository_session.go @@ -909,20 +909,6 @@ func (r *Repository) CheckIfNotActive(ctx context.Context, reportedSessions []st return notActive, nil } -func (r *Repository) deleteSessionsTerminatedBefore(ctx context.Context, threshold time.Duration) (int, error) { - const op = "session.(Repository).deleteTerminated" - - args := []any{ - sql.Named("threshold_seconds", threshold.Seconds()), - } - - c, err := r.writer.Exec(ctx, deleteTerminated, args) - if err != nil { - return 0, errors.Wrap(ctx, err, op, errors.WithMsg("error deleting terminated sessions")) - } - return c, nil -} - func fetchStates(ctx context.Context, r db.Reader, sessionId string, opt ...db.Option) ([]*State, error) { const op = "session.fetchStates" var states []*State diff --git a/internal/session/repository_session_test.go b/internal/session/repository_session_test.go index 9e28a90818..a3e03d6bd7 100644 --- 
a/internal/session/repository_session_test.go +++ b/internal/session/repository_session_test.go @@ -5,7 +5,6 @@ package session import ( "context" - "fmt" "testing" "time" @@ -1827,94 +1826,6 @@ func TestRepository_deleteTargetFKey(t *testing.T) { } } -func TestRepository_deleteTerminated(t *testing.T) { - ctx := context.Background() - conn, _ := db.TestSetup(t, "postgres") - rw := db.New(conn) - wrapper := db.TestWrapper(t) - iamRepo := iam.TestRepo(t, conn, wrapper) - kms := kms.TestKms(t, conn, wrapper) - repo, err := NewRepository(ctx, rw, rw, kms) - composedOf := TestSessionParams(t, conn, wrapper, iamRepo) - - cases := []struct { - sessionCount int - terminateCount int - threshold time.Duration - expected int - }{ - { - 0, - 0, - time.Nanosecond, - 0, - }, - { - 1, - 1, - time.Nanosecond, - 1, - }, - { - 1, - 1, - time.Hour, - 0, - }, - { - 10, - 10, - time.Nanosecond, - 10, - }, - { - 10, - 4, - time.Nanosecond, - 4, - }, - { - 10, - 0, - time.Nanosecond, - 0, - }, - { - 10, - 10, - time.Hour, - 0, - }, - } - - for _, tc := range cases { - t.Run(fmt.Sprintf("%d_%d_%s", tc.sessionCount, tc.terminateCount, tc.threshold), func(t *testing.T) { - t.Cleanup(func() { - sdb, err := conn.SqlDB(ctx) - require.NoError(t, err) - _, err = sdb.Exec(`delete from session;`) - require.NoError(t, err) - }) - - for i := 0; i < tc.sessionCount; i++ { - s := TestSession(t, conn, wrapper, composedOf) - if i < tc.terminateCount { - _, err = repo.CancelSession(ctx, s.PublicId, s.Version) - require.NoError(t, err) - } - - } - c, err := repo.TerminateCompletedSessions(ctx) - require.NoError(t, err) - assert.Equal(t, tc.terminateCount, c) - - c, err = repo.deleteSessionsTerminatedBefore(ctx, tc.threshold) - require.NoError(t, err) - assert.Equal(t, tc.expected, c) - }) - } -} - func Test_decrypt(t *testing.T) { conn, _ := db.TestSetup(t, "postgres") wrapper := db.TestWrapper(t) diff --git a/internal/snapshot/snapshot_job.go b/internal/snapshot/snapshot_job.go index 
6bda0cd250..6ec92cf107 100644 --- a/internal/snapshot/snapshot_job.go +++ b/internal/snapshot/snapshot_job.go @@ -41,7 +41,7 @@ func (c *snapshotJob) Status() scheduler.JobStatus { // Run performs the required work depending on the implementation. // The context is used to notify the job that it should exit early. -func (c *snapshotJob) Run(ctx context.Context) error { +func (c *snapshotJob) Run(ctx context.Context, _ time.Duration) error { const op = "snapshot.(snapshotJob).Run" err := runFn(ctx, c) return err diff --git a/internal/ui/VERSION b/internal/ui/VERSION index b4c97c4abb..87cf729ddb 100644 --- a/internal/ui/VERSION +++ b/internal/ui/VERSION @@ -1,4 +1,4 @@ -0d55812f647689f8c735d5c5f6aea0a524d18557 +0ff91f9182cee072d3f4658d7ff7929cda23126f # This file determines the version of the UI to embed in the boundary binary. # Update this file by running 'make update-ui-version' from the root of this repo. # Set UI_COMMITISH when running the above target to update to a specific version. 
diff --git a/plugins/boundary/mains/aws/go.mod b/plugins/boundary/mains/aws/go.mod index 8d228bdc36..99d1441d17 100644 --- a/plugins/boundary/mains/aws/go.mod +++ b/plugins/boundary/mains/aws/go.mod @@ -1,10 +1,10 @@ module github.com/hashicorp/boundary/plugins/boundary/mains/aws -go 1.23.1 +go 1.23.3 require ( - github.com/hashicorp/boundary-plugin-aws v0.4.0 - github.com/hashicorp/boundary/sdk v0.0.43-0.20240717182311-a20aae98794a + github.com/hashicorp/boundary-plugin-aws v0.4.2 + github.com/hashicorp/boundary/sdk v0.0.49 ) require ( diff --git a/plugins/boundary/mains/aws/go.sum b/plugins/boundary/mains/aws/go.sum index edfabe9d5b..f07a5976b7 100644 --- a/plugins/boundary/mains/aws/go.sum +++ b/plugins/boundary/mains/aws/go.sum @@ -74,10 +74,10 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hashicorp/boundary-plugin-aws v0.4.0 h1:n0CNeswkTvrREXxzOIoaMjCs7FVWwMHceHKiQNwz3KA= -github.com/hashicorp/boundary-plugin-aws v0.4.0/go.mod h1:p6cicTmRGFw9qKvCbABIxsbRkqJq+jjwy0Ih+Ns3RHg= -github.com/hashicorp/boundary/sdk v0.0.43-0.20240717182311-a20aae98794a h1:SwVze6sYE5o+J9qdcgj29auY1k6O8oSo9DC2+2Gb9rw= -github.com/hashicorp/boundary/sdk v0.0.43-0.20240717182311-a20aae98794a/go.mod h1:9iOT7kDM6mYcSkKxNuZlv8rP7U5BG1kXoevjLLL8lNQ= +github.com/hashicorp/boundary-plugin-aws v0.4.2 h1:qPBl61Oow/f8UZLeXYqrxsq1ggeLmqepeM/9VwRzRYI= +github.com/hashicorp/boundary-plugin-aws v0.4.2/go.mod h1:iDTNMMJ7mfmaWSUIML65uphXTUJYNRbm683exxQ/x7w= +github.com/hashicorp/boundary/sdk v0.0.49 h1:XOb6mSKyrU/wI20+5xTYBHQUP7eIeKcLxKSCpCs4yzM= +github.com/hashicorp/boundary/sdk v0.0.49/go.mod h1:IHP79to8aIi22FiY58pgBqJL96/U9D8ZAUhS2DdC+Us= github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= diff --git a/plugins/boundary/mains/azure/go.mod b/plugins/boundary/mains/azure/go.mod index 8c62c54681..024576c752 100644 --- a/plugins/boundary/mains/azure/go.mod +++ b/plugins/boundary/mains/azure/go.mod @@ -28,8 +28,8 @@ require ( github.com/hashicorp/eventlogger v0.2.6-0.20231025104552-802587e608f0 // indirect github.com/hashicorp/eventlogger/filters/encrypt v0.1.8-0.20231025104552-802587e608f0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.5.0 // indirect - github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 // indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect + github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.0 // indirect @@ -51,11 +51,11 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.8.1 // indirect github.com/stretchr/testify v1.8.4 // indirect - golang.org/x/crypto v0.14.0 // indirect + golang.org/x/crypto v0.17.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.11.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect google.golang.org/grpc v1.59.0 // indirect diff --git a/plugins/boundary/mains/azure/go.sum b/plugins/boundary/mains/azure/go.sum index d8dd90bff3..bf8d673665 100644 --- a/plugins/boundary/mains/azure/go.sum +++ b/plugins/boundary/mains/azure/go.sum @@ -168,12 +168,12 @@ 
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 h1:jrnDfQm2hCQ0/hEselgqzV4fK16gpZoY0OWGZpVPNHM= github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5/go.mod h1:psh1qKep5ukvuNobFY/hCybuudlkkACpmazOsCgX5Rg= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 h1:1ZuhfnZgRnLK8S0KovJkoTCRIQId5pv3sDR7pG5VQBw= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14/go.mod h1:0dWtzl2ilqKpavgM3id/kFK9L3tjo6fS4OhbVPSYpnQ= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 h1:WZeXfD26QMWYC35at25KgE021SF9L3u9UMHK8fJAdV0= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16/go.mod h1:ZiKZctjRTLEppuRwrttWkp71VYMbTTCkazK4xT7U/NQ= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= @@ -302,8 +302,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -419,8 +419,8 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -428,8 +428,8 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text 
v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/plugins/boundary/mains/minio/go.mod b/plugins/boundary/mains/minio/go.mod index 2283ebb596..8f685f59bf 100644 --- a/plugins/boundary/mains/minio/go.mod +++ b/plugins/boundary/mains/minio/go.mod @@ -19,7 +19,7 @@ require ( github.com/hashicorp/eventlogger v0.2.6-0.20231025104552-802587e608f0 // indirect github.com/hashicorp/eventlogger/filters/encrypt v0.1.8-0.20231025104552-802587e608f0 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect - github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 // indirect + github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.5.2 // indirect github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 // indirect diff --git a/plugins/boundary/mains/minio/go.sum b/plugins/boundary/mains/minio/go.sum index dd2ce2d68c..f6eca6efdd 100644 --- a/plugins/boundary/mains/minio/go.sum +++ b/plugins/boundary/mains/minio/go.sum @@ -75,8 +75,8 @@ github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+ github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 h1:jrnDfQm2hCQ0/hEselgqzV4fK16gpZoY0OWGZpVPNHM= 
github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5/go.mod h1:psh1qKep5ukvuNobFY/hCybuudlkkACpmazOsCgX5Rg= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 h1:1ZuhfnZgRnLK8S0KovJkoTCRIQId5pv3sDR7pG5VQBw= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14/go.mod h1:0dWtzl2ilqKpavgM3id/kFK9L3tjo6fS4OhbVPSYpnQ= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 h1:WZeXfD26QMWYC35at25KgE021SF9L3u9UMHK8fJAdV0= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16/go.mod h1:ZiKZctjRTLEppuRwrttWkp71VYMbTTCkazK4xT7U/NQ= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= diff --git a/plugins/kms/mains/alicloudkms/go.mod b/plugins/kms/mains/alicloudkms/go.mod index 0c43eda07d..4a9b39af4d 100644 --- a/plugins/kms/mains/alicloudkms/go.mod +++ b/plugins/kms/mains/alicloudkms/go.mod @@ -3,8 +3,8 @@ module github.com/hashicorp/boundary/plugins/kms/mains/alicloudkms go 1.23.0 require ( - github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 - github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.2 + github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7 + github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.3 ) require ( @@ -13,7 +13,7 @@ require ( github.com/fatih/color v1.14.1 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect - github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 // indirect + github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 // indirect github.com/hashicorp/go-plugin v1.5.2 // indirect github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect @@ -31,8 +31,8 @@ require ( github.com/rogpeppe/go-internal v1.8.1 // indirect github.com/stretchr/testify v1.8.4 // indirect golang.org/x/net v0.17.0 // indirect - golang.org/x/sys 
v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/grpc v1.59.0 // indirect google.golang.org/protobuf v1.31.0 // indirect diff --git a/plugins/kms/mains/alicloudkms/go.sum b/plugins/kms/mains/alicloudkms/go.sum index 15082fab99..7d776eeab5 100644 --- a/plugins/kms/mains/alicloudkms/go.sum +++ b/plugins/kms/mains/alicloudkms/go.sum @@ -18,12 +18,12 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 h1:jrnDfQm2hCQ0/hEselgqzV4fK16gpZoY0OWGZpVPNHM= -github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5/go.mod h1:psh1qKep5ukvuNobFY/hCybuudlkkACpmazOsCgX5Rg= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 h1:1ZuhfnZgRnLK8S0KovJkoTCRIQId5pv3sDR7pG5VQBw= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14/go.mod h1:0dWtzl2ilqKpavgM3id/kFK9L3tjo6fS4OhbVPSYpnQ= -github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.2 h1:j/2W4nWgzUMGrtRnfvFj60PdwooKBGDY4/fec7vvjhc= -github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.2/go.mod h1:heY2PS1SGU0cMamgv+zId/sKT+XFHaf61bLOSnP1Gb8= +github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7 h1:gM4OwbF16Cmfxt2QMkoGMQbRTfYFZLvDMPgU3rM3KIo= +github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7/go.mod h1:7ZMHVluyqgHgEuTADeDzFNWoA9mnyPfdiK8Tk2Bct1c= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 h1:WZeXfD26QMWYC35at25KgE021SF9L3u9UMHK8fJAdV0= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16/go.mod h1:ZiKZctjRTLEppuRwrttWkp71VYMbTTCkazK4xT7U/NQ= 
+github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.3 h1:36Pxy8BQd7DAJ2Mk6vuJlIjqQ80e20vlO7a4Ep3RTOg= +github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.3/go.mod h1:heY2PS1SGU0cMamgv+zId/sKT+XFHaf61bLOSnP1Gb8= github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 h1:ET4pqyjiGmY09R5y+rSd70J2w45CtbWDNvGqWp/R3Ng= @@ -98,10 +98,10 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= diff --git a/plugins/kms/mains/awskms/go.mod 
b/plugins/kms/mains/awskms/go.mod index a3a7596f90..e40d6daed4 100644 --- a/plugins/kms/mains/awskms/go.mod +++ b/plugins/kms/mains/awskms/go.mod @@ -3,19 +3,19 @@ module github.com/hashicorp/boundary/plugins/kms/mains/awskms go 1.23.0 require ( - github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 - github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.8 + github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7 + github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.10 ) require ( - github.com/aws/aws-sdk-go v1.44.214 // indirect + github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fatih/color v1.14.1 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect - github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 // indirect + github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.5.2 // indirect github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 // indirect @@ -32,10 +32,10 @@ require ( github.com/rogpeppe/go-internal v1.8.1 // indirect github.com/stretchr/testify v1.8.4 // indirect golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/grpc v1.59.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/plugins/kms/mains/awskms/go.sum b/plugins/kms/mains/awskms/go.sum index fa631a2b88..0bcde58051 100644 --- a/plugins/kms/mains/awskms/go.sum +++ b/plugins/kms/mains/awskms/go.sum @@ -1,6 +1,6 @@ 
github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.44.214 h1:YzDuC+9UtrAOUkItlK7l3BvKI9o6qAog9X8i289HORc= -github.com/aws/aws-sdk-go v1.44.214/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -26,12 +26,12 @@ github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/S github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 h1:jrnDfQm2hCQ0/hEselgqzV4fK16gpZoY0OWGZpVPNHM= -github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5/go.mod h1:psh1qKep5ukvuNobFY/hCybuudlkkACpmazOsCgX5Rg= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 h1:1ZuhfnZgRnLK8S0KovJkoTCRIQId5pv3sDR7pG5VQBw= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14/go.mod h1:0dWtzl2ilqKpavgM3id/kFK9L3tjo6fS4OhbVPSYpnQ= -github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.8 h1:/x3Vx8+G2bcx5J4euSphPy/5OeDC2i7C4Vtak5r8qAw= -github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.8/go.mod h1:DcXbvVpgNWbxGmxgmu3QN64bEydMu14Cpe34RRR30HY= +github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7 h1:gM4OwbF16Cmfxt2QMkoGMQbRTfYFZLvDMPgU3rM3KIo= +github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7/go.mod h1:7ZMHVluyqgHgEuTADeDzFNWoA9mnyPfdiK8Tk2Bct1c= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 
h1:WZeXfD26QMWYC35at25KgE021SF9L3u9UMHK8fJAdV0= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16/go.mod h1:ZiKZctjRTLEppuRwrttWkp71VYMbTTCkazK4xT7U/NQ= +github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.10 h1:YOSmJpqZt2X3KL0JUlKdih3WjpDlS9jQPyr6Etdh1GE= +github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.10/go.mod h1:eX4b0InOUfJ3NjfNWlJruBDT3rHXxOVw+7qNFmtjNbo= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= @@ -90,49 +90,25 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.17.0 
h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 
h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod 
h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= @@ -140,8 +116,8 @@ google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= diff --git a/plugins/kms/mains/azurekeyvault/go.mod b/plugins/kms/mains/azurekeyvault/go.mod index a68bbd1ef6..c36962dc52 100644 --- a/plugins/kms/mains/azurekeyvault/go.mod +++ b/plugins/kms/mains/azurekeyvault/go.mod @@ -3,8 +3,8 @@ module github.com/hashicorp/boundary/plugins/kms/mains/azurekeyvault go 1.23.0 require ( - github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 - github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.10 + github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7 + github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.11 ) require ( @@ -28,7 +28,7 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/google/uuid v1.3.1 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect - github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 // indirect + github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 // indirect github.com/hashicorp/go-plugin 
v1.5.2 // indirect github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect @@ -42,9 +42,9 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.8.1 // indirect github.com/stretchr/testify v1.8.4 // indirect - golang.org/x/crypto v0.15.0 // indirect + golang.org/x/crypto v0.17.0 // indirect golang.org/x/net v0.18.0 // indirect - golang.org/x/sys v0.14.0 // indirect + golang.org/x/sys v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/grpc v1.59.0 // indirect diff --git a/plugins/kms/mains/azurekeyvault/go.sum b/plugins/kms/mains/azurekeyvault/go.sum index 9dae6a3662..ee346e702d 100644 --- a/plugins/kms/mains/azurekeyvault/go.sum +++ b/plugins/kms/mains/azurekeyvault/go.sum @@ -53,12 +53,12 @@ github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 h1:jrnDfQm2hCQ0/hEselgqzV4fK16gpZoY0OWGZpVPNHM= -github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5/go.mod h1:psh1qKep5ukvuNobFY/hCybuudlkkACpmazOsCgX5Rg= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 h1:1ZuhfnZgRnLK8S0KovJkoTCRIQId5pv3sDR7pG5VQBw= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14/go.mod h1:0dWtzl2ilqKpavgM3id/kFK9L3tjo6fS4OhbVPSYpnQ= -github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.10 h1:g4F+mrwvfCJJIrLHbKhClClFLz/+T42zASm6S1Av38s= -github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.10/go.mod h1:+nZqburV15IZlvfxk29XUdwWour3PkYxRaOFesx37OI= +github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7 
h1:gM4OwbF16Cmfxt2QMkoGMQbRTfYFZLvDMPgU3rM3KIo= +github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7/go.mod h1:7ZMHVluyqgHgEuTADeDzFNWoA9mnyPfdiK8Tk2Bct1c= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 h1:WZeXfD26QMWYC35at25KgE021SF9L3u9UMHK8fJAdV0= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16/go.mod h1:ZiKZctjRTLEppuRwrttWkp71VYMbTTCkazK4xT7U/NQ= +github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.11 h1:/7SKkYIhA8cr3l8m1EKT6Q90bPoSVqqVBuQ6HgoMIkw= +github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.11/go.mod h1:LepS5s6ESGE0qQMpYaui5lX+mQYeiYiy06VzwWRioO8= github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 h1:ET4pqyjiGmY09R5y+rSd70J2w45CtbWDNvGqWp/R3Ng= @@ -113,8 +113,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= -golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -140,8 +140,8 @@ golang.org/x/sys 
v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= diff --git a/plugins/kms/mains/gcpckms/go.mod b/plugins/kms/mains/gcpckms/go.mod index c4c2acdede..98b71b61a4 100644 --- a/plugins/kms/mains/gcpckms/go.mod +++ b/plugins/kms/mains/gcpckms/go.mod @@ -3,8 +3,8 @@ module github.com/hashicorp/boundary/plugins/kms/mains/gcpckms go 1.23.0 require ( - github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 - github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.10 + github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7 + github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.12 ) require ( @@ -20,7 +20,7 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect - github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 // indirect + github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 // indirect github.com/hashicorp/go-plugin v1.5.2 // indirect github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect @@ -33,12 +33,12 @@ require 
( github.com/rogpeppe/go-internal v1.8.1 // indirect github.com/stretchr/testify v1.8.4 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.14.0 // indirect + golang.org/x/crypto v0.17.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.13.0 // indirect golang.org/x/sync v0.4.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect google.golang.org/api v0.149.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect diff --git a/plugins/kms/mains/gcpckms/go.sum b/plugins/kms/mains/gcpckms/go.sum index 9c0afe2c3c..33783840ab 100644 --- a/plugins/kms/mains/gcpckms/go.sum +++ b/plugins/kms/mains/gcpckms/go.sum @@ -61,12 +61,12 @@ github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56 github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 h1:jrnDfQm2hCQ0/hEselgqzV4fK16gpZoY0OWGZpVPNHM= -github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5/go.mod h1:psh1qKep5ukvuNobFY/hCybuudlkkACpmazOsCgX5Rg= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 h1:1ZuhfnZgRnLK8S0KovJkoTCRIQId5pv3sDR7pG5VQBw= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14/go.mod h1:0dWtzl2ilqKpavgM3id/kFK9L3tjo6fS4OhbVPSYpnQ= -github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.10 h1:/HAAj2i/jeo2GqdWO1XjlutpaKzZaeBe3scvuujAJPg= -github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.10/go.mod h1:HSaOaX/lv3ShCdilUYbOTPnSvmoZ9xtQhgw+8hYcZkg= +github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7 h1:gM4OwbF16Cmfxt2QMkoGMQbRTfYFZLvDMPgU3rM3KIo= 
+github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7/go.mod h1:7ZMHVluyqgHgEuTADeDzFNWoA9mnyPfdiK8Tk2Bct1c= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 h1:WZeXfD26QMWYC35at25KgE021SF9L3u9UMHK8fJAdV0= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16/go.mod h1:ZiKZctjRTLEppuRwrttWkp71VYMbTTCkazK4xT7U/NQ= +github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.12 h1:PCqWzT/Hii0KL07JsBZ3lJbv/wx02IAHYlhWQq8rxRY= +github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.12/go.mod h1:HSaOaX/lv3ShCdilUYbOTPnSvmoZ9xtQhgw+8hYcZkg= github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 h1:ET4pqyjiGmY09R5y+rSd70J2w45CtbWDNvGqWp/R3Ng= @@ -117,8 +117,8 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -150,13 +150,13 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys 
v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= diff --git a/plugins/kms/mains/ocikms/go.mod b/plugins/kms/mains/ocikms/go.mod index 07fd1460eb..08deba64ff 100644 --- a/plugins/kms/mains/ocikms/go.mod +++ b/plugins/kms/mains/ocikms/go.mod @@ -3,7 +3,7 @@ module github.com/hashicorp/boundary/plugins/kms/mains/ocikms go 1.23.0 require ( - github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 + github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7 github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.8 ) @@ -12,7 +12,7 @@ require ( 
github.com/fatih/color v1.14.1 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect - github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 // indirect + github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 // indirect github.com/hashicorp/go-plugin v1.5.2 // indirect github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect @@ -27,8 +27,8 @@ require ( github.com/sony/gobreaker v0.5.0 // indirect github.com/stretchr/testify v1.8.4 // indirect golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/grpc v1.59.0 // indirect google.golang.org/protobuf v1.31.0 // indirect diff --git a/plugins/kms/mains/ocikms/go.sum b/plugins/kms/mains/ocikms/go.sum index ef5345e7f5..d38972d215 100644 --- a/plugins/kms/mains/ocikms/go.sum +++ b/plugins/kms/mains/ocikms/go.sum @@ -14,10 +14,10 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 h1:jrnDfQm2hCQ0/hEselgqzV4fK16gpZoY0OWGZpVPNHM= -github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5/go.mod h1:psh1qKep5ukvuNobFY/hCybuudlkkACpmazOsCgX5Rg= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 h1:1ZuhfnZgRnLK8S0KovJkoTCRIQId5pv3sDR7pG5VQBw= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14/go.mod h1:0dWtzl2ilqKpavgM3id/kFK9L3tjo6fS4OhbVPSYpnQ= +github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7 h1:gM4OwbF16Cmfxt2QMkoGMQbRTfYFZLvDMPgU3rM3KIo= 
+github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7/go.mod h1:7ZMHVluyqgHgEuTADeDzFNWoA9mnyPfdiK8Tk2Bct1c= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 h1:WZeXfD26QMWYC35at25KgE021SF9L3u9UMHK8fJAdV0= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16/go.mod h1:ZiKZctjRTLEppuRwrttWkp71VYMbTTCkazK4xT7U/NQ= github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.8 h1:F2RNYvXq9yJKbXRxfHBSzOCx0YxRdkaQ8qu0EECeu5U= github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.8/go.mod h1:ULlMyM1QKNuq1JIENcNCRAkgo/RYxxCkm26pjR6w/ko= github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= @@ -77,10 +77,10 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod 
h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= diff --git a/plugins/kms/mains/transit/go.mod b/plugins/kms/mains/transit/go.mod index 0c29b17135..f259248138 100644 --- a/plugins/kms/mains/transit/go.mod +++ b/plugins/kms/mains/transit/go.mod @@ -3,8 +3,8 @@ module github.com/hashicorp/boundary/plugins/kms/mains/transit go 1.23.0 require ( - github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 - github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.12-0.20240510224000-05c77e842118 + github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7 + github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.12 ) require ( @@ -15,8 +15,8 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.5.0 // indirect - github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 // indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect + github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.2 // indirect diff --git a/plugins/kms/mains/transit/go.sum b/plugins/kms/mains/transit/go.sum index 7be96a2a09..1e9ca25c2b 100644 --- a/plugins/kms/mains/transit/go.sum +++ b/plugins/kms/mains/transit/go.sum @@ -27,14 +27,14 @@ github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 
h1:jrnDfQm2hCQ0/hEselgqzV4fK16gpZoY0OWGZpVPNHM= -github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5/go.mod h1:psh1qKep5ukvuNobFY/hCybuudlkkACpmazOsCgX5Rg= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 h1:1ZuhfnZgRnLK8S0KovJkoTCRIQId5pv3sDR7pG5VQBw= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14/go.mod h1:0dWtzl2ilqKpavgM3id/kFK9L3tjo6fS4OhbVPSYpnQ= -github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.12-0.20240510224000-05c77e842118 h1:Znp4cktSGpDSk3CQvdmluVoozB8VsgUDKEUNrmzy/Uk= -github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.12-0.20240510224000-05c77e842118/go.mod h1:YRqguGarF7kbHeojTPkanH3qvjbEP2pelq5b0ifaQ1M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7 h1:gM4OwbF16Cmfxt2QMkoGMQbRTfYFZLvDMPgU3rM3KIo= +github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7/go.mod h1:7ZMHVluyqgHgEuTADeDzFNWoA9mnyPfdiK8Tk2Bct1c= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 h1:WZeXfD26QMWYC35at25KgE021SF9L3u9UMHK8fJAdV0= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16/go.mod h1:ZiKZctjRTLEppuRwrttWkp71VYMbTTCkazK4xT7U/NQ= +github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.12 h1:E8pzzF7i44OZCYDol+U7VbTBmHe65/6dx1nYxS0P1k0= +github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.12/go.mod h1:YRqguGarF7kbHeojTPkanH3qvjbEP2pelq5b0ifaQ1M= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= diff --git a/sdk/go.mod b/sdk/go.mod index e972dd45f7..1cb1fd78da 100644 --- a/sdk/go.mod +++ b/sdk/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/boundary/sdk -go 1.23.1 +go 1.23.3 require ( 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 @@ -20,7 +20,7 @@ require ( require ( github.com/Masterminds/semver/v3 v3.2.0 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect - github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 // indirect + github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7 // indirect github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect @@ -39,7 +39,7 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/google/uuid v1.4.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 + github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-secure-stdlib/configutil/v2 v2.0.11 github.com/hashicorp/go-secure-stdlib/listenerutil v0.1.9 // indirect diff --git a/sdk/go.sum b/sdk/go.sum index ac13d53c2a..63b5ca51bd 100644 --- a/sdk/go.sum +++ b/sdk/go.sum @@ -49,8 +49,10 @@ github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+ github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5 h1:jrnDfQm2hCQ0/hEselgqzV4fK16gpZoY0OWGZpVPNHM= github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.5/go.mod h1:psh1qKep5ukvuNobFY/hCybuudlkkACpmazOsCgX5Rg= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14 h1:1ZuhfnZgRnLK8S0KovJkoTCRIQId5pv3sDR7pG5VQBw= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.14/go.mod h1:0dWtzl2ilqKpavgM3id/kFK9L3tjo6fS4OhbVPSYpnQ= +github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7 h1:gM4OwbF16Cmfxt2QMkoGMQbRTfYFZLvDMPgU3rM3KIo= +github.com/hashicorp/go-kms-wrapping/plugin/v2 v2.0.7/go.mod h1:7ZMHVluyqgHgEuTADeDzFNWoA9mnyPfdiK8Tk2Bct1c= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 h1:WZeXfD26QMWYC35at25KgE021SF9L3u9UMHK8fJAdV0= 
+github.com/hashicorp/go-kms-wrapping/v2 v2.0.16/go.mod h1:ZiKZctjRTLEppuRwrttWkp71VYMbTTCkazK4xT7U/NQ= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= diff --git a/testing/internal/e2e/infra/docker.go b/testing/internal/e2e/infra/docker.go index 97bb76bafe..1a412f97f7 100644 --- a/testing/internal/e2e/infra/docker.go +++ b/testing/internal/e2e/infra/docker.go @@ -172,7 +172,7 @@ func StartBoundary(t testing.TB, pool *dockertest.Pool, network *dockertest.Netw Mounts: []string{path.Dir(boundaryConfigFilePath) + ":/boundary/"}, Name: "boundary", Networks: []*dockertest.Network{network}, - ExposedPorts: []string{"9200", "9201", "9202", "9203"}, + ExposedPorts: []string{"9200/tcp", "9201/tcp", "9202/tcp", "9203/tcp"}, PortBindings: map[docker.Port][]docker.PortBinding{ "9200/tcp": {{HostIP: "localhost", HostPort: "9200/tcp"}}, "9201/tcp": {{HostIP: "localhost", HostPort: "9201/tcp"}}, @@ -212,7 +212,7 @@ func StartVault(t testing.TB, pool *dockertest.Pool, network *dockertest.Network }, Name: "vault", Networks: []*dockertest.Network{network}, - ExposedPorts: []string{"8200"}, + ExposedPorts: []string{"8200/tcp"}, PortBindings: map[docker.Port][]docker.PortBinding{ "8200/tcp": {{HostIP: "localhost", HostPort: "8210/tcp"}}, }, diff --git a/testing/internal/e2e/tests/database/migration_test.go b/testing/internal/e2e/tests/database/migration_test.go index 2912536628..fd6207743c 100644 --- a/testing/internal/e2e/tests/database/migration_test.go +++ b/testing/internal/e2e/tests/database/migration_test.go @@ -181,16 +181,17 @@ func setupEnvironment(t testing.TB, c *config, boundaryRepo, boundaryTag string) t.Log("Waiting for Boundary to finish loading...") err = pool.Retry(func() error { - response, err := http.Get(b.UriLocalhost) + response, 
err := http.Get(fmt.Sprintf("%s/health", b.UriLocalhost)) if err != nil { - t.Logf("Could not access Boundary URL: %s. Retrying...", err.Error()) + t.Logf("Could not access health endpoint: %s. Retrying...", err.Error()) return err } + defer response.Body.Close() if response.StatusCode != http.StatusOK { - return fmt.Errorf("Could not connect to %s. Status Code: %d", b.UriLocalhost, response.StatusCode) + return fmt.Errorf("Health check returned an error. Status Code: %d", response.StatusCode) } return nil }) require.NoError(t, err) diff --git a/testing/internal/e2e/tests/database/testdata/boundary-config.hcl b/testing/internal/e2e/tests/database/testdata/boundary-config.hcl index d814774170..f191c22575 100644 --- a/testing/internal/e2e/tests/database/testdata/boundary-config.hcl +++ b/testing/internal/e2e/tests/database/testdata/boundary-config.hcl @@ -91,7 +91,7 @@ events { ] file { - path = "/logs" + path = "/boundary/logs" file_name = "audit.log" } diff --git a/version/VERSION b/version/VERSION index 47d04a5288..de564aec2e 100644 --- a/version/VERSION +++ b/version/VERSION @@ -1 +1 @@ -0.18.0 \ No newline at end of file +0.18.3 \ No newline at end of file diff --git a/website/content/docs/api-clients/api/index.mdx b/website/content/docs/api-clients/api/index.mdx index 7281f4f12d..613c0d738b 100644 --- a/website/content/docs/api-clients/api/index.mdx +++ b/website/content/docs/api-clients/api/index.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: API -description: |- - Boundary's HTTP API standards +page_title: API overview +description: >- + Learn about Boundary's HTTP API standards, status codes, path layout, methods, and headers. Understand how rate limiting helps manage system resources. 
--- # API diff --git a/website/content/docs/api-clients/api/pagination.mdx b/website/content/docs/api-clients/api/pagination.mdx index 1f21fd1ff7..bfd2e80c65 100644 --- a/website/content/docs/api-clients/api/pagination.mdx +++ b/website/content/docs/api-clients/api/pagination.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: API list pagination -description: Learn how the API pagination and cache works in Boundary to prevent system resources from being overwhlemed and to help you find resources. +description: >- + Learn about API pagination and how the cache works in Boundary to prevent system resources from being overwhelmed and to help you find resources. --- # API list pagination diff --git a/website/content/docs/api-clients/api/rate-limiting.mdx b/website/content/docs/api-clients/api/rate-limiting.mdx index 16d5feedaf..fdaa13293d 100644 --- a/website/content/docs/api-clients/api/rate-limiting.mdx +++ b/website/content/docs/api-clients/api/rate-limiting.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: API rate limiting -description: Learn how API rate limiting lets you configure limits on the rates of API requests in Boundary to help manage resources and prevent them from being overwhelmed. +description: >- + Learn how API rate limiting lets you configure limits on the rates of API requests in Boundary to help manage resources and prevent them from being overwhelmed. --- # Rate limiting diff --git a/website/content/docs/api-clients/client-agent.mdx b/website/content/docs/api-clients/client-agent.mdx new file mode 100644 index 0000000000..2e75365493 --- /dev/null +++ b/website/content/docs/api-clients/client-agent.mdx @@ -0,0 +1,665 @@ +--- +layout: docs +page_title: Client Agent overview +description: >- + Learn how the Boundary Client Agent intercepts DNS requests as the primary resolver on the system, allowing Boundary to proxy connections transparently. 
+--- + +# Boundary Client Agent + +@include 'alerts/enterprise-only.mdx' + +@include 'alerts/beta.mdx' + +The Boundary Client Agent is Boundary's DNS daemon. +When the Boundary Client Agent runs alongside an authenticated Boundary client, the agent establishes itself as the primary resolver on the system and intercepts DNS requests. + +If you enter a hostname that matches a Boundary alias that the client is authorized to establish a session to, Boundary automatically generates the session and transparently proxies the connection on your behalf. +If the Boundary Client Agent cannot find an alias, or if there are any issues with authentication, network connectivity, or latency, the Client Agent defers DNS resolution to the previously configured DNS resolvers. + +## Security + +When you successfully authorize a session on a Boundary controller, the response includes a list of any brokered credentials, which include the decoded secrets. +When the Boundary Client Agent receives a DNS request, Boundary creates a new session. +An OS user can only connect to an authorized session managed by the Boundary Client Agent daemon if they are the same OS user that added the Boundary auth token used for authorizing the session. + + + +Currently, you cannot authenticate to multiple Boundary controllers at once. +If you authenticate to a different Boundary controller, any existing sessions are terminated and any new transparent sessions would be established with the new controller. + + + +The Boundary Client Agent stores the credentials and some other information related to the session in memory. +The in-memory store removes the session information: + +- when the session ends. +- if the auth token stored in the Boundary expires. +- if the current OS user authenticates with a different Boundary user. +- if the current OS user authenticates to a new Boundary controller. +- if the Boundary Client Agent is paused. +- if the Boundary Client Agent is terminated. 
+ +API requests are authenticated in the same way as session proxy access. + +Credential brokering is supported for transparent sessions. +A notification appears when you establish a session against a target that is configured with credential brokering. +You can retrieve the credentials later using the following command: + +```shell-session +$ boundary client-agent sessions +``` + +### Grants + +The default grants that Boundary creates for anonymous and authenticated users are sufficient to get started with the Client Agent for the public beta. +However, in a production scenario, you may want to provide the least amount of privileges necessary for users. +For a Boundary user to be able to use the Client Agent to establish a transparent session, they must: + +- be able to authenticate using an auth method. +- have read permissions for their auth token. +- have permission to establish a session to one or more targets. + +You can use the following grant strings to grant those permissions: + +``` +type=auth-method;ids=*;actions=authorize +type=target;ids=*;actions=authorize-session +type=auth-token;ids=*;actions=read:self +``` + +HashiCorp highly recommends that you also grant users the permission to list resolvable aliases, as the Client Agent periodically fetches a list of aliases to match incoming DNS requests against. +Without that permission, every DNS request on the system is sent to the Boundary controller, which can easily overwhelm it. +You can use the following grant string to grant the permission to list resolvable aliases: + +``` +type=user;ids=*;actions=list-resolvable-aliases +``` + +## Configuration + +The default configuration included with the Boundary Client Agent upon installation will be suitable for most users. 
If you want to make changes to the configuration, the configuration file is located in the following directory: + + + + + `/Library/Application Support/HashiCorp/Boundary/boundary-client-agent.hcl` + + + + + `C:\Program Files\Hashicorp Boundary\boundary-client-agent.hcl` + + + + +The configuration file contains the following fields: + +- `alias_refresh_interval` - Specifies how often to refresh the alias cache. The default value is 1 minute. + + Example: + ```hcl + alias_refresh_interval=60s + ``` + +- `dns_request_timeout` - Specifies for how long the Client Agent DNS request handling, including any recursion, is allowed to run before it is canceled. + + Example: + ```hcl + dns_request_timeout=300s + ``` + +- `interface_to_use` - Specifies the interface to use instead of the default. + + Example: + ```hcl + interface_to_use=en1 + ``` + +- `log_file` - Specifies where to write the Boundary Client Agent log file to. + + Example: + ```hcl + log_file="/Library/Application Support/HashiCorp/Boundary/boundary-client-agent.log" + ``` + +- `log_level` - Specifies the verbosity of the Client Agent logs. + + Example: + ```hcl + log_level="DEBUG" + ``` + +- `log_to_stdout` - Logs to STDOUT in addition to the `boundary-client-agent.log` file. + + Example: + ```hcl + log_to_stdout=false + ``` + +- `override_upstream_dns_servers` - Lists the DNS servers that should be used for recursing non-Boundary requests, overriding those configured on the system. + + Example: + ```hcl + override_upstream_dns_servers = ["8.8.8.8", "8.8.4.4"] + ``` + +- `state_file` - Specifies where to write the Boundary Client Agent state file to. This is an ephemeral file which is removed on successful shutdown. + + Example: + ```hcl + state_file="/Library/Application Support/HashiCorp/Boundary/boundary-client-agent-state.json" + ``` + +- `v4_prefix` - Specifies an alternate prefix to use for generating IPs. 
Currently must be between /8 and /16 + + Example: + ```hcl + v4_prefix=1.1.1.1/8 + ``` + +### Change the configuration + +Complete the following steps to change the configuration of the Client Agent: + + + + +1. As a privileged user, open the Boundary Client Agent configuration file in the editor of your choice. +By default, it is located in the following directory: + + `/Library/Application Support/HashiCorp/Boundary/boundary-client-agent.hcl` + +1. Change the configuration settings, and save the file. + + + + You must restart the Client Agent to update some configuration settings. + However, when you restart the Client Agent, it closes any existing sessions. + Other configuration settings can be updated by only reloading the configuration file, which does not affect any existing sessions. + + + +1. Either reload the configuration file or restart the Client Agent. + + You can change the following configuration values by reloading the configuration file, which will not disrupt any existing sessions: + + - `dns_request_timeout` + - `log_file` + - `log_level` + - `state_file` + - `override_upstream_dns_servers` + - `v4_prefix` + + Run the following command to reload the configuration file: + + ```shell-session + $ sudo pkill -1 boundary-client-agent + ``` + + If you want to update another configuration value, you can restart the Client Agent using the following commands, however it will close any existing sessions: + + ```shell-session + $ sudo launchctl stop com.hashicorp.boundary.boundary-client-agent + $ sudo launchctl start com.hashicorp.boundary.boundary-client-agent + ``` + + + + +1. As a privileged user, open the Boundary Client Agent configuration file in the editor of your choice. +By default, it is located in the following directory: + + `C:\Program Files\Hashicorp Boundary\boundary-client-agent.hcl` + +1. Change the configuration settings, and save the file. +1. Run the following commands to restart the Client Agent. 
+ + ```shell-session + net stop BoundaryClientAgent + net start BoundaryClientAgent + ``` + + Note that when you restart the Client Agent, it closes any existing sessions. + + + + +## Manage the Client Agent + +Refer to the following sections for more information about managing the Client Agent. +You can monitor the Client Agent's status and retrieve information about any transparent sessions. +If you want to temporarily defer DNS resolution to any previously configured DNS resolvers, you can pause the Client Agent. +You can also disable the Client Agent, if you no longer want to use it for DNS resolution. + +### Monitor status and sessions + +You can check the status of the Client Agent to ensure it is running. +Use the following command to check the Client Agent's status: + +```shell-session +$ boundary client-agent status +``` + +You can retrieve information about the sessions that the Client Agent is managing. +Use the following command to list any sessions currently being managed by the Client Agent, as well as any brokered credentials for those sessions: + +```shell-session +$ boundary client-agent sessions +``` + +Note that this command does not list sessions that are not managed by the Client Agent. Use `boundary sessions list -recursive` to see all sessions. + +### Pause the Client Agent + +You can temporarily disable the Boundary Client Agent by pausing it with the following command: + +```shell-session +$ boundary client-agent pause +``` + +When the Client Agent is paused, it does not intercept any DNS requests, and you are unable to use transparent sessions. 
+ +To resume the Client Agent, use the following command: + +```shell-session +$ boundary client-agent resume +``` + +### Disable the Client Agent + +If you want to disable the Boundary Client Agent, you can stop it with the following commands: + + + + +```shell-session +$ sudo launchctl unload -w /Library/LaunchDaemons/com.hashicorp.boundary.boundary-client-agent.plist +``` + +Unloading the Boundary Client Agent removes its launch daemon configuration. To restart the Client Agent, use: + +```shell-session +$ sudo launchctl load -w /Library/LaunchDaemons/com.hashicorp.boundary.boundary-client-agent.plist +``` + + + + +```shell-session +net stop BoundaryClientAgent +``` + + + + +## Troubleshooting + +The following sections can help you to troubleshoot the Client Agent's behavior. You should proceed through these steps from top to bottom. + +### Check the status of the Client Agent + +If you experience unexpected behavior, you should first check on the status of the Client Agent. +You can check the status using the Boundary CLI or the Desktop Client. To check the Client Agent status through the Boundary CLI, use the following command: + +```shell-session +$ boundary client-agent status + +Status: + Address: + https://boundary.corp.com + Auth Token Expiration: 167h58m9s + Auth Token Id: at_GBqZUK2ihv + Status: running + Version: 0.0.1-e561e69839cce148ee5045684bce5b7168c65026 +``` + +In the Desktop Client, you can find the status of the Client Agent by navigating to **Settings**, and then scrolling to the **Boundary Client Agent** section. + +The status command includes various information about the Client Agent, including the runtime status. +In this example, the runtime status is "running". +If the status is "paused", the Client Agent is not currently intercepting DNS requests and must be resumed. +Users can pause the Client Agent, and it will also pause itself if it detects a large number of network failures in a short period of time. 
+ +The status also allows you to see whether the current user is authenticated. +If the response looks like the example above, including showing an auth token ID and expiration, your current user is authenticated. +If not, you may need to first authenticate to the Client Agent using the CLI or Desktop Client. + +The status also sometimes contains a list of errors that have been encountered by the Client Agent. +The list is ordered by most recent first. +These errors can help you understand why the Client Agent may not be behaving as expected. +Please see the section below on commonly seen errors to help diagnose specific errors. +Note that this list of errors will not be cleared until the next reboot, so it may not necessarily be a sign of something being wrong. + +If the status command returns an error, the Client Agent may not be running. +You can attempt to start the Client Agent using the following commands: + + + + + ```shell-session + $ sudo launchctl start com.hashicorp.boundary.boundary-client-agent + ``` + + + + + ```shell-session + net start BoundaryClientAgent + ``` + + + +### Resume the Client Agent + +You can resume the Client Agent using either the Boundary CLI or the Desktop Client. In the CLI, run the following command to resume the Client Agent: + +```shell-session +$ boundary client-agent resume +The Client Agent has been successfully resumed. +``` + +In the Desktop Client, you can resume the Client Agent by selecting the **Resume** button in the **Boundary Client Agent** section of the settings. +Once the Client Agent has resumed, test if it has started working as expected again. + +### Inspect the log file + +If you are not able to diagnose the problem by looking at the status or resuming the Client Agent, another step can be to inspect the log file produced by the Client Agent. + + + + +The log file should be located in `/Library/Application Support/HashiCorp/Boundary/boundary-client-agent.log`. 
+ + + + +The log file should be located in `C:\Windows\Logs\boundary-client-agent.log`. + + + + +Once you have found the log file, you can look through it to see if you can understand why the Client Agent is not working as expected. +The list below provides some common errors and explanations. + +It may be necessary to increase the logging verbosity of the Client Agent. +You can increase the verbosity by setting the `log_level` option in the configuration file to `"DEBUG"`. +See the section on changing the configuration for more information. + +### Establish the behavior of the local DNS configuration + +The Client Agent works by intercepting DNS requests before they are sent to your regular DNS server. +If the DNS requests on your system are not sent to the right place, or they are not being answered appropriately, transparent sessions will not work. + +You can use the `nslookup` command to understand where the DNS requests are being sent. +Start by sending a DNS request for `hashicorp.com`: + +```shell-session +$ nslookup hashicorp.com +;; Truncated, retrying in TCP mode. +Server: 100.88.241.86 +Address: 100.88.241.86#53 + +Non-authoritative answer: +Name: hashicorp.com +Address: 76.76.21.21 +``` + +The important part here is the `Server` field, which contains an IP in the CGNAT range (from `100.64.0.0` to `100.127.255.255`). +This is a good indication that the Client Agent DNS server is being used as expected. + +Next, you can try to make a DNS request to an alias that you expect to work. The following example makes a DNS request to an alias with a value of `mytarget.boundary.dev`: + +```shell-session +$ nslookup mytarget.boundary.dev +;; Truncated, retrying in TCP mode. +Server: fc00:a20a::d7bf:c059 +Address: fc00:a20a::d7bf:c059#53 + +Name: mytarget.boundary.dev +Address: 100.84.164.9 +``` + +You can tell two things from this: +1. 
The Client Agent is likely able to intercept the DNS request, because the server is a local IPv6 address in the [ULA](https://en.wikipedia.org/wiki/Unique_local_address) range. + Both an IPv4 CGNAT range or IPv6 ULA range IP address are indications of this. +2. The Client Agent is able to identify `mytarget.boundary.dev` as an alias with a target that the requesting user is authorized to connect to, because it responded with a valid DNS response pointing to a local IPv4 address in the CGNAT range. + Similarly to above, the IP address in the response may also be an IPv6 address in the ULA range. + +If you do not see this kind of response, it may be that the alias you are trying to connect to doesn't exist, or your user is not authorized to connect to it. +Double check that you are using the correct alias and that your user is authorized to connect to it. + +### Flush OS DNS cache + +If you still do not see the expected behavior, it can be useful to flush the operating system's DNS cache. +The exact steps depend on the operating system you use: + + + + +```shell-session +$ sudo dscacheutil -flushcache && sudo killall -HUP mDNSResponder +``` + + + + +```shell-session +ipconfig /flushdns +``` + + + + +After you flush the DNS cache, try connecting to the alias again, or repeat the steps above. + +If you are still not able to understand what is wrong, submit a support ticket. Include the `boundary client-agent status` output and the log file in the ticket. + +### Commonly seen errors + +Refer to the following commonly seen errors for more information about their possible causes and resolutions. + +#### nodename nor servname provided, or not known / No such host is known + +This is a generic error for a failed DNS resolution. +It can mean a number of different things: +- The alias doesn't exist or is misspelled. +- Your user isn't authenticated or doesn't have permission to connect to the target. 
+- The Client Agent is not able to intercept DNS requests, it could be shut down or paused. +- The OS DNS cache is interfering with the operation of the Client Agent. +- The Client Agent may not yet know about the alias. + It takes around 2 minutes for the Client Agent to learn about new aliases. + +Follow the troubleshooting steps above to resolve the issue. + +#### failed to listen for DNS on either IPv4 or IPv6 + +This error happens when some other application on the local machine occupies the ports used by the Boundary Client Agent. +The Client Agent requires access to port 53 for IPv4 and IPv6, both UDP and TCP. +Diagnosing what causes the error differs per operating system: + + + + +As a privileged user, you can use the `lsof` program to find what applications are occupying a port, for example: + +```shell-session +$ sudo lsof -nP | grep ":53" +``` + +If anything is occupying port 53, you may need to terminate the application before the Client Agent is able to start. + +Applications that make use of the Apple Virtualization Framework are known to sometimes occupy this port under +the name `_mdnsresponder`. If you have any virtualization software, you may need to turn it off before using +the Client Agent. + + + + +As a privileged user, you can open the **Resource Monitor** and inspect the **Network** > **Listening Ports** section to find any applications that use port 53. + + + + +Once you have identified which other software is using the port, you can stop it and try to start the Client Agent again. + +#### failed to refresh alias cache: error="fetching resolvable aliases: error performing client request during List call" + +This usually implies that there is a problem reaching the internet or the Boundary controller. +The error is related to the periodic updating of aliases used by the Client Agent to know whether a DNS request matches an alias or not. 
+ +HashiCorp recommends that you pause the Client Agent and examine the status and logs for further errors: + +```shell-session +$ boundary client-agent pause +``` + +Follow the troubleshooting steps to understand why the Client Agent is not able to reach the controller. + +#### WARNING! Remote host identification has changed! It is possible that someone is doing something nasty! + +This error arises when you use an alias to connect to an SSH target after the first successful connection using that alias. The issue occurs because Boundary workers generate a new host key on every new SSH connection. You can safely ignore the warning using the `StrictHostKeyChecking=no` command line option: + +```shell-session +$ ssh -o StrictHostKeyChecking=no targetalias.boundary.dev +``` + +You can also remove the existing server host key from the `~/.ssh/known_hosts` file to avoid the error. + +## Conflicting software + +Some software is known to cause conflicts with the Boundary Client Agent. +The following sections are an incomplete list of potential conflicts and any available workarounds for issues. + +### Docker Desktop (MacOS) + +Docker Desktop sometimes creates a local DNS listener that prevents the Client Agent from running. +If you run Docker Desktop 4.26 or later, you must clear the `Use kernel networking for UDP` option. +Otherwise, the Client Agent refuses to start. + +### Palo Alto Networking Global Protect VPN + +If you are unable to establish a transparent session while using the Palo Alto Networking Global Protect VPN, you may need to explicitly specify a network interface and the upstream DNS server(s) to use. + +By default, the Client Agent reads the primary network interface's DNS server configuration and uses that information to resolve domains that are not configured as aliases in Boundary. +If the VPN configuration includes custom DNS servers, this information may not be available to the Client Agent, so you must explicitly specify the DNS server(s) to use. 
+ +To configure the DNS server(s) to use, use the `override_upstream_dns_servers` configuration option: + +```hcl +# The DNS servers must be specified as an IP, or an IP:Port. +# If no port is provided, port 53 is assumed. +# The order of the entries specifies the priority. +# We recommend providing both the VPN DNS servers +# and the default DNS servers, so that DNS requests can +# be resolved even when the VPN is not active. +override_upstream_dns_servers = [ + "10.0.0.1", # Example primary VPN DNS server + "10.0.0.2", # Example secondary VPN DNS server + "8.8.8.8", # Fallback default DNS server + "8.8.4.4:53", # Fallback default DNS server with a custom port +] +``` + + + +The `override_upstream_dns_servers` option is used for all non-Boundary DNS requests. +If you only provide the VPN DNS servers, the Client Agent will not be able to resolve any DNS requests when the VPN is not active. + + + +#### Primary network interfaces + +By default, the Client Agent creates IPs on the primary network interface to serve its DNS server. +Refer to the tabs below for possible conflicts for each supported operating system. + + + + +When you run the Client Agent alongside the PAN-GP VPN, the primary network interface will likely be set to a `tun` type interface, which the Client Agent cannot use for its IP addresses. +You may see errors such as the following in the `boundary-client-agent.log` file or the `boundary client-agent status` command response: + +``` +[ERROR] macos.addIP: error adding ipv4 address: ifconfig: ioctl (SIOCAIFADDR): Destination address required +``` + +To work around the default `tun` interface, you must provide an explicit network interface using the `interface_to_use` configuration option. For example: + +```hcl +interface_to_use=en0 +``` + +The `interface_to_use` option allows the Client Agent to create the IPs it needs to serve the DNS server and proxy traffic. +You must restart the Client Agent for it to update its configuration with the new setting. 
+ + + + +On Windows, the Client Agent may be able to create the IPs that it needs on the primary network interface, but it fails to establish any transparent sessions. You may see the following message: + +``` +[INFO] default route change detected, restarting +``` + +You must explicitly specify a network interface to use other than the primary one. You can list available network interfaces using the PowerShell command `Get-NetAdapter`, or the older `route print` command. You must find the index of the interface you would normally use to connect to the internet. In this example, the interface index is `11`: + +``` +PS C:\> Get-NetAdapter +Name InterfaceDescription ifIndex Status MacAddress LinkSpeed +---- -------------------- ------- ------ ---------- --------- +Ethernet Parallels VirtIO Ethernet Adapter 11 Up 00-1C-42-B3-F2-75 10 Gbps +Ethernet 2 PANGP Virtual Ethernet Adapter Secure 24 Up 02-50-41-00-00-01 2 Gbps +``` + +Alternatively, if you use `route print`, refer to the following example: + +``` +PS C:\> route print +=========================================================================== +Interface List + 24...02 50 41 00 00 01 ......PANGP Virtual Ethernet Adapter Secure + 11...00 1c 42 b3 f2 75 ......Parallels VirtIO Ethernet Adapter + 1...........................Software Loopback Interface 1 +=========================================================================== +``` + +Your configuration should look like this: + +```hcl +interface_to_use=11 +``` + +You must restart the Client Agent for it to update its configuration with the new setting. + + + + +### Cloudflare WARP client + +The Cloudflare WARP client uses a local DNS server to direct traffic. +It has built-in checks to prevent it from being run alongside other software that uses the same mechanism. +This includes the Boundary Client Agent. 
+If you try to use the Client Agent with the Cloudflare WARP client, it may work, or you may see an error like this one: + +``` +Status: Unable to Connect +Error reason: DNS Proxy Failure +Error code: CF_DNS_PROXY_FAILURE +Error description: The WARP Agent must be the only process responsible for DNS resolution on the device. One or more processes are already bound to port 53: boundary-client-agent. +Learn more: https://cfl.re/CF_DNS_PROXY_FAILURE +``` + +You can still install both the Cloudflare WARP client and the Boundary Client Agent on the same machine. +As long as you don't run both at the same time, they should work as expected. + +## Uninstall the Client Agent on Mac + +If you used the Mac installer, you can run `/Library/Application Support/HashiCorp/Boundary Uninstaller.app` to uninstall Boundary. +The uninstaller removes any installed components, including the Desktop client, CLI, and the Boundary Client Agent. + +## More information + +Refer to the following topics for more information: + +- [Aliases](/boundary/docs/concepts/aliases) +- [Transparent sessions](/boundary/docs/concepts/transparent-sessions) diff --git a/website/content/docs/api-clients/client-cache.mdx b/website/content/docs/api-clients/client-cache.mdx index fcbe7b4911..ce8daae1ed 100644 --- a/website/content/docs/api-clients/client-cache.mdx +++ b/website/content/docs/api-clients/client-cache.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Client cache -description: |- - Learn how the client cache enables Boundary to quickly retrieve local information about session and target resources. +page_title: Client cache overview +description: >- + Learn how the client cache enables Boundary to quickly retrieve local information about session and target resources. Manage startup and other cache functions. 
--- # Client cache diff --git a/website/content/docs/api-clients/desktop.mdx b/website/content/docs/api-clients/desktop.mdx index e87acccbcc..ba74487f21 100644 --- a/website/content/docs/api-clients/desktop.mdx +++ b/website/content/docs/api-clients/desktop.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Boundary Desktop -description: |- - Get up and running with Boundary Desktop +page_title: Boundary Desktop overview +description: >- + Learn how to install the Boundary Desktop application to browse and connect to targets. --- # Boundary Desktop diff --git a/website/content/docs/api-clients/go-sdk.mdx b/website/content/docs/api-clients/go-sdk.mdx index d71f39cb47..b1d6232ee9 100644 --- a/website/content/docs/api-clients/go-sdk.mdx +++ b/website/content/docs/api-clients/go-sdk.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Go SDK -description: |- - Boundary's Go SDK +page_title: Go SDK overview +description: >- + Learn about Boundary's Go SDK. Use the Go SDK to authenticate to Boundary with an auth method or a recovery KMS workflow. --- # Go SDK diff --git a/website/content/docs/api-clients/index.mdx b/website/content/docs/api-clients/index.mdx index b5e9dd6361..a78d5c5b38 100644 --- a/website/content/docs/api-clients/index.mdx +++ b/website/content/docs/api-clients/index.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: API/Clients -description: |- - An introduction to Boundary's API and clients. +description: >- + Discover resources to learn more about Boundary's API and clients. --- # API and clients diff --git a/website/content/docs/common-workflows/manage-roles.mdx b/website/content/docs/common-workflows/manage-roles.mdx index 0c0781928c..f45f162be9 100644 --- a/website/content/docs/common-workflows/manage-roles.mdx +++ b/website/content/docs/common-workflows/manage-roles.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: Manage roles and permissions -description: How to manage roles, permissions, and grants. 
+description: >- + Use roles to manage permissions assigned to users and groups. Create roles and assign principals and grants to them. Add grant scopes and configure inheritance. --- # Manage roles and permissions diff --git a/website/content/docs/concepts/aliases.mdx b/website/content/docs/concepts/aliases.mdx index 71a294f215..5df5666c93 100644 --- a/website/content/docs/concepts/aliases.mdx +++ b/website/content/docs/concepts/aliases.mdx @@ -1,263 +1,64 @@ --- layout: docs page_title: Aliases -description: |- - Aliases let you associate a string with a Boundary resource. Learn how to use an alias instead of a target ID when you connect. +description: >- + Aliases let you associate a string with a resource. Learn how to use an alias instead of an ID when you connect to a target. --- # Aliases -An alias is a globally unique, DNS-like string that is associated with a destination resource. -You can establish a session to a target by referencing its alias, instead of having to provide a target ID or target name and scope ID. +An alias is a globally unique, DNS-like string that is associated with a destination resource. You can establish a session to a target by referencing its alias, instead of having to provide a target ID or target name and scope ID. + For example, if you have an alias `boundary.dev`, you can use it to connect to a target with the following command: `boundary connect ssh boundary.dev`. ## Background -When you create a resource in Boundary, it assigns the resource a randomly generated identifier. -You must use those IDs to perform actions in Boundary. +When you create a resource in Boundary, it assigns the resource a randomly generated identifier. You must use those IDs to perform actions in Boundary. When you connect to a target using the terminal, you must reference the target ID or target name and scope name. -When you connect to a target using the terminal, you must reference the target ID or target name and scope name. 
As an example, to SSH to a target, you can execute the command `boundary connect ssh -target-id ttcp_123456789`. + Since it can be difficult to remember the unique IDs, users frequently have to look up the identifiers for any resources they want to operate on. -Aliases simplify this process. -When you create an alias and associate it with a target, you can later use the alias `value` instead of the target ID in commands. -Boundary automatically resolves to the target that the alias references. +Aliases simplify this process. When you create an alias and associate it with a target, you can later use the alias `value` instead of the target ID in commands. Boundary automatically resolves to the target that the alias references. ## Permissions -The existence of an alias for a Boundary resource does not change how permissions function. -Anyone can attempt to use an alias to access a target, but if you do not have permission to access the target, the attempt fails. -You can create an alias for a target, even if you do not have permission to access the target. +The existence of an alias for a Boundary resource does not change how permissions function. Anyone can attempt to use an alias to access a target, but if you do not have permission to access the target, the attempt fails. You can create an alias for a target, even if you do not have permission to access the target. -Separating the permissions from aliases and destination resources means a different set of people can manage the aliases than the people who have permission to operate on targets. -For example, you may have a project with a sensitive set of targets. -You can configure Boundary to allow a select few users to manage those targets, while a different group of users manage the aliases. +Separating the permissions from aliases and destination resources means a different set of people can manage the aliases than the people who have permission to operate on targets. 
For example, you may have a project with a sensitive set of targets. You can configure Boundary to allow a select few users to manage those targets, while a different group of users manage the aliases. ## Naming conventions An alias is a globally unique, DNS-like string that is associated with a destination resource. The alias `value` parameter does not have to be delimited by a suffix, and can be just a hostname. -Examples of valid aliases include `webserver` and `webserver.boundary`. - -## Scopes - -You can only create aliases in the `global` scope. -However, you can associate aliases with targets or hosts from any scope. -Support for additional resource types may be added in the future. +Examples of valid aliases include `database.boundary` and `webserver.boundary`. -If you delete a project, Boundary clears the `destination_id` parameter for any aliases that resolve to targets in that project, so that they no longer function. +### Single word aliases and transparent sessions -## Connect to a target using an alias +HashiCorp recommends that you do not use single-word aliases such as `webserver` as opposed to `webserver.boundary`, because single-word aliases do not work intuitively on Windows. -Whenever you could use the `-id` flag or `-target` flag in the CLI, you can substitute an alias. +Windows DNS resolution does not support resolving unqualified single word DNS hostnames. You can make the hostname fully qualified, but is not intuitive to most users. -For example, you can use the following command to connect to an SSH target with the ID `ttcp_1234567890`: +For example the following hostname works: -```shell-session -$ boundary connect ssh -target -id ttcp_1234567890 ``` - -If you configured an alias named `example.alias.boundary` for the target, you could alternatively use the following command to connect to the target: - -```shell-session -$ boundary connect ssh example.alias.boundary +ssh mytarget. 
``` -## Create aliases - -There are multiple ways that you can create aliases and associate them with targets in Boundary. - -You can: - -- [Create an alias for an existing target](#create-an-alias-for-an-existing-target) -- [Create an alias during target creation](#create-an-alias-during-target-creation) -- [Associate an existing alias with a target](#associate-an-existing-alias-with-a-target) - -### Create an alias for an existing target - -You can create a new alias and associate it with an existing target at the same time. -Complete the following steps to create a new alias and associate it with a target: - - - - -1. Log in to Boundary. -1. Select **Aliases** in the navigation pane. -1. Click **New Alias**. -1. Complete the following fields: - - **Name**: (Optional) Enter an optional name for the alias to use for identification purposes. - - **Description**: (Optional) Enter an optional description for the alias to use for identification purposes. - - **Type**: Select **Target**. - At this time, targets are the only Boundary resources that supports aliasing. - - **Alias Value**: Enter the string that you want to use as the alias to represent the target. - An alias's value can be a hostname or a DNS-like string. - - **Target ID**: (Optional) Specify any targets you want to associate with the alias. - - **Host ID**: (Optional) Enter an optional host ID, if you would like to specify that the alias always uses the same host when you use it to connect to a target. -1. Click **Save**. - - - - -1. Log in to Boundary. -1. 
Use the following command to create an alias: - - ```shell-session - $ boundary aliases create target \ - -description 'This is an example alias for target tcp_1234567890' \ - -destination-id tcp_1234567890 \ - -name Example Boundary alias \ - -scope-id global \ - -value example.alias.boundary \ - -authorize-session-host-id hst_1234567890 - ``` - - You can use any of the following [attributes](/boundary/docs/concepts/domain-model/aliases) when you create an alias: - - - `-description=` - Specifies the optional description you want to use for identification purposes. - - `-destination-id=` - Specifies the ID of the target that the alias references. - - `-name=` - Specifies the optional name you want to use to describe the alias for identification purposes. - - `-scope-id=` - Scope in which to create the alias. The default is `global`. - You can also specify the scope using the BOUNDARY_SCOPE_ID environment variable. - At this time, aliases are only supported for the global scope. - - `-value=` - Specifies the string that you want to use as the alias to represent the target. - The alias `value` can be a hostname or a DNS-like string. - - `-authorize-session-host-id=` - Optionally indicates the host ID to use when you use the alias to authorize a session. +But this hostname does not work: - - - -### Create an alias during target creation - -You can create a new target and new alias at the same time and associate the two. - -Complete the following steps to create a new target and new alias at the same time: - - - - -1. Log in to Boundary. -1. Select **Targets** in the navigation pane. -1. Click **New Target**. -1. Complete the following fields: - - **Name**: Enter the target name for identification purposes. - - **Description**: (Optional) Enter an optional description for identification purposes. - - **Type**: Select the target type. - You can create SSH or TCP targets. - - **Target Address**: (Optional) Enter a valid IP address or DNS name. 
- Alternatively, you can configure host catalogs and host sets. - - **Default Port**: (Optional) Enter an optional default port for the target to use for connections. - - **Default Client Port**: (Optional) Enter an optional local proxy port on which to listen when a session is started on a client. - - **Maximum Duration**: (Optional) Enter an optional maximum duration for sessions on this target, in seconds. - - **Maximum Connection**: (Optional) Enter the maximum number of connections allowed per session on this target. - For unlimited connections, enter `-1`. - - **Workers**: (Optional) Select whether you want the worker to function as an ingress and/or egress worker. - - **Aliases**: (Optional) Enter the value fpr any aliases you want to associate with this target, and then click **Add**. - An alias's value can be a hostname or a DNS-like string. - You can associate multiple aliases with a target. -1. Click **Save**. - - - - -1. Log in to Boundary. -1. Use the following command to create a target: - - ```shell-session - $ boundary targets create ssh \ - -description 'This is an example ssh target' \ - -name Example Boundary SSH target \ - -scope-id global \ - -with-alias-authorize-session-host-id hst_1234567890 \ - -with-alias-scope-id global \ - -with-alias-value example.alias.boundary - ``` - - You can use any of the following [attributes](/boundary/docs/concepts/domain-model/targets) when you create a target: - - - `description` - (optional) - An optional description that you can use for identification purposes. - - `name` - (required) - The `name` must be unique within the target's project. - - `scope-id` - (required) - The scope in which to create the target. - The default is `global`. - You can also specify the scope using the BOUNDARY_SCOPE_ID environment variable. - - `-address=` - An optional valid network address for the target to connect to. - You cannot use an address alongside host sources. 
- - `-default-client-port=` - The default client port on the target. - - `-default-port=` - The default port on the target. - If you do not specify a default port, Boundary uses port 22. - - `-egress-worker-filter=` - A Boolean expression that filters which egress workers can process sessions for the target. - - `-enable-session-recording=` - A Boolean expression you can use to enable session recording for the target. - - `-ingress-worker-filter=` - A Boolean expression that filters which ingress workers can process sessions for the target. - - `-session-connection-limit=` - The maximum number of connections allowed for a session. -A value of `-1` means the connections are unlimited. - - `-session-max-seconds=` - The maximum lifetime of the session, including all connections. - You can specify an integer number of seconds or a duration string. - - `-storage-bucket-id=` - The public ID of the storage bucket to associate with the target. - - `-with-alias-authorize-session-host-id=` - The host ID that an alias uses to authorize sessions for the target. - - `-with-aliasscope-id=` - The scope ID that you want to create the target and alias in. - The default is `global`. - At this time, aliases are only supported for the global scope. - - `-with-alias-value=` - The value of the alias that you want to use to represent the target. - Use this parameter to create the alias and target, and associate them with each other, at the same time. - - Note that you can create SSH or TCP [target types](/boundary/docs/concepts/domain-model/targets#target-types). - The example command in this section creates an SSH target. - - - - -### Associate an existing alias with a target - -If you already created an alias, you can update it with an existing target. -Complete the following steps to add an alias to a target: - - - - -1. Log in to Boundary. -1. Select **Targets** in the navigation pane. -1. Select the target you want to add an alias to. -1. 
Under the **Aliases** heading in the right sidebar, click **Add an alias**. -1. Complete the following fields: - - **Name**: (Optional) Enter an optional name for the alias to use for identification purposes. - - **Description**: (Optional) Enter an optional description for the alias to use for identification purposes. - - **Type**: Select **Target**. - At this time, targets are the only Boundary resources that supports aliasing. - - **Alias Value**: Enter the alias value you want to use in commands to represent the target. - An alias's value can be a hostname or a DNS-like string. - - **Target ID**: This field contains the ID of the target you selected to add an alias to. - It is read only. - - **Host ID**: (Optional) Enter an optional host ID, if you would like to specify that the alias always uses the same host when you use it to connect to a target. -1. Click **Save**. +``` +ssh mytarget +``` - - +For this reason, if you expect any Windows users to use an alias, it should contain a dot (`.`) anywhere in the value. -1. Log in to Boundary. -1. Use the following command to create an alias: +Refer to the [transparent sessions](/boundary/docs/concepts/transparent-sessions) documentation for more information. - ```shell-session - $ boundary aliases update target \ - -destination-id tcp_1234567890 \ - -id alt_1234567890 \ - -authorize-session-host-id hst_1234567890 - ``` +## Scopes - You can use any of the following [attributes](/boundary/docs/concepts/domain-model/aliases) when you update an alias: +You can only create aliases in the `global` scope. However, you can associate aliases with targets or hosts from any scope. Support for additional resource types may be added in the future. - - `-description=` - Specifies the optional description you want to use for identification purposes. - - `-destination-id=` - Specifies the ID of the target that the alias references. - - `id=` - Specifies the ID of the alias you want to update. 
- - `-name=` - Specifies the optional name you want to use to describe the alias for identification purposes. - - `-scope-id=` - Scope in which to create the alias. The default is `global`. - You can also specify the scope using the BOUNDARY_SCOPE_ID environment variable. - At this time, aliases are only supported for the global scope. - - `-value=` - Specifies the string that you want to use as the alias to represent the target. - The alias `value` must comply with DNS naming rules. - - `-authorize-session-host-id=` - Optionally indicates the host ID to use when you use the alias to authorize a session. +If you delete a project, Boundary clears the `destination_id` parameter for any aliases that resolve to targets in that project, so that they no longer function. - - \ No newline at end of file +Refer to the [Configure aliases and transparent sessions](/boundary/docs/configuration/target-aliases) pages to learn more. diff --git a/website/content/docs/concepts/auditing.mdx b/website/content/docs/concepts/auditing.mdx index 6f72970528..a31912e7fc 100644 --- a/website/content/docs/concepts/auditing.mdx +++ b/website/content/docs/concepts/auditing.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Auditing -description: |- - An overview of using Boundary to audit for compliance and threat management +description: >- + Learn how Boundary can help improve compliance and threat management by using session recording to audit user access and actions. Understand the BSR format. 
--- # Auditing diff --git a/website/content/docs/concepts/connection-workflows/connect-helpers.mdx b/website/content/docs/concepts/connection-workflows/connect-helpers.mdx index a1611f0b05..5b5dfb16b7 100644 --- a/website/content/docs/concepts/connection-workflows/connect-helpers.mdx +++ b/website/content/docs/concepts/connection-workflows/connect-helpers.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: Connect helpers -description: Learn how connect helpers enable Boundary to automatically accept host key prompts to facilitate connections for HTTP, Kubernetes, PostgreSQL, RDP, and SSH. +description: >- + Learn how to use connect helpers to automatically accept host key prompts and facilitate connections for HTTP, Kubernetes, PostgreSQL, RDP, and SSH. --- # Connect helpers diff --git a/website/content/docs/concepts/connection-workflows/exec-flag.mdx b/website/content/docs/concepts/connection-workflows/exec-flag.mdx index 4593a1b1c0..1d79e1cd07 100644 --- a/website/content/docs/concepts/connection-workflows/exec-flag.mdx +++ b/website/content/docs/concepts/connection-workflows/exec-flag.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: -exec flag -description: |- - Learn how the `-exec` flag enables you to execute Boundary TCP sessions using your preferred client, even when there is no built-in support for it. +description: >- + Learn how to use the `-exec` flag to execute TCP sessions or pass flags using your preferred client. --- # Exec flag diff --git a/website/content/docs/concepts/connection-workflows/index.mdx b/website/content/docs/concepts/connection-workflows/index.mdx index cef6aa4e68..fae371eef5 100644 --- a/website/content/docs/concepts/connection-workflows/index.mdx +++ b/website/content/docs/concepts/connection-workflows/index.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Connection workflows -description: |- - Workflows that you can use to connect to targets. 
Learn how connect helpers, the `-exec` flag, and SSH ProxyCommand can help facilitate connections to targets. +description: >- + Learn how transparent sessions, connect helpers, the `-exec` flag, SSH ProxyCommand, and multi-hop sessions can help facilitate connections to targets. --- # Connection workflows @@ -17,6 +17,18 @@ Refer to the [`boundary connect`](/boundary/docs/commands/connect) documentation To practice using the `boundary connect` command in a development environment, refer to the **Connect to your first target** tutorial for either [HCP Boundary](/boundary/tutorials/hcp-getting-started/hcp-getting-started-connect) or the [self-managed versions](/boundary/tutorials/oss-getting-started/oss-getting-started-connect) of Boundary. +## Transparent sessions + +@include 'alerts/enterprise-only.mdx' + +@include 'alerts/beta.mdx' + +Transparent sessions shift Boundary from an active connection model to a passive connection model. +Instead of interacting with the Boundary CLI or Desktop client and having to remember specific IDs or ephemeral ports to connect to targets, Boundary operates in the background. +If a user is authenticated and authorized, Boundary intercepts DNS calls and routes traffic through a session automatically. + +Refer to the [transparent sessions](/boundary/docs/concepts/transparent-sessions) documentation for more information. + ## Connect helpers Boundary features connect helpers that assist with making connections to targets using certain protocols. @@ -52,7 +64,7 @@ Refer to the [SSH ProxyCommand](/boundary/docs/concepts/connection-workflows/wor ## Multi-hop sessions -This feature requires HCP Boundary or Boundary Enterprise +@include 'alerts/enterprise-only.mdx' Most organizations want to provide access to infrastructure without exposing private networks. Many organizations also have complex network topologies requiring inbound traffic to route through multiple network enclaves to reach the target system. 
diff --git a/website/content/docs/concepts/connection-workflows/multi-hop.mdx b/website/content/docs/concepts/connection-workflows/multi-hop.mdx index cfb3d6863b..84d6889e1e 100644 --- a/website/content/docs/concepts/connection-workflows/multi-hop.mdx +++ b/website/content/docs/concepts/connection-workflows/multi-hop.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Multi-hop sessions -description: |- - Learn how multi-hop sessions enable you to chain together two or more Boundary workers across multiple networks. +description: >- + Learn how multi-hop sessions let you chain together two or more workers across multiple networks to reach a target without exposing private networks. --- # Multi-hop sessions @@ -14,6 +14,16 @@ inbound traffic to route through multiple network enclaves to reach the target s Multi-hop sessions allow you to chain together two or more workers across multiple networks to form reverse proxy connections between the user and the target, even in complex networks with strict outbound-only policies. +## Inbound network rules + +With a multi-hop deployment, all connections are initiated outbound from the most downstream worker in the chain. After Boundary establishes the initial connection between the workers, it uses the established connection for any subsequent connections. +These persistent TCP connections result in the requirement for only outbound connectivity. + +If you have one or more firewalls sitting between the ingress and egress workers, you do not need to create additional inbound networking rules to facilitate a Boundary multi-hop deployment. This not only helps to +simplify your infrastructure configuration, but also ensures that your security posture is not weakened or compromised. + +## Multi-hop worker types + In multi-hop scenarios, there are typically three types of workers: 1. **Ingress worker** - An ingress worker is a worker that is accessible by the client. The client initiates the connection to the ingress worker. 1. 
**Intermediary worker** - An optional intermediary worker sits between ingress and egress workers as part of a multi-hop chain. There can be multiple intermediary workers as part of a multi-hop chain. @@ -59,6 +69,63 @@ traffic to a target. Ingress worker filters determine which workers you connect with to initiate a session, and egress worker filters determine which workers are used to access targets. +## Use HCP-managed workers as ingress workers + +Many organizations have strict network policies that prohibit all inbound traffic into their networks. In these scenarios, you can use HCP-managed workers as the ingress workers. To establish a connection into the network, a self-managed worker configured as an egress worker initiates an outbound connection to the HCP-managed worker, creating a persistent connection. As a result, when end users connect to a target, the end user's connection would hop from the Boundary client to the HCP-managed worker (ingress worker) to the self-managed worker (egress worker) to the target (or other intermediary workers if needed). + +### Configure HCP-managed workers for ingress + +To configure end user traffic to ingress through HCP-managed workers, you must configure the self-managed worker (enterprise version). On your self-managed worker that you use for egress to the HCP-managed worker, set the configuration file with the following parameters: +- `hcp_boundary_cluster_id` - The HCP Boundary cluster ID, which can be found in the HCP Boundary cluster's URL. +- Omit the `public_addr` parameter. A public address is not needed since the self-managed worker initiates the connection to HCP-managed workers. +- Omit the `initial_upstreams` parameter. This is not needed because the `hcp_boundary_cluster_id` parameter is sufficent to indicate the HCP-managed workers as the upstream. 
+- Include a [worker tag](/boundary/docs/concepts/filtering/worker-tags#target-worker-filtering) in the `worker` stanza which will be used to select multi-hop routes for each target. + +### Example self-managed worker configuration: +``` +hcp_boundary_cluster_id = "7acdefe2c-1234-4ff1-b710-123456789876" + +listener "tcp" { + address = "0.0.0.0:9202" + purpose = "proxy" +} + +worker { + auth_storage_path = "/home/ubuntu/boundary/worker1" + tags { + tag = ["multihop"] + } + recording_storage_path = "/tmp/worker1" +} +``` +### Allow-list outbound network traffic to HCP-managed workers + +Some organizations require explicit destination addresses set in their network firewall rules for any outbound traffic. In this scenario, you should use the fully qualified domain name (FQDN) of the HCP-managed workers: + +``` +.proxy.boundary.hashicorp.cloud +``` + +where the `cluster_uuid` is the HCP Boundary cluster ID. You can find your HCP Boundary cluster ID in the HCP Boundary cluster's URL. + + + + The Boundary cluster ID is derived from the Boundary address. For example, if + your cluster URL is: + + `https://abcd1234-e567-f890-1ab2-cde345f6g789.boundary.hashicorp.cloud` + + Then your cluster id is `abcd1234-e567-f890-1ab2-cde345f6g789`. + + + +### Route end user traffic to targets through HCP-managed workers + +To route traffic through the HCP-managed workers, you should set the egress filters of each target to match the tag set in the self-managed worker's configuration file. You do not need to set additional ingress filters on the targets. + +![Multi-hop egress worker filter](/img/ui/multi-hop-egress-filter_light.png#light-theme-only) +![Multi-hop egress worker filter](/img/ui/multi-hop-egress-filter_dark.png#dark-theme-only) + ## Multi-hop worker requirements When you configure multi-hop sessions, there is an "ingress" worker, an "egress" @@ -68,9 +135,9 @@ intermediary workers have the following requirements. 
### Ingress worker requirements To proxy target connections, ingress workers require outbound access to the -Boundary control plane and inbound access from clients. +Boundary control plane and inbound access from clients. -HCP Boundary clusters automatically deploy HCP-managed workers which can be used as ingress workers. Using HCP-managed workers as ingress workers is helpful when organizations have strict networks security policies that prohibit any inbound access. In this scenario, intermediary or egress workers within the private network can establish a reverse proxy connection to the HCP-managed ingress worker. +HCP Boundary clusters automatically deploy HCP-managed workers which can be used as ingress workers. Using HCP-managed workers as ingress workers is helpful when organizations have strict networks security policies that prohibit any inbound access. In this scenario, intermediary or egress workers within the private network can establish a reverse proxy connection to the HCP-managed ingress worker. ### Intermediary worker requirements diff --git a/website/content/docs/concepts/connection-workflows/workflow-ssh-proxycommand.mdx b/website/content/docs/concepts/connection-workflows/workflow-ssh-proxycommand.mdx index 0823c47263..75378a02d6 100644 --- a/website/content/docs/concepts/connection-workflows/workflow-ssh-proxycommand.mdx +++ b/website/content/docs/concepts/connection-workflows/workflow-ssh-proxycommand.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: SSH ProxyCommand -description: Learn how SSH ProxyCommand enables you to proxy an SSH connection in Boundary using a configuration file. Configure connections using the target's ID or domain. +description: >- + Learn how to use SSH ProxyCommand to proxy an SSH connection using a configuration file. Configure hosts using the target's ID or domain. 
--- # SSH ProxyCommand diff --git a/website/content/docs/concepts/credential-management.mdx b/website/content/docs/concepts/credential-management.mdx index 9a7f934277..a35dfb6c43 100644 --- a/website/content/docs/concepts/credential-management.mdx +++ b/website/content/docs/concepts/credential-management.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Credential management -description: |- - An overview of credential management in Boundary +description: >- + Learn about using credential brokering or credential injection to authenticate users. Understand the benefits and security considerations of each method. --- # Credential management diff --git a/website/content/docs/concepts/domain-model/accounts.mdx b/website/content/docs/concepts/domain-model/accounts.mdx index d2b1ce4660..d527d5bde6 100644 --- a/website/content/docs/concepts/domain-model/accounts.mdx +++ b/website/content/docs/concepts/domain-model/accounts.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - accounts -description: |- - The anatomy of a Boundary account +page_title: Account resource +description: >- + Learn about using the accounts resource to establish the identities of users. Understand how to configure general, password, and LDAP account attributes. --- # Accounts diff --git a/website/content/docs/concepts/domain-model/aliases.mdx b/website/content/docs/concepts/domain-model/aliases.mdx index 5f9d4569aa..248fe91f6b 100644 --- a/website/content/docs/concepts/domain-model/aliases.mdx +++ b/website/content/docs/concepts/domain-model/aliases.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - aliases -description: |- - The anatomy of a Boundary alias +page_title: Alias resource +description: >- + Learn about using the alias resource to transparently connect to targets without having to provide the target ID or target name and scope ID. 
--- # Aliases diff --git a/website/content/docs/concepts/domain-model/auth-methods.mdx b/website/content/docs/concepts/domain-model/auth-methods.mdx index b030fefc3c..02f88eb1b1 100644 --- a/website/content/docs/concepts/domain-model/auth-methods.mdx +++ b/website/content/docs/concepts/domain-model/auth-methods.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - auth methods -description: |- - Use auth methods to authenticate users to Boundary. Learn which attributes you can configure for password, OIDC, and LDAP auth methods in Boundary. +page_title: Auth method resource +description: >- + Learn about using the auth method resource to authenticate users. Understand which attributes you can configure for password, OIDC, and LDAP auth methods. --- # Auth methods diff --git a/website/content/docs/concepts/domain-model/credential-libraries.mdx b/website/content/docs/concepts/domain-model/credential-libraries.mdx index b0599552ef..b521822588 100644 --- a/website/content/docs/concepts/domain-model/credential-libraries.mdx +++ b/website/content/docs/concepts/domain-model/credential-libraries.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - credential libraries -description: |- - The anatomy of a Boundary credential library +page_title: Credential library resource +description: >- + Learn about using the credential library resource to provide credentials from a credential store. Understand the credential library attributes you can configure. 
--- # Credential libraries diff --git a/website/content/docs/concepts/domain-model/credential-stores.mdx b/website/content/docs/concepts/domain-model/credential-stores.mdx index 02f78565ee..ac803c9000 100644 --- a/website/content/docs/concepts/domain-model/credential-stores.mdx +++ b/website/content/docs/concepts/domain-model/credential-stores.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - credential stores -description: |- - The anatomy of a Boundary credential store +page_title: Credential store resource +description: >- + Learn about using the credential store resource to store and retrieve credentials. Understand the Vault and static credential store attributes you can configure. --- # Credential stores diff --git a/website/content/docs/concepts/domain-model/credentials.mdx b/website/content/docs/concepts/domain-model/credentials.mdx index 55bad83262..99fefbbcb8 100644 --- a/website/content/docs/concepts/domain-model/credentials.mdx +++ b/website/content/docs/concepts/domain-model/credentials.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - credentials -description: |- - The anatomy of a Boundary credential +page_title: Credential resource +description: >- + Learn about using the credential resource to define secrets for a host. Understand username password, SSH private key, SSH certificate, and JSON credential types. --- # Credentials diff --git a/website/content/docs/concepts/domain-model/groups.mdx b/website/content/docs/concepts/domain-model/groups.mdx index 7c0045333e..5da4dfb2bb 100644 --- a/website/content/docs/concepts/domain-model/groups.mdx +++ b/website/content/docs/concepts/domain-model/groups.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - groups -description: |- - The anatomy of a Boundary group +page_title: Group resource +description: >- + Learn about using the group resource to create collections of users with the same access control. 
Any role assigned to a group is assigned to the group's users. --- # Groups diff --git a/website/content/docs/concepts/domain-model/host-catalogs.mdx b/website/content/docs/concepts/domain-model/host-catalogs.mdx index f10d505419..883af093cc 100644 --- a/website/content/docs/concepts/domain-model/host-catalogs.mdx +++ b/website/content/docs/concepts/domain-model/host-catalogs.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - host catalogs -description: |- - The anatomy of a Boundary host catalog +page_title: Host catalog resource +description: >- + Learn about using the host catalog resource to organize and manage hosts and host sets within a project based on their function, environment, or other criteria. --- # Host catalogs diff --git a/website/content/docs/concepts/domain-model/host-sets.mdx b/website/content/docs/concepts/domain-model/host-sets.mdx index 551d60ad48..e8d01513b7 100644 --- a/website/content/docs/concepts/domain-model/host-sets.mdx +++ b/website/content/docs/concepts/domain-model/host-sets.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - host sets -description: |- - The anatomy of a Boundary host set +page_title: Host set resource +description: >- + Learn about using the host set resource to organize and manage hosts that have the same level of access control and belong to the same host catalog. --- # Host sets diff --git a/website/content/docs/concepts/domain-model/hosts.mdx b/website/content/docs/concepts/domain-model/hosts.mdx index e4c493ba14..d2f53a9103 100644 --- a/website/content/docs/concepts/domain-model/hosts.mdx +++ b/website/content/docs/concepts/domain-model/hosts.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - hosts -description: |- - The anatomy of a Boundary host +page_title: Host resource +description: >- + Learn about using host resources to represent computing elements that are reachable from Boundary. You can organize hosts in host sets and host catalogs. 
--- # Hosts diff --git a/website/content/docs/concepts/domain-model/index.mdx b/website/content/docs/concepts/domain-model/index.mdx index 81baa342c1..40cd8d7eb7 100644 --- a/website/content/docs/concepts/domain-model/index.mdx +++ b/website/content/docs/concepts/domain-model/index.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model index -description: |- - Reference documentation for Boundary's domain model. +page_title: Domain model overview +description: >- + Learn about using the domain model to organize identity and access management and target resources for secure access. Understand how resources work together. --- # Overview diff --git a/website/content/docs/concepts/domain-model/managed-groups.mdx b/website/content/docs/concepts/domain-model/managed-groups.mdx index 9309f07785..e845a129b0 100644 --- a/website/content/docs/concepts/domain-model/managed-groups.mdx +++ b/website/content/docs/concepts/domain-model/managed-groups.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - managed groups -description: |- - The anatomy of a Boundary managed group +page_title: Managed group resource +description: >- + Learn about using the managed group resource to organize identity provider accounts and assign them grants. Understand how to configure OIDC and LDAP attributes. --- # Managed groups diff --git a/website/content/docs/concepts/domain-model/roles.mdx b/website/content/docs/concepts/domain-model/roles.mdx index 42164cffb6..5da68d81b2 100644 --- a/website/content/docs/concepts/domain-model/roles.mdx +++ b/website/content/docs/concepts/domain-model/roles.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - roles -description: |- - The anatomy of a Boundary role +page_title: Role resource +description: >- + Learn about using the role resource to group permissions which are then granted to any principal assigned to the role. Understand role attributes. 
--- # Roles diff --git a/website/content/docs/concepts/domain-model/scopes.mdx b/website/content/docs/concepts/domain-model/scopes.mdx index 134fd4eb6b..92fbd39916 100644 --- a/website/content/docs/concepts/domain-model/scopes.mdx +++ b/website/content/docs/concepts/domain-model/scopes.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - scopes -description: |- - Use scopes to group and manage resources in Boundary. Learn how to configure global scopes, org scopes, and projects to logically group resources. +page_title: Scope resource +description: >- + Learn about using the scope resource to organize and manage resources. Understand how to configure global, org, and project scopes to logically group resources. --- # Scopes diff --git a/website/content/docs/concepts/domain-model/session-connections.mdx b/website/content/docs/concepts/domain-model/session-connections.mdx index cbf3b20a0e..a5f9c555e3 100644 --- a/website/content/docs/concepts/domain-model/session-connections.mdx +++ b/website/content/docs/concepts/domain-model/session-connections.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - session connections -description: |- - The anatomy of a Boundary session connection +page_title: Session connection resource +description: >- + Learn how the session connection resource represents the proxy Boundary creates between user and host. Understand how connections are established and terminated. --- # Session connections diff --git a/website/content/docs/concepts/domain-model/session-recordings.mdx b/website/content/docs/concepts/domain-model/session-recordings.mdx index d0f00eb602..33f74a0500 100644 --- a/website/content/docs/concepts/domain-model/session-recordings.mdx +++ b/website/content/docs/concepts/domain-model/session-recordings.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - session recordings -description: |- - Use session recordings to audit user sessions in Boundary. 
Learn how to configure session recordings to monitor usage. +page_title: Session recording resource +description: >- + Learn about using the session recording resource to audit user sessions. Understand how storage policy retention periods help you meet your compliance needs. --- # Session recordings diff --git a/website/content/docs/concepts/domain-model/sessions.mdx b/website/content/docs/concepts/domain-model/sessions.mdx index a328fc0723..ba3d99e5dd 100644 --- a/website/content/docs/concepts/domain-model/sessions.mdx +++ b/website/content/docs/concepts/domain-model/sessions.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - sessions -description: |- - The anatomy of a Boundary session +page_title: Session resource +description: >- + Learn how the session resource is a set of connections between users and hosts that may include credentials. Understand how sessions are created and terminated. --- # Sessions diff --git a/website/content/docs/concepts/domain-model/storage-buckets.mdx b/website/content/docs/concepts/domain-model/storage-buckets.mdx index b759328321..7b3240cd6f 100644 --- a/website/content/docs/concepts/domain-model/storage-buckets.mdx +++ b/website/content/docs/concepts/domain-model/storage-buckets.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - storage bucket -description: |- - The anatomy of a Boundary storage bucket +page_title: Storage bucket resource +description: >- + Learn how to use the storage bucket resource to retain session recordings for compliance. Understand storage bucket attributes and how scopes affect storage. 
--- # Storage buckets diff --git a/website/content/docs/concepts/domain-model/storage-policy.mdx b/website/content/docs/concepts/domain-model/storage-policy.mdx index 5729d8848b..4f7c19eeb3 100644 --- a/website/content/docs/concepts/domain-model/storage-policy.mdx +++ b/website/content/docs/concepts/domain-model/storage-policy.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - storage policies -description: |- - Use storage policies to manage session recording retention in Boundary. Learn how to configure policies for global and org scopes, and specify retention periods. +page_title: Storage policy resource +description: >- + Learn about using the storage policy resource to manage session recording retention. Understand how to configure policies for scopes and specify retention. --- # Storage policies @@ -12,6 +12,7 @@ description: |- A resource known as a storage policy is used to codify how long [session recordings][] must be kept and when they should be deleted. A storage policy's name is optional, but it must be unique if you define one. Storage policies can only be assigned to the global [scope][] or an org scope. +You must [attach][] storage policies to a scope (global or a specific org) for them to take effect. A storage policy exists in either the global scope or an org scope. Storage policies that are created in the global scope can be associated with any org scope. 
@@ -55,5 +56,6 @@ The following services are relevant to this resource: - [Scope Service](/boundary/api-docs/scope-service) - [Policy Service](/boundary/api-docs/policy-service) +[attach]: /boundary/docs/configuration/session-recording/configure-storage-policy#attach-storage-policies-to-a-scope [session recordings]: /boundary/docs/concepts/domain-model/session-recordings -[scope]: /boundary/docs/concepts/domain-model/scopes \ No newline at end of file +[scope]: /boundary/docs/concepts/domain-model/scopes diff --git a/website/content/docs/concepts/domain-model/targets.mdx b/website/content/docs/concepts/domain-model/targets.mdx index e1796e7149..1c66b78b85 100644 --- a/website/content/docs/concepts/domain-model/targets.mdx +++ b/website/content/docs/concepts/domain-model/targets.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - targets -description: |- - The anatomy of a Boundary target +page_title: Target resource +description: >- + Learn about using the target resource to configure a networked service a user can connect to. Understand the TCP and SSH target type requirements and attributes. --- # Targets diff --git a/website/content/docs/concepts/domain-model/users.mdx b/website/content/docs/concepts/domain-model/users.mdx index 0750e105da..8a7753f9fc 100644 --- a/website/content/docs/concepts/domain-model/users.mdx +++ b/website/content/docs/concepts/domain-model/users.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Domain model - users -description: |- - The anatomy of a Boundary user +page_title: User resource +description: >- + Learn how the user resource identifies a person or entity for access control purposes. Understand how to assign groups and roles so users inherit permissions. 
--- # Users diff --git a/website/content/docs/concepts/filtering/events.mdx b/website/content/docs/concepts/filtering/events.mdx index bf7833fe35..7f7b1e8b94 100644 --- a/website/content/docs/concepts/filtering/events.mdx +++ b/website/content/docs/concepts/filtering/events.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Filtering - events -description: |- - How to filter events emitted by Boundary. +description: >- + Learn how to filter audit, observation, system, and telemetry events to find information written to any event sinks you configured. --- # Filter events diff --git a/website/content/docs/concepts/filtering/index.mdx b/website/content/docs/concepts/filtering/index.mdx index 3d63b32853..ad6d473d35 100644 --- a/website/content/docs/concepts/filtering/index.mdx +++ b/website/content/docs/concepts/filtering/index.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Filtering -description: |- - An introduction to the filtering syntax used in Boundary. +description: >- + Learn how to use filters to match and find data. Understand how to create filter expressions using matching operators composed with selectors and values. --- # Filter expressions diff --git a/website/content/docs/concepts/filtering/managed-groups.mdx b/website/content/docs/concepts/filtering/managed-groups.mdx index 98e1d0fa79..3298e1bf42 100644 --- a/website/content/docs/concepts/filtering/managed-groups.mdx +++ b/website/content/docs/concepts/filtering/managed-groups.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Filtering - managed groups -description: |- - How to configure filters for managed groups within the OIDC or LDAP auth methods. +description: >- + Learn how to configure filters for managed groups. View search filter attributes and examples of filters for OIDC and LDAP managed groups. 
--- [filter syntax]: /boundary/docs/concepts/filtering diff --git a/website/content/docs/concepts/filtering/resource-listing.mdx b/website/content/docs/concepts/filtering/resource-listing.mdx index 527fad7872..e53af6bd52 100644 --- a/website/content/docs/concepts/filtering/resource-listing.mdx +++ b/website/content/docs/concepts/filtering/resource-listing.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Filtering - resource listing -description: |- - How to use filter list responses coming back from Boundary. +description: >- + Learn how to use filters to reduce the set of resources returned when you perform a list operation. Filtering list results helps you find information. --- # Filter resource listings diff --git a/website/content/docs/concepts/filtering/worker-tags.mdx b/website/content/docs/concepts/filtering/worker-tags.mdx index d09ba13434..da3a94a96e 100644 --- a/website/content/docs/concepts/filtering/worker-tags.mdx +++ b/website/content/docs/concepts/filtering/worker-tags.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Filtering - worker tags -description: |- - How to use worker tags to control which workers can handle a given resource. +description: >- + Learn about using worker tags to designate worker roles. Examples include serving specific regions or functions, such as credential stores or storage buckets. --- # Worker tags diff --git a/website/content/docs/concepts/host-discovery/aws.mdx b/website/content/docs/concepts/host-discovery/aws.mdx index 36d88d65d0..0a25143c4a 100644 --- a/website/content/docs/concepts/host-discovery/aws.mdx +++ b/website/content/docs/concepts/host-discovery/aws.mdx @@ -1,15 +1,15 @@ --- layout: docs page_title: AWS dynamic host catalogs -description: |- - An overview of AWS host discovery in Boundary +description: >- + Use dynamic host catalogs to automatically discover AWS EC2 instances and add them as hosts. Create a host catalog and host set for AWS resources. 
--- # AWS dynamic host catalogs Boundary uses dynamic host catalogs to automatically discover AWS EC2 instances and add them as hosts. ## Create a host catalog to connect with AWS Boundary uses plugins to integrate with a variety of providers. To use -a dynamic host catalog to integrate with AWS, you create a host catalog of the `plugin` type +a dynamic host catalog to integrate with AWS, you create a host catalog of the `plugin` type and set the `plugin-name` value to `aws`. You must also provide the specific fields needed for Boundary to authenticate with AWS. @@ -57,6 +57,10 @@ The fields following the `attr` and `secret` flags are specific to AWS and are r - `disable_credential_rotation`: When set to `true`, Boundary will not rotate the credentials with AWS automatically. - `region`: The region to configure the host catalog for. All host sets in this catalog will be configured for this region. +- `role_arn`: The AWS role ARN used for `AssumeRole` authentication. If you provide a `role_arn` value, you must also set `disable_credential_rotation` to `true`. +- `role_external_id`: The external ID that you configured for the `AssumeRole` provider. +- `role_session_name`: The session name that you configured for the `AssumeRole` provider. +- `role_tags`: The key-value pair tags that you configured for the `AssumeRole` provider. - `access_key_id`: The access key ID for the IAM user to use with this host catalog. 
- `secret_access_key`: The secret access key for the IAM user to use with this diff --git a/website/content/docs/concepts/host-discovery/azure.mdx b/website/content/docs/concepts/host-discovery/azure.mdx index c401237528..397e212351 100644 --- a/website/content/docs/concepts/host-discovery/azure.mdx +++ b/website/content/docs/concepts/host-discovery/azure.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Azure dynamic host catalogs -description: |- - An overview of Azure host discovery in Boundary +description: >- + Use dynamic host catalogs to automatically discover Azure resources and add them as hosts. Create a host catalog and host set for Azure resources. --- # Azure dynamic host catalogs Boundary uses dynamic host catalogs to automatically discover Azure resources available through Azure Resource Manager (ARM) and add them as hosts. @@ -10,7 +10,7 @@ Boundary uses dynamic host catalogs to automatically discover Azure resources av ## Create a host catalog to connect with Azure Boundary uses plugins to integrate with a variety of providers. To use a dynamic host catalog to integrate with Azure, you create a host catalog of the -`plugin` type and set the `plugin-name` value to `azure`. You must also provide the +`plugin` type and set the `plugin-name` value to `azure`. You must also provide the specific fields needed for Boundary to authenticate with Azure. diff --git a/website/content/docs/concepts/host-discovery/index.mdx b/website/content/docs/concepts/host-discovery/index.mdx index 121c27872f..543aa6672c 100644 --- a/website/content/docs/concepts/host-discovery/index.mdx +++ b/website/content/docs/concepts/host-discovery/index.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Host discovery -description: |- - An overview of host discovery in Boundary +description: >- + Learn how host discovery workflows let Boundary discover and onboard new resources. Understand how dynamic host catalogs enable automated host discovery. 
--- # Host discovery diff --git a/website/content/docs/concepts/iam.mdx b/website/content/docs/concepts/iam.mdx index 565fab08d8..e117143394 100644 --- a/website/content/docs/concepts/iam.mdx +++ b/website/content/docs/concepts/iam.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Identity and access management (IAM) -description: |- - Identity and access management in Boundary +description: >- + Learn how scopes, auth methods, accounts, users, groups, and roles make up the identity and access management (IAM) system. View examples of grant strings. --- # Identity and access management (IAM) diff --git a/website/content/docs/concepts/index.mdx b/website/content/docs/concepts/index.mdx index 0bfc944f89..6b1890569e 100644 --- a/website/content/docs/concepts/index.mdx +++ b/website/content/docs/concepts/index.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Concepts -description: |- - An introduction to Boundary concepts and architecture. +description: >- + Discover resources to help you understand Boundary concepts and architecture. --- # Concepts diff --git a/website/content/docs/concepts/security/connections-tls.mdx b/website/content/docs/concepts/security/connections-tls.mdx index 9336b5afd8..3ec5dfa06a 100644 --- a/website/content/docs/concepts/security/connections-tls.mdx +++ b/website/content/docs/concepts/security/connections-tls.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Connections/TLS -description: |- - How Boundary secures its connections +description: >- + Learn how Boundary uses transport layer security (TLS) to secure connections and establish sessions. Understand how workers authenticate to resources. 
--- # TLS in Boundary diff --git a/website/content/docs/concepts/security/data-encryption.mdx b/website/content/docs/concepts/security/data-encryption.mdx index 2c2a40547b..cbd6babe96 100644 --- a/website/content/docs/concepts/security/data-encryption.mdx +++ b/website/content/docs/concepts/security/data-encryption.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Data encryption -description: |- - How Boundary secures data at rest +description: >- + Learn how key management services protect the encryption keys used for securing data. Understand key lifecycle management best practices. --- # Data security in Boundary diff --git a/website/content/docs/concepts/security/index.mdx b/website/content/docs/concepts/security/index.mdx index 7332c6745f..ded4fb5da9 100644 --- a/website/content/docs/concepts/security/index.mdx +++ b/website/content/docs/concepts/security/index.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Security -description: |- - Boundary security overview. +description: >- + Discover resources to help you understand Boundary's security. --- # Security diff --git a/website/content/docs/concepts/transparent-sessions.mdx b/website/content/docs/concepts/transparent-sessions.mdx new file mode 100644 index 0000000000..f774fd3b1d --- /dev/null +++ b/website/content/docs/concepts/transparent-sessions.mdx @@ -0,0 +1,48 @@ +--- +layout: docs +page_title: Transparent sessions +description: >- + Learn how transparent sessions enable you to connect to target resources without entering resource IDs or port numbers. +--- + +# Transparent sessions + +@include 'alerts/enterprise-only.mdx' + +@include 'alerts/beta.mdx' + +Transparent sessions shift Boundary from an active connection model to a passive connection model. Boundary operates in the background instead of requiring you to remember specific resource IDs or ephemeral ports to connect to targets. 
+ +As long as Boundary authenticates a user and the user is authorized to access the target, Boundary intercepts the DNS call and routes traffic through a session automatically. + +Transparent sessions require [aliases](/boundary/docs/concepts/aliases) and the [Boundary Client Agent](/boundary/docs/api-clients/client-agent). + +The Boundary Desktop client facilitates quick target discovery and session establishment using your preferred client. If you configure aliases for your targets, install the Boundary Client Agent, and ensure you are authenticated to the cluster, connections are transparent to the user. Boundary provides OS notifications to make it clear when you connect to a target using a transparent session. + +Boundary supports Windows and MacOS for the transparent sessions public beta. + +Refer to the [Configure transparent sessions](/boundary/docs/configuration/target-aliases/transparent-sessions) page to get started. + +## Known issues + +Refer to the following table for known issues that may affect the public beta: + +| Issue | Description | +| ----- | ----------- | +| Connection is reset when trying to reconnect | If you use an SSH transparent session and then cancel the connection, you may have trouble reconnecting until Boundary cleans up the session. | +| SSH connection fails with man-in-the-middle warning | On Ubuntu systems, the initial transparent session may be successful, but any subsequent connections prompt a warning that you may be experiencing a man-in-the-middle attack. | +| Boundary Client Agent authentication does not persist across restarts | When you reboot, you are required to re-authenticate to the Client Agent before you can use transparent sessions. | +| Windows shortcuts are mandatory | The Windows installer always installs Desktop and Start menu shortcuts. This is a known issue. Shortcuts will be optional in a future version of the installer. 
| Windows installer prompts for restart | When you install Boundary, the Windows installer occasionally prompts you to restart your computer, however it is not necessary. | +| Boundary Client Agent resumes on reboot | If the Client Agent is paused and the machine is rebooted, the Client Agent will be resumed after the reboot. | +| Single-word aliases do not work on Windows | If you create an alias consisting of a single word without a dot (`.`), the alias will not work on Windows. | +| Windows installer does not support partial installations | The Windows installer fails to start the Client Agent if the Desktop client is not installed at the same time. | +| Alias connection failures inside containers/VMs | Using transparent sessions relies on network access to the local network of the computer the Client Agent is running on. Network enclaves such as those created by Docker containers and VMs cannot reach this network. | + +## More information + +Refer to the following topics for more information: + +- [Aliases](/boundary/docs/concepts/aliases) +- [Boundary Client Agent](/boundary/docs/api-clients/client-agent) +- [Configure transparent sessions](/boundary/docs/configuration/target-aliases/transparent-sessions) \ No newline at end of file diff --git a/website/content/docs/concepts/workers.mdx b/website/content/docs/concepts/workers.mdx index 06f49674fc..969c485785 100644 --- a/website/content/docs/concepts/workers.mdx +++ b/website/content/docs/concepts/workers.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Workers -description: |- - Introduction to Boundary workers +description: >- + Workers let you proxy traffic to private targets. Learn about worker capabilities, using tags to filter tasks, worker health, and best practices for deployment. 
--- # Workers diff --git a/website/content/docs/configuration/controller.mdx b/website/content/docs/configuration/controller.mdx index 0f29811505..7388608448 100644 --- a/website/content/docs/configuration/controller.mdx +++ b/website/content/docs/configuration/controller.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Controller - configuration -description: |- - The controller stanza configures controller-specifc parameters. +page_title: Controller configuration +description: >- + Learn about configuring controller-specific parameters. Understand how to configure the required KMS stanzas, and view a complete configuration example. --- # `controller` stanza @@ -81,7 +81,7 @@ description will be read. bind a publicly accessible IP to a NIC on the host directly, such as an Amazon EIP. This value can be a direct address string, can refer to a file on disk (file://) from which an address will be read; an env var (env://) from which the - address will be read; or a [go-sockaddr template](https://godoc.org/github.com/hashicorp/go-sockaddr/template). + address will be read; or a [go-sockaddr template](https://godoc.org/github.com/hashicorp/go-sockaddr/template). Note that the address should not include the protocol prefixes like `http://` or `https://`. 
- `auth_token_time_to_live` - Maximum time to live (TTL) for all auth tokens globally (pertains diff --git a/website/content/docs/configuration/credential-management/configure-credential-brokering.mdx b/website/content/docs/configuration/credential-management/configure-credential-brokering.mdx index 3829f99981..b5c9f9f76a 100644 --- a/website/content/docs/configuration/credential-management/configure-credential-brokering.mdx +++ b/website/content/docs/configuration/credential-management/configure-credential-brokering.mdx @@ -1,7 +1,7 @@ --- layout: docs page_title: Configure targets with credential brokering -description: |- +description: >- Configure credential brokering workflows so that Boundary centrally manages credentials and returns them to users when they successfully connect to a target. --- diff --git a/website/content/docs/configuration/credential-management/configure-credential-injection.mdx b/website/content/docs/configuration/credential-management/configure-credential-injection.mdx index 6c76b3868a..70bbdeb22f 100644 --- a/website/content/docs/configuration/credential-management/configure-credential-injection.mdx +++ b/website/content/docs/configuration/credential-management/configure-credential-injection.mdx @@ -1,7 +1,7 @@ --- layout: docs page_title: Configure targets with credential injection -description: |- +description: >- Configure credential injection so Boundary provides users with a passwordless experience when connecting to targets. Available exclusively for Enterprise users. 
--- diff --git a/website/content/docs/configuration/credential-management/index.mdx b/website/content/docs/configuration/credential-management/index.mdx index e0ab55b775..d77b714928 100644 --- a/website/content/docs/configuration/credential-management/index.mdx +++ b/website/content/docs/configuration/credential-management/index.mdx @@ -1,7 +1,7 @@ --- layout: docs -page_title: Configure credentials with Boundary -description: |- +page_title: Credential management with Boundary +description: >- Credential stores let you store and manage credentials in Boundary. Learn about configuring user workflows with credential management or credential injection. --- diff --git a/website/content/docs/configuration/credential-management/static-cred-boundary.mdx b/website/content/docs/configuration/credential-management/static-cred-boundary.mdx index 3f3c719235..26cf542ba3 100644 --- a/website/content/docs/configuration/credential-management/static-cred-boundary.mdx +++ b/website/content/docs/configuration/credential-management/static-cred-boundary.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Manage static credentials with Boundary -description: |- - Create a static credential store to manage static credentials in Boundary. Credential stores let you configure targets for credential brokering or injection. +description: >- + Create a static credential store to manage static credentials. Credential stores let you configure targets for credential brokering or injection. 
--- # Create a static credential store diff --git a/website/content/docs/configuration/credential-management/static-cred-vault.mdx b/website/content/docs/configuration/credential-management/static-cred-vault.mdx index ab4101f9a7..6cb82cff08 100644 --- a/website/content/docs/configuration/credential-management/static-cred-vault.mdx +++ b/website/content/docs/configuration/credential-management/static-cred-vault.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Manage static credentials with Vault -description: |- - Create a Vault credential store to manage credentials in Boundary. Credential stores let you configure targets for credential brokering or credential injection. +description: >- + Create a Vault credential store to manage credentials. Credential stores let you configure targets for credential brokering or credential injection. --- # Create a Vault credential store diff --git a/website/content/docs/configuration/identity-access-management/assignable-permissions.mdx b/website/content/docs/configuration/identity-access-management/assignable-permissions.mdx index a7100acc29..ad38c10135 100644 --- a/website/content/docs/configuration/identity-access-management/assignable-permissions.mdx +++ b/website/content/docs/configuration/identity-access-management/assignable-permissions.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Assignable permissions -description: |- - Assignable permissions +description: >- + Learn about using actions and output fields to grant users permissions to any resources that are identified by ID or Type. 
--- # Assignable permissions diff --git a/website/content/docs/configuration/identity-access-management/index.mdx b/website/content/docs/configuration/identity-access-management/index.mdx index db15d7abe3..8eedea3e57 100644 --- a/website/content/docs/configuration/identity-access-management/index.mdx +++ b/website/content/docs/configuration/identity-access-management/index.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Permissions index -description: |- - Boundary's permissions model +description: >- + Learn about Boundary's RBAC (Role-based access control), allow-only permissions model. Understand how permissions are configured using grant strings and roles. --- # Permissions in Boundary diff --git a/website/content/docs/configuration/identity-access-management/permission-grant-formats.mdx b/website/content/docs/configuration/identity-access-management/permission-grant-formats.mdx index d7ad3599f6..1a6d1ee56d 100644 --- a/website/content/docs/configuration/identity-access-management/permission-grant-formats.mdx +++ b/website/content/docs/configuration/identity-access-management/permission-grant-formats.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Permission grant formats -description: |- - Permission grant formats +description: >- + Learn how to construct grant strings that map resources and permissions. Understand ID, Type, Pinned, and Wildcard grant formats. View possible grant templates. 
--- # Permission grant formats diff --git a/website/content/docs/configuration/identity-access-management/resource-table.mdx b/website/content/docs/configuration/identity-access-management/resource-table.mdx index 87ce7b6309..39bee9e133 100644 --- a/website/content/docs/configuration/identity-access-management/resource-table.mdx +++ b/website/content/docs/configuration/identity-access-management/resource-table.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Resource table -description: |- - Resource table +description: >- + View a list of resources and their available permissions parameters and actions to help you configure and manage permissions. --- # Resource tables diff --git a/website/content/docs/configuration/index.mdx b/website/content/docs/configuration/index.mdx index 13c8939ce4..99b31e0d07 100644 --- a/website/content/docs/configuration/index.mdx +++ b/website/content/docs/configuration/index.mdx @@ -1,7 +1,8 @@ --- layout: docs -page_title: Overview/top-level parameters -description: Boundary configuration reference. +page_title: Top-level configuration parameters +description: >- + Learn about the parameters that make up the Boundary HCL configuration file. View parameters for HCP and self-managed installations. --- # Configuration diff --git a/website/content/docs/configuration/kms/aead.mdx b/website/content/docs/configuration/kms/aead.mdx index d17e51d8d3..d6e29dd4bf 100644 --- a/website/content/docs/configuration/kms/aead.mdx +++ b/website/content/docs/configuration/kms/aead.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: AEAD - configuration -description: |- - The AEAD KMS configures AEAD-specific parameters. +page_title: AEAD KMS configuration +description: >- + Learn about using the Authenticated Encryption with Associated Data (AEAD) KMS for key management. AEAD is typically used for development workflows or testing. 
--- # `aead` KMS diff --git a/website/content/docs/configuration/kms/alicloudkms.mdx b/website/content/docs/configuration/kms/alicloudkms.mdx index 08d52f3731..5c941ef8fa 100644 --- a/website/content/docs/configuration/kms/alicloudkms.mdx +++ b/website/content/docs/configuration/kms/alicloudkms.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: AliCloud KMS - KMSs - configuration +page_title: AliCloud KMS configuration description: >- - The AliCloud KMS configures Boundary to use AliCloud KMS for key management. + Learn about using the AliCloud KMS for key management and configuring parameters and authentication. View an example alicloudkms configuration. --- # `alicloudkms` KMS diff --git a/website/content/docs/configuration/kms/awskms.mdx b/website/content/docs/configuration/kms/awskms.mdx index 96d75320da..4b97367930 100644 --- a/website/content/docs/configuration/kms/awskms.mdx +++ b/website/content/docs/configuration/kms/awskms.mdx @@ -1,9 +1,8 @@ --- layout: docs -page_title: AWS KMS - KMSs - Configuration -description: |- - The AWS KMS configures Boundary to use AWS KMS for key management. - mechanism. +page_title: AWS KMS Configuration +description: >- + Learn about using the AWS KMS for key management, configuring parameters and authentication, and best practices for key rotation. View an example configuration. --- # `awskms` diff --git a/website/content/docs/configuration/kms/azurekeyvault.mdx b/website/content/docs/configuration/kms/azurekeyvault.mdx index 889b69996f..66b9275f16 100644 --- a/website/content/docs/configuration/kms/azurekeyvault.mdx +++ b/website/content/docs/configuration/kms/azurekeyvault.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Azure Key Vault - seals - configuration +page_title: Azure Key Vault configuration description: >- - The Azure Key Vault seal configures Boundary to use Azure Key Vault for key management. + Learn about using the Azure Key Vault KMS for key management and configuring parameters and authentication. 
View an example azurekeyvault configuration. --- # `azurekeyvault` KMS diff --git a/website/content/docs/configuration/kms/gcpckms.mdx b/website/content/docs/configuration/kms/gcpckms.mdx index 23056c896e..5ff0f21ab5 100644 --- a/website/content/docs/configuration/kms/gcpckms.mdx +++ b/website/content/docs/configuration/kms/gcpckms.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: GCP Cloud KMS - KMSs - configuration +page_title: GCP Cloud KMS configuration description: >- - The GCP Cloud KMS configures Boundary to use GCP Cloud KMS for key management. + Learn about using the GCP Cloud KMS for key management and configuring parameters and authentication. View an example gcpkms configuration. --- # `gcpckms` KMS diff --git a/website/content/docs/configuration/kms/index.mdx b/website/content/docs/configuration/kms/index.mdx index 13238667f5..685146deb6 100644 --- a/website/content/docs/configuration/kms/index.mdx +++ b/website/content/docs/configuration/kms/index.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: KMS - configuration -description: |- - The KMS stanza configures KMS-specific parameters. +page_title: KMS configuration +description: >- + Learn about using the kms stanza to configure key management system parameters. Discover resources for learning about specific KMS technologies. --- # `kms` stanza diff --git a/website/content/docs/configuration/kms/ocikms.mdx b/website/content/docs/configuration/kms/ocikms.mdx index 95da4158cc..9b2656a7a7 100644 --- a/website/content/docs/configuration/kms/ocikms.mdx +++ b/website/content/docs/configuration/kms/ocikms.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: OCI KMS - KMSs - configuration -description: |- - The OCI KMS configures Boundary to use OCI KMS for key management. +page_title: OCI KMS configuration +description: >- + Learn about using the OCI KMS for key management and configuring parameters and authentication. View an example configuration. Understand OCI KMS key rotation. 
--- # `ocikms` KMS diff --git a/website/content/docs/configuration/kms/transit.mdx b/website/content/docs/configuration/kms/transit.mdx index b6e59940df..891ea9b16a 100644 --- a/website/content/docs/configuration/kms/transit.mdx +++ b/website/content/docs/configuration/kms/transit.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Vault Transit - seals - configuration -description: |- - The Transit configures Boundary to use Vault's Transit Secret Engine for key management. +page_title: Vault Transit configuration +description: >- + Learn about using the Vault transit secrets engine for key management and configuring parameters and authentication. View an example Transit KMS configuration. --- # `transit` seal diff --git a/website/content/docs/configuration/listener/index.mdx b/website/content/docs/configuration/listener/index.mdx index c7042eb260..2f5b93347a 100644 --- a/website/content/docs/configuration/listener/index.mdx +++ b/website/content/docs/configuration/listener/index.mdx @@ -1,9 +1,8 @@ --- layout: docs -page_title: Listeners - configuration -description: |- - The listener stanza configures the addresses and ports on which Boundary will - respond to requests. +page_title: Listener configuration +description: >- + Learn about TCP and Unix listener configuration settings. Understand where to change the default addresses and ports on which Boundary responds to requests. --- # `listener` stanza diff --git a/website/content/docs/configuration/listener/tcp.mdx b/website/content/docs/configuration/listener/tcp.mdx index 566a9de0fc..849ca97f2f 100644 --- a/website/content/docs/configuration/listener/tcp.mdx +++ b/website/content/docs/configuration/listener/tcp.mdx @@ -1,9 +1,8 @@ --- layout: docs -page_title: TCP - listeners - configuration -description: |- - The TCP listener configures Boundary to listen on the specified TCP address and - port. 
+page_title: TCP listener configuration +description: >- + Learn about using the TCP listener on a TCP address and port, and view configurable parameters. Understand custom response headers. View example configurations. --- # `tcp` listener diff --git a/website/content/docs/configuration/listener/unix.mdx b/website/content/docs/configuration/listener/unix.mdx index c080d304db..4f60862598 100644 --- a/website/content/docs/configuration/listener/unix.mdx +++ b/website/content/docs/configuration/listener/unix.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Unix domain socket - listeners - configuration -description: |- - The Unix listener configures Boundary to listen on the specified Unix domain socket. +page_title: Unix domain socket listener configuration +description: >- + Learn about using the Unix listener on a specified Unix domain socket, and view configurable parameters. View example Unix listener configurations. --- # `unix` listener diff --git a/website/content/docs/configuration/plugins.mdx b/website/content/docs/configuration/plugins.mdx index 1c4e5d1485..67d5a00db4 100644 --- a/website/content/docs/configuration/plugins.mdx +++ b/website/content/docs/configuration/plugins.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Plugins - configuration -description: |- - The plugins stanza configures plugin-specific parameters. +page_title: Plugin configuration +description: >- + Learn about the plugin-specific parameter that configures a directory for Boundary to use for writing and executing its built-in plugins. 
--- # `plugin` stanza diff --git a/website/content/docs/configuration/session-recording/configure-storage-policy.mdx b/website/content/docs/configuration/session-recording/configure-storage-policy.mdx index 6b924097d0..6e9ddfd72e 100644 --- a/website/content/docs/configuration/session-recording/configure-storage-policy.mdx +++ b/website/content/docs/configuration/session-recording/configure-storage-policy.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Configure storage bucket policies -description: |- - How to configure storage bucket lifecycle policies for session recording in Boundary +description: >- + Configure storage bucket policies to manage the lifecycles of session recordings. Specify retention and deletion policies to codify compliance periods. --- # Configure storage bucket policies @@ -50,7 +50,7 @@ Complete the following steps to create a storage policy in Boundary for session 1. Complete the following fields to create the Boundary storage policy: - **Name**: (Optional) The name field is optional, but if you enter a name it must be unique. - **Description**: (Optional) An optional description of the Boundary storage policy for identification purposes. - - **Retention Policy**: (Required) Specifies how long a recording must be stored, in days. + - **Retention Policy**: (Required) Specifies how long a recording must be stored, in days. Policy values include: - `Forever`: If enabled, the **Deletion Policy** field is disabled. - `Custom`: Specify a custom retention policy in days. @@ -186,7 +186,7 @@ In this example, recordings stored within the global scope must be retained for - Boundary does not support an undo action. Storage policies are meant to enforce compliance to a specific law or regulation. Updating the storage policy of a session recording can have immediate and possibly unexpected results such as the immediate deletion of session recordings. + Boundary does not support an undo action. 
Storage policies are meant to enforce compliance to a specific law or regulation. Updating the storage policy of a session recording can have immediate and possibly unexpected results such as the immediate deletion of session recordings. @@ -216,7 +216,7 @@ The following example applies the policy created above to an org named `prod-dat ```shell-session $ boundary policies list - Policy information: + Policy information: ID: pst_WZ3SQSSYJY Version: 1 Type: storage @@ -382,12 +382,12 @@ Check that the storage policy was successfully attached to the `prod-databases` Storage Policy ID: pst_WZ3SQSSYJY Updated Time: Thu, 25 Jan 2024 22:00:27 MST Version: 7 - + Scope (parent): ID: global Name: global Type: global - + Authorized Actions: detach-storage-policy no-op @@ -503,7 +503,7 @@ New session recordings under the `prod-databases` scope should now show a `retai 1. Create a new session recording on a target within the `prod-databases` org. 1. Log in to Boundary. -1. Click **Session Recordings** in the navigation panel. +1. Click **Session Recordings** in the navigation panel. 1. Click **View** for a new recording that was made after the storage policy was attached to the `prod-databases` scope. 1. Under **Session details**, verify that the *Retain until* and *Delete after* dates match the durations defined in the `soc2-policy`. @@ -564,7 +564,7 @@ New session recordings under the `prod-databases` scope should now show a `retai Storage Bucket ID: sb_DC8SPb9uc2 Type: ssh Updated Time: Mon, 29 Jan 2024 23:25:53 MST - + ... ... More Output ... ... @@ -581,18 +581,18 @@ New session recordings under the `prod-databases` scope should now show a `retai 1. The following API call is an example of reading the details of a session recording with the `soc2-policy` storage policy applied to the `prod-databases` scope. List the available session recordings. This example recursively lists all recordings within the global scope. 
- + ```shell-session $ curl --header "Content-Type: application/json" \ --header "Authorization: Bearer $(boundary config get-token)" \ --request GET \ $BOUNDARY_ADDR/v1/session-recordings?recursive=true&scope_id=global | jq ``` - + **Example output:** - + - + ```plaintext { "items": [ diff --git a/website/content/docs/configuration/session-recording/configure-worker-storage.mdx b/website/content/docs/configuration/session-recording/configure-worker-storage.mdx index 09a5c18848..6c00f1301b 100644 --- a/website/content/docs/configuration/session-recording/configure-worker-storage.mdx +++ b/website/content/docs/configuration/session-recording/configure-worker-storage.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Configure workers for local storage -description: |- - How to configure Boundary workers for session recording. +description: >- + Configure workers for session recording storage. View requirements and an example configuration. Understand possible storage states for local and remote storage. --- # Configure workers for session recording @@ -68,7 +68,7 @@ Boundary uses the permission states to determine the remote storage state of a w Boundary periodically checks the states of any workers that use the external storage, and then reports them back to the controller. -You can check the remote storage state of a worker using the `boundary worker read -id $WORKER_ID` command. +You can check the remote storage state of a worker using the `boundary workers read -id $WORKER_ID` command. 
``` Worker information: diff --git a/website/content/docs/configuration/session-recording/create-storage-bucket.mdx b/website/content/docs/configuration/session-recording/create-storage-bucket.mdx index 0be073b73c..7fa84683c8 100644 --- a/website/content/docs/configuration/session-recording/create-storage-bucket.mdx +++ b/website/content/docs/configuration/session-recording/create-storage-bucket.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Create a storage bucket -description: |- - How to create a storage bucket for session recording in Boundary +description: >- + Create a storage bucket in an external storage provider to store recorded sessions. You can review recorded sessions later for compliance and threat management. --- # Create a storage bucket @@ -61,7 +61,7 @@ Complete the following steps to create a storage bucket in Boundary. - **Access key ID**: (Required) The access key ID that AWS generates for the IAM user to use with the storage bucket. - **Secret access key**: (Required) The secret access key that AWS generates for the IAM user to use with this storage bucket. - - **Worker filter**: (Required) A filter that indicates which Boundary workers have access to the storage. The filter must match an existing worker in order to create a Boundary storage bucket. + - **Worker filter**: (Required) A filter expression that indicates which Boundary workers have access to the storage. The filter must match an existing worker in order to create a Boundary storage bucket. Refer to [filter examples](/boundary/docs/concepts/filtering/worker-tags#example-worker-filter-for-storage-buckets) to learn about worker tags and filters. - **Disable credential rotation**: (Optional) Prevents the AWS plugin from automatically rotating credentials. Although credentials are stored encrypted in Boundary, by default the [AWS plugin](https://github.com/hashicorp/boundary-plugin-aws) attempts to rotate the credentials you provide. 
The given credentials are used to create a new credential, and then the original credential is revoked. @@ -79,7 +79,7 @@ Complete the following steps to create a storage bucket in Boundary. For more information, refer to the AWS documentation for [Logging IAM and AWS STS API calls with AWS CloudTrail](https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html). - **Role tags**: An object with key-value pair attributes that is passed when you assume an IAM role. For more information, refer to the AWS documentation for [Passing session tags in AWS STS](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html). - - **Worker filter**: (Required) A filter that indicates which Boundary workers have access to the storage. The filter must match an existing worker in order to create a Boundary storage bucket. + - **Worker filter**: (Required) A filter expression that indicates which Boundary workers have access to the storage. The filter must match an existing worker in order to create a Boundary storage bucket. Refer to [filter examples](/boundary/docs/concepts/filtering/worker-tags#example-worker-filter-for-storage-buckets) to learn about worker tags and filters. - **Disable credential rotation**: (Required) Prevents the AWS plugin from automatically rotating credentials. This option is required if you use dynamic credentials. 
@@ -110,7 +110,7 @@ The required fields for creating a storage bucket depend on whether you configur -bucket-name mybucket1 \ -plugin-name aws \ -scope-id o_1234567890 \ - -worker-filter ‘“dev” in “/tags/type”’ \ + -worker-filter ‘“aws-worker” in “/tags/type”’ \ -secret ‘{“access_key_id”: “123456789” , “secret_access_key” : “123/456789/12345678”}’ \ -attributes ‘{“region”:”us-east-1”,”disable_credential_rotation”:true}’ ``` @@ -121,7 +121,7 @@ The required fields for creating a storage bucket depend on whether you configur - `bucket-name`: (Required) The name of the AWS bucket you want to associate with the Boundary storage bucket. - `plugin-name`: (Required) The name of the Boundary storage plugin. - `scope_id`: (Required) A storage bucket can belong to the Global scope or an Org scope. - - `worker-filter`: (Required) A filter that indicates which Boundary workers have access to the storage. The filter must match an existing worker in order to create a Boundary storage bucket. + - `worker-filter`: (Required) A filter expression that indicates which Boundary workers have access to the storage. The filter must match an existing worker in order to create a Boundary storage bucket. Refer to [filter examples](/boundary/docs/concepts/filtering/worker-tags#example-worker-filter-for-storage-buckets) to learn about worker tags and filters. - `secret`: (Required) The AWS credentials to use. - `access_key_id`: (Required) The AWS access key to use. - `secret_access_key_id`: (Required) The AWS secret access key to use. @@ -155,7 +155,7 @@ The required fields for creating a storage bucket depend on whether you configur - `bucket-name`: (Required) The name of the AWS bucket you want to associate with the Boundary storage bucket. - `plugin-name`: (Required) The name of the Boundary storage plugin. - `scope_id`: (Required) A storage bucket can belong to the Global scope or an Org scope. 
- - `worker-filter`: (Required) A filter that indicates which Boundary workers have access to the storage. The filter must match an existing worker in order to create a Boundary storage bucket. + - `worker-filter`: (Required) A filter expression that indicates which Boundary workers have access to the storage. The filter must match an existing worker in order to create a Boundary storage bucket. Refer to [filter examples](/boundary/docs/concepts/filtering/worker-tags#example-worker-filter-for-storage-buckets) to learn about worker tags and filters. - `attributes` or `-attr`: Attributes of the Amazon S3 storage bucket. - `role_arn`: (Required) The ARN (Amazon Resource Name) role that is attached to the EC2 instance that the self-managed worker runs on. - `role_external_id`: (Optional) A required value if you delegate third party access to your AWS resources. @@ -173,7 +173,11 @@ The required fields for creating a storage bucket depend on whether you configur -The HCL code for creating a storage bucket is different depending on whether you configured the AWS S3 bucket with static or dynamic credentials. +The HCL code for creating a storage bucket is different depending on whether you configured the AWS S3 bucket with static or dynamic credentials. This page provides example configurations for a generic Terraform deployment. + +Refer to the [Boundary Terraform provider documentation](https://registry.terraform.io/providers/hashicorp/boundary/latest/docs) to learn about the requirements for the following example attributes. + +Support for Amazon S3 storage providers leverages the [Boundary AWS plugin](https://github.com/hashicorp/boundary-plugin-aws). 
@@ -201,7 +205,7 @@ resource "boundary_storage_bucket" "aws_static_credentials_example" { "access_key_id" = "aws_access_key_id_value", "secret_access_key" = "aws_secret_access_key_value" }) - worker_filter = "\"dev\" in \"/tags/type\"" + worker_filter = "\"aws-worker\" in \"/tags/type\"" } output "storage_bucket_id" { @@ -229,7 +233,7 @@ resource "boundary_storage_bucket" "aws_dynamic_credentials_example" { "role_arn" = "arn:aws:iam::123456789012:role/S3Access" "disable_credential_rotation" = true }) - worker_filter = "\"dev\" in \"/tags/type\"" + worker_filter = "\"s3-worker\" in \"/tags/type\"" } output "storage_bucket_id" { @@ -271,7 +275,7 @@ Complete the following steps to create a storage bucket in Boundary. - **Region**: (Optional) The region to configure the storage bucket for. - **Access key ID** (Required): The MinIO service account's access key to use with this storage bucket. - **Secret access key** (Required): The MinIO service account's secret key to use with this storage bucket. - - **Worker filter**: (Required) A filter that indicates which Boundary workers have access to the storage. The filter must match an existing worker in order to create a Boundary storage bucket. + - **Worker filter**: (Required) A filter expression that indicates which Boundary workers have access to the storage. The filter must match an existing worker in order to create a Boundary storage bucket. Refer to [filter examples](/boundary/docs/concepts/filtering/worker-tags#example-worker-filter-for-storage-buckets) to learn about worker tags and filters. - **Disable credential rotation**: (Optional) Controls whether the plugin will rotate the incoming credentials and manage a new MinIO service account. If this attribute is set to false, or not provided, the plugin will rotate the incoming credentials, using them to create a new MinIO service account, then delete the incoming credentials. 1. Click **Save**. 
@@ -288,7 +292,7 @@ Complete the following steps to create a storage bucket in Boundary. -plugin-name minio \ -scope-id o_1234567890 \ -bucket-prefix="foo/bar/zoo" \ - -worker-filter '"minio" in "/tags/type"' \ + -worker-filter '"minio-worker" in "/tags/type"' \ -attr endpoint_url="https://my-minio-instance.dev:9000" \ -attr region="REGION" \ -attr disable_credential_rotation=true \ @@ -301,7 +305,7 @@ Complete the following steps to create a storage bucket in Boundary. - `bucket-name`: (Required) Name of the MinIO bucket you want to associate with the Boundary storage bucket. - `plugin-name`: (Required) The name of the Boundary storage plugin. - `scope_id`: (Required) A storage bucket can belong to the Global scope or an Org scope. - - `worker-filter`: (Required) A filter that indicates which Boundary workers have access to the storage. The filter must match an existing worker in order to create a Boundary storage bucket. + - `worker-filter`: (Required) A filter expression that indicates which Boundary workers have access to the storage. The filter must match an existing worker in order to create a Boundary storage bucket. Refer to [filter examples](/boundary/docs/concepts/filtering/worker-tags#example-worker-filter-for-storage-buckets) to learn about worker tags and filters. - `secret`: (Required) The MinIO credentials to use. - `access_key_id` (Required): The MinIO service account's access key to use with this storage bucket. - `secret_access_key` (Required): The MinIO service account's secret key to use with this storage bucket. @@ -313,10 +317,45 @@ Complete the following steps to create a storage bucket in Boundary. This option must be set to `true` if you use dynamic credentials. - + + +This page provides example configurations for a generic Terraform deployment. 
+ +Refer to the [Boundary Terraform provider documentation](https://registry.terraform.io/providers/hashicorp/boundary/latest/docs) to learn about the requirements for the following example attributes. + +Support for MinIO storage providers leverages the [Boundary MinIO plugin](https://github.com/hashicorp/boundary-plugin-minio). + +Apply the following Terraform configuration: + +```hcl +resource "boundary_storage_bucket" "minio_credentials_example" { + name = "My MinIO storage bucket" + description = "My first storage bucket" + scope_id = "o_1234567890" + plugin_name = "minio" + bucket_name = "mybucket1" + + attributes_json = jsonencode({ + "endpoint_url" = "https://my-minio-instance.dev:9000", + "disable_credential_rotation" = true + }) + + secrets_json = jsonencode({ + "access_key_id" = "minio_access_key_id_value", + "secret_access_key" = "minio_secret_access_key_value" + }) + worker_filter = "\"minio-worker\" in \"/tags/type\"" +} + +output "storage_bucket_id" { + value = boundary_storage_bucket.minio_credentials_example.id +} +``` + + Complete the following steps to create a storage bucket in Boundary using an S3-compliant storage provider. Hitachi Content Platform is used as an example below. @@ -345,7 +384,7 @@ Complete the following steps to create a storage bucket in Boundary using an S3- - **Region**: (Optional) The region to configure the storage bucket for. - **Access key ID** (Required): The storage provider's service account's access key to use with this storage bucket. - **Secret access key** (Required): The storage provider's service account's secret key to use with this storage bucket. - - **Worker filter**: (Required) A filter that indicates which Boundary workers have access to the storage. + - **Worker filter**: (Required) A filter expression that indicates which Boundary workers have access to the storage. 
The filter must match an existing worker in order to create a Boundary storage bucket. Refer to [filter examples](/boundary/docs/concepts/filtering/worker-tags#example-worker-filter-for-storage-buckets) to learn about worker tags and filters. - **Disable credential rotation**: (Optional) Controls whether the plugin will rotate the incoming credentials and manage a new storage service account. If this attribute is set to false, or not provided, the plugin will rotate the incoming credentials, using them to create a new storage service account, then delete the incoming credentials. Note that credential rotation is not supported for Hitachi Content Platform, and it may not function for other S3-compatible providers. @@ -364,7 +403,7 @@ Complete the following steps to create a storage bucket in Boundary using an S3- -plugin-name minio \ -scope-id o_1234567890 \ -bucket-prefix="foo/bar/zoo" \ - -worker-filter '"dev" in "/tags/type"' \ + -worker-filter '"storage-worker" in "/tags/type"' \ -attr endpoint_url="https://my-hitachi-instance.dev:9000" \ -attr region="REGION" \ -attr disable_credential_rotation=true \ @@ -378,7 +417,7 @@ Complete the following steps to create a storage bucket in Boundary using an S3- - `plugin-name`: (Required) The name of the Boundary storage plugin. Use the `minio` plugin for S3-compatible storage. - `scope_id`: (Required) A storage bucket can belong to the Global scope or an Org scope. - - `worker-filter`: (Required) A filter that indicates which Boundary workers have access to the storage. The filter must match an existing worker in order to create a Boundary storage bucket. + - `worker-filter`: (Required) A filter expression that indicates which Boundary workers have access to the storage. The filter must match an existing worker in order to create a Boundary storage bucket. Refer to [filter examples](/boundary/docs/concepts/filtering/worker-tags#example-worker-filter-for-storage-buckets) to learn about worker tags and filters. 
- `secret`: (Required) The storage provider's credentials to use. - `access_key_id` (Required): The storage provider's service account's access key to use with this storage bucket. - `secret_access_key` (Required): The storage provider's service account's secret key to use with this storage bucket. @@ -389,6 +428,42 @@ Complete the following steps to create a storage bucket in Boundary using an S3- Note that credential rotation is not supported for Hitachi Content Platform, and it may not function for other S3-compatible providers. + + + +This page provides example configurations for a generic Terraform deployment. + +Refer to the [Boundary Terraform provider documentation](https://registry.terraform.io/providers/hashicorp/boundary/latest/docs) to learn about the requirements for the following example attributes. + +Support for S3-compliant storage providers leverages the [Boundary MinIO plugin](https://github.com/hashicorp/boundary-plugin-minio). + +Apply the following Terraform configuration: + +```hcl +resource "boundary_storage_bucket" "storage_credentials_example" { + name = "My storage bucket" + description = "My first storage bucket" + scope_id = "o_1234567890" + plugin_name = "minio" + bucket_name = "mybucket1" + + attributes_json = jsonencode({ + "endpoint_url" = "https://my-hitachi-instance.dev:9000", + "disable_credential_rotation" = true + }) + + secrets_json = jsonencode({ + "access_key_id" = "storage_access_key_id_value", + "secret_access_key" = "storage_secret_access_key_value" + }) + worker_filter = "\"storage-worker\" in \"/tags/type\"" +} + +output "storage_bucket_id" { + value = boundary_storage_bucket.storage_credentials_example.id +} +``` + diff --git a/website/content/docs/configuration/session-recording/enable-session-recording.mdx b/website/content/docs/configuration/session-recording/enable-session-recording.mdx index 5934d9054f..56e310a71b 100644 --- a/website/content/docs/configuration/session-recording/enable-session-recording.mdx +++ 
b/website/content/docs/configuration/session-recording/enable-session-recording.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Enable session recording on a target -description: |- - How to enable session recording on a target in Boundary +description: >- + Enable session recording for targets so that user sessions are recorded for compliance and threat management. --- # Enable session recording on a target @@ -23,6 +23,7 @@ Refer to [Create the controller configuration](/boundary/docs/install-boundary/c - The targets must be configured with an ingress or egress worker filter that includes a worker with access to the storage bucket you created. Refer to [SSH target attributes](/boundary/docs/concepts/domain-model/targets#ssh-target-attributes) for more information. - You must enable injected application credentials on any target that you want to use for session recording. +Refer to [Configure targets with credential injection](/boundary/docs/configuration/credential-management/configure-credential-injection) for more information about injecting application credentials. Complete the following steps to enable session recording on a target. diff --git a/website/content/docs/configuration/session-recording/index.mdx b/website/content/docs/configuration/session-recording/index.mdx index d1aaaf67cf..f0202502e8 100644 --- a/website/content/docs/configuration/session-recording/index.mdx +++ b/website/content/docs/configuration/session-recording/index.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Overview -description: |- - An overview of session recording in Boundary +page_title: Session recording overview +description: >- + Learn about using session recording to audit user sessions for compliance and threat management. Understand considerations for local and remote storage. 
--- # Overview diff --git a/website/content/docs/configuration/session-recording/storage-providers/configure-minio.mdx b/website/content/docs/configuration/session-recording/storage-providers/configure-minio.mdx index 781139d3c5..35add2959e 100644 --- a/website/content/docs/configuration/session-recording/storage-providers/configure-minio.mdx +++ b/website/content/docs/configuration/session-recording/storage-providers/configure-minio.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Configure MinIO -description: |- - How to configure MinIO as a storage provider for Boundary session recording. +page_title: Configure MinIO storage +description: >- + Configure MinIO as a storage provider for recorded sessions. Understand session recording and MinIO requirements. View an example configuration. --- # Configure MinIO as a storage provider @@ -30,6 +30,9 @@ When you determine storage requirements for the external bucket, you should cons You must associate the Boundary storage bucket with a MinIO storage bucket. A Boundary MinIO storage bucket contains the bucket name, endpoint URL, optional region, optional prefix, and the service account credentials needed to access the bucket. To enable credential rotation, you cannot add a Boundary storage bucket without a MinIO service account. You can disable credential rotation when you create the Boundary storage bucket. + At the time of the 0.18.0 release, the latest tested and supported MinIO version is `RELEASE.2024-10-02T17-50-41Z`. + Newer versions may work as well, but they have not been tested. + At this time, the NetBSD operating system is not supported for the MinIO storage bucket. 
diff --git a/website/content/docs/configuration/session-recording/storage-providers/configure-s3-compliant.mdx b/website/content/docs/configuration/session-recording/storage-providers/configure-s3-compliant.mdx index a7f4b39958..1b5fc358db 100644 --- a/website/content/docs/configuration/session-recording/storage-providers/configure-s3-compliant.mdx +++ b/website/content/docs/configuration/session-recording/storage-providers/configure-s3-compliant.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Configure S3-compliant storage -description: |- - How to configure an S3-compliant storage provider for Boundary session recording. +description: >- + Configure an S3-compliant storage provider for recorded sessions. Understand session recording and storage provider requirements. View an example configuration. --- # Configure an S3-compliant storage provider @@ -11,13 +11,22 @@ description: |- The [MinIO plugin](https://github.com/hashicorp/boundary-plugin-minio/) lets you configure S3-compliant storage providers for session recording. + HashiCorp has tested and confirmed that you can configure the following S3-compliant storage products for session recording using the MinIO plugin: -- [Hitachi Content Platform](#hitachi-content-platform-configuration) +- [Hitachi Content Platform](https://trycontent.hitachivantara.com) + + At the time of the 0.18.0 release, the latest tested and supported Hitachi Content Platform version is `2.6.0.0`. + Newer versions may work as well, but they have not been tested. + +- [Backblaze B2](https://www.backblaze.com/cloud-storage) You can also configure other providers' S3-compliant storage products for session recording storage. We will update the list of providers as we test them. +The process for [configuring the Hitachi Content Platform](#hitachi-content-platform-configuration) is included below as an example. +Configuring other S3-compliant storage products for use with session recording should be a similar process. 
+ ## Requirements Before you can create a storage bucket in Boundary, you must ensure that your environment meets certain requirements. @@ -40,11 +49,10 @@ When you determine storage requirements for the external bucket, you should cons - A service account and access keys for the storage provider You must provide service account access keys when you configure a Boundary storage bucket later on. - Refer to your storage provider's documentation to learn how to set up a service account. - The storage bucket must be configured with R/W access. If you use a - restricted IAM user policy, the following policy actions must be allowed at a minimum. + restricted IAM user policy, you must allow the following policy actions at a minimum. ```json { @@ -73,7 +81,7 @@ When you determine storage requirements for the external bucket, you should cons HashiCorp has tested and confirmed that you can configure the Hitachi Content Platform for external session recording storage using the MinIO plugin. It is included as an example in this topic. -You should be able to configure other S3-compliant storage providers to work for session recording storage as well, but we have not tested other providers. +You should be able to configure other S3-compliant storage providers to work for session recording storage as well, but we have only tested a [limited number of providers](#providers). You must have an account with Hitachi Content Platform to create storage buckets. 
You can sign up for an account at the following URL: diff --git a/website/content/docs/configuration/session-recording/storage-providers/configure-s3.mdx b/website/content/docs/configuration/session-recording/storage-providers/configure-s3.mdx index 9df0154281..afdd30db4b 100644 --- a/website/content/docs/configuration/session-recording/storage-providers/configure-s3.mdx +++ b/website/content/docs/configuration/session-recording/storage-providers/configure-s3.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Configure Amazon S3 -description: |- - How to configure Amazon S3 as a storage provider for Boundary session recording. +page_title: Configure Amazon S3 storage +description: >- + Configure Amazon S3 as a storage provider for recorded sessions. Understand session recording and AWS requirements. View an example configuration. --- # Configure Amazon S3 as a storage provider diff --git a/website/content/docs/configuration/session-recording/update-storage-policy.mdx b/website/content/docs/configuration/session-recording/update-storage-policy.mdx index febda9084e..8096accc7e 100644 --- a/website/content/docs/configuration/session-recording/update-storage-policy.mdx +++ b/website/content/docs/configuration/session-recording/update-storage-policy.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Update storage bucket policies -description: |- - How to update a storage bucket policy for session recordings in Boundary +description: >- + Update storage bucket policies to manage the lifecycles of session recordings. Understand how changing a policy affects existing and new recorded sessions. 
--- # Update storage bucket policies diff --git a/website/content/docs/configuration/target-aliases/connect-target-alias.mdx b/website/content/docs/configuration/target-aliases/connect-target-alias.mdx new file mode 100644 index 0000000000..de3227e636 --- /dev/null +++ b/website/content/docs/configuration/target-aliases/connect-target-alias.mdx @@ -0,0 +1,30 @@ +--- +layout: docs +page_title: Overview +description: >- + Learn how to connect to a target configured with an alias. +--- + +After you [Create a target alias](/boundary/docs/configuration/target-aliases/create-target-alias), you can refer to the alias when you establish sessions. + +With aliases, you don't have to specify the target ID, target name, or scope when you connect with Boundary. Target aliases are also required to use [transparent sessions](/boundary/docs/configuration/target-aliases/transparent-sessions). + +# Connect to a target using an alias + +You can substitute an alias whenever you could use the `-id` flag or `-target` flag in the CLI. + +For example, you can use the following command to connect to an SSH target with the ID `ttcp_1234567890`: + +```shell-session +$ boundary connect ssh -target-id ttcp_1234567890 +``` + +If you configured an alias named `example.alias.boundary` for the target, you can now use the alias to connect to the target: + +```shell-session +$ boundary connect ssh example.alias.boundary +``` + +Aliases are globally unique, so you don't need to specify the scope to connect to the target. + +After you verify that you can connect to the target using an alias, you can try to connect to the target using [transparent sessions](/boundary/docs/configuration/target-aliases/transparent-sessions). 
HCP/ENT \ No newline at end of file diff --git a/website/content/docs/configuration/target-aliases/create-target-alias.mdx b/website/content/docs/configuration/target-aliases/create-target-alias.mdx new file mode 100644 index 0000000000..a2c2304812 --- /dev/null +++ b/website/content/docs/configuration/target-aliases/create-target-alias.mdx @@ -0,0 +1,545 @@ +--- +layout: docs +page_title: Create target aliases +description: >- + Learn how to create a target alias for an existing target, or assign one during target creation. +--- + +# Create target aliases + +You can create aliases and associate them with targets using the following methods: + +- [Create an alias for an existing target](#create-an-alias-for-an-existing-target) +- [Create an alias during target creation](#create-an-alias-during-target-creation) +- [Associate an existing alias with a target](#associate-an-existing-alias-with-a-target) +- [Create multiple target aliases: An example](#create-multiple-aliases-for-a-single-target) + +## Create an alias for an existing target + +You can create a new alias and associate it with an existing target at the same time. + +When you create the target alias, you can choose from the following methods: + +- Create the alias without adding a target +- Create the alias for one or more targets +- Create the alias with an optional host ID + +Complete the following steps to create a new alias and associate it with a target: + + + + +1. Log in to Boundary. +1. Select **Aliases** in the navigation pane. +1. Click **New Alias**. +1. Complete the following fields: + - **Name**: (Optional) Enter an optional name for the alias to use for identification purposes. + - **Description**: (Optional) Enter an optional description for the alias to use for identification purposes. + - **Type**: Select **Target**. + At this time, targets are the only Boundary resources that supports aliasing. 
+ - **Alias Value**: Enter the string that you want to use as the alias to represent the target. + An alias's value can be a hostname or a DNS-like string. + - **Target ID**: (Optional) Specify any targets you want to associate with the alias. + - **Host ID**: (Optional) Enter an optional host ID, if you would like to specify that the alias always uses the same host when you use it to connect to a target. +1. Click **Save**. + + + + +1. Log in to Boundary. +1. Use the following command to create an alias: + + ```shell-session + $ boundary aliases create target \ + -description 'This is an example alias for target tcp_1234567890' \ + -destination-id tcp_1234567890 \ + -name 'Example Boundary alias' \ + -scope-id global \ + -value example.alias.boundary \ + -authorize-session-host-id hst_1234567890 + ``` + + You can use any of the following [attributes](/boundary/docs/concepts/domain-model/aliases) when you create an alias: + + - `-description=` - Specifies the optional description you want to use for identification purposes. + - `-destination-id=` - Specifies the ID of the target that the alias references. + - `-name=` - Specifies the optional name you want to use to describe the alias for identification purposes. + - `-scope-id=` - Scope in which to create the alias. The default is `global`. + You can also specify the scope using the BOUNDARY_SCOPE_ID environment variable. + At this time, aliases are only supported for the global scope. + - `-value=` - Specifies the string that you want to use as the alias to represent the target. + The alias `value` can be a hostname or a DNS-like string. + - `-authorize-session-host-id=` - Optionally indicates the host ID to use when you use the alias to authorize a session. + + + + +## Create an alias during target creation + +You can create a new target and new alias at the same time and associate the two. + +Complete the following steps to create a new target and new alias at the same time: + + + + +1. Log in to Boundary. +1. 
Select **Targets** in the navigation pane. +1. Click **New Target**. +1. Complete the following fields: + - **Name**: Enter the target name for identification purposes. + - **Description**: (Optional) Enter an optional description for identification purposes. + - **Type**: Select the target type. + You can create SSH or TCP targets. + - **Target Address**: (Optional) Enter a valid IP address or DNS name. + Alternatively, you can configure host catalogs and host sets. + - **Default Port**: (Optional) Enter an optional default port for the target to use for connections. + - **Default Client Port**: (Optional) Enter an optional local proxy port on which to listen when a session is started on a client. + - **Maximum Duration**: (Optional) Enter an optional maximum duration for sessions on this target, in seconds. + - **Maximum Connection**: (Optional) Enter the maximum number of connections allowed per session on this target. + For unlimited connections, enter `-1`. + - **Workers**: (Optional) Select whether you want the worker to function as an ingress and/or egress worker. + - **Aliases**: (Optional) Enter the value for any aliases you want to associate with this target, and then click **Add**. + An alias's value can be a hostname or a DNS-like string. + You can associate multiple aliases with a target. +1. Click **Save**. + + + + +1. Log in to Boundary. +1. Use the following command to create a target: + + ```shell-session + $ boundary targets create ssh \ + -description 'This is an example ssh target' \ + -name 'Example Boundary SSH target' \ + -scope-id global \ + -with-alias-authorize-session-host-id hst_1234567890 \ + -with-alias-scope-id global \ + -with-alias-value example.alias.boundary + ``` + + You can use any of the following [attributes](/boundary/docs/concepts/domain-model/targets) when you create a target: + + - `description` - (optional) + An optional description that you can use for identification purposes. 
+ - `name` - (required) + The `name` must be unique within the target's project. + - `scope-id` - (required) + The scope in which to create the target. + The default is `global`. + You can also specify the scope using the BOUNDARY_SCOPE_ID environment variable. + - `-address=` - An optional valid network address for the target to connect to. + You cannot use an address alongside host sources. + - `-default-client-port=` - The default client port on the target. + - `-default-port=` - The default port on the target. + If you do not specify a default port, Boundary uses port 22. + - `-egress-worker-filter=` - A Boolean expression that filters which egress workers can process sessions for the target. + - `-enable-session-recording=` - A Boolean expression you can use to enable session recording for the target. + - `-ingress-worker-filter=` - A Boolean expression that filters which ingress workers can process sessions for the target. + - `-session-connection-limit=` - The maximum number of connections allowed for a session. +A value of `-1` means the connections are unlimited. + - `-session-max-seconds=` - The maximum lifetime of the session, including all connections. + You can specify an integer number of seconds or a duration string. + - `-storage-bucket-id=` - The public ID of the storage bucket to associate with the target. + - `-with-alias-authorize-session-host-id=` - The host ID that an alias uses to authorize sessions for the target. + - `-with-alias-scope-id=` - The scope ID that you want to create the target and alias in. + The default is `global`. + At this time, aliases are only supported for the global scope. + - `-with-alias-value=` - The value of the alias that you want to use to represent the target. + Use this parameter to create the alias and target, and associate them with each other, at the same time. + + Note that you can create SSH or TCP [target types](/boundary/docs/concepts/domain-model/targets#target-types). 
+ The example command in this section creates an SSH target. + + + + +## Associate an existing alias with a target + +If you [created an alias](#create-an-alias-for-an-existing-target) without associating it with a target, you can update it with an existing target at a later time. Complete the following steps to add an alias to a target: + + + + +1. Log in to Boundary. +1. Select **Targets** in the navigation pane. +1. Select the target you want to add an alias to. +1. Under the **Aliases** heading in the right sidebar, click **Add an alias**. +1. Complete the following fields: + - **Name**: (Optional) Enter an optional name for the alias to use for identification purposes. + - **Description**: (Optional) Enter an optional description for the alias to use for identification purposes. + - **Type**: Select **Target**. + At this time, targets are the only Boundary resources that support aliasing. + - **Alias Value**: Enter the alias value you want to use in commands to represent the target. + An alias's value can be a hostname or a DNS-like string. + - **Target ID**: This field contains the ID of the target you selected to add an alias to. + It is read only. + - **Host ID**: (Optional) Enter an optional host ID, if you would like to specify that the alias always uses the same host when you use it to connect to a target. +1. Click **Save**. + + + + +1. Log in to Boundary. +1. Use the following command to update an alias: + + ```shell-session + $ boundary aliases update target \ + -destination-id tcp_1234567890 \ + -id alt_1234567890 \ + -authorize-session-host-id hst_1234567890 + ``` + + You can use any of the following [attributes](/boundary/docs/concepts/domain-model/aliases) when you update an alias: + + - `-description=` - Specifies the optional description you want to use for identification purposes. + - `-destination-id=` - Specifies the ID of the target that the alias references. + - `-id=` - Specifies the ID of the alias you want to update. 
+ - `-name=` - Specifies the optional name you want to use to describe the alias for identification purposes. + - `-scope-id=` - Scope in which to create the alias. The default is `global`. + You can also specify the scope using the BOUNDARY_SCOPE_ID environment variable. + At this time, aliases are only supported for the global scope. + - `-value=` - Specifies the string that you want to use as the alias to represent the target. + The alias `value` must comply with DNS naming rules. + - `-authorize-session-host-id=` - Optionally indicates the host ID to use when you use the alias to authorize a session. + + + + +## Create multiple aliases for a single target + +Target aliases point directly to the target they are associated with. You can assign targets a host set or a direct target address. + +[Host sets](/boundary/docs/concepts/domain-model/host-sets) are sets of functionally equivalent hosts, and are commonly used for deployments at scale. When Boundary authorizes a session, a target assigned a host set will select a host from the host set at random to use for all connections for the session. + +You assign [direct target addresses](/boundary/docs/concepts/domain-model/targets#address) directly to the target. They refer to a specific network resource, like an IP address. Boundary only connects to the direct target address when it establishes a connection to the associated target. + +When you create a target alias, you can also assign it to a specific host. Assigning an alias to a specific host is useful if you want to avoid creating multiple targets for specific hosts using direct target addresses. + +For example, you could create two aliases for the same target that has been assigned a host set. One alias could refer to the target itself, and would allow Boundary to randomly select a host to connect to for a session. Another alias could point to the same target, but you could assign a host ID that Boundary should use for a session. 
+ +### Example + +You may want to create aliases that point to the same target, but that specify which host Boundary should use when it establishes a session. + +In this example, you set up three aliases for the same target: + +1. A target alias without a host specified +1. A target alias with a host ID specified +1. A target alias with a different host ID specified + + + + This example uses SSH target types, which are only available in HCP Boundary and Boundary Enterprise. This process also works for any other target type, including the TCP target type available in Boundary Community Edition. + + + +For this example, assume that the following scopes exist: + +- Org: `engineering`, ID `o_2drCWvp3Oc` +- Project: `app-servers`, ID `p_3ECODJDbXV` + +And the following host set and hosts exist: + +- Host set: `linux-dev-servers`, ID `hsst_56oiL0WaKu` +- Host: `dev-040`, ID `hst_7wGXkF8e0Q` +- Host: `dev-041`, ID `hst_zlRwMMPKwp` + +Because the `linux-dev-servers` hosts are functionally equivalent, you can create a single target for the host set, and create an alias for the target. + +We recommend creating DNS-like aliases to ensure consistent naming conventions. In this example, an alias pattern might be: + +`hostname.host-set.project.org` + +For the `linux-dev-servers` target, create the alias `linux-dev.app-servers.eng`. + + + + +Create the `linux-dev-servers` target. + +1. Log in to Boundary. +1. Select the 'engineering' org and the `app-servers` project. +1. Select **Targets** in the navigation pane. +1. Click **New Target**. +1. Complete the following fields: + - **Name**: `linux-dev-servers` + - **Description**: `linux-dev.app-servers.eng target` + - **Type**: SSH + - **Default Port**: `22` + - **Aliases**: `linux-dev.app-servers.eng` +1. Click **Save**. + +Then add the `linux-dev-servers` host set to the new `linux-dev-servers` target. + +1. Click on the **Host Sources** tab. +1. Click **Add Host Sources**. +1. Select the `linux-dev-servers` host set. +1. 
Click **Add Host Sources**. + + + + +Create the `linux-dev-servers` target. + +1. Log in to Boundary. +1. Use the following command to create the `linux-dev-servers` target with alias `linux-dev.app-servers.eng`: + + ```shell-session + $ boundary targets create ssh \ + -description 'linux-dev.app-servers.eng target' \ + -name linux-dev-servers \ + -scope-id p_3ECODJDbXV \ + -default-port 22 \ + -with-alias-scope-id global \ + -with-alias-value linux-dev.app-servers.eng + ``` + + Example output: + + + + ```shell-session + $ boundary targets create ssh \ + -description 'linux-dev.app-servers.eng target' \ + -name linux-dev-servers \ + -scope-id p_3ECODJDbXV \ + -default-port 22 \ + -with-alias-scope-id global \ + -with-alias-value linux-dev.app-servers.eng + + Target information: + Created Time: Thu, 14 Nov 2024 13:39:36 MST + Description: linux-dev.app-servers.eng target + ID: tssh_lhH5pa425G + Name: linux-dev-servers + Session Connection Limit: -1 + Session Max Seconds: 28800 + Type: ssh + Updated Time: Thu, 14 Nov 2024 13:39:36 MST + Version: 1 + + Scope: + ID: p_3ECODJDbXV + Name: app-servers + Parent Scope ID: o_2drCWvp3Oc + Type: project + + Authorized Actions: + remove-host-sources + remove-credential-sources + authorize-session + delete + set-credential-sources + no-op + read + update + add-host-sources + set-host-sources + add-credential-sources + + Aliases: + ID: alt_CkC6wGKLWW + Value: linux-dev.app-servers.eng + + Attributes: + Default Port: 22 + Enable Session Recording: false + ``` + + + +Then add the `linux-dev-servers` host set (ID `hsst_56oiL0WaKu`) to the new `linux-dev-servers` target (ID `tssh_lhH5pa425G`). + +```shell-session +$ boundary targets add-host-sources -id tssh_lhH5pa425G -host-source hsst_56oiL0WaKu +``` + + + + +Next, create two more aliases for the target. + +Create the `dev-040.linux-dev.app-servers.eng` alias for the host `dev-040`: + + + + +1. Log in to Boundary. Navigate to the `global` scope. +1. 
Select **Aliases** in the navigation pane. +1. Click **New Alias**. +1. Complete the following fields: + - **Name**: `dev-040` + - **Description**: `Target alias for dev-040.linux-dev.app-servers.eng` + - **Type**: `Target` + - **Alias Value**: `dev-040.linux-dev.app-servers.eng` + - **Target ID**: `tssh_lhH5pa425G` + - **Host ID**: `hst_7wGXkF8e0Q` +1. Click **Save**. + + + + +1. Log in to Boundary. +1. Use the following command to create an alias for host `dev-040`: + + ```shell-session + $ boundary aliases create target \ + -description 'Target alias for dev-040.linux-dev.app-servers.eng' \ + -destination-id tssh_lhH5pa425G \ + -name dev-040 \ + -scope-id global \ + -value dev-040.linux-dev.app-servers.eng \ + -authorize-session-host-id hst_7wGXkF8e0Q + ``` + + Example output: + + + + ```shell-session + $ boundary aliases create target \ + -description 'Target alias for dev-040.linux-dev.app-servers.eng' \ + -destination-id tssh_lhH5pa425G \ + -name dev-040 \ + -scope-id global \ + -value dev-040.linux-dev.app-servers.eng \ + -authorize-session-host-id hst_7wGXkF8e0Q + + Alias information: + Created Time: Thu, 14 Nov 2024 13:55:41 MST + Description: Target alias for dev-040.linux-dev.app-servers.eng + Destination ID: tssh_lhH5pa425G + ID: alt_QeCGTcvlq2 + Name: dev-040 + Type: target + Updated Time: Thu, 14 Nov 2024 13:55:41 MST + Value: dev-040.linux-dev.app-servers.eng + Version: 1 + + Scope: + ID: global + Name: global + Type: global + + Authorized Actions: + no-op + read + update + delete + + Attributes: + authorize_session_arguments: + { + "host_id": "hst_7wGXkF8e0Q" + } + ``` + + + + + + +Then create the `dev-041.linux-dev.app-servers.eng` alias for the host `dev-041`. + + + + +1. Log in to Boundary. Navigate to the `global` scope. +1. Select **Aliases** in the navigation pane. +1. Click **New Alias**. +1. 
Complete the following fields: + - **Name**: `dev-041` + - **Description**: `Target alias for dev-041.linux-dev.app-servers.eng` + - **Type**: `Target` + - **Alias Value**: `dev-041.linux-dev.app-servers.eng` + - **Target ID**: `tssh_lhH5pa425G` + - **Host ID**: `hst_zlRwMMPKwp` +1. Click **Save**. + + + + +1. Log in to Boundary. +1. Use the following command to create an alias for host `dev-041`: + + ```shell-session + $ boundary aliases create target \ + -description 'Target alias for dev-041.linux-dev.app-servers.eng' \ + -destination-id tssh_lhH5pa425G \ + -name dev-041 \ + -scope-id global \ + -value dev-041.linux-dev.app-servers.eng \ + -authorize-session-host-id hst_zlRwMMPKwp + ``` + + Example output: + + + + ```shell-session + $ boundary aliases create target \ + -description 'Target alias for dev-041.linux-dev.app-servers.eng' \ + -destination-id tssh_lhH5pa425G \ + -name dev-041 \ + -scope-id global \ + -value dev-041.linux-dev.app-servers.eng \ + -authorize-session-host-id hst_zlRwMMPKwp + + Alias information: + Created Time: Thu, 14 Nov 2024 14:00:13 MST + Description: Target alias for dev-041.linux-dev.app-servers.eng + Destination ID: tssh_lhH5pa425G + ID: alt_X5MRXRSi7t + Name: dev-041 + Type: target + Updated Time: Thu, 14 Nov 2024 14:00:13 MST + Value: dev-041.linux-dev.app-servers.eng + Version: 1 + + Scope: + ID: global + Name: global + Type: global + + Authorized Actions: + no-op + read + update + delete + + Attributes: + authorize_session_arguments: + { + "host_id": "hst_zlRwMMPKwp" + } + ``` + + + + + + +You can now use the aliases to connect to the targets in different contexts. + +The Boundary Desktop Client lists the `linux-dev-servers` target and its aliases under the **Aliases** column. + +When you click **Connect**, a list of the hosts available for the connection appears in the **Quick Connect** box. 
+ +To establish a connection to any `linux-dev` host using the CLI, use the `linux-dev.app-servers.eng` alias: + +```shell-session +$ boundary connect ssh linux-dev.app-servers.eng +``` + +This command randomly selects a host from the `linux-dev-servers` host set attached to the `linux-dev-servers` target. + +To establish a connection to a specific host, connect to its target alias instead: + +```shell-session +$ boundary connect ssh dev-041.linux-dev.app-servers.eng +``` + +This alias still points to the `linux-dev-servers` target, but will only create a session with the `dev-041` host. \ No newline at end of file diff --git a/website/content/docs/configuration/target-aliases/index.mdx b/website/content/docs/configuration/target-aliases/index.mdx new file mode 100644 index 0000000000..634156276a --- /dev/null +++ b/website/content/docs/configuration/target-aliases/index.mdx @@ -0,0 +1,35 @@ +--- +layout: docs +page_title: Overview +description: >- + Learn how to configure aliases and transparent sessions to enhance end-user workflows and simplify target access. +--- + +# Overview + +Target aliases simplify the connection workflow for end users by allowing them to reference targets using a globally unique DNS-like string. + +Without aliases, connecting to an end target requires you to reference the target ID, or a combination of target name and scope: + +```shell-session +$ boundary connect ssh -target-id ttcp_1234567890 +``` + +```shell-session +$ boundary connect ssh -target-name sql-database -target-scope-name staging +``` + +With target aliases, a single globally referenced value is assigned to a target, simplifying connection workflows and enabling transparent sessions for Enterprise and HCP end users. + +```shell-session +$ boundary connect ssh sql-database-staging +``` + +## Configure target aliases + +To set up a target alias: + +1. [Create a target alias](/boundary/docs/configuration/target-aliases/create-target-alias) +1. 
[Connect to a target using an alias](/boundary/docs/configuration/target-aliases/connect-target-alias) + +After you set up a target alias, you can optionally [Configure transparent sessions for end users](/boundary/docs/configuration/target-aliases/transparent-sessions). HCP/ENT \ No newline at end of file diff --git a/website/content/docs/configuration/target-aliases/transparent-sessions.mdx b/website/content/docs/configuration/target-aliases/transparent-sessions.mdx new file mode 100644 index 0000000000..c5bca90060 --- /dev/null +++ b/website/content/docs/configuration/target-aliases/transparent-sessions.mdx @@ -0,0 +1,98 @@ +--- +layout: docs +page_title: Configure transparent sessions +description: >- + Learn how to configure transparent sessions to enhance end-user workflows and simplify target access. +--- + +# Configure transparent sessions + +@include 'alerts/enterprise-only.mdx' + +@include 'alerts/beta.mdx' + +## Requirements + +Before you configure transparent sessions, you must: + +- Ensure that the Boundary CLI and Boundary Desktop are not installed in the environment in which you want to run the transparent sessions beta. +- Download the appropriate Boundary installer for your Windows or MacOS environment from the [Install Boundary](/boundary/install#installer) page or the [releases](https://releases.hashicorp.com/boundary-installer) page. + +## Install the Boundary clients + +Complete the following steps to install the Boundary Client Agent, CLI, and Desktop client: + +1. Install Boundary using the installer. +Make sure to select the options **Boundary Client Agent**, **CLI**, and **Desktop**. +1. Open the CLI and type the following command to confirm that the version is 0.18.0: + ```shell-session + $ boundary version + ``` +1. 
In the CLI, run the status command to confirm that the Boundary Client Agent has started: + + ```shell-session + $ boundary client-agent status + ``` + +## Configure targets + +The following section details how to configure targets and test the transparent sessions public beta feature. + + + + If you use a cluster that was created earlier than release 0.16.0, you must add the grant `list-resolvable-aliases` so that the client agent can populate the local alias cache. + + As an example, you could add the grant: + + `type=user;actions=list-resolvable-aliases;ids=*`. + + + +Complete the following steps to configure targets and test transparent sessions: + +1. Authenticate to Boundary using the CLI or Desktop client. +1. [Create a new target with an alias](/boundary/docs/concepts/aliases#create-an-alias-during-target-creation) or [create an alias for an existing target](/boundary/docs/concepts/aliases#create-an-alias-for-an-existing-target). +Ensure that you have authorization to establish a session to the target. +1. Open the client of your choice and [connect to your target using the alias](/boundary/docs/concepts/aliases#connect-to-a-target-using-an-alias). + + Boundary routes your session using the Boundary Client Agent. + You can validate that Boundary routed the session by looking at the **Sessions** page in the Desktop client, by typing `boundary sessions list -recursive` in the CLI, or by looking at sessions managed by the Client Agent using `boundary client-agent sessions`. + + + + The Client Agent periodically requests an updated list of aliases from the controller, so the alias may not work immediately after you create it. + The alias should be updated in the Client Agent within 2 minutes. If you still see connection issues after 2 minutes, follow the troubleshooting steps in [the Client Agent troubleshooting guide](/boundary/docs/api-clients/client-agent#troubleshooting). 
+ + + +When you have validated that transparent sessions work, you can create and establish transparent sessions to other services. + +To establish transparent sessions to other services: + +1. Make a list of the services you use. +1. Create workers as needed for network partitions. +1. Add the services to Boundary as targets. +1. [Create aliases for the targets](/boundary/docs/configuration/target-aliases/create-target-alias). +1. Connect to the target using your client of choice. + +## Connect using transparent sessions + +Without transparent sessions, you must use the [Boundary connect helpers](/boundary/docs/concepts/connection-workflows) to establish a session: + +```shell-session +$ boundary connect ssh -target-name sql-database -target-scope-name -staging +``` + +Alternatively, you can use the Boundary Desktop Client to start a session, and connect on a local port supplied by Boundary: + +```shell-session +$ ssh 127.0.0.1 -p 55374 +``` + +With transparent sessions, you use the target alias as the address to establish a session. If the [client agent is running](#install-the-boundary-clients) and you have authenticated using the CLI or Boundary Desktop Client, you can use the alias to start a session: + +```shell-session +$ ssh my.alias.name +``` + +Boundary starts the session as usual, and brokers or injects any credentials you have configured. \ No newline at end of file diff --git a/website/content/docs/configuration/worker/index.mdx b/website/content/docs/configuration/worker/index.mdx index 24aff48b9b..e8fcb12563 100644 --- a/website/content/docs/configuration/worker/index.mdx +++ b/website/content/docs/configuration/worker/index.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Worker - configuration -description: |- - The worker stanza configures worker-specific parameters. +page_title: Worker configuration overview +description: >- + Learn about worker configuration for proxies, storage, and tags. 
Understand how multi-hop configurations let you chain workers together in private networks. --- # Worker stanza @@ -11,19 +11,19 @@ The `worker` stanza configures Boundary worker-specific parameters. All workers within Boundary use certificates and encryption keys to identify themselves and protect data in transit. However, there are three different -ways to register them so that registration of workers can fit into any workflow; controller-led, worker-led, and via external KMS. +ways to register them so that registration of workers can fit into any workflow: controller-led, worker-led, and via external KMS. The differences in how they are configured are in the sub-pages linked at the bottom of this page. -Workers registered via the worker-led or controller-led methods must be registered in -the system via an API call, and require storage on disk to store the current set -of credentials. Workers registering via an external KMS auto-register after successful authentication, making them an easy mechanism to +Workers registered using the worker-led or controller-led methods must be registered in +the system using an API call, and require storage on disk to store the current set +of credentials. Workers registering using an external KMS auto-register after successful authentication, making them an easy mechanism to use for automatic scaling. This also means they are not required to store credentials locally; each time they connect the KMS is used to reauthenticate them. -~> Prior to version 0.15 of Boundary, there were two different types of workers, PKI & KMS workers. +~> Prior to version 0.15 of Boundary, there were two different types of workers, PKI & KMS workers. If you are using pre-0.15 workers, with pre-0.15 upstreams please be sure to switch the documentation version to `0.13.x` - `0.14.x` for correct information. @@ -87,7 +87,7 @@ worker { Session recordings are stored in the local storage while they are in progress. 
When the session is complete, Boundary moves the local session recording to remote storage and deletes the local copy. -- `recording_storage_minimum_available_capacity` - A value measured in bytes that defines the worker's local storage state. +- `recording_storage_minimum_available_capacity` - A value measured in bytes that defines the worker's local storage state. Boundary compares this value with the available local disk space found in the `recording_storage_path` to determine if a worker can be used for session recording operations. The supported suffixes are kb, kib, mb, mib, gb, gib, tb, tib, which are not case sensitive. Example: 2GB, 2gb, 2GiB, 2gib. The possible storage states based on the `recording_storage_minimum_available_capacity` are: diff --git a/website/content/docs/configuration/worker/worker-configuration.mdx b/website/content/docs/configuration/worker/worker-configuration.mdx index 0adf359952..a741aabeb9 100644 --- a/website/content/docs/configuration/worker/worker-configuration.mdx +++ b/website/content/docs/configuration/worker/worker-configuration.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Worker configuration -description: |- - Worker-specific parameters. +description: >- + Learn about authorizing workers to the controller and configuring workers for session recording storage. View a complete worker configuration example. --- # Worker configuration diff --git a/website/content/docs/developing/building.mdx b/website/content/docs/developing/building.mdx index 9c323fce8f..5b81627f51 100644 --- a/website/content/docs/developing/building.mdx +++ b/website/content/docs/developing/building.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: Build Boundary -description: Build Boundary from source +description: >- + Learn how to build Boundary from the source. Discover resources to compile a cross-platform build and troubleshoot UI assets. 
--- # Build Boundary diff --git a/website/content/docs/developing/index.mdx b/website/content/docs/developing/index.mdx index e9c285505e..465106acc0 100644 --- a/website/content/docs/developing/index.mdx +++ b/website/content/docs/developing/index.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: Develop Boundary -description: Develop Boundary +description: >- + Discover resources to help you build and develop Boundary. --- # Develop Boundary diff --git a/website/content/docs/developing/ui.mdx b/website/content/docs/developing/ui.mdx index 2a92cbeabc..55de5173d7 100644 --- a/website/content/docs/developing/ui.mdx +++ b/website/content/docs/developing/ui.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: Develop the UI -description: Develop the Boundary user interface +description: >- + Learn how to develop the Boundary user interface locally for testing. Use dev mode to run a local fork of the UI without building it into the binary. --- # Develop the Boundary user interface diff --git a/website/content/docs/enterprise/automated-license-reporting.mdx b/website/content/docs/enterprise/automated-license-reporting.mdx index 25ac7db968..8ea41e81f8 100644 --- a/website/content/docs/enterprise/automated-license-reporting.mdx +++ b/website/content/docs/enterprise/automated-license-reporting.mdx @@ -2,7 +2,7 @@ layout: docs page_title: Automated license utilization reporting description: >- - Learn what data HashiCorp collects to meter Enterprise license utilization. Enable or disable reporting. Review sample payloads and logs. + Learn what data HashiCorp collects to meter Enterprise license utilization. Enable reporting or opt out. Review sample payloads and logs. 
--- # Automated license utilization reporting diff --git a/website/content/docs/enterprise/index.mdx b/website/content/docs/enterprise/index.mdx index 3116a77779..0be3561872 100644 --- a/website/content/docs/enterprise/index.mdx +++ b/website/content/docs/enterprise/index.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Boundary Enterprise -description: |- - An overview of Boundary Enterprise +page_title: Boundary Enterprise overview +description: >- + Learn about Boundary Enterprise. Discover resources to upgrade from Community Edition or request assistance. --- # Boundary Enterprise diff --git a/website/content/docs/enterprise/licensing.mdx b/website/content/docs/enterprise/licensing.mdx index 4cec6da668..191187e753 100644 --- a/website/content/docs/enterprise/licensing.mdx +++ b/website/content/docs/enterprise/licensing.mdx @@ -2,7 +2,7 @@ layout: docs page_title: License Boundary Enterprise description: >- - How to license Boundary Enterprise. + Learn how to enable Boundary Enterprise with a valid license. Request access from an account team to enable enterprise features. --- # License Boundary Enterprise diff --git a/website/content/docs/enterprise/supported-versions.mdx b/website/content/docs/enterprise/supported-versions.mdx index 9b75612642..d4d4360d06 100644 --- a/website/content/docs/enterprise/supported-versions.mdx +++ b/website/content/docs/enterprise/supported-versions.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Boundary Enterprise supported versions +page_title: Boundary Enterprise supported version policies description: >- - The supported versions policy for Boundary Enterprise. Includes information about support periods, control plane and worker compatibility, and Postgres database version support. + Learn about Enterprise support policies. Includes information about support periods, control plane and worker compatibility, and PostgreSQL database versions. 
--- # Boundary Enterprise supported versions policy @@ -31,13 +31,29 @@ Customers are recommended to run the latest versions of Boundary in order to lev ## Control plane and client/cli compatibility -The supported version compatibility between Boundary's control plane and Boundary clients/cli is the same as control plane and worker compatibility. Within a Boundary Enterprise deployment API backwards compatibility is only supported between the control plane and clients from the prior “major release”. Using clients on newer versions than the control plane they are registered with is not supported. +The supported version compatibility between Boundary's control plane and Boundary clients/CLI is the same as control plane and worker compatibility. Within a Boundary Enterprise deployment API backwards compatibility is only supported between the control plane and clients from the prior “major release”. Using clients on newer versions than the control plane they are registered with is not supported. For example, Boundary clients version 0.14.0 are compatible with Boundary control plane running Boundary 0.15.0. However, they will not have compatibility once the control plane is updated to version 0.16.0 or above. Boundary clients version 0.16.0 are not compatible with Boundary control plane running Boundary 0.15.0 or lower. Customers are recommended to run the latest versions of Boundary in order to leverage the newest features and bug fixes. +The Desktop client uses a different numbering scheme than the CLI and control plane. +Refer to the table for the Desktop version that corresponds to the control plane version. + +| Desktop version | Control plane version | +| --------------- | ----------- | +| 2.1.0 | 0.17.0 | +| 2.0.3 | 0.16.0 | +| 2.0.2 | 0.15.3 | +| 2.0.1 | 0.15.1 | +| 2.0.0 | 0.15.0 | + +For example, the Desktop version 2.0.3 is compatible with version 0.17.0 of the control plane. 
+But when the control plane is upgraded to 0.18.0, version 2.0.3 will no longer be officially supported. + +To view the Desktop version along with the corresponding CLI and control plane version, click **Boundary**, and then click **About Boundary** in the Desktop client. + ## PostgreSQL support policy Boundary Enterprise will only support PostgreSQL version 13 and above at launch. diff --git a/website/content/docs/getting-started/dev-mode/connect-to-dev-target.mdx b/website/content/docs/getting-started/dev-mode/connect-to-dev-target.mdx index 6c0ec219e5..ff14b80048 100644 --- a/website/content/docs/getting-started/dev-mode/connect-to-dev-target.mdx +++ b/website/content/docs/getting-started/dev-mode/connect-to-dev-target.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Connect to your first target -description: |- - Connecting to your first target +description: >- + Learn how to connect to targets using Boundary, and optionally use connect helpers, exec flag, SSH ProxyCommand, and Desktop Client features. --- # Connect to your first target diff --git a/website/content/docs/getting-started/dev-mode/dev-mode.mdx b/website/content/docs/getting-started/dev-mode/dev-mode.mdx index ba20075db6..a42c98132e 100644 --- a/website/content/docs/getting-started/dev-mode/dev-mode.mdx +++ b/website/content/docs/getting-started/dev-mode/dev-mode.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: What is dev mode? -description: Getting started with Boundary Community Edition in dev mode +description: >- + Learn about Boundary's dev mode and how it enables you to quickly deploy a temporary, pre-configured Boundary environment for testing and learning purposes. --- # What is dev mode? 
diff --git a/website/content/docs/getting-started/dev-mode/run-and-login.mdx b/website/content/docs/getting-started/dev-mode/run-and-login.mdx index 80f57c6a58..93c212ea30 100644 --- a/website/content/docs/getting-started/dev-mode/run-and-login.mdx +++ b/website/content/docs/getting-started/dev-mode/run-and-login.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Run and log in to Boundary -description: |- - How to run Boundary services in dev mode and log in for the first time. +description: >- + Run and authenticate to Boundary in dev mode using pre-configured credentials. Dev mode lets you try Boundary in an environment with ephemeral sample resources. --- # Run and log in to Boundary diff --git a/website/content/docs/getting-started/index.mdx b/website/content/docs/getting-started/index.mdx index 1497cb28e8..074caf2ddb 100644 --- a/website/content/docs/getting-started/index.mdx +++ b/website/content/docs/getting-started/index.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: Get started -description: Get started with Boundary +description: >- + Understand the difference between HCP Boundary, Boundary Enterprise, and Boundary Community Edition to determine which is best for your needs. --- # Get started diff --git a/website/content/docs/hcp/get-started/connect-to-target.mdx b/website/content/docs/hcp/get-started/connect-to-target.mdx index 4674ae7802..fa905474e8 100644 --- a/website/content/docs/hcp/get-started/connect-to-target.mdx +++ b/website/content/docs/hcp/get-started/connect-to-target.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Connect to target -description: |- - Connecting to your first target +page_title: Connect to a target +description: >- + Understand how to connect to targets using HCP Boundary. Learn how to select targets and use your choice of client and connect helpers to facilitate connections. 
--- # Connect to your first target diff --git a/website/content/docs/hcp/get-started/deploy-and-login.mdx b/website/content/docs/hcp/get-started/deploy-and-login.mdx index 9d731b266c..c22dbebf3f 100644 --- a/website/content/docs/hcp/get-started/deploy-and-login.mdx +++ b/website/content/docs/hcp/get-started/deploy-and-login.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Deploy and log in -description: |- - How to deploy HCP Boundary services and log in for the first time. +description: >- + Learn about the requirements for using HCP Boundary. Deploy a cluster, and log in for the first time. Discover resources to learn more. --- # Deploy HCP Boundary and log in diff --git a/website/content/docs/hcp/index.mdx b/website/content/docs/hcp/index.mdx index b92aef3d66..d1018de1e0 100644 --- a/website/content/docs/hcp/index.mdx +++ b/website/content/docs/hcp/index.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: HCP Boundary -description: |- - An overview of HCP Boundary +page_title: HCP Boundary overview +description: >- + Discover resources to learn about and try HCP Boundary. --- # HCP Boundary diff --git a/website/content/docs/install-boundary/architecture/fault-tolerance.mdx b/website/content/docs/install-boundary/architecture/fault-tolerance.mdx index b7aef8a779..e5f1d7127b 100644 --- a/website/content/docs/install-boundary/architecture/fault-tolerance.mdx +++ b/website/content/docs/install-boundary/architecture/fault-tolerance.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Fault tolerance recommendations -description: |- - Boundary fault tolerance characteristics +description: >- + Understand Boundary fault tolerance recommendations to increase availability, reduce risk. Minimize the impact of node, availability zone, and regional failures. 
--- # Fault tolerance recommendations diff --git a/website/content/docs/install-boundary/architecture/high-availability.mdx b/website/content/docs/install-boundary/architecture/high-availability.mdx index 8d50d94feb..0f54c1d7be 100644 --- a/website/content/docs/install-boundary/architecture/high-availability.mdx +++ b/website/content/docs/install-boundary/architecture/high-availability.mdx @@ -1,8 +1,9 @@ --- layout: docs page_title: High availability installation -description: |- - How to install Boundary as a high availability service +description: >- + Learn about network requirements and architecture to set up Boundary for high availability, including database, load balancer, and configuration best practices. + --- # High availability installation diff --git a/website/content/docs/install-boundary/architecture/recommended-architecture.mdx b/website/content/docs/install-boundary/architecture/recommended-architecture.mdx index 96e1e2af58..30784d7a5c 100644 --- a/website/content/docs/install-boundary/architecture/recommended-architecture.mdx +++ b/website/content/docs/install-boundary/architecture/recommended-architecture.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Recommended architecture -description: |- - Boundary architecture recommendations +description: >- + Learn about the recommended architectures for two main Boundary user workflows: an administrator configuring Boundary and an end user connecting to a target. 
--- # Recommended architecture diff --git a/website/content/docs/install-boundary/architecture/system-requirements.mdx b/website/content/docs/install-boundary/architecture/system-requirements.mdx index e479bba4d2..e404a52f64 100644 --- a/website/content/docs/install-boundary/architecture/system-requirements.mdx +++ b/website/content/docs/install-boundary/architecture/system-requirements.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: System requirements -description: |- - Boundary system requirements +description: >- + Learn about Boundary system requirements including recommendations for hardware sizing, network connectivity, traffic encryption, databases, and load balancers. --- # System requirements diff --git a/website/content/docs/install-boundary/configure-controllers.mdx b/website/content/docs/install-boundary/configure-controllers.mdx index a6aef8785e..afc4af1711 100644 --- a/website/content/docs/install-boundary/configure-controllers.mdx +++ b/website/content/docs/install-boundary/configure-controllers.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Configure controllers -description: |- - Configure Boundary controllers +description: >- + Configure Boundary controllers for a self-managed deployment, including TLS, KMS, and database parameters. Start the service, authenticate, and manage resources. --- # Configure controllers @@ -339,3 +339,9 @@ This allows you to configure targets within those scopes and manage them. HashiCorp recommends that you use the [KMS recovery workflow](/boundary/docs/install-boundary/initialize#log-in-with-recovery-kms) to log in to Boundary for the first time. Refer to [Creating your first login account](/boundary/docs/install-boundary/initialize#create-your-first-login-account) to learn about setting up your first auth method, user, account, and role to log in to Boundary going forward without the recovery KMS workflow. 
+ +After configuring the controller, you should: + +- [Configure Boundary workers](/boundary/docs/install-boundary/configure-workers) +- [Initialize Boundary](/boundary/docs/install-boundary/initialize) +- [Install the Boundary Clients](/boundary/docs/install-boundary/install-clients) \ No newline at end of file diff --git a/website/content/docs/install-boundary/configure-workers.mdx b/website/content/docs/install-boundary/configure-workers.mdx index 2e856857bb..d4f8f188b2 100644 --- a/website/content/docs/install-boundary/configure-workers.mdx +++ b/website/content/docs/install-boundary/configure-workers.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Configure workers -description: |- - Configure Boundary workers +description: >- + Configure Boundary workers for a self-managed deployment, including environment files and KMS keys. Set up ingress, intermediary, and egress roles for multi-hop. --- # Configure workers @@ -535,3 +535,8 @@ name, and admin password when prompted. + +After configuring workers, you should: + +- [Initialize Boundary](/boundary/docs/install-boundary/initialize) +- [Install the Boundary Clients](/boundary/docs/install-boundary/install-clients) \ No newline at end of file diff --git a/website/content/docs/install-boundary/install.mdx b/website/content/docs/install-boundary/deploy.mdx similarity index 82% rename from website/content/docs/install-boundary/install.mdx rename to website/content/docs/install-boundary/deploy.mdx index 2d1aa86423..25dedec7a0 100644 --- a/website/content/docs/install-boundary/install.mdx +++ b/website/content/docs/install-boundary/deploy.mdx @@ -1,11 +1,16 @@ --- layout: docs -page_title: Install Boundary -description: |- - Install a self-managed version of Boundary +page_title: Deploy Boundary +description: >- + Deploy a self-managed version of Boundary Enterprise or Community Edition on Ubuntu/Debian, CentOS/RHEL, or Amazon Linux by installing the binary file. 
--- -# Install Boundary +# Deploy Boundary + +To deploy a self-managed Boundary environment you should: + +1. Deploy and configure Boundary controllers and workers +2. Install end-user clients This guide outlines the required steps to manually install and configure a single HashiCorp Boundary cluster as defined in the [Recommended @@ -13,6 +18,8 @@ architecture](/boundary/docs/install-boundary/recommended-architecture) topic. It assumes you install Boundary on virtual machines (VMs) or bare-metal servers running a Debian or Red Hat-based Linux distribution. +To learn about installing end-user clients, refer to the [Install Boundary clients](/boundary/docs/install-boundary/install-clients) page. + This document includes general guidance as well as specific recommendations for popular cloud infrastructure platforms. These recommendations have also been encoded into official Terraform reference architectures for @@ -27,11 +34,15 @@ In addition to installing the Boundary binary, the official package also provides a systemd service unit, and a local `boundary` user account under which the service runs. + + You must complete the following steps for each Boundary controller and worker node that you want to deploy. The binary operates as either a worker or -controller, depending on the subsequent configuration that you generate for the +controller, depending on the configuration that you generate for the Boundary binary. + + The steps vary by Linux distribution. Select your distribution of Boundary, and complete the steps to install the @@ -39,6 +50,7 @@ binary: + @@ -189,4 +201,13 @@ binary: - \ No newline at end of file + + +You should install the binary on the controller and worker instances you configure to run in your Boundary environments. 
+ +Next, you should: + +- [Configure Boundary controllers](/boundary/docs/install-boundary/configure-controllers) +- [Configure Boundary workers](/boundary/docs/install-boundary/configure-workers) +- [Initialize Boundary](/boundary/docs/install-boundary/initialize) +- [Install the Boundary Clients](/boundary/docs/install-boundary/install-clients) \ No newline at end of file diff --git a/website/content/docs/install-boundary/index.mdx b/website/content/docs/install-boundary/index.mdx index 6cd5b05259..e6ce233474 100644 --- a/website/content/docs/install-boundary/index.mdx +++ b/website/content/docs/install-boundary/index.mdx @@ -1,14 +1,28 @@ --- layout: docs page_title: Overview -description: |- - Deploying a self-managed version of Boundary +description: >- + Discover resources for deploying a self-managed version of Boundary Enterprise or Boundary Community Edition. --- # Overview -This section details installing Boundary in a self-managed environment. +This section details deploying Boundary in a self-managed environment. You can use the topics in this section to install the Community Edition or the Enterprise version of Boundary. The section also includes reference architecture, system requirement recommendations, and best practices. -To deploy HCP Boundary instead, refer to the [HCP Boundary Get Started section](/boundary/docs/hcp/get-started/deploy-and-login). \ No newline at end of file +To deploy HCP Boundary instead, refer to the [HCP Boundary Get Started section](/boundary/docs/hcp/get-started/deploy-and-login). 
+ +This section outlines the following topics for deploying your self-managed environment: + +- Architectural considerations + - [System requirements](/boundary/docs/install-boundary/architecture/system-requirements) + - [Recommended architecture](/boundary/docs/install-boundary/architecture/recommended-architecture) + - [Fault tolerance](/boundary/docs/install-boundary/architecture/fault-tolerance) + - [High availability](/boundary/docs/install-boundary/architecture/high-availability) +- [Deploy Boundary](/boundary/docs/install-boundary/deploy) +- [Configure controllers](/boundary/docs/install-boundary/configure-controllers) +- [Configure workers](/boundary/docs/install-boundary/configure-workers) +- [Initialize Boundary](/boundary/docs/install-boundary/initialize) +- [Install Boundary clients](/boundary/docs/install-boundary/install-clients) +- [Terraform patterns](/boundary/docs/install-boundary/terraform-patterns) \ No newline at end of file diff --git a/website/content/docs/install-boundary/initialize.mdx b/website/content/docs/install-boundary/initialize.mdx index db0adce394..d128316871 100644 --- a/website/content/docs/install-boundary/initialize.mdx +++ b/website/content/docs/install-boundary/initialize.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Initialize Boundary -description: |- - Creating your first login account +description: >- + Initialize new Boundary self-managed installations. Log in with the recovery KMS to create initial scope, auth method, login account, user, and role resources. --- # Initialize Boundary @@ -10,6 +10,8 @@ description: |- This document describes how to access Boundary for the first time and create the necessary resources to log in as a user. +You can also initialize and manage Boundary using Terraform. Refer to the [Terraform patterns](/boundary/docs/install-boundary/terraform-patterns) page to learn more. 
+ ## Requirements Before you initialize Boundary, you should have [initialized a database](/boundary/docs/install-boundary/configure-controllers#initialize-the-database). @@ -460,3 +462,7 @@ resource "boundary_role" "project_admin" { boundary authenticate password \ -auth-method-id ``` + +After initializing Boundary, you should: + +- [Install the Boundary Clients](/boundary/docs/install-boundary/install-clients) \ No newline at end of file diff --git a/website/content/docs/install-boundary/install-clients.mdx b/website/content/docs/install-boundary/install-clients.mdx new file mode 100644 index 0000000000..c84076a36f --- /dev/null +++ b/website/content/docs/install-boundary/install-clients.mdx @@ -0,0 +1,65 @@ +--- +layout: docs +page_title: Install Boundary clients +description: >- + Install the Boundary CLI, Boundary Desktop Client app, or Boundary installer for Boundary Enterprise or Community Edition on Windows, MacOS, or Linux. +--- + +# Install Boundary clients + +Boundary end users access infrastructure targets using the Boundary Desktop Client app or the Boundary CLI. Enterprise or HCP Boundary users can use transparent sessions to proxy traffic through the Boundary Client Agent HCP/ENT. + +This guide outlines the required steps to manually install and configure a Boundary client depending on which distribution of Boundary end users access. + +To learn about installing the Boundary binary when you deploy Boundary controllers or workers, refer to the [Deploy Boundary](/boundary/docs/install-boundary/deploy) page. + +Select your Boundary distribution, and complete the steps to install the +correct clients: + + + + +@include 'alerts/enterprise-only.mdx' + + + + The Boundary installer includes the Boundary Client Agent, which is a beta feature. For production environments, consider downloading the Boundary binary and Boundary Desktop Client directly from the [Install Boundary](/boundary/install) page. 
+ + + +The Boundary installer bundles together the following components: + +- Boundary binary (CLI) +- Boundary Desktop Client app +- Boundary Client Agent HCP/ENT BETA + +Download the appropriate Boundary installer for your Windows, MacOS, or Linux environment from the [Install Boundary](/boundary/install#installer) page or the [releases](https://releases.hashicorp.com/boundary-installer) page. You can also download the components individually, but compatibility is not guaranteed. Refer to the [Supported versions policy](/boundary/docs/enterprise/supported-versions#control-plane-and-client-cli-compatibility) to learn more. + + + + Before you launch the Boundary installer, ensure that you have uninstalled all previous versions of the Boundary binary and Boundary Desktop Client app. + + + +To learn more about installing the Boundary CLI and Boundary Desktop Client app, refer to the [Get started with HCP Boundary](/boundary/tutorials/get-started-hcp) tutorials. + + + + +We recommend the following components for end users: + +- Boundary binary (CLI) +- Boundary Desktop Client app + +Download these components for your Windows, MacOS, or Linux environment from the [Install Boundary](/boundary/install) page or the [HashiCorp releases](https://releases.hashicorp.com/) page. + + + +Client compatibility is not guaranteed. You should always ensure that your client is compatible with the version of the Boundary control plane you are connecting to. Refer to the [Supported versions policy](/boundary/docs/enterprise/supported-versions#control-plane-and-client-cli-compatibility) to learn more. + + + +To learn more about installing the Boundary CLI and Boundary Desktop Client app, refer to the [Get started with self-managed Boundary](/boundary/tutorials/get-started-community) tutorials. 
+ + + \ No newline at end of file diff --git a/website/content/docs/install-boundary/terraform-patterns/index.mdx b/website/content/docs/install-boundary/terraform-patterns/index.mdx index 32fa38e999..700331926f 100644 --- a/website/content/docs/install-boundary/terraform-patterns/index.mdx +++ b/website/content/docs/install-boundary/terraform-patterns/index.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Terraform patterns for Boundary -description: |- - Learn how to configure the Terraform Boundary provider so that you can use Terraform patterns to install and manage Boundary. +description: >- + Configure the Terraform Boundary provider so that you can use Terraform patterns to install and manage Boundary resources. --- # Terraform patterns for Boundary diff --git a/website/content/docs/install-boundary/terraform-patterns/terraform-credentials-and-credential-stores.mdx b/website/content/docs/install-boundary/terraform-patterns/terraform-credentials-and-credential-stores.mdx index fbf3a1d28c..40e8030e26 100644 --- a/website/content/docs/install-boundary/terraform-patterns/terraform-credentials-and-credential-stores.mdx +++ b/website/content/docs/install-boundary/terraform-patterns/terraform-credentials-and-credential-stores.mdx @@ -1,7 +1,7 @@ --- layout: docs page_title: Terraform patterns for Boundary credentials and credential stores -description: |- +description: >- Use Terraform patterns to create and manage Boundary credentials and credential stores. Learn how to create static or Vault credential stores, add credentials. 
--- diff --git a/website/content/docs/install-boundary/terraform-patterns/terraform-groups-and-rbac.mdx b/website/content/docs/install-boundary/terraform-patterns/terraform-groups-and-rbac.mdx index 92ac2c3227..7c9a7ada0c 100644 --- a/website/content/docs/install-boundary/terraform-patterns/terraform-groups-and-rbac.mdx +++ b/website/content/docs/install-boundary/terraform-patterns/terraform-groups-and-rbac.mdx @@ -1,7 +1,7 @@ --- layout: docs page_title: Terraform patterns for Boundary groups and RBAC -description: |- +description: >- Use Terraform patterns to create and manage Boundary groups and role-based access control (RBAC). Learn how to add users to managed groups and assign roles. --- diff --git a/website/content/docs/install-boundary/terraform-patterns/terraform-hosts-and-host-management.mdx b/website/content/docs/install-boundary/terraform-patterns/terraform-hosts-and-host-management.mdx index 5975526d92..bf41a985ec 100644 --- a/website/content/docs/install-boundary/terraform-patterns/terraform-hosts-and-host-management.mdx +++ b/website/content/docs/install-boundary/terraform-patterns/terraform-hosts-and-host-management.mdx @@ -1,7 +1,7 @@ --- layout: docs page_title: Terraform patterns for Boundary hosts and host management -description: |- +description: >- Use Terraform patterns to create and manage Boundary hosts and host catalogs. Learn how to add static or plugin-based hosts to static or dynamic host catalogs. 
--- diff --git a/website/content/docs/install-boundary/terraform-patterns/terraform-scopes.mdx b/website/content/docs/install-boundary/terraform-patterns/terraform-scopes.mdx index a5b905d631..e13f831781 100644 --- a/website/content/docs/install-boundary/terraform-patterns/terraform-scopes.mdx +++ b/website/content/docs/install-boundary/terraform-patterns/terraform-scopes.mdx @@ -1,7 +1,7 @@ --- layout: docs page_title: Terraform patterns for Boundary scopes -description: |- +description: >- Use Terraform patterns to create and manage Boundary scopes. Learn how to configure global, org-level, and project-level scopes using the Boundary provider. --- diff --git a/website/content/docs/install-boundary/terraform-patterns/terraform-session-recording.mdx b/website/content/docs/install-boundary/terraform-patterns/terraform-session-recording.mdx index 2ea39b48e3..a4cb08891e 100644 --- a/website/content/docs/install-boundary/terraform-patterns/terraform-session-recording.mdx +++ b/website/content/docs/install-boundary/terraform-patterns/terraform-session-recording.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Terraform patterns for Boundary session recording -description: |- - Use Terraform patterns to enable session recording for auditing user sessions in Boundary. Learn how to configure prerequisite storage policies and buckets. +description: >- + Use Terraform patterns to enable session recording for auditing user sessions in Boundary. Learn how to configure storage policies and storage buckets. 
--- # Terraform patterns for Boundary session recording diff --git a/website/content/docs/install-boundary/terraform-patterns/terraform-targets.mdx b/website/content/docs/install-boundary/terraform-patterns/terraform-targets.mdx index 46ce1c9a8d..a3472896c6 100644 --- a/website/content/docs/install-boundary/terraform-patterns/terraform-targets.mdx +++ b/website/content/docs/install-boundary/terraform-patterns/terraform-targets.mdx @@ -1,7 +1,7 @@ --- layout: docs page_title: Terraform patterns for Boundary targets -description: |- +description: >- Use Terraform patterns to create and manage Boundary targets. Learn how to configure SSH and TCP targets, inject passwords, and enable session recording. --- diff --git a/website/content/docs/install-boundary/terraform-patterns/terraform-users-and-auth-methods.mdx b/website/content/docs/install-boundary/terraform-patterns/terraform-users-and-auth-methods.mdx index 0fdae66384..c72cae70ee 100644 --- a/website/content/docs/install-boundary/terraform-patterns/terraform-users-and-auth-methods.mdx +++ b/website/content/docs/install-boundary/terraform-patterns/terraform-users-and-auth-methods.mdx @@ -1,7 +1,7 @@ --- layout: docs page_title: Terraform patterns for Boundary users and auth methods -description: |- +description: >- Use Terraform patterns to create and manage Boundary users and auth methods. Learn how to configure password and LDAP auth methods, add accounts, create users. --- diff --git a/website/content/docs/integrations/index.mdx b/website/content/docs/integrations/index.mdx index efb5d34a5c..55a23b5213 100644 --- a/website/content/docs/integrations/index.mdx +++ b/website/content/docs/integrations/index.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Integrations -description: |- - Integrations that extend Boundary +description: >- + Discover integrations that extend Boundary's features and capabilities. 
--- # Overview diff --git a/website/content/docs/integrations/vault/index.mdx b/website/content/docs/integrations/vault/index.mdx index 7b4c0fd289..4a296e0004 100644 --- a/website/content/docs/integrations/vault/index.mdx +++ b/website/content/docs/integrations/vault/index.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Vault integration -description: |- - Describes the benefits of integrating HashiCorp Boundary and Vault. +description: >- + Understand the security benefits of integrating Boundary and Vault to manage secrets and broker or inject credentials. Set up Vault as an OIDC bridge provider. --- # Vault integration diff --git a/website/content/docs/operations/health.mdx b/website/content/docs/operations/health.mdx index 3144f5c19d..39dd46b5e5 100644 --- a/website/content/docs/operations/health.mdx +++ b/website/content/docs/operations/health.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Boundary health endpoints -description: |- - Verify the Boundary controller and worker server status using health endpoints. +description: >- + Learn about using health endpoints to verify controller and worker server status. Check their health using wget. View an example response and configuration. --- # Boundary health endpoints diff --git a/website/content/docs/operations/index.mdx b/website/content/docs/operations/index.mdx index f933601ccf..18e44f7119 100644 --- a/website/content/docs/operations/index.mdx +++ b/website/content/docs/operations/index.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Operating Boundary -description: |- - Operational tasks in Boundary. +page_title: Maintaining and operating Boundary +description: >- + Discover resources to help you learn more about maintaining and operating Boundary, including information about metrics and health. 
--- # Maintaining and operating Boundary diff --git a/website/content/docs/operations/metrics.mdx b/website/content/docs/operations/metrics.mdx index c36f9329be..9a253337a8 100644 --- a/website/content/docs/operations/metrics.mdx +++ b/website/content/docs/operations/metrics.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Boundary Metrics -description: |- - Obtain visibility of various components of a running Boundary +page_title: Boundary metrics +description: >- + Learn about using a metrics listener to monitor your Boundary components. View the available controller and worker metrics, and an example configuration. --- # Boundary metrics diff --git a/website/content/docs/operations/session-recordings/index.mdx b/website/content/docs/operations/session-recordings/index.mdx index de85c2247f..15b8bd1c1b 100644 --- a/website/content/docs/operations/session-recordings/index.mdx +++ b/website/content/docs/operations/session-recordings/index.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Recorded sessions operations -description: |- - How to work with Boundary's recorded sessions +page_title: Recorded sessions format and security +description: >- + Learn about asciicast, the recording format used for recorded sessions. Understand security concerns and discover resources for working with recorded sessions. --- # Recorded sessions operations diff --git a/website/content/docs/operations/session-recordings/manage-recorded-sessions.mdx b/website/content/docs/operations/session-recordings/manage-recorded-sessions.mdx index 1bb0f7552d..53db89f618 100644 --- a/website/content/docs/operations/session-recordings/manage-recorded-sessions.mdx +++ b/website/content/docs/operations/session-recordings/manage-recorded-sessions.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Manage recorded sessions -description: |- - How to find, download, and view Boundary's recorded sessions +page_title: Find and view recorded sessions +description: >- + Find and view recorded sessions. 
View a list of all recorded sessions, or search for a specific recording. Download recorded sessions to meet compliance needs. --- # Find and view recorded sessions diff --git a/website/content/docs/operations/session-recordings/validate-data-store.mdx b/website/content/docs/operations/session-recordings/validate-data-store.mdx index 142a257d63..be7f20c7b7 100644 --- a/website/content/docs/operations/session-recordings/validate-data-store.mdx +++ b/website/content/docs/operations/session-recordings/validate-data-store.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Validate the data integrity in the external object store -description: |- - How Boundary validates the data integrity of recorded sessions in the external object store +page_title: Validate data integrity in the external object store +description: >- + Learn about how Boundary validates the data integrity of the BSR file in the external data store to ensure that the file has not been tampered with. --- # How Boundary validates data integrity in the external object store diff --git a/website/content/docs/operations/session-recordings/validate-session-recordings.mdx b/website/content/docs/operations/session-recordings/validate-session-recordings.mdx index b017fddd42..35bd05b5ba 100644 --- a/website/content/docs/operations/session-recordings/validate-session-recordings.mdx +++ b/website/content/docs/operations/session-recordings/validate-session-recordings.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Validate the integrity recorded sessions -description: |- - How to validate the integrity of Boundary's recorded sessions +page_title: Validate recorded session integrity +description: >- + View the components of the Boundary Session Recording (BSR) file. Verify the integrity of the contents of a BSR cryptographically to ensure security compliance. 
--- # Validate the integrity of session recordings diff --git a/website/content/docs/overview/use-cases.mdx b/website/content/docs/overview/use-cases.mdx index 252f545712..377779724c 100644 --- a/website/content/docs/overview/use-cases.mdx +++ b/website/content/docs/overview/use-cases.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: Use cases -description: A list of Boundary use cases +description: >- + Learn how HashiCorp Boundary can help you with zero trust access, multi-cloud access, single-sign on with integrated secrets management, and session monitoring. --- # Use cases diff --git a/website/content/docs/overview/vs/bastion-hosts.mdx b/website/content/docs/overview/vs/bastion-hosts.mdx index bd314f684d..4fbdd193ab 100644 --- a/website/content/docs/overview/vs/bastion-hosts.mdx +++ b/website/content/docs/overview/vs/bastion-hosts.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: Boundary vs. bastion hosts -description: Compares Boundary to bastion hosts +description: >- + Learn how Boundary compares to bastion hosts by providing tightly controlled, just-in-time access to infrastructure using role-based access controls (RBAC). --- # Boundary vs. bastion hosts diff --git a/website/content/docs/overview/vs/other-software.mdx b/website/content/docs/overview/vs/other-software.mdx index f47b69c6c3..9bf6130c04 100644 --- a/website/content/docs/overview/vs/other-software.mdx +++ b/website/content/docs/overview/vs/other-software.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: Boundary vs. other software -description: Compares Boundary to other security technologies +description: >- + Learn how HashiCorp Boundary compares to other security technologies. --- # Boundary vs. other software diff --git a/website/content/docs/overview/vs/pam.mdx b/website/content/docs/overview/vs/pam.mdx index 4d34797475..fbb05ed3e8 100644 --- a/website/content/docs/overview/vs/pam.mdx +++ b/website/content/docs/overview/vs/pam.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: Boundary vs. 
privileged access management -description: Compares Boundary to using a privileged access management (PAM) solution +description: >- + Learn how Boundary compares to privileged access management (PAM) solutions by providing automation for user and credential management and service discovery. --- # Boundary vs. privileged access management diff --git a/website/content/docs/overview/vs/sdp.mdx b/website/content/docs/overview/vs/sdp.mdx index dc1689a730..6b8d27a2a7 100644 --- a/website/content/docs/overview/vs/sdp.mdx +++ b/website/content/docs/overview/vs/sdp.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: Boundary vs. software-defined perimeter -description: Compares Boundary to using a software-defined perimeter solution +description: >- + Learn how Boundary compares to software-defined perimeter (SDP) tools using dynamic credentials, context-based access, automated host discovery, and auditing. --- # Boundary vs. software-defined perimeter diff --git a/website/content/docs/overview/vs/secrets-management.mdx b/website/content/docs/overview/vs/secrets-management.mdx index dbf8f27132..e9d0010ece 100644 --- a/website/content/docs/overview/vs/secrets-management.mdx +++ b/website/content/docs/overview/vs/secrets-management.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: Boundary vs. secrets management tools (HashiCorp Vault, AWS Secrets Manager, Azure Key Vault, etc.) -description: Compares Boundary to using secrets management tools (HashiCorp Vault, AWS Secrets Manager, Azure Key Vault, etc.) +description: >- + Learn how Boundary compares to secrets management tools and how it can complement them to provide identity-based access, automated host discovery, and auditing. --- # Boundary vs. secrets management tools (HashiCorp Vault, AWS Secrets Manager, Azure Key Vault, etc.) 
diff --git a/website/content/docs/overview/vs/vpn.mdx b/website/content/docs/overview/vs/vpn.mdx index 40ea8f06c3..4d5fd60a2e 100644 --- a/website/content/docs/overview/vs/vpn.mdx +++ b/website/content/docs/overview/vs/vpn.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: Boundary vs. VPNs -description: Compares Boundary to using a virtual private network (VPN) +description: >- + Learn how Boundary compares to VPNs by using an Identity Provider (IdP) to grant users remote access to specific permitted services, but not the entire network. --- # Boundary vs. VPNs diff --git a/website/content/docs/overview/vs/zero-trust.mdx b/website/content/docs/overview/vs/zero-trust.mdx index 7c8f2aa449..51168f9b66 100644 --- a/website/content/docs/overview/vs/zero-trust.mdx +++ b/website/content/docs/overview/vs/zero-trust.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: Boundary and zero trust -description: Describes how Boundary applies to a zero trust security approach +description: >- + Learn how Boundary enables a zero trust approach to security in which all access transactions are authenticated and authorized based on trusted identity. --- # Boundary and zero trust diff --git a/website/content/docs/overview/what-is-boundary.mdx b/website/content/docs/overview/what-is-boundary.mdx index d3d6a27d44..d2f7a454e8 100644 --- a/website/content/docs/overview/what-is-boundary.mdx +++ b/website/content/docs/overview/what-is-boundary.mdx @@ -1,7 +1,8 @@ --- layout: docs page_title: What is Boundary? -description: An introduction to Boundary +description: >- + HashiCorp Boundary is a secure remote access solution that you can use to configure least-privileged, just-in-time access to systems, services, and applications. --- # What is Boundary? 
diff --git a/website/content/docs/release-notes/index.mdx b/website/content/docs/release-notes/index.mdx index 6e05bfbfef..4121274893 100644 --- a/website/content/docs/release-notes/index.mdx +++ b/website/content/docs/release-notes/index.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Release notes -description: |- - Boundary release notes +description: >- + Discover more information about the important updates in each version of Boundary in the release notes. --- # Release notes diff --git a/website/content/docs/release-notes/v0_10_0.mdx b/website/content/docs/release-notes/v0_10_0.mdx index f89a43978a..631d532f31 100644 --- a/website/content/docs/release-notes/v0_10_0.mdx +++ b/website/content/docs/release-notes/v0_10_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.10.0 -description: |- - Boundary release notes for v0.10.0 +page_title: v0.10.0 release notes +description: >- + Learn more about the features included in the Boundary 0.10.0 release and discover what has changed. --- # Boundary v0.10.0 diff --git a/website/content/docs/release-notes/v0_11_0.mdx b/website/content/docs/release-notes/v0_11_0.mdx index dab7e1e4f6..c0a285a335 100644 --- a/website/content/docs/release-notes/v0_11_0.mdx +++ b/website/content/docs/release-notes/v0_11_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.11.0 -description: |- - Boundary release notes for v0.11.0 +page_title: v0.11.0 release notes +description: >- + Learn more about the features included in the Boundary 0.11.0 release and discover what has changed. 
--- # Boundary v0.11.0 diff --git a/website/content/docs/release-notes/v0_12_0.mdx b/website/content/docs/release-notes/v0_12_0.mdx index 423f6faf9e..de65b24d8d 100644 --- a/website/content/docs/release-notes/v0_12_0.mdx +++ b/website/content/docs/release-notes/v0_12_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.12.0 -description: |- - Boundary release notes for v0.12.0 +page_title: v0.12.0 release notes +description: >- + Learn more about the new features included in the Boundary 0.12.0 release and understand deprecations and changes. --- # Boundary v0.12.0 diff --git a/website/content/docs/release-notes/v0_13_0.mdx b/website/content/docs/release-notes/v0_13_0.mdx index 629f461f66..d873e5cafa 100644 --- a/website/content/docs/release-notes/v0_13_0.mdx +++ b/website/content/docs/release-notes/v0_13_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.13.0 -description: |- - Boundary release notes for v0.13.0 +page_title: v0.13.0 release notes +description: >- + Learn more about the new features included in the Boundary 0.13.0 release. Understand any deprecations, changes, and known issues. --- # Boundary v0.13.0 diff --git a/website/content/docs/release-notes/v0_14_0.mdx b/website/content/docs/release-notes/v0_14_0.mdx index b95db7ce6d..06befb00e7 100644 --- a/website/content/docs/release-notes/v0_14_0.mdx +++ b/website/content/docs/release-notes/v0_14_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.14.0 -description: |- - Boundary release notes for v0.14.0 +page_title: v0.14.0 release notes +description: >- + Learn more about the new features included in the Boundary 0.14.0 release. Understand any deprecations, changes, and known issues. 
--- # Boundary 0.14.0 release notes diff --git a/website/content/docs/release-notes/v0_15_0.mdx b/website/content/docs/release-notes/v0_15_0.mdx index 0695b47a59..55798a97f6 100644 --- a/website/content/docs/release-notes/v0_15_0.mdx +++ b/website/content/docs/release-notes/v0_15_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.15.0 -description: |- - Boundary release notes for v0.15.0 +page_title: v0.15.0 release notes +description: >- + Learn more about the new features included in the Boundary 0.15.0 release. Understand any deprecations, changes, and known issues. --- # Boundary 0.15.0 release notes diff --git a/website/content/docs/release-notes/v0_16_0.mdx b/website/content/docs/release-notes/v0_16_0.mdx index c78c61ae6c..e1faa92704 100644 --- a/website/content/docs/release-notes/v0_16_0.mdx +++ b/website/content/docs/release-notes/v0_16_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.16.0 -description: |- - Boundary release notes for v0.16.0 +page_title: v0.16.0 release notes +description: >- + Learn more about the new features included in the Boundary 0.16.0 release. Understand any deprecations, changes, and known issues. --- # Boundary 0.16.0 release notes diff --git a/website/content/docs/release-notes/v0_17_0.mdx b/website/content/docs/release-notes/v0_17_0.mdx index 0ffe2889a2..5a6add742d 100644 --- a/website/content/docs/release-notes/v0_17_0.mdx +++ b/website/content/docs/release-notes/v0_17_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.17.0 -description: |- - Boundary release notes for v0.17.0 +page_title: v0.17.0 release notes +description: >- + Learn more about the new features included in the Boundary 0.17.0 release. Understand any deprecations, changes, and known issues. 
--- # Boundary 0.17.0 release notes diff --git a/website/content/docs/release-notes/v0_18_0.mdx b/website/content/docs/release-notes/v0_18_0.mdx new file mode 100644 index 0000000000..8f7754237e --- /dev/null +++ b/website/content/docs/release-notes/v0_18_0.mdx @@ -0,0 +1,190 @@ +--- +layout: docs +page_title: v0.18.0 release notes +description: >- + Learn more about the new features included in the Boundary 0.18.0 release. Understand any deprecations, changes, and known issues. +--- + +# Boundary 0.18.0 release notes + +**GA date:** October 15, 2024 + +@include 'release-notes/intro.mdx' + +## Important changes + + + + + + + + + + + + + + + + + + + + +
ChangeDescription
+ Role creation + + In a future version Boundary will no longer automatically create roles when new scopes are created. This was implemented prior to multi-scope grants to ensure administrators and users had default permissions in new scopes. Since Boundary 0.15, initial roles created for new clusters provide these permissions by default to all scopes using multi-scope grants. +
+ Docker image no longer contains curl + + As of version 0.17.1 and later, the curl binary is no longer included in the published Docker container image for Boundary. The image now includes wget, which you can alternatively use to check the health endpoint for a worker. If your workflow depends on having curl in the image, you can dynamically install it using apk. +

+ Learn more:  Known issues and breaking changes +
+ +## New features + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FeatureUpdateDescription
+ Transparent sessions + + BETA + + Transparent sessions allows users to eliminate steps in their current workflows using Boundary’s Client Agent, a component that operates in the background to intercept network traffic and automatically route this traffic through a session if the user is authenticated and authorized. +

+ Platform teams and access management teams that administer Boundary can now build much faster, simpler secure remote access workflows that feel more intuitive and invisible to their developer customers. +

+ Learn more: Transparent sessions and Client Agent. +
+ Backblaze B2 support for storage buckets + + GA + + Backblaze B2 is now supported as a storage provider for session recording storage buckets. +

+ Learn more: Configure an S3-compliant storage provider. +
+ AssumeRole support for AWS dynamic host catalogs + + GA + + AWS host plugins now support AssumeRole. AssumeRole returns a set of temporary security credentials that you can use to access AWS resources. +

+ Learn more: AWS dynamic host catalogs. +
+ +## Known issues and breaking changes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
VersionIssueDescription
+ 0.13.0+ + + Rotation of AWS access and secret keys during a session results in stale recordings + + In Boundary version 0.13.0+, when you rotate a storage bucket's secrets, any new sessions use the new credentials. However, previously established sessions continue to use the old credentials. +

+ As a best practice, administrators should rotate credentials in a phased manner, ensuring that all previously established sessions are completed before revoking the stale credentials. + Otherwise, you may end up with recordings that aren't stored in the remote storage bucket, and are unable to be played back. +
+ 0.13.0+ + + Unsupported recovery workflow during worker failure + + If a worker fails during a recording, there is no way to recover the recording. This could happen due to a network connectivity issue or because a worker is scaled down, for example. +

+ Learn more:  + Unsupported recovery workflow +
+ 0.17.1+ + + Docker image no longer contains curl + + As of version 0.17.1 and later, the curl binary is no longer included in the published Docker container image for Boundary. +

+ The image now includes wget. You can use wget to check the health endpoint for workers. +

+ Learn more:  Check the health endpoint using wget +

+ If your workflow depends on having curl in the image, you can dynamically install it using apk. Refer to the following commands for examples of using apk to install curl: +

+ docker exec -ti <CONTAINER-ID> apk add curl +

+ or +

+ kubectl exec -ti <NAME> -- apk add curl +
+ 0.18.0 (Fixed in 0.18.1) + + Users are incorrectly removed from managed groups + + If your organization has over 10,000 managed groups, Boundary may incorrectly remove users from the managed group memberships. +

+ In version 0.18.0 and earlier, there was a maximum number of managed groups supported for an auth method. If you had over 10,000 managed groups, Boundary may have incorrectly removed a user from a group during authentication. This issue is fixed in version 0.18.1. There is no longer a maximum number of managed groups. +

+ Learn more:  Managed groups +

+ Upgrade to the latest version of Boundary +
\ No newline at end of file diff --git a/website/content/docs/release-notes/v0_1_0.mdx b/website/content/docs/release-notes/v0_1_0.mdx index 1f9e7ca359..c8cfd2ed9c 100644 --- a/website/content/docs/release-notes/v0_1_0.mdx +++ b/website/content/docs/release-notes/v0_1_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.1.0 -description: |- - Boundary release notes for v0.1.0 +page_title: v0.1.0 release notes +description: >- + Learn more about the features included in the Boundary 0.1.0 release. --- # Boundary v0.1.0 diff --git a/website/content/docs/release-notes/v0_2_0.mdx b/website/content/docs/release-notes/v0_2_0.mdx index 4823ff80a7..20585afeb1 100644 --- a/website/content/docs/release-notes/v0_2_0.mdx +++ b/website/content/docs/release-notes/v0_2_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.2.0 -description: |- - Boundary release notes for v0.2.0 +page_title: v0.2.0 release notes +description: >- + Learn more about the features included in the Boundary 0.2.0 release and discover what has changed. --- # Boundary v0.2.0 diff --git a/website/content/docs/release-notes/v0_3_0.mdx b/website/content/docs/release-notes/v0_3_0.mdx index 5d1c1cbd8b..dcb65886a3 100644 --- a/website/content/docs/release-notes/v0_3_0.mdx +++ b/website/content/docs/release-notes/v0_3_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.3.0 -description: |- - Boundary release notes for v0.3.0 +page_title: v0.3.0 release notes +description: >- + Learn more about the features included in the Boundary 0.3.0 release and discover what has changed. 
--- # Boundary v0.3.0 diff --git a/website/content/docs/release-notes/v0_4_0.mdx b/website/content/docs/release-notes/v0_4_0.mdx index e63a1e7a9b..870bd8b5d6 100644 --- a/website/content/docs/release-notes/v0_4_0.mdx +++ b/website/content/docs/release-notes/v0_4_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.4.0 -description: |- - Boundary release notes for v0.4.0 +page_title: v0.4.0 release notes +description: >- + Learn more about the features included in the Boundary 0.4.0 release and discover what has changed. --- # Boundary v0.4.0 diff --git a/website/content/docs/release-notes/v0_5_0.mdx b/website/content/docs/release-notes/v0_5_0.mdx index 349a9c12a0..95cb99f2dc 100644 --- a/website/content/docs/release-notes/v0_5_0.mdx +++ b/website/content/docs/release-notes/v0_5_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.5.0 -description: |- - Boundary release notes for v0.5.0 +page_title: v0.5.0 release notes +description: >- + Learn more about the features included in the Boundary 0.5.0 release and discover what has changed. --- # Boundary v0.5.0 diff --git a/website/content/docs/release-notes/v0_6_0.mdx b/website/content/docs/release-notes/v0_6_0.mdx index 1a90b38d76..6deceafdb8 100644 --- a/website/content/docs/release-notes/v0_6_0.mdx +++ b/website/content/docs/release-notes/v0_6_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.6.0 -description: |- - Boundary release notes for v0.6.0 +page_title: v0.6.0 release notes +description: >- + Learn more about the features included in the Boundary 0.6.0 release and discover what has changed. 
--- # Boundary v0.6.0 diff --git a/website/content/docs/release-notes/v0_7_0.mdx b/website/content/docs/release-notes/v0_7_0.mdx index 8df79dd7b9..ca0f111adc 100644 --- a/website/content/docs/release-notes/v0_7_0.mdx +++ b/website/content/docs/release-notes/v0_7_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.7.0 -description: |- - Boundary release notes for v0.7.0 +page_title: v0.7.0 release notes +description: >- + Learn more about the features included in the Boundary 0.7.0 release and discover what has changed. --- # Boundary v0.7.0 diff --git a/website/content/docs/release-notes/v0_8_0.mdx b/website/content/docs/release-notes/v0_8_0.mdx index 82b45e715a..bec2546d37 100644 --- a/website/content/docs/release-notes/v0_8_0.mdx +++ b/website/content/docs/release-notes/v0_8_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.8.0 -description: |- - Boundary release notes for v0.8.0 +page_title: v0.8.0 release notes +description: >- + Learn more about the features included in the Boundary 0.8.0 release and discover what has changed. --- # Boundary v0.8.0 diff --git a/website/content/docs/release-notes/v0_9_0.mdx b/website/content/docs/release-notes/v0_9_0.mdx index 4c72af145d..95fe9ef887 100644 --- a/website/content/docs/release-notes/v0_9_0.mdx +++ b/website/content/docs/release-notes/v0_9_0.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: v0.9.0 -description: |- - Boundary release notes for v0.9.0 +page_title: v0.9.0 release notes +description: >- + Learn more about the features included in the Boundary 0.9.0 release and discover what has changed. 
--- # Boundary v0.9.0 diff --git a/website/content/docs/troubleshoot/common-errors.mdx b/website/content/docs/troubleshoot/common-errors.mdx index 5540995642..eca3738a18 100644 --- a/website/content/docs/troubleshoot/common-errors.mdx +++ b/website/content/docs/troubleshoot/common-errors.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Common error messages -description: |- - Common error messages for Boundary +description: >- + View common Boundary error messages and learn how to troubleshoot them. --- # Common error messages diff --git a/website/content/docs/troubleshoot/faq.mdx b/website/content/docs/troubleshoot/faq.mdx index 9d1c300a80..760350ec52 100644 --- a/website/content/docs/troubleshoot/faq.mdx +++ b/website/content/docs/troubleshoot/faq.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: FAQ -description: |- - FAQ for Boundary +description: >- + View frequently asked questions about Boundary --- # Frequently asked questions diff --git a/website/content/docs/troubleshoot/troubleshoot-recorded-sessions.mdx b/website/content/docs/troubleshoot/troubleshoot-recorded-sessions.mdx index 4e7b06755d..c111182d52 100644 --- a/website/content/docs/troubleshoot/troubleshoot-recorded-sessions.mdx +++ b/website/content/docs/troubleshoot/troubleshoot-recorded-sessions.mdx @@ -1,8 +1,8 @@ --- layout: docs -page_title: Manage recorded sessions -description: |- - How to troubleshoot issues with Boundary's recorded sessions +page_title: Troubleshoot session recordings +description: >- + View known issues and troubleshoot problems with Boundary's recorded sessions. --- # Troubleshoot session recordings diff --git a/website/content/partials/alerts/beta.mdx b/website/content/partials/alerts/beta.mdx new file mode 100644 index 0000000000..5cbafd7ac5 --- /dev/null +++ b/website/content/partials/alerts/beta.mdx @@ -0,0 +1,6 @@ + + +Beta functionality is stable, but possibly incomplete and subject to change. 
+**We strongly discourage using beta features in production deployments of Boundary.** + + \ No newline at end of file diff --git a/website/content/partials/alerts/enterprise-only.mdx b/website/content/partials/alerts/enterprise-only.mdx new file mode 100644 index 0000000000..50d3cd2a39 --- /dev/null +++ b/website/content/partials/alerts/enterprise-only.mdx @@ -0,0 +1 @@ +This feature requires HCP Boundary or Boundary Enterprise \ No newline at end of file diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index ef843117c2..cbb5d01a6a 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -108,8 +108,8 @@ ] }, { - "title": "Install Boundary", - "path": "install-boundary/install" + "title": "Deploy Boundary", + "path": "install-boundary/deploy" }, { "title": "Configure controllers", @@ -123,6 +123,10 @@ "title": "Initialize Boundary", "path": "install-boundary/initialize" }, + { + "title": "Install Boundary clients", + "path": "install-boundary/install-clients" + }, { "title": "Systemd install", "hidden": true, @@ -195,6 +199,15 @@ "title": "Aliases", "path": "concepts/aliases" }, + { + "title": "Transparent sessions", + "badge": { + "text": "HCP/ENT BETA", + "type": "outlined", + "color": "neutral" + }, + "path": "concepts/transparent-sessions" + }, { "title": "Auditing", "path": "concepts/auditing" @@ -574,6 +587,32 @@ } ] }, + { + "title": "Aliases and transparent sessions", + "routes": [ + { + "title": "Overview", + "path": "configuration/target-aliases" + }, + { + "title": "Create a target alias", + "path": "configuration/target-aliases/create-target-alias" + }, + { + "title": "Connect using a target alias", + "path": "configuration/target-aliases/connect-target-alias" + }, + { + "title": "Connect using transparent sessions", + "badge": { + "text": "HCP/ENT BETA", + "type": "outlined", + "color": "neutral" + }, + "path": "configuration/target-aliases/transparent-sessions" + } + ] + }, { "title": 
"Events", "routes": [ @@ -1643,6 +1682,15 @@ } ] }, + { + "title": "Client Agent", + "badge": { + "text": "HCP/ENT BETA", + "type": "outlined", + "color": "neutral" + }, + "path": "api-clients/client-agent" + }, { "title": "Go SDK", "path": "api-clients/go-sdk" @@ -1765,6 +1813,10 @@ "title": "Overview", "path": "release-notes" }, + { + "title": "v0.18.0", + "path": "release-notes/v0_18_0" + }, { "title": "v0.17.0", "path": "release-notes/v0_17_0" diff --git a/website/public/img/ui/multi-hop-egress-filter_dark.png b/website/public/img/ui/multi-hop-egress-filter_dark.png new file mode 100644 index 0000000000..7887e78226 Binary files /dev/null and b/website/public/img/ui/multi-hop-egress-filter_dark.png differ diff --git a/website/public/img/ui/multi-hop-egress-filter_light.png b/website/public/img/ui/multi-hop-egress-filter_light.png new file mode 100644 index 0000000000..0c60a55efd Binary files /dev/null and b/website/public/img/ui/multi-hop-egress-filter_light.png differ diff --git a/website/redirects.js b/website/redirects.js index b78a6fc26e..a2f00cb064 100644 --- a/website/redirects.js +++ b/website/redirects.js @@ -93,6 +93,11 @@ module.exports = [ destination: '/boundary/docs/operations/metrics', permanent: true, }, + { + source: '/boundary/docs/install-boundary/install', + destination: '/boundary/docs/install-boundary/deploy', + permanent: true, + }, { source: '/boundary/docs/install-boundary/fault-tolerance', destination: '/boundary/docs/install-boundary/architecture/fault-tolerance',