From 52ad7fc6dd658adbb543a2e1dcb70ddd7879f3d6 Mon Sep 17 00:00:00 2001 From: Amine Date: Sun, 10 Nov 2024 20:03:47 -0800 Subject: [PATCH] s/KRO/kro/g: decapitalize kro in documentation and code. --- .github/ISSUE_TEMPLATE/bug.yaml | 2 +- Makefile | 4 +- api/v1alpha1/conditions.go | 6 +- .../ec2-controller/ec2-controller.yaml | 2 +- .../eks-controller/eks-controller.yaml | 2 +- examples/ack-eks-cluster/eks-cluster.yaml | 382 +++++++++--------- examples/application/Readme.md | 2 +- examples/eks-cluster-mgmt/README.md | 21 +- examples/serverless-microservice/README.md | 141 +++++-- helm/Chart.yaml | 4 +- .../instance/controller_reconcile.go | 2 +- .../dynamiccontroller/dynamic_controller.go | 8 +- internal/graph/builder.go | 6 +- internal/graph/validation.go | 2 +- internal/simpleschema/doc.go | 4 +- internal/simpleschema/transform.go | 2 +- test/README.md | 87 ++-- .../suites/ackekscluster/generator.go | 6 +- .../docs/docs/concepts/00-resource-groups.md | 10 +- .../docs/docs/concepts/10-simple-schema.md | 10 +- website/docs/docs/concepts/15-instances.md | 20 +- website/docs/docs/concepts/_category_.json | 2 +- website/docs/docs/faq.md | 74 +++- .../docs/getting-started/01-Installation.md | 27 +- .../02-deploy-a-resource-group.md | 31 +- website/docs/docs/overview.md | 29 +- website/docusaurus.config.ts | 4 +- .../src/components/HomepageFeatures/index.tsx | 4 +- website/src/pages/index.tsx | 2 +- 29 files changed, 522 insertions(+), 374 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug.yaml b/.github/ISSUE_TEMPLATE/bug.yaml index f30525b2..b5ad3f2c 100644 --- a/.github/ISSUE_TEMPLATE/bug.yaml +++ b/.github/ISSUE_TEMPLATE/bug.yaml @@ -18,7 +18,7 @@ body: **Reproduction Steps** (Please include `ResourceGroup` and `Instances` files): **Versions**: - - KRO Version: + - kro version: - Kubernetes Version (`kubectl version`): **Involved Controllers**: diff --git a/Makefile b/Makefile index 4da65a2d..bad904a8 100644 --- a/Makefile +++ b/Makefile @@ -175,13 +175,13 @@ $(CONTROLLER_GEN): $(LOCALBIN) GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) .PHONY: image -build-image: ## Build the KRO controller images using ko build +build-image: ## Build the kro controller images using ko build $(WITH_GOFLAGS) KOCACHE=$(KOCACHE) KO_DOCKER_REPO="095708837592.dkr.ecr.us-west-2.amazonaws.com/kro" \ ko build --bare github.com/awslabs/kro/cmd/controller \ --push=false --tags ${RELEASE_VERSION} --sbom=none .PHONY: publish -publish-image: ## Publish the KRO controller images to ECR +publish-image: ## Publish the kro controller images to ECR $(WITH_GOFLAGS) KOCACHE=$(KOCACHE) KO_DOCKER_REPO="095708837592.dkr.ecr.us-west-2.amazonaws.com/kro" \ ko publish --bare github.com/awslabs/kro/cmd/controller \ --tags ${RELEASE_VERSION} --sbom=none diff --git a/api/v1alpha1/conditions.go b/api/v1alpha1/conditions.go index f0d8bc52..e4d5e717 100644 --- a/api/v1alpha1/conditions.go +++ b/api/v1alpha1/conditions.go @@ -22,15 +22,15 @@ type ConditionType string const ( // ResourceGroupConditionTypeGraphSynced indicates the state of the directed - // acyclic graph (DAG) that KRO uses to manage the resources in a + // acyclic graph (DAG) that kro uses to manage the resources in a // ResourceGroup. 
ResourceGroupConditionTypeGraphVerified ConditionType = "kro.aws.dev/GraphVerified" // ResourceGroupConditionTypeCustomResourceDefinitionSynced indicates the state of the - // CustomResourceDefinition (CRD) that KRO uses to manage the resources in a + // CustomResourceDefinition (CRD) that kro uses to manage the resources in a // ResourceGroup. ResourceGroupConditionTypeCustomResourceDefinitionSynced ConditionType = "kro.aws.dev/CustomResourceDefinitionSynced" // ResourceGroupConditionTypeReconcilerReady indicates the state of the reconciler. - // Whenever an ResourceGroup resource is created, KRO will spin up a + // Whenever an ResourceGroup resource is created, kro will spin up a // reconciler for that resource. This condition indicates the state of the // reconciler. ResourceGroupConditionTypeReconcilerReady ConditionType = "kro.aws.dev/ReconcilerReady" diff --git a/examples/ack-controller/ec2-controller/ec2-controller.yaml b/examples/ack-controller/ec2-controller/ec2-controller.yaml index b14522c9..ceeedd7a 100644 --- a/examples/ack-controller/ec2-controller/ec2-controller.yaml +++ b/examples/ack-controller/ec2-controller/ec2-controller.yaml @@ -19,7 +19,7 @@ spec: iamRole: maxSessionDuration: integer | default=3600 oidcProvider: string | required=true - roleDescription: string | default=IRSA role for ACK EC2 controller deployement on EKS cluster using KRO Resource group + roleDescription: string | default=IRSA role for ACK EC2 controller deployement on EKS cluster using kro Resource group image: deletePolicy: string | default=delete repository: string | default=public.ecr.aws/aws-controllers-k8s/ec2-controller diff --git a/examples/ack-controller/eks-controller/eks-controller.yaml b/examples/ack-controller/eks-controller/eks-controller.yaml index 6731067f..61ac97b9 100644 --- a/examples/ack-controller/eks-controller/eks-controller.yaml +++ b/examples/ack-controller/eks-controller/eks-controller.yaml @@ -19,7 +19,7 @@ spec: iamRole: maxSessionDuration: integer | default=3600 oidcProvider: string | required=true - roleDescription: string | default=IRSA role for ACK EKS controller deployement on EKS cluster using KRO Resource group + roleDescription: string | default=IRSA role for ACK EKS controller deployement on EKS cluster using kro Resource group iamPolicy: # would prefer to add a policyDocument here, need to support multiline string here description: string | default="policy for eks controller" diff --git a/examples/ack-eks-cluster/eks-cluster.yaml b/examples/ack-eks-cluster/eks-cluster.yaml index b7e91816..f9ce8abf 100644 --- a/examples/ack-eks-cluster/eks-cluster.yaml +++ b/examples/ack-eks-cluster/eks-cluster.yaml @@ -18,197 +18,197 @@ spec: clusterARN: ${cluster.status.ackResourceMetadata.arn} # resources resources: - - name: clusterVPC - readyWhen: - - ${clusterVPC.status.state == "available"} - template: - apiVersion: ec2.services.k8s.aws/v1alpha1 - kind: VPC - metadata: - name: kro-cluster-vpc - spec: - cidrBlocks: - - 192.168.0.0/16 - enableDNSSupport: true - enableDNSHostnames: true - - name: clusterElasticIPAddress - template: - apiVersion: ec2.services.k8s.aws/v1alpha1 - kind: ElasticIPAddress - metadata: - name: kro-cluster-eip - spec: {} - - name: clusterInternetGateway - template: - apiVersion: ec2.services.k8s.aws/v1alpha1 - kind: InternetGateway - metadata: - name: kro-cluster-igw - spec: - vpc: ${clusterVPC.status.vpcID} - - name: clusterRouteTable - template: - apiVersion: ec2.services.k8s.aws/v1alpha1 - kind: RouteTable - metadata: - name: 
kro-cluster-public-route-table - spec: - vpcID: ${clusterVPC.status.vpcID} - routes: - - destinationCIDRBlock: 0.0.0.0/0 - gatewayID: ${clusterInternetGateway.status.internetGatewayID} - - name: clusterSubnetA - readyWhen: - - ${clusterSubnetA.status.state == "available"} - template: - apiVersion: ec2.services.k8s.aws/v1alpha1 - kind: Subnet - metadata: - name: kro-cluster-public-subnet1 - spec: - availabilityZone: us-west-2a - cidrBlock: 192.168.0.0/18 - vpcID: ${clusterVPC.status.vpcID} - routeTables: - - ${clusterRouteTable.status.routeTableID} - mapPublicIPOnLaunch: true - - name: clusterSubnetB - template: - apiVersion: ec2.services.k8s.aws/v1alpha1 - kind: Subnet - metadata: - name: kro-cluster-public-subnet2 - spec: - availabilityZone: us-west-2b - cidrBlock: 192.168.64.0/18 - vpcID: ${clusterVPC.status.vpcID} - routeTables: - - ${clusterRouteTable.status.routeTableID} - mapPublicIPOnLaunch: true - - name: clusterNATGateway - template: - apiVersion: ec2.services.k8s.aws/v1alpha1 - kind: NATGateway - metadata: - name: kro-cluster-natgateway1 - spec: - subnetID: ${clusterSubnetB.status.subnetID} - allocationID: ${clusterElasticIPAddress.status.allocationID} - - name: clusterRole - template: - apiVersion: iam.services.k8s.aws/v1alpha1 - kind: Role - metadata: - name: kro-cluster-role - spec: - name: kro-cluster-role - description: "KRO created cluster cluster role" - policies: - - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy - assumeRolePolicyDocument: | - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Service": "eks.amazonaws.com" - }, - "Action": "sts:AssumeRole" - } - ] - } - - name: clusterNodeRole - template: - apiVersion: iam.services.k8s.aws/v1alpha1 - kind: Role - metadata: - name: kro-cluster-node-role - spec: - name: kro-cluster-node-role - description: "KRO created cluster node role" - policies: - - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy - - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly - - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy - assumeRolePolicyDocument: | - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Service": "ec2.amazonaws.com" - }, - "Action": "sts:AssumeRole" - } - ] - } - - name: clusterAdminRole - template: - apiVersion: iam.services.k8s.aws/v1alpha1 - kind: Role - metadata: - name: kro-cluster-pia-role - spec: - name: kro-cluster-pia-role - description: "KRO created cluster admin pia role" - policies: - - arn:aws:iam::aws:policy/AdministratorAccess - assumeRolePolicyDocument: | - { + - name: clusterVPC + readyWhen: + - ${clusterVPC.status.state == "available"} + template: + apiVersion: ec2.services.k8s.aws/v1alpha1 + kind: VPC + metadata: + name: kro-cluster-vpc + spec: + cidrBlocks: + - 192.168.0.0/16 + enableDNSSupport: true + enableDNSHostnames: true + - name: clusterElasticIPAddress + template: + apiVersion: ec2.services.k8s.aws/v1alpha1 + kind: ElasticIPAddress + metadata: + name: kro-cluster-eip + spec: {} + - name: clusterInternetGateway + template: + apiVersion: ec2.services.k8s.aws/v1alpha1 + kind: InternetGateway + metadata: + name: kro-cluster-igw + spec: + vpc: ${clusterVPC.status.vpcID} + - name: clusterRouteTable + template: + apiVersion: ec2.services.k8s.aws/v1alpha1 + kind: RouteTable + metadata: + name: kro-cluster-public-route-table + spec: + vpcID: ${clusterVPC.status.vpcID} + routes: + - destinationCIDRBlock: 0.0.0.0/0 + gatewayID: ${clusterInternetGateway.status.internetGatewayID} + - name: clusterSubnetA + readyWhen: + - 
${clusterSubnetA.status.state == "available"} + template: + apiVersion: ec2.services.k8s.aws/v1alpha1 + kind: Subnet + metadata: + name: kro-cluster-public-subnet1 + spec: + availabilityZone: us-west-2a + cidrBlock: 192.168.0.0/18 + vpcID: ${clusterVPC.status.vpcID} + routeTables: + - ${clusterRouteTable.status.routeTableID} + mapPublicIPOnLaunch: true + - name: clusterSubnetB + template: + apiVersion: ec2.services.k8s.aws/v1alpha1 + kind: Subnet + metadata: + name: kro-cluster-public-subnet2 + spec: + availabilityZone: us-west-2b + cidrBlock: 192.168.64.0/18 + vpcID: ${clusterVPC.status.vpcID} + routeTables: + - ${clusterRouteTable.status.routeTableID} + mapPublicIPOnLaunch: true + - name: clusterNATGateway + template: + apiVersion: ec2.services.k8s.aws/v1alpha1 + kind: NATGateway + metadata: + name: kro-cluster-natgateway1 + spec: + subnetID: ${clusterSubnetB.status.subnetID} + allocationID: ${clusterElasticIPAddress.status.allocationID} + - name: clusterRole + template: + apiVersion: iam.services.k8s.aws/v1alpha1 + kind: Role + metadata: + name: kro-cluster-role + spec: + name: kro-cluster-role + description: "kro created cluster cluster role" + policies: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + assumeRolePolicyDocument: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "eks.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] + } + - name: clusterNodeRole + template: + apiVersion: iam.services.k8s.aws/v1alpha1 + kind: Role + metadata: + name: kro-cluster-node-role + spec: + name: kro-cluster-node-role + description: "kro created cluster node role" + policies: + - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy + - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy + assumeRolePolicyDocument: | + { "Version": "2012-10-17", "Statement": [ - { - "Sid": "AllowEksAuthToAssumeRoleForPodIdentity", - "Effect": "Allow", - "Principal": { - "Service": "pods.eks.amazonaws.com" - }, - "Action": [ - "sts:AssumeRole", - "sts:TagSession" - ] - } + { + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } ] - } - - name: cluster - readyWhen: - - ${cluster.status.status == "ACTIVE"} - template: - apiVersion: eks.services.k8s.aws/v1alpha1 - kind: Cluster - metadata: - name: ${schema.spec.name} - spec: - name: ${schema.spec.name} - accessConfig: - authenticationMode: API_AND_CONFIG_MAP - roleARN: ${clusterRole.status.ackResourceMetadata.arn} - version: ${schema.spec.version} - resourcesVPCConfig: - endpointPrivateAccess: false - endpointPublicAccess: true - subnetIDs: - - ${clusterSubnetA.status.subnetID} - - ${clusterSubnetB.status.subnetID} - - name: clusterNodeGroup - template: - apiVersion: eks.services.k8s.aws/v1alpha1 - kind: Nodegroup - metadata: - name: kro-cluster-nodegroup - spec: - name: kro-cluster-ng - diskSize: 100 - clusterName: ${cluster.spec.name} - subnets: - - ${clusterSubnetA.status.subnetID} - - ${clusterSubnetB.status.subnetID} - nodeRole: ${clusterNodeRole.status.ackResourceMetadata.arn} - updateConfig: - maxUnavailable: 1 - scalingConfig: - minSize: 1 - maxSize: 1 - desiredSize: 1 \ No newline at end of file + } + - name: clusterAdminRole + template: + apiVersion: iam.services.k8s.aws/v1alpha1 + kind: Role + metadata: + name: kro-cluster-pia-role + spec: + name: kro-cluster-pia-role + description: "kro created cluster admin pia role" + policies: + - arn:aws:iam::aws:policy/AdministratorAccess + 
assumeRolePolicyDocument: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowEksAuthToAssumeRoleForPodIdentity", + "Effect": "Allow", + "Principal": { + "Service": "pods.eks.amazonaws.com" + }, + "Action": [ + "sts:AssumeRole", + "sts:TagSession" + ] + } + ] + } + - name: cluster + readyWhen: + - ${cluster.status.status == "ACTIVE"} + template: + apiVersion: eks.services.k8s.aws/v1alpha1 + kind: Cluster + metadata: + name: ${schema.spec.name} + spec: + name: ${schema.spec.name} + accessConfig: + authenticationMode: API_AND_CONFIG_MAP + roleARN: ${clusterRole.status.ackResourceMetadata.arn} + version: ${schema.spec.version} + resourcesVPCConfig: + endpointPrivateAccess: false + endpointPublicAccess: true + subnetIDs: + - ${clusterSubnetA.status.subnetID} + - ${clusterSubnetB.status.subnetID} + - name: clusterNodeGroup + template: + apiVersion: eks.services.k8s.aws/v1alpha1 + kind: Nodegroup + metadata: + name: kro-cluster-nodegroup + spec: + name: kro-cluster-ng + diskSize: 100 + clusterName: ${cluster.spec.name} + subnets: + - ${clusterSubnetA.status.subnetID} + - ${clusterSubnetB.status.subnetID} + nodeRole: ${clusterNodeRole.status.ackResourceMetadata.arn} + updateConfig: + maxUnavailable: 1 + scalingConfig: + minSize: 1 + maxSize: 1 + desiredSize: 1 diff --git a/examples/application/Readme.md b/examples/application/Readme.md index 1744c946..7318bfb6 100644 --- a/examples/application/Readme.md +++ b/examples/application/Readme.md @@ -1,4 +1,4 @@ -# KRO Application example +# kro Application example This example creates a ResourceGroup called `App` and then instaciates it with the default nginx container image. diff --git a/examples/eks-cluster-mgmt/README.md b/examples/eks-cluster-mgmt/README.md index 1535c6fa..e1cf8cbf 100644 --- a/examples/eks-cluster-mgmt/README.md +++ b/examples/eks-cluster-mgmt/README.md @@ -1,8 +1,8 @@ -# Amazon EKS cluster management using KRO & ACK +# Amazon EKS cluster management using kro & ACK -This example demonstrates how to manage a fleet of EKS clusters using KRO, -ACK, and ArgoCD -- it creates EKS clusters, and bootstraps them with the -required add-ons +This example demonstrates how to manage a fleet of EKS clusters using kro, ACK, +and ArgoCD -- it creates EKS clusters, and bootstraps them with the required +add-ons A hub-spoke model is used in this example; a management cluster (hub) is created as part of the initial setup and the controllers needed for provisioning and @@ -74,7 +74,7 @@ OIDC_PROVIDER=$(aws eks describe-cluster --name $EKS_CLUSTER_NAME --region $AWS_ - ACK IAM controller - ACK EC2 controller - ACK EKS controller -6. Install KRO on the management cluster. Please note that this example is +6. Install kro on the management cluster. Please note that this example is tested on 0.1.0-rc.3. 7. Install EKS pod identity add-on: @@ -84,7 +84,7 @@ aws eks create-addon --cluster-name $CLUSTER_NAME --addon-name eks-pod-identity- ### Repo -8. Clone KRO repo: +8. Clone kro repo: ```sh git clone $KRO_REPO_URL $WORKSPACE_PATH/kro @@ -94,7 +94,7 @@ git clone $KRO_REPO_URL $WORKSPACE_PATH/kro the clusters definition, and it will be reconciled to the management cluster via the GitOps flow -**NOTE:** Until KRO is released, make sure the repo you create is private. +**NOTE:** Until kro is released, make sure the repo you create is private. 10. Save the URL of the created repo in an environment variable: @@ -216,13 +216,12 @@ The initial configuration creates one workload cluster named ## Known issues -1. 
You will need to restart the KRO controller when you add a new workload +1. You will need to restart the kro controller when you add a new workload cluster due to a bug in the controller. Once the resource group `eksclusterwithvpc` is applied, the controller is able to apply the corresponding VPC resources, but it is not able to recognize the generated ids (e.g. subnet id), and feed that into EKS resources. Refer to - [this issue](https://github.com/awslabs/kro/issues/8) for more - details. + [this issue](https://github.com/awslabs/kro/issues/8) for more details. 2. Deleting a cluster does not properly clean up all cluster resources i.e. subnets, routetables are left strangling. ACK EC2 controller keep reporting dependencies preventing deletion. To work around this issue, attempt restart @@ -255,5 +254,5 @@ aws iam delete-role --role-name argocd-hub-role aws iam delete-policy --policy-arn arn:aws:iam::$ACCOUNT_ID:policy/argocd-policy ``` -5. Delete ACK controllers and KRO +5. Delete ACK controllers and kro 6. Delete the management cluster diff --git a/examples/serverless-microservice/README.md b/examples/serverless-microservice/README.md index 09976810..49e52aa9 100644 --- a/examples/serverless-microservice/README.md +++ b/examples/serverless-microservice/README.md @@ -1,45 +1,79 @@ # Example serverless architecture - microservice - -This REST API example shows the end-to-end implementation of a simple application using a serverless approach, as depicted in the diagram: +This REST API example shows the end-to-end implementation of a simple +application using a serverless approach, as depicted in the diagram: ![Serverless microservice diagram](./assets/architecture.png) -Example is (loosely) based on a AWS Serverless Samples repository [serverless-rest-api](https://github.com/aws-samples/serverless-samples/tree/main/serverless-rest-api) project. The services used by this application include Amazon API Gateway, AWS Lambda, and Amazon DynamoDB. Observability implementation is based on Amazon CloudWatch Dashboards and MetricAlarms. This example skips CI/CD implementation and unit/integration testing. +Example is (loosely) based on a AWS Serverless Samples repository +[serverless-rest-api](https://github.com/aws-samples/serverless-samples/tree/main/serverless-rest-api) +project. The services used by this application include Amazon API Gateway, AWS +Lambda, and Amazon DynamoDB. Observability implementation is based on Amazon +CloudWatch Dashboards and MetricAlarms. This example skips CI/CD implementation +and unit/integration testing. ## Implementation notes ### API -API uses API Gateway HTTP API endpoint type. Requests are passed to the integration target (AWS Lambda) for routing and interpretation/response generation. API Gateway does not implement any validation, transformation, path-based routing, API management functions. - - -API Gateway uses Lambda Authorizer for authentication/authorization. However, sample implementation at `./src/authorizer/lambda_function.py` allows all actions on all resources in the API if the `Authorization` header value in the request matches the one stored in the AWS Secrets Manager and retrieved by the Lambda Authorizer when it initializes. - -Make sure to update the authorizer Lambda code according to your authentication/authorization needs. For more details on how to implement Lambda Authorizer, check out [documentation](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html). 
or [blueprints](https://github.com/awslabs/aws-apigateway-lambda-authorizer-blueprints). -Look at Lambda Authorizer code at [serverless-rest-api](https://github.com/aws-samples/serverless-samples/tree/main/serverless-rest-api) for JWT based authorization examples if needed. +API uses API Gateway HTTP API endpoint type. Requests are passed to the +integration target (AWS Lambda) for routing and interpretation/response +generation. API Gateway does not implement any validation, transformation, +path-based routing, API management functions. + +API Gateway uses Lambda Authorizer for authentication/authorization. However, +sample implementation at `./src/authorizer/lambda_function.py` allows all +actions on all resources in the API if the `Authorization` header value in the +request matches the one stored in the AWS Secrets Manager and retrieved by the +Lambda Authorizer when it initializes. + +Make sure to update the authorizer Lambda code according to your +authentication/authorization needs. For more details on how to implement Lambda +Authorizer, check out +[documentation](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html). +or +[blueprints](https://github.com/awslabs/aws-apigateway-lambda-authorizer-blueprints). +Look at Lambda Authorizer code at +[serverless-rest-api](https://github.com/aws-samples/serverless-samples/tree/main/serverless-rest-api) +for JWT based authorization examples if needed. ### Business logic -API Gateway passes the incoming requests to the Lambda function and returns response to the API client. Sample implementation code is available at `./src/logic/lambda_function.py`. It expects the database table name to be specified in the environment variable `TABLE_NAME`. -For HTTP GET requests to the API `items` resource, it runs Amazon DynamoDB `scan` operation and returns all items received as a result. For HTTP GET requests for a particular item (the `items\{id}` resource) it performs `get_item` operation and returns a response from the DynamoDB. PUT request to `items` resource takes incoming payload, adds UUID as a hash key value, adds current timestamp, and performs DynamoDB `put_item` operation. It returns the payload sent to the Dynamo DB as a response body to the API client. +API Gateway passes the incoming requests to the Lambda function and returns +response to the API client. Sample implementation code is available at +`./src/logic/lambda_function.py`. It expects the database table name to be +specified in the environment variable `TABLE_NAME`. + +For HTTP GET requests to the API `items` resource, it runs Amazon DynamoDB +`scan` operation and returns all items received as a result. For HTTP GET +requests for a particular item (the `items\{id}` resource) it performs +`get_item` operation and returns a response from the DynamoDB. PUT request to +`items` resource takes incoming payload, adds UUID as a hash key value, adds +current timestamp, and performs DynamoDB `put_item` operation. It returns the +payload sent to the Dynamo DB as a response body to the API client. ### Database -Example uses DynamoDB table to store data. Database definition is hardcoded in the composition and includes a single required `id` field that is used as a hash key. You will need to change this structure and business logic Lambda code to implement anything more complicated than simple CRUD operations. -# Deployment +Example uses DynamoDB table to store data. 
Database definition is hardcoded in +the composition and includes a single required `id` field that is used as a hash +key. You will need to change this structure and business logic Lambda code to +implement anything more complicated than simple CRUD operations. +# Deployment ## Pre-requisites: + - EKS cluster - [Kubectl](https://kubernetes.io/docs/tasks/tools/) - [AWS ACK](https://aws-controllers-k8s.github.io/community/docs/community/overview/) -Check out `./src/install.sh` script for the commands used to install necessary ACK controllers . +Check out `./src/install.sh` script for the commands used to install necessary +ACK controllers . ### Deploy ResourceGroup Make sure you are in the following directory: + ```shell cd examples/serverless-microservice/ ``` @@ -49,30 +83,39 @@ kubectl apply -f microservice.yaml ``` Verify the ResourceGroups + ```shell kubectl get ResourceGroup ``` Expected output + ``` NAME AGE microservice.kro.run 14m ``` ### Build Lambda function packages + Make sure you are in the following directory: + ```shell cd examples/serverless-microservice/ ``` -Set the AWS region and S3 bucket name to be used by the Lambda build/package process and in the claim: +Set the AWS region and S3 bucket name to be used by the Lambda build/package +process and in the claim: ```shell export AWS_REGION= # example `us-east-1` export S3_BUCKET= # example `my-serverless-microservice-lambdas` ``` -Executing the `build-and-upload-zip.sh` script creates an S3 bucket in a specified region, zips the Lambda functions, and uploads the ZIP file to the S3 bucket. If the bucket already exists and you have access to it, the script will print a message and continue with the upload. +Executing the `build-and-upload-zip.sh` script creates an S3 bucket in a +specified region, zips the Lambda functions, and uploads the ZIP file to the S3 +bucket. If the bucket already exists and you have access to it, the script will +print a message and continue with the upload. + ```shell ./build-and-upload-zip.sh --bucket $S3_BUCKET --region $AWS_REGION ``` @@ -80,43 +123,57 @@ Executing the `build-and-upload-zip.sh` script creates an S3 bucket in a specifi ### Update and apply the claim Change the default value for `CLAIM_NAME` with any name you choose. + ```shell export CLAIM_NAME= # example `test-rest-api` ``` -Run the below commands to generate a random password to be used by a Lambda Authorizer and store it in [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/) +Run the below commands to generate a random password to be used by a Lambda +Authorizer and store it in +[AWS Secrets Manager](https://aws.amazon.com/secrets-manager/) + ```shell export AUTHORIZER_PASSWORD=$(aws secretsmanager get-random-password --output text) export SECRET_ARN=$(aws secretsmanager create-secret --name "$CLAIM_NAME-auth-password" --secret-string "$AUTHORIZER_PASSWORD" --output json | jq .ARN | tr -d '"') ``` -*Note that password is stored in the AUTHORIZER_PASSWORD environment variable, also used by the testing scripts later in this document. If needed, you can retrieve a password from the AWS Secrets Manager using following command:* +_Note that password is stored in the AUTHORIZER_PASSWORD environment variable, +also used by the testing scripts later in this document. 
If needed, you can +retrieve a password from the AWS Secrets Manager using following command:_ + ```shell - aws secretsmanager get-secret-value --secret-id "$CLAIM_NAME-auth-password" + aws secretsmanager get-secret-value --secret-id "$CLAIM_NAME-auth-password" ``` -Run the below command to use the template file `microservice-claim-tmpl.yaml` to create the claim file with the variables `CLAIM_NAME`, `S3_BUCKET`, `SECRET_ARN`, and `AWS_REGION` substituted. +Run the below command to use the template file `microservice-claim-tmpl.yaml` to +create the claim file with the variables `CLAIM_NAME`, `S3_BUCKET`, +`SECRET_ARN`, and `AWS_REGION` substituted. + ```shell envsubst < "microservice-claim-tmpl.yaml" > "claim.yaml" ``` +Check that the claim populated with values. Update prefix, API name or +description values in the claim if desired. -Check that the claim populated with values. Update prefix, API name or description values in the claim if desired. ``` cat claim.yaml ``` Apply the claim + ```shell kubectl apply -f claim.yaml ``` Validate the claim + ``` kubectl get microservice ``` Expected result + ``` NAME AGE test-microservice 12m @@ -125,20 +182,26 @@ test-microservice 12m ## Troubleshooting Get a list of the resource groups + ``` kubectl get ResourceGroup ``` -Expected result + +Expected result + ``` NAME AGE microservice.kro.run 35m ``` Describe your resource group, look for errors and events: + ``` kubectl describe resourcegroup.kro.run/microservice.kro.run ``` + Expected result (resource definitions removed for brevity) + ``` Name: microservice.kro.run Namespace: default @@ -175,17 +238,17 @@ Status: Conditions: Last Transition Time: 2024-07-11T15:36:38Z Message: micro controller is ready - Reason: + Reason: Status: True Type: kro.aws.dev/ReconcilerReady Last Transition Time: 2024-07-11T15:36:38Z Message: Directed Acyclic Graph is synced - Reason: + Reason: Status: True Type: kro.aws.dev/GraphVerified Last Transition Time: 2024-07-11T15:36:38Z Message: Custom Resource Definition is synced - Reason: + Reason: Status: True Type: kro.aws.dev/CustomResourceDefinitionSynced State: ACTIVE @@ -203,28 +266,35 @@ Status: Events: ``` -Check logs of the KRO pod for errors if necessary (this command assumes there is only one KRO pod available): +Check logs of the kro pod for errors if necessary (this command assumes there is +only one kro pod available): + ``` kubectl get pods -o custom-columns=":metadata.name" | grep kro | xargs -I% kubectl logs "%" --since=1h ``` -Check logs of the ACK controller pod for errors if necessary (this command assumes there is only one Lambda controller pod available): +Check logs of the ACK controller pod for errors if necessary (this command +assumes there is only one Lambda controller pod available): + ``` kubectl get pods -o custom-columns=":metadata.name" | grep "ack-lambda-controller" | xargs -I% kubectl logs "%" --since=1h ``` - Describe your microservice: + ``` kubectl describe microservice.kro.run/test-microservice ``` -Check the state of individual resources if needed, look for the errors and events: +Check the state of individual resources if needed, look for the errors and +events: + ``` kubectl describe Function ``` Check for errors, events (in this case function IAM role is missing): + ``` Name: test-logic Namespace: default @@ -258,21 +328,24 @@ Events: ``` ## Clean Up + Delete the serverless application + ```shell kubectl delete -f claim.yaml ``` Delete the S3 bucket you've created and the Lambda ZIP packages in it - Delete the ResourceGroups + 
```shell kubectl delete -f microservice.yaml ``` -*Note:* -*In case deletion process hangs, you may try patching microservice finalizer:* +_Note:_ _In case deletion process hangs, you may try patching microservice +finalizer:_ + ``` kubectl patch Microservice/test-microservice -p '{"metadata":{"finalizers":[]}}' --type=merge -``` \ No newline at end of file +``` diff --git a/helm/Chart.yaml b/helm/Chart.yaml index 212db9db..d18e3ebe 100644 --- a/helm/Chart.yaml +++ b/helm/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: kro-chart -description: A Helm chart for KRO +description: A Helm chart for kro type: application version: 0.1.0-rc.5 -appVersion: "0.1.0-rc.5" \ No newline at end of file +appVersion: "0.1.0-rc.5" diff --git a/internal/controller/instance/controller_reconcile.go b/internal/controller/instance/controller_reconcile.go index 9a0d92d4..8175edbf 100644 --- a/internal/controller/instance/controller_reconcile.go +++ b/internal/controller/instance/controller_reconcile.go @@ -264,7 +264,7 @@ func (igr *instanceGraphReconciler) updateResource( func (igr *instanceGraphReconciler) handleInstanceDeletion(ctx context.Context, resourceStates map[string]*ResourceState) error { instanceUnstructured := igr.runtime.GetInstance() - igr.log.V(1).Info("Getting all resources created by KRO") + igr.log.V(1).Info("Getting all resources created by kro") for _, resourceID := range igr.runtime.TopologicalOrder() { _, err := igr.runtime.Synchronize() if err != nil { diff --git a/internal/dynamiccontroller/dynamic_controller.go b/internal/dynamiccontroller/dynamic_controller.go index a91bb387..0e8eb8db 100644 --- a/internal/dynamiccontroller/dynamic_controller.go +++ b/internal/dynamiccontroller/dynamic_controller.go @@ -29,7 +29,7 @@ // the performance or functionality of others. // // 4. Minimalism: Unlike controller-runtime, this implementation -// is tailored specifically for KRO's needs, avoiding unnecessary +// is tailored specifically for kro's needs, avoiding unnecessary // dependencies and overhead. // // 5. Future Extensibility: It allows for future enhancements such as @@ -39,14 +39,14 @@ // Why not use k8s.io/controller-runtime: // // 1. Staticc nature: controller-runtime is optimized for statically defined -// controllers, however KRO requires runtime creation and management +// controllers, however kro requires runtime creation and management // of controllers for various GVRs. // // 2. Overhead reduction: by not including unused features like leader election // and certain metrics, this implementation remains minimalistic and efficient. // // 3. Customization: this design allows for deep customization and -// optimization specific to KRO's unique requirements for managing +// optimization specific to kro's unique requirements for managing // multiple GVRs dynamically. // // This implementation aims to provide a reusable, efficient, and flexible @@ -105,7 +105,7 @@ type Config struct { // cluster without requiring restarts or pod redeployments. // // It is mainly inspired by native Kubernetes controllers but designed for more -// flexible and lightweight operation. DC serves as the core component of KRO's +// flexible and lightweight operation. DC serves as the core component of kro's // dynamic resource management system. Its primary purpose is to create and manage // "micro" controllers for custom resources defined by users at runtime (via the // ResourceGroup CRs). 
diff --git a/internal/graph/builder.go b/internal/graph/builder.go index 5b6f5c19..6852dabb 100644 --- a/internal/graph/builder.go +++ b/internal/graph/builder.go @@ -88,7 +88,7 @@ type Builder struct { schemaResolver resolver.SchemaResolver // resourceEmulator is used to emulate the resources. This is used to validate // the CEL expressions in the resources. Because looking up the CEL expressions - // isn't enough for KRO to validate the expressions. + // isn't enough for kro to validate the expressions. // // Maybe there is a better way, if anything probably there is a better way to // validate the CEL expressions. To revisit. @@ -107,7 +107,7 @@ func (b *Builder) NewResourceGroup(originalCR *v1alpha1.ResourceGroup) (*Graph, // There are a few steps to build a resource group: // 1. Validate the naming convention of the resource group and its resources. - // KRO leverages CEL expressions to allow users to define new types and + // kro leverages CEL expressions to allow users to define new types and // express relationships between resources. This means that we need to ensure // that the names of the resources are valid to be used in CEL expressions. // for example name-something-something is not a valid name for a resource, @@ -178,7 +178,7 @@ func (b *Builder) NewResourceGroup(originalCR *v1alpha1.ResourceGroup) (*Graph, // // SimpleSchema is a new standard we created to simplify CRD declarations, it is // very useful when we need to define the Spec of a CRD, when it comes to defining - // the status of a CRD, we use CEL expressions. KRO inspects the CEL expressions + // the status of a CRD, we use CEL expressions. `kro` inspects the CEL expressions // to infer the types of the status fields, and generate the OpenAPI schema for the // status field. The CEL expressions are also used to patch the status field of the // instance. diff --git a/internal/graph/validation.go b/internal/graph/validation.go index bc303b9a..d5a7531d 100644 --- a/internal/graph/validation.go +++ b/internal/graph/validation.go @@ -35,7 +35,7 @@ var ( // kubernetesVersionRegex kubernetesVersionRegex = regexp.MustCompile(`^v\d+(?:(?:alpha|beta)\d+)?$`) - // reservedKeyWords is a list of reserved words in KRO. + // reservedKeyWords is a list of reserved words in kro. reservedKeyWords = []string{ "apiVersion", "context", diff --git a/internal/simpleschema/doc.go b/internal/simpleschema/doc.go index 8ada2c6c..2463d320 100644 --- a/internal/simpleschema/doc.go +++ b/internal/simpleschema/doc.go @@ -42,9 +42,9 @@ package simpleschema // message: string // lastTransitionTime: string // -// In KRO you might see us using CEL expressions to define instructions +// In kro you might see us using CEL expressions to define instructions // for patch back status fields to CRD instances. This is not part of the schema -// standard it self but it is a KRO specific extension. For example +// standard it self but it is a kro specific extension. For example // // variables: // spec: diff --git a/internal/simpleschema/transform.go b/internal/simpleschema/transform.go index 9dc3c573..13d9df00 100644 --- a/internal/simpleschema/transform.go +++ b/internal/simpleschema/transform.go @@ -34,7 +34,7 @@ func newTransformer() *transformer { // loadPreDefinedTypes loads pre-defined types into the transformer. // The pre-defined types are used to resolve references in the schema. // -// As of today, KRO doesn't support custom types in the schema - do +// As of today, kro doesn't support custom types in the schema - do // not use this function. 
func (t *transformer) loadPreDefinedTypes(obj map[string]interface{}) error { t.preDefinedTypes = make(map[string]extv1.JSONSchemaProps) diff --git a/test/README.md b/test/README.md index 9340d921..6471e324 100644 --- a/test/README.md +++ b/test/README.md @@ -1,37 +1,41 @@ -# Testing strategy for KRO +# Testing strategy for kro -This document outlines the testing strategy for KRO, focusing on **integration tests** -and **end-to-end testing**. It defines the purpose, approach and boundaries of each type -of testing, providing clear guidelines for contributors and maintainers. +This document outlines the testing strategy for **kro**, focusing on +**integration tests** and **end-to-end testing**. It defines the purpose, +approach and boundaries of each type of testing, providing clear guidelines for +contributors and maintainers. -KRO is a complex controller that interacts with multiple Kubernetes resources and -depends on various Kubernetes features, such as custom resources, custom controllers, -informers, and client-go. The testing strategy aims to ensure that KRO works as -expected in a Kubernetes environment, and that it can be safely deployed in production -clusters. +kro is a complex controller that interacts with multiple Kubernetes resources +and depends on various Kubernetes features, such as custom resources, custom +controllers, informers, and client-go. The testing strategy aims to ensure that +kro works as expected in a Kubernetes environment, and that it can be safely +deployed in production clusters. ## Technical principles for testing -1. Use existing Kubernetes testing frameworks (when possible): Don't reinvent the wheel. - If a feature is not covered by existing frameworks, contribute to them. -3. Focus on KRO's logic, not on other controllers or Kubernetes components. e.g +1. Use existing Kubernetes testing frameworks (when possible): Don't reinvent + the wheel. If a feature is not covered by existing frameworks, contribute to + them. +2. Focus on kro's logic, not on other controllers or Kubernetes components. e.g avoid testing native controllers, ACK or Karpenter's behaviour... -4. Prioritize integration tests, validate with end to end tests. -5. Maintain seperation of concerns, controller logic, integration tests, and e2e tests -6. Ensure readability: similar to the codebase, tests should be easy to read, understand - and maintain. +3. Prioritize integration tests, validate with end to end tests. +4. Maintain seperation of concerns, controller logic, integration tests, and e2e + tests +5. Ensure readability: similar to the codebase, tests should be easy to read, + understand and maintain. ## Directory structure -- `integration/`: Contains integration test suites for KRO. -- `e2e/`: Contains e2e test suites for KRO. -- `testdata/`: Directory for test data, such as Kubernetes manifests, resourcegroups ... +- `integration/`: Contains integration test suites for kro. +- `e2e/`: Contains e2e test suites for kro. +- `testdata/`: Directory for test data, such as Kubernetes manifests, + resourcegroups ... ## Integration tests -In integration tests, the focus should be on your custom controller's logic and its -interactions with the Kubernetes API server. Instead of emulating other controllers, -you should: +In integration tests, the focus should be on your custom controller's logic and +its interactions with the Kubernetes API server. Instead of emulating other +controllers, you should: 1. Mock the Kubernetes API server responses 2. 
Verify that your controller makes the correct API calls @@ -50,28 +54,27 @@ you should: 8. Check that the ResourceGroup instance status was updated correctly 9. Verify that the some resources were created in the cluster 10. Trigger a second reconciliation and check that the status was updated - correctly + correctly 11. Repeat until all the RG instances are created 12. Do the same for updates and deletions ## E2e tests -E2E tests for KRO should focus on validating the entire system's behavior -in a real Kubernetes environment. These tests ensure that KRO works correctly -with actual Kubernetes resources and other controllers. The approach for E2E tests +E2E tests for kro should focus on validating the entire system's behavior in a +real Kubernetes environment. These tests ensure that kro works correctly with +actual Kubernetes resources and other controllers. The approach for E2E tests should: 1. Use a real Kubernetes cluster (e.g. kind, minikube, or EKS) -2. Deploy KRO controller and it's CRDs -3. Deploy other controllers or resources that will interact with KRO - resources. -4. Create KRO ResourceGroups and ResourceGroupInstances and verify - their full lifecycle. +2. Deploy kro controller and it's CRDs +3. Deploy other controllers or resources that will interact with kro resources. +4. Create kro ResourceGroups and ResourceGroupInstances and verify their full + lifecycle. ### E2e test example -1. Deploy KRO controller and CRDs -2. Deploy a sample application that uses KRO +1. Deploy kro controller and CRDs +2. Deploy a sample application that uses kro 3. Create a `ResourceGroup` custom resource 4. Verify that the corresponding CRD is created in the cluster 5. Create an instance of the `ResourceGroup` @@ -87,13 +90,13 @@ should: ### Addional scenarios 1. Cross namespace resource management -2. Scaling testing: Create a large number of ResourceGroups and - ResourceGroup instances to test KRO at scale. -3. Failure recovery: Simulate failures in the controller or the Kubernetes - API server and verify that KRO recovers correctly -4. Controller upgrade testing: Deploy a new version of KRO and verify that it +2. Scaling testing: Create a large number of ResourceGroups and ResourceGroup + instances to test kro at scale. +3. Failure recovery: Simulate failures in the controller or the Kubernetes API + server and verify that kro recovers correctly +4. Controller upgrade testing: Deploy a new version of kro and verify that it can handle existing `ResourceGroups` and `ResourceGroup` instances -5. ResourceGroup conflict testing: Create multiple `ResourceGroups` with conflicting - resources and verify that KRO handles the conflicts correctly -6. Integration with other controllers: Deploy other controllers that interact with - KRO resources and verify that they work correctly together +5. ResourceGroup conflict testing: Create multiple `ResourceGroups` with + conflicting resources and verify that kro handles the conflicts correctly +6. 
Integration with other controllers: Deploy other controllers that interact + with kro resources and verify that they work correctly together diff --git a/test/integration/suites/ackekscluster/generator.go b/test/integration/suites/ackekscluster/generator.go index bc2ffc4d..6fad8eb1 100644 --- a/test/integration/suites/ackekscluster/generator.go +++ b/test/integration/suites/ackekscluster/generator.go @@ -190,7 +190,7 @@ func clusterRoleDef(namespace string) map[string]interface{} { }, "spec": map[string]interface{}{ "name": "kro-cluster-role", - "description": "KRO created cluster cluster role", + "description": "kro created cluster cluster role", "policies": []interface{}{ "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", }, @@ -220,7 +220,7 @@ func nodeRoleDef(namespace string) map[string]interface{} { }, "spec": map[string]interface{}{ "name": "kro-cluster-node-role", - "description": "KRO created cluster node role", + "description": "kro created cluster node role", "policies": []interface{}{ "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", @@ -252,7 +252,7 @@ func adminRoleDef(namespace string) map[string]interface{} { }, "spec": map[string]interface{}{ "name": "kro-cluster-pia-role", - "description": "KRO created cluster admin pia role", + "description": "kro created cluster admin pia role", "policies": []interface{}{ "arn:aws:iam::aws:policy/AdministratorAccess", }, diff --git a/website/docs/docs/concepts/00-resource-groups.md b/website/docs/docs/concepts/00-resource-groups.md index 888559fc..f31e389f 100644 --- a/website/docs/docs/concepts/00-resource-groups.md +++ b/website/docs/docs/concepts/00-resource-groups.md @@ -4,8 +4,8 @@ sidebar_position: 1 # ResourceGroups -**ResourceGroups** are the fundamental building blocks in KRO. They provide a -way to define, organize, and manage sets of related Kubernetes resources as a +**ResourceGroups** are the fundamental building blocks in **kro**. They provide +a way to define, organize, and manage sets of related Kubernetes resources as a single, reusable unit. ## What is a **ResourceGroup**? @@ -86,7 +86,7 @@ can be set when instantiating this ResourceGroup. ## **ResourceGroup** Processing -When a **ResourceGroup** is submitted to the Kubernetes API server, the KRO +When a **ResourceGroup** is submitted to the Kubernetes API server, the kro controller processes it as follows: 1. **Formal Verification**: The controller performs a comprehensive analysis of @@ -106,7 +106,7 @@ controller processes it as follows: 3. **CRD Registration**: It registers the newly generated CRD with the Kubernetes API server, making it available for use in the cluster. -4. **Micro-Controller Deployment**: KRO deploys a dedicated micro-controller for +4. **Micro-Controller Deployment**: kro deploys a dedicated micro-controller for this ResourceGroup. This micro-controller will listen for **"instance" events** - instances of the CRD created in step 2. It will be responsible for managing the **lifecycle of resources** defined in the ResourceGroup for each @@ -122,7 +122,7 @@ the web application with customizable parameters. The deployed micro-controller would then manage all **SimpleWebApps instances**, creating and managing the associated **Deployments** and **Services** as defined in the ResourceGroup. 
-The **KRO** controller continues to monitor the **ResourceGroup** for any +The **kro** controller continues to monitor the **ResourceGroup** for any changes, updating the corresponding CRD and micro-controller as necessary. ## **ResourceGroup** Instance Example diff --git a/website/docs/docs/concepts/10-simple-schema.md b/website/docs/docs/concepts/10-simple-schema.md index 2fecd05e..cc8d7011 100644 --- a/website/docs/docs/concepts/10-simple-schema.md +++ b/website/docs/docs/concepts/10-simple-schema.md @@ -4,7 +4,7 @@ sidebar_position: 2 # Simple Schema -KRO's Simple Schema provides a powerful yet intuitive way to define the +kro's Simple Schema provides a powerful yet intuitive way to define the structure of your ResourceGroup. Here is comprehensive example: ```yaml @@ -124,7 +124,7 @@ include value markers: be dynamically obtained from another resource. The value is a CEL expression that is validated at ResourceGroup processing time and evaluated at runtime. -:::tip Note that the value marker is a KRO extension to the OpenAPISchema and is +:::tip Note that the value marker is a kro extension to the OpenAPISchema and is not part of the official OpenAPISchema specification. ::: Example: @@ -136,19 +136,19 @@ status: ## Default status fields -**KRO** automatically injects two common fields into the status of all instances +**kro** automatically injects two common fields into the status of all instances generated from **ResourceGroups**: `conditions` and `state`. These fields provide essential information about the current status of the instance and its associated resources. :::tip `conditions` and `state` are reserved words in the status section. If a -user defines these fields in their **ResourceGroup**'s status schema, KRO will +user defines these fields in their **ResourceGroup**'s status schema, kro will override them with its own values. ::: ### 1. Conditions The `conditions` field is an array of condition objects, each representing a -specific aspect of the instance's state. KRO automatically manages this field. +specific aspect of the instance's state. kro automatically manages this field. ```yaml status: diff --git a/website/docs/docs/concepts/15-instances.md b/website/docs/docs/concepts/15-instances.md index 0d265f09..993b0279 100644 --- a/website/docs/docs/concepts/15-instances.md +++ b/website/docs/docs/concepts/15-instances.md @@ -4,8 +4,8 @@ sidebar_position: 15 # Instances -Instances are a fundamental concept in **KRO** that represent instances of -ResourceGroups. They define the desired state of a set of resources, which KRO +Instances are a fundamental concept in **kro** that represent instances of +ResourceGroups. They define the desired state of a set of resources, which kro continuously works to maintain. ## What is an Instance? @@ -46,15 +46,15 @@ ResourceGroup. ## The reconciliation loop -KRO manages Instances through a continuous reconciliation process: +kro manages Instances through a continuous reconciliation process: -- **Desired state detection**: KRO observes the Instance, which represents the +- **Desired state detection**: kro observes the Instance, which represents the desired state of resources. -- **Current state assessment**: KRO talks to the api-server and checks the +- **Current state assessment**: kro talks to the api-server and checks the current state of resources in the cluster related to the Instance. - **Difference identification**: Any differences between the desired state (Instance) and the current state are identified. 
-- **State Reconciliation**: KRO takes necessary actions to align the current +- **State Reconciliation**: kro takes necessary actions to align the current state with the desired state. This may involve creating, updating, or deleting resources as needed. - **Status Updates**: The Instance's status is updated to reflect the current @@ -79,7 +79,7 @@ KRO manages Instances through a continuous reconciliation process: state. Use version control for your Instances to track changes over time. - Leverage labels and annotations in Instances for organization and filtering. - Regularly review Instances to ensure they reflect current requirements. -- Use KRO's dry-run feature to preview reconciliation actions before applying +- Use kro's dry-run feature to preview reconciliation actions before applying changes to Instances. - Monitor Instance statuses to understand the current state of your applications. @@ -88,13 +88,13 @@ KRO manages Instances through a continuous reconciliation process: ## Common Status Fields -KRO automatically injects two common fields into the status of all instances: +kro automatically injects two common fields into the status of all instances: **Conditions** and **State**. These fields provide crucial information about the current status of the instance and its associated resources. ### 1. Conditions -Conditions are a standard Kubernetes concept that KRO leverages to provide +Conditions are a standard Kubernetes concept that kro leverages to provide detailed status information. Each condition represents a specific aspect of the instance's state. Common conditions include: @@ -148,4 +148,4 @@ status: These common status fields provide users with a consistent and informative way to check the health and state of their instances across different ResourceGroups. They are essential for monitoring, troubleshooting, and -automating operations based on the status of KRO-managed resources. +automating operations based on the status of kro-managed resources. diff --git a/website/docs/docs/concepts/_category_.json b/website/docs/docs/concepts/_category_.json index d29b57ce..8d8506ec 100644 --- a/website/docs/docs/concepts/_category_.json +++ b/website/docs/docs/concepts/_category_.json @@ -2,7 +2,7 @@ "label": "Concepts", "position": 40, "link": { - "description": "Learn about the core concepts of KRO" + "description": "Learn about the core concepts of kro" }, "collapsible": true, "collapsed": false diff --git a/website/docs/docs/faq.md b/website/docs/docs/faq.md index 9f11d210..414bcd9f 100644 --- a/website/docs/docs/faq.md +++ b/website/docs/docs/faq.md @@ -1,6 +1,76 @@ --- -sidebar_label: 'FAQ' +sidebar_label: "FAQ" sidebar_position: 100 --- -# FAQ \ No newline at end of file +# FAQ + +1. **What is kro?** + + Kube Resource Orchestrator (**kro**) is a new operator for Kubernetes that + simplifies the creation of complex Kubernetes resource configurations. kro + lets you create and manage custom groups of Kubernetes resources by defining + them as a _ResourceGroup_, the project's fundamental custom resource. + ResourceGroup specifications define a set of resources and how they relate to + each other functionally. Once defined, resource groups can be applied to a + Kubernetes cluster where the kro controller is running. Once validated by + kro, you can create instances of your resource group. kro translates your + ResourceGroup instance and its parameters into specific Kubernetes resources + and configurations which it then manages for you. + +2. 
**How does kro work?** + + kro is designed to use core Kubernetes primitives to make resource grouping, + customization, and dependency management simpler. When a ResourceGroup is + applied to the cluster, the kro controller verifies its specification, then + dynamically creates a new CRD and registers it with the API server. kro then + deploys a dedicated controller to respond to instance events on the CRD. This + microcontroller is responsible for managing the lifecycle of resources + defined in the ResourceGroup for each instance that is created. + +3. **How do I use kro?** + + First, you define your custom resource groups by creating _ResourceGroup_ + specifications. These specify one or more Kubernetes resources, and can + include specific configuration for each resource. + + For example, you can define a _WebApp_ resource group that is composed of a + _Deployment_, pre-configured to deploy your web server backend, and a + _Service_ configured to run on a specific port. You can just as easily create + a more complex _WebAppWithDB_ resource group by combining the existing + _WebApp_ resource group with a _Table_ custom resource to provision a cloud + managed database instance for your web app to use. + + Once you have defined a ResourceGroup, you can apply it to a Kubernetes + cluster where the kro controller is running. kro will take care of the heavy + lifting of creating CRDs and deploying dedicated controllers in order to + manage instances of your new custom resource group. + + You are now ready to create instances of your new custom resource group, and + kro will respond by dynamically creating, configuring, and managing the + underlying Kubernetes resources for you. + +4. **Why did you build this project?** + + We want to help streamline and simplify building with Kubernetes. Building + with Kubernetes means dealing with resources that need to operate and work + together, and orchestrating this can get complex and difficult at scale. With + this project, we're taking a first step in reducing the complexity of + resource dependency management and customization, paving the way for a simple + and scalable way to create complex custom resources for Kubernetes. + +5. **Do I need to have an AWS account to use this?** + + No, you can use kro with any Kubernetes cluster. + +6. **Can I use this in production?** + + This project is in active development and not yet intended for production + use. The _ResourceGroup_ CRD and other APIs used in this project are not + solidified and highly subject to change. + +7. **Will this be built into Amazon Elastic Kubernetes Service (EKS)?** + + This project is a public experiment, and not currently integrated into Amazon + EKS. We welcome your feedback and want to hear about what works and what + doesn't for your use cases, please let us know what you think. diff --git a/website/docs/docs/getting-started/01-Installation.md b/website/docs/docs/getting-started/01-Installation.md index 0b22fc29..66b64c6e 100644 --- a/website/docs/docs/getting-started/01-Installation.md +++ b/website/docs/docs/getting-started/01-Installation.md @@ -2,9 +2,9 @@ sidebar_position: 1 --- -# Installing KRO +# Installing kro -This guide walks you through the process of installing KRO on your Kubernetes +This guide walks you through the process of installing kro on your Kubernetes cluster using Helm. ## Prerequisites @@ -18,14 +18,14 @@ Before you begin, ensure you have the following: :::info[**Alpha Stage**] -KRO is currently in alpha stage. 
While the images are publicly available, please +kro is currently in alpha stage. While the images are publicly available, please note that the software is still under active development and APIs may change. ::: -### Install KRO using Helm +### Install kro using Helm -Once authenticated, install KRO using the Helm chart: +Once authenticated, install kro using the Helm chart: ```sh # Fetch the latest release version from GitHub @@ -35,7 +35,7 @@ export KRO_VERSION=$(curl -s \ sed -E 's/.*"([^"]+)".*/\1/' \ ) -# Install KRO using Helm +# Install kro using Helm helm install kro oci://public.ecr.aws/kro/kro \ --namespace kro \ --create-namespace \ @@ -55,15 +55,15 @@ correctly: You should see the "kro" release listed. -2. Check the KRO pods: +2. Check the kro pods: ```sh kubectl get pods -n kro ``` You should see kro-related pods running. -## Upgrading KRO +## Upgrading kro -To upgrade to a newer version of KRO, use the Helm upgrade command: +To upgrade to a newer version of kro, use the Helm upgrade command: ```bash # Replace `` with the version you want to upgrade to. @@ -78,18 +78,19 @@ helm upgrade kro oci://public.ecr.aws/kro/kro \ :::info[**CRD Updates**] Helm does not support updating CRDs, so you may need to manually update or -remove and re-apply kro related CRDs. For more information, refer to the Helm documentation. +remove and re-apply kro related CRDs. For more information, refer to the Helm +documentation. ::: -## Uninstalling KRO +## Uninstalling kro -To uninstall KRO, use the following command: +To uninstall kro, use the following command: ```bash helm uninstall kro -n kro ``` -Keep in mind that this command will remove all KRO resources from your cluster, +Keep in mind that this command will remove all kro resources from your cluster, except for the ResourceGroup CRD and any other custom resources you may have created. diff --git a/website/docs/docs/getting-started/02-deploy-a-resource-group.md b/website/docs/docs/getting-started/02-deploy-a-resource-group.md index 5a03e48d..b235e0ae 100644 --- a/website/docs/docs/getting-started/02-deploy-a-resource-group.md +++ b/website/docs/docs/getting-started/02-deploy-a-resource-group.md @@ -4,19 +4,19 @@ sidebar_position: 2 # Deploy Your First ResourceGroup -This guide will walk you through creating your first Resource Group in **KRO**. -We'll create a simple `ResourceGroup` that demonstrates key KRO features. +This guide will walk you through creating your first Resource Group in **kro**. +We'll create a simple `ResourceGroup` that demonstrates key kro features. ## What is a **ResourceGroup**? A `ResourceGroup` lets you create new Kubernetes APIs that deploy multiple -resources together as a single, reusable unit. In this example, we’ll create a -`ResourceGroup` that packages a reusable set of resources, including a `Deployment`, `Service`, -and `Ingress`. These resources are available in any Kubernetes cluster. -Users can then call the API to deploy resources as a single unit, ensuring they're -always created together with the right configuration. +resources together as a single, reusable unit. In this example, we’ll create a +`ResourceGroup` that packages a reusable set of resources, including a +`Deployment`, `Service`, and `Ingress`. These resources are available in any +Kubernetes cluster. Users can then call the API to deploy resources as a single +unit, ensuring they're always created together with the right configuration. 
-Under the hood, when you create a `ResourceGroup`, KRO: +Under the hood, when you create a `ResourceGroup`, kro: 1. Treats your resources as a Directed Acyclic Graph (DAG) to understand their dependencies @@ -26,9 +26,10 @@ Under the hood, when you create a `ResourceGroup`, KRO: :::tip[info] -**KRO** is a Kubernetes-native tool that speaks **Kubernetes**! All you need -to get started is a Kubernetes cluster that supports CRDs, version 1.16 or later. -KRO understands native Kubernetes resource as well as any custom resources your cluster supports. +**kro** is a Kubernetes-native tool that speaks **Kubernetes**! All you need to +get started is a Kubernetes cluster that supports CRDs, version 1.16 or later. +kro understands native Kubernetes resource as well as any custom resources your +cluster supports. ::: @@ -36,7 +37,7 @@ KRO understands native Kubernetes resource as well as any custom resources your Before you begin, make sure you have the following: -- **KRO** [installed](./01-Installation.md) and running in your Kubernetes +- **kro** [installed](./01-Installation.md) and running in your Kubernetes cluster. - `kubectl` installed and configured to interact with your Kubernetes cluster. @@ -51,7 +52,7 @@ kind: ResourceGroup metadata: name: my-application spec: - # KRO uses this simple schema to create your CRD schema and apply it + # kro uses this simple schema to create your CRD schema and apply it # The schema defines what users can provide when they instantiate the RG (create an instance). schema: apiVersion: v1alpha1 @@ -161,7 +162,7 @@ spec: ### Create your Application Instance -Now that your `ResourceGroup` is created, KRO has generated a new API +Now that your `ResourceGroup` is created, kro has generated a new API (Application) that orchestrates creation of the a `Deployment`, a `Service`, and an `Ingress`. Let's use it! @@ -223,7 +224,7 @@ an `Ingress`. Let's use it! ### Delete the Application instance -KRO can also help you clean up resources when you're done with them. +kro can also help you clean up resources when you're done with them. 1. **Delete the Application instance**: Clean up the resources by deleting the Application instance: diff --git a/website/docs/docs/overview.md b/website/docs/docs/overview.md index e3668ec8..28998cf6 100644 --- a/website/docs/docs/overview.md +++ b/website/docs/docs/overview.md @@ -2,18 +2,18 @@ sidebar_position: 1 --- -# What is KRO? +# What is kro? -**KRO** (Kube Resource Orchesrtator) is an open-source project that allows you +**kro** (Kube Resource Orchestrator) is an open-source project that allows you to define custom **Kubernetes APIs** using simple and straightforward -configuration. With KRO, you can easily configure new custom APIs that create a -group of Kubernetes objects and the logical operations between them. KRO +configuration. With kro, you can easily configure new custom APIs that create a +group of Kubernetes objects and the logical operations between them. kro automatically calculates the order in which objects should be created. You can pass values from one object to another, set default values for fields in the API specification, and incorporate conditionals into your custom API definitions. End users can easily call these custom APIs to create grouped resources. -# How does KRO work? +# How does kro work? ### Developer interface @@ -43,12 +43,13 @@ Definition (CRD) called **ResourceGroup (RG)**. 
The **Platform**, **Security**, and **Compliance** teams, can collaborate to
 create custom APIs by defining Custom Resources for the ResourceGroup CRD.
 
-In the depicted example, the **Platform Team** has created a **RG** with arbitrary name
-"Application Stack" that encapsulates the necessary resources, along with any additional
-logic, abstractions, and security best practices. When the RG is applied to the cluster,
-a new API of kind ApplicationStack is created and available for Developer to interact with.
-The Developers no longer need to directly manage the underlying infrastructure complexities,
-as the custom API handles the deployment and configuration of the required resources.
+In the depicted example, the **Platform Team** has created a **RG** with the
+arbitrary name "Application Stack" that encapsulates the necessary resources,
+along with any additional logic, abstractions, and security best practices. When
+the RG is applied to the cluster, a new API of kind ApplicationStack is created
+and available for Developers to interact with. The Developers no longer need to
+directly manage the underlying infrastructure complexities, as the custom API
+handles the deployment and configuration of the required resources.
@@ -75,11 +76,11 @@ _Fugure 3: ResourceGroup Instance (RGI)_
-# Why KRO? +# Why kro? ### Manage any group of resources as one unit -Using **KRO**, the **Platform Team** can enable Developer teams to quickly +Using **kro**, the **Platform Team** can enable Developer teams to quickly deploy and manage applications and their dependencies as one unit, handling the entire lifecycle from deployment to maintenance. The new APIs integrate seamlessly with developers' existing CD tools, preserving familiar processes and @@ -88,7 +89,7 @@ interfaces to simplify adoption. ### Collaborate Transform **Kubernetes** into your unified platform configuration framework -using **KRO**. Platform, Compliance, and Security teams work together to develop +using **kro**. Platform, Compliance, and Security teams work together to develop APIs that standardize and streamline configurations, making it easier for Developer teams to adopt secure, compliant practices. This collaboration lets you build your organizational standards directly into the APIs, ensuring every diff --git a/website/docusaurus.config.ts b/website/docusaurus.config.ts index 30575751..6424798a 100644 --- a/website/docusaurus.config.ts +++ b/website/docusaurus.config.ts @@ -3,7 +3,7 @@ import type { Config } from "@docusaurus/types"; import type * as Preset from "@docusaurus/preset-classic"; const config: Config = { - title: "KRO", + title: "kro", tagline: "Kube Resources Orchestrator", // The Melodious Kubernetes Integrator // Cementing Your Kubernetes Infrastructure @@ -70,7 +70,7 @@ const config: Config = { }, }, navbar: { - title: "KRO", + title: "kro", hideOnScroll: true, /* logo: { alt: "KRO Logo", diff --git a/website/src/components/HomepageFeatures/index.tsx b/website/src/components/HomepageFeatures/index.tsx index 5e9d3e7a..90de6c67 100644 --- a/website/src/components/HomepageFeatures/index.tsx +++ b/website/src/components/HomepageFeatures/index.tsx @@ -35,7 +35,7 @@ const FeatureList: FeatureItem[] = [ scale: 0.6, description: ( <> - KRO streamlines Kubernetes complexity, allowing you to manage resources + kro streamlines Kubernetes complexity, allowing you to manage resources intuitively and focus on developing your application, not wrestling with YAML files. @@ -49,7 +49,7 @@ const FeatureList: FeatureItem[] = [ Svg: require("@site/static/img/expand-arrows.svg").default, description: ( <> - KRO effortlessly scales your resource management from simple deployments + kro effortlessly scales your resource management from simple deployments to complex, multi-service architectures. ), diff --git a/website/src/pages/index.tsx b/website/src/pages/index.tsx index f106852d..442f473a 100644 --- a/website/src/pages/index.tsx +++ b/website/src/pages/index.tsx @@ -41,7 +41,7 @@ export default function Home(): JSX.Element { const { siteConfig } = useDocusaurusContext(); return (