Test: Implemented ginkgo test against local kubeconfig (#2015)
* Implemented ginkgo test against local kubeconfig

* PR Comments

* Update test/pkg/environment/environment.go

Co-authored-by: Nick Tran <[email protected]>
ellistarn and njtran authored Jul 1, 2022
1 parent 17f0b9a commit 4c35c0f
Showing 25 changed files with 319 additions and 87 deletions.
9 changes: 6 additions & 3 deletions Makefile
@@ -24,20 +24,23 @@ dev: verify test ## Run all steps in the developer loop
ci: toolchain verify licenses battletest ## Run all steps used by continuous integration

test: ## Run tests
- ginkgo -r
+ go test -v ./pkg/...

strongertests:
# Run randomized, racing, code coveraged, tests
ginkgo -r \
-cover -coverprofile=coverage.out -outputdir=. -coverpkg=./pkg/... \
- --randomizeAllSpecs --randomizeSuites -race
+ --randomizeAllSpecs --randomizeSuites -race ./pkg/...

+ e2etests: ## Run the e2e suite against your local cluster
+ go test -v ./test/... -environment-name=${CLUSTER_NAME}
+
benchmark:
go test -tags=test_performance -run=NoTests -bench=. ./...

deflake:
for i in $(shell seq 1 5); do make strongertests || exit 1; done
- ginkgo -r -race -tags random_test_delay
+ ginkgo -r pkg -race -tags random_test_delay

battletest: strongertests
go tool cover -html coverage.out -o coverage.html
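The new `e2etests` target passes `-environment-name` through to `go test`, and the commit message points at `test/pkg/environment/environment.go` as the code that consumes it. That file is not shown on this page, so the following is only a minimal sketch assuming a plausible shape for the helper: the flag name comes from the Makefile above, while the `Environment` type, the `NewEnvironment` constructor, and the kubeconfig resolution are illustrative guesses rather than the commit's actual API.

```go
// Hypothetical sketch of an e2e environment helper; not the commit's code.
package environment

import (
	"flag"
	"testing"

	"k8s.io/client-go/kubernetes"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

// -environment-name is the flag the e2etests Makefile target supplies.
var environmentName = flag.String("environment-name", "", "name of the cluster under test")

// Environment bundles what a spec needs to talk to the local cluster.
type Environment struct {
	Name   string
	Client kubernetes.Interface
}

// NewEnvironment resolves the local kubeconfig ($KUBECONFIG or ~/.kube/config)
// and builds a clientset against it, failing the test on any error.
func NewEnvironment(t *testing.T) *Environment {
	cfg, err := config.GetConfig()
	if err != nil {
		t.Fatalf("loading kubeconfig: %v", err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		t.Fatalf("building clientset: %v", err)
	}
	return &Environment{Name: *environmentName, Client: clientset}
}
```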
6 changes: 6 additions & 0 deletions pkg/apis/provisioning/v1alpha5/labels.go
@@ -29,6 +29,12 @@ var (

// Karpenter specific domains and labels
KarpenterLabelDomain = "karpenter.sh"

+ ProvisionerNameLabelKey = Group + "/provisioner-name"
+ DoNotEvictPodAnnotationKey = Group + "/do-not-evict"
+ EmptinessTimestampAnnotationKey = Group + "/emptiness-timestamp"
+ TerminationFinalizer = Group + "/termination"

LabelCapacityType = KarpenterLabelDomain + "/capacity-type"
LabelNodeInitialized = KarpenterLabelDomain + "/initialized"

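Moving these keys next to the other label constants keeps every `karpenter.sh/...` string in one file. As an illustrative call site (not part of this commit), a controller-runtime client can use the relocated `ProvisionerNameLabelKey` to list the nodes a provisioner launched:

```go
// Illustrative only: the label key is from labels.go above; the listing
// helper itself is not part of this commit.
package nodeutil

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/aws/karpenter/pkg/apis/provisioning/v1alpha5"
)

// NodesForProvisioner lists nodes labeled karpenter.sh/provisioner-name=<name>.
func NodesForProvisioner(ctx context.Context, c client.Client, name string) (*v1.NodeList, error) {
	nodes := &v1.NodeList{}
	if err := c.List(ctx, nodes, client.MatchingLabels{
		v1alpha5.ProvisionerNameLabelKey: name,
	}); err != nil {
		return nil, err
	}
	return nodes, nil
}
```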
4 changes: 0 additions & 4 deletions pkg/apis/provisioning/v1alpha5/register.go
@@ -40,10 +40,6 @@ var (
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
})
- ProvisionerNameLabelKey = Group + "/provisioner-name"
- DoNotEvictPodAnnotationKey = Group + "/do-not-evict"
- EmptinessTimestampAnnotationKey = Group + "/emptiness-timestamp"
- TerminationFinalizer = Group + "/termination"
)

const (
4 changes: 2 additions & 2 deletions pkg/cloudprovider/aws/apis/v1alpha1/provider.go
@@ -192,7 +192,7 @@ type BlockDevice struct {

func Deserialize(provider *v1alpha5.Provider) (*AWS, error) {
if provider == nil {
- return nil, fmt.Errorf("invariant violated: spec.provider is not defined. Is the defaulting webhook installed?")
+ return nil, fmt.Errorf("invariant violated: spec.provider is not defined. Is the validating webhook installed?")
}
a := &AWS{}
_, gvk, err := Codec.UniversalDeserializer().Decode(provider.Raw, nil, a)
@@ -207,7 +207,7 @@ func Deserialize(provider *v1alpha5.Provider) (*AWS, error) {

func (a *AWS) Serialize(provider *v1alpha5.Provider) error {
if provider == nil {
- return fmt.Errorf("invariant violated: spec.provider is not defined. Is the defaulting webhook installed?")
+ return fmt.Errorf("invariant violated: spec.provider is not defined. Is the validating webhook installed?")
}
bytes, err := json.Marshal(a)
if err != nil {
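The reworded message fires on the same nil-provider invariant in both `Deserialize` and `Serialize`. Below is a minimal sketch of a caller, using only the `Deserialize` signature visible in this hunk; the wrapping function and the `SubnetSelector` field are assumptions about the v1alpha1 API, not code from this commit.

```go
// Sketch of a Deserialize call site; assumes the v1alpha1 AWS type
// exposes SubnetSelector, which is not shown in this diff.
package sketch

import (
	"fmt"

	"github.com/aws/karpenter/pkg/apis/provisioning/v1alpha5"
	"github.com/aws/karpenter/pkg/cloudprovider/aws/apis/v1alpha1"
)

func subnetSelector(provider *v1alpha5.Provider) (map[string]string, error) {
	awsSpec, err := v1alpha1.Deserialize(provider)
	if err != nil {
		// Reached when spec.provider is nil, i.e. the webhook that
		// populates it was never installed.
		return nil, fmt.Errorf("deserializing provider, %w", err)
	}
	return awsSpec.SubnetSelector, nil
}
```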
12 changes: 6 additions & 6 deletions pkg/cloudprovider/aws/suite_test.go
@@ -982,7 +982,7 @@ var _ = Describe("Allocation", func() {
}
nodeTemplate := test.AWSNodeTemplate(test.AWSNodeTemplateOptions{
UserData: aws.String(string(content)),
- AWS: provider,
+ AWS: *provider,
ObjectMeta: metav1.ObjectMeta{Name: providerRefName}})
ExpectApplied(ctx, env.Client, nodeTemplate)
controller := provisioning.NewController(injection.WithOptions(ctx, opts), cfg, env.Client, clientSet.CoreV1(), recorder, cloudProvider, cluster)
@@ -1010,7 +1010,7 @@ var _ = Describe("Allocation", func() {
}
nodeTemplate := test.AWSNodeTemplate(test.AWSNodeTemplateOptions{
UserData: nil,
- AWS: provider,
+ AWS: *provider,
ObjectMeta: metav1.ObjectMeta{Name: providerRefName}})
ExpectApplied(ctx, env.Client, nodeTemplate)
controller := provisioning.NewController(injection.WithOptions(ctx, opts), cfg, env.Client, clientSet.CoreV1(), recorder, cloudProvider, cluster)
@@ -1049,7 +1049,7 @@ var _ = Describe("Allocation", func() {
}
nodeTemplate := test.AWSNodeTemplate(test.AWSNodeTemplateOptions{
UserData: aws.String("#/bin/bash\n ./not-toml.sh"),
- AWS: provider,
+ AWS: *provider,
ObjectMeta: metav1.ObjectMeta{Name: providerRefName}})
ExpectApplied(ctx, env.Client, nodeTemplate)
controller := provisioning.NewController(ctx, cfg, env.Client, clientSet.CoreV1(), recorder, cloudProvider, cluster)
@@ -1071,7 +1071,7 @@ var _ = Describe("Allocation", func() {
}
nodeTemplate := test.AWSNodeTemplate(test.AWSNodeTemplateOptions{
UserData: aws.String(string(content)),
- AWS: provider,
+ AWS: *provider,
ObjectMeta: metav1.ObjectMeta{Name: providerRefName}})
ExpectApplied(ctx, env.Client, nodeTemplate)
controller := provisioning.NewController(injection.WithOptions(ctx, opts), cfg, env.Client, clientSet.CoreV1(), recorder, cloudProvider, cluster)
@@ -1095,7 +1095,7 @@ var _ = Describe("Allocation", func() {
}
nodeTemplate := test.AWSNodeTemplate(test.AWSNodeTemplateOptions{
UserData: nil,
- AWS: provider,
+ AWS: *provider,
ObjectMeta: metav1.ObjectMeta{Name: providerRefName}})
ExpectApplied(ctx, env.Client, nodeTemplate)
controller := provisioning.NewController(injection.WithOptions(ctx, opts), cfg, env.Client, clientSet.CoreV1(), recorder, cloudProvider, cluster)
@@ -1119,7 +1119,7 @@ var _ = Describe("Allocation", func() {
}
nodeTemplate := test.AWSNodeTemplate(test.AWSNodeTemplateOptions{
UserData: aws.String("#/bin/bash\n ./not-mime.sh"),
- AWS: provider,
+ AWS: *provider,
ObjectMeta: metav1.ObjectMeta{Name: providerRefName}})
ExpectApplied(ctx, env.Client, nodeTemplate)
controller := provisioning.NewController(injection.WithOptions(ctx, opts), cfg, env.Client, clientSet.CoreV1(), recorder, cloudProvider, cluster)
4 changes: 2 additions & 2 deletions pkg/test/awsnodetemplate.go
@@ -27,7 +27,7 @@ import (
type AWSNodeTemplateOptions struct {
metav1.ObjectMeta
UserData *string
- AWS *aws.AWS
+ AWS aws.AWS
}

func AWSNodeTemplate(overrides ...AWSNodeTemplateOptions) *v1alpha1.AWSNodeTemplate {
@@ -41,7 +41,7 @@ func AWSNodeTemplate(overrides ...AWSNodeTemplateOptions) *v1alpha1.AWSNodeTempl
ObjectMeta: ObjectMeta(options.ObjectMeta),
Spec: v1alpha1.AWSNodeTemplateSpec{
UserData: options.UserData,
- AWS: *options.AWS,
+ AWS: options.AWS,
},
}
}
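This value-type change is what forces the `AWS: *provider` dereferences in `suite_test.go` above: with `AWS aws.AWS` held by value, the helper always has a usable zero value and no longer dereferences a possibly-nil pointer. Usage after the change, assuming the `provider` and `providerRefName` variables from the surrounding test suite:

```go
// Mirrors the suite_test.go updates above; provider (*v1alpha1.AWS) and
// providerRefName are assumed from the surrounding test suite.
nodeTemplate := test.AWSNodeTemplate(test.AWSNodeTemplateOptions{
	ObjectMeta: metav1.ObjectMeta{Name: providerRefName},
	AWS:        *provider, // value, not pointer, after this change
})
```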
4 changes: 1 addition & 3 deletions pkg/test/expectations/expectations.go
@@ -24,9 +24,7 @@ import (
"github.com/aws/karpenter/pkg/test"

"github.com/onsi/ginkgo"

- //nolint:revive,stylecheck
- . "github.com/onsi/gomega"
+ . "github.com/onsi/gomega" //nolint:revive,stylecheck
prometheus "github.com/prometheus/client_model/go"
"sigs.k8s.io/controller-runtime/pkg/metrics"

2 changes: 1 addition & 1 deletion pkg/test/pods.go
@@ -75,7 +75,7 @@ func Pod(overrides ...PodOptions) *v1.Pod {
}
}
if options.Image == "" {
options.Image = "k8s.gcr.io/pause"
options.Image = "alpine"
}
volumes := []v1.Volume{}
for _, pvc := range options.PersistentVolumeClaims {
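A one-line default swap: pods built by the test helper now default to `alpine`, presumably a friendlier default for e2e runs against a real cluster. Illustrative use, relying on the helper's documented last-write-wins override semantics:

```go
// Illustrative: the default applies only when PodOptions.Image is empty.
pod := test.Pod()                                  // image defaults to "alpine"
nginx := test.Pod(test.PodOptions{Image: "nginx"}) // explicit image preserved
```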
26 changes: 19 additions & 7 deletions pkg/test/provisioner.go
@@ -26,6 +26,8 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"knative.dev/pkg/logging"
"knative.dev/pkg/ptr"

"github.com/aws/karpenter/pkg/apis/provisioning/v1alpha5"
)
@@ -44,7 +46,7 @@ type ProvisionerOptions struct {
Status v1alpha5.ProvisionerStatus
}

- // Provisioner creates a test pod with defaults that can be overridden by ProvisionerOptions.
+ // Provisioner creates a test provisioner with defaults that can be overridden by ProvisionerOptions.
// Overrides are applied in order, with a last write wins semantic.
func Provisioner(overrides ...ProvisionerOptions) *v1alpha5.Provisioner {
options := ProvisionerOptions{}
@@ -59,25 +61,35 @@ func Provisioner(overrides ...ProvisionerOptions) *v1alpha5.Provisioner {
if options.Limits == nil {
options.Limits = v1.ResourceList{v1.ResourceCPU: resource.MustParse("1000")}
}
- if options.Provider == nil {
- options.Provider = struct{}{}
- }
- provider, _ := json.Marshal(options.Provider)

provisioner := &v1alpha5.Provisioner{
ObjectMeta: ObjectMeta(options.ObjectMeta),
Spec: v1alpha5.ProvisionerSpec{
Requirements: options.Requirements,
KubeletConfiguration: options.Kubelet,
- Provider: &runtime.RawExtension{Raw: provider},
ProviderRef: options.ProviderRef,
Taints: options.Taints,
StartupTaints: options.StartupTaints,
Labels: options.Labels,
Limits: &v1alpha5.Limits{Resources: options.Limits},
+ TTLSecondsAfterEmpty: ptr.Int64(10),
},
Status: options.Status,
}

+ if options.ProviderRef == nil {
+ if options.Provider == nil {
+ options.Provider = struct{}{}
+ }
+ provider, err := json.Marshal(options.Provider)
+ if err != nil {
+ panic(err)
+ }
+ provisioner.Spec.Provider = &runtime.RawExtension{Raw: provider}
+ }
provisioner.SetDefaults(context.Background())
- _ = provisioner.Validate(context.Background())
+ if err := provisioner.Validate(context.Background()); err != nil {
+ logging.FromContext(context.TODO()).Info("TODO: Fix the tests that cause this")
+ }
return provisioner
}
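Three behavioral shifts here: the inline `spec.provider` is only synthesized when no `ProviderRef` is set, a marshaling failure now panics instead of being silently dropped, and every test provisioner defaults `ttlSecondsAfterEmpty` to 10. A sketch of the two construction paths follows; the option field types are assumed from the marshaling logic above.

```go
// Inline path: Provider is marshaled into spec.provider (ProviderRef is nil).
inline := test.Provisioner(test.ProvisionerOptions{
	Provider: &v1alpha1.AWS{},
})

// Reference path: spec.provider stays nil after this change.
byRef := test.Provisioner(test.ProvisionerOptions{
	ProviderRef: &v1alpha5.ProviderRef{Name: "my-template"},
})
_, _ = inline, byRef
```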
7 changes: 7 additions & 0 deletions test/README.md
@@ -13,3 +13,10 @@ Testing infrastructure will be divided up into three layers: Management Cluster,
- `Clusters Under Test`: Rapid iteration [KIT](https://github.com/awslabs/kubernetes-iteration-toolkit) Guest Clusters and EKS Clusters where test suites will run.

*Note: A more formal design discussing testing infrastructure will come soon.*

+ ## Developing
+ Use the Tekton UI to manage and monitor resources and test-runs:
+ ```
+ kubectl port-forward service/tekton-dashboard -n tekton-pipelines 9097&
+ open http://localhost:9097
+ ```
10 changes: 5 additions & 5 deletions test/infrastructure/management-cluster.cloudformation.yaml
@@ -5,17 +5,17 @@ Parameters:
Type: String
Description: "Host cluster name"
Resources:
- KarpenterGuestClusterNodeInstanceProfile:
+ KarpenterNodeInstanceProfile:
Type: "AWS::IAM::InstanceProfile"
Properties:
- InstanceProfileName: !Sub "KarpenterGuestClusterNodeInstanceProfile-${ClusterName}"
+ InstanceProfileName: !Sub "KarpenterNodeInstanceProfile-${ClusterName}"
Path: "/"
Roles:
- Ref: "KarpenterGuestClusterNodeRole"
KarpenterGuestClusterNodeRole:
- Ref: "KarpenterNodeRole"
KarpenterNodeRole:
Type: "AWS::IAM::Role"
Properties:
RoleName: !Sub "KarpenterGuestClusterNodeRole-${ClusterName}"
RoleName: !Sub "KarpenterNodeRole-${ClusterName}"
Path: /
AssumeRolePolicyDocument:
Version: "2012-10-17"
3 changes: 1 addition & 2 deletions test/infrastructure/scripts/step-01-config.sh
@@ -1,6 +1,5 @@
export CLUSTER_NAME="${CLUSTER_NAME:-karpenter-test-cluster}"
export CLUSTER_NAME="${CLUSTER_NAME:-karpenter-test-infrastructure}"
export AWS_PROFILE="${AWS_PROFILE:-karpenter-ci}"
export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
export AWS_REGION="${AWS_REGION:-us-west-2}"
- export KARPENTER_VERSION="${KARPENTER_VERSION:-v0.9.0}"
export AWS_PAGER=""
8 changes: 6 additions & 2 deletions test/infrastructure/scripts/step-02-eksctl-cluster.sh
@@ -1,7 +1,7 @@
cmd="create"
K8S_VERSION="1.22"
eksctl get cluster --name "${CLUSTER_NAME}" && cmd="upgrade"
- eksctl ${cmd} cluster -f - << EOF
+ eksctl ${cmd} cluster -f - <<EOF
---
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
@@ -23,7 +23,11 @@ managedNodeGroups:
name: ${CLUSTER_NAME}-system-pool
desiredCapacity: 2
minSize: 2
- maxSize: 4
+ maxSize: 2
+ taints:
+ - key: CriticalAddonsOnly
+ value: "true"
+ effect: NoSchedule
iam:
withOIDC: true
EOF
9 changes: 9 additions & 0 deletions test/infrastructure/scripts/step-03-tekton-controllers.sh
@@ -1,7 +1,16 @@
echo "Installing Tekton"

kubectl apply -f https://storage.googleapis.com/tekton-releases/pipeline/previous/v0.33.2/release.yaml
kubectl patch configmap config-defaults -n tekton-pipelines --patch '{"data": { "default-task-run-workspace-binding": "emptyDir: {}" } }'
+ kubectl patch deployment tekton-pipelines-controller -n tekton-pipelines --patch '{"spec":{"template":{"spec":{"tolerations":[{"key":"CriticalAddonsOnly", "operator":"Exists"}]}}}}'
+ kubectl patch deployment tekton-pipelines-webhook -n tekton-pipelines --patch '{"spec":{"template":{"spec":{"tolerations":[{"key":"CriticalAddonsOnly", "operator":"Exists"}]}}}}'
+ sleep 10

kubectl apply -f https://storage.googleapis.com/tekton-releases/triggers/previous/v0.19.0/release.yaml
+ kubectl patch deployment tekton-triggers-controller -n tekton-pipelines --patch '{"spec":{"template":{"spec":{"tolerations":[{"key":"CriticalAddonsOnly", "operator":"Exists"}]}}}}'
+ kubectl patch deployment tekton-triggers-webhook -n tekton-pipelines --patch '{"spec":{"template":{"spec":{"tolerations":[{"key":"CriticalAddonsOnly", "operator":"Exists"}]}}}}'
+ sleep 10
+
kubectl apply -f https://github.com/tektoncd/dashboard/releases/download/v0.24.1/tekton-dashboard-release.yaml
+ kubectl patch deployment tekton-dashboard -n tekton-pipelines --patch '{"spec":{"template":{"spec":{"tolerations":[{"key":"CriticalAddonsOnly", "operator":"Exists"}]}}}}'
+ sleep 10
2 changes: 2 additions & 0 deletions test/infrastructure/scripts/step-04-aws-load-balancer.sh
@@ -22,5 +22,7 @@ helm upgrade --install aws-load-balancer-controller eks/aws-load-balancer-contro
-n kube-system \
--set clusterName=${CLUSTER_NAME} \
--set serviceAccount.create=false \
+ --set tolerations[0].key="CriticalAddonsOnly" \
+ --set tolerations[0].operator="Exists" \
--set replicaCount=1 \
--set serviceAccount.name=aws-load-balancer-controller
22 changes: 12 additions & 10 deletions test/infrastructure/scripts/step-05-ebs-csi-driver.sh
@@ -9,17 +9,19 @@ if ! aws iam get-policy --policy-arn arn:aws:iam::${AWS_ACCOUNT_ID}:policy/Karpe
fi

eksctl create iamserviceaccount \
- --name=ebs-csi-controller-sa \
- --namespace=kube-system \
- --cluster=${CLUSTER_NAME} \
- --attach-policy-arn=arn:aws:iam::${AWS_ACCOUNT_ID}:policy/Karpenter-AmazonEBSCSIDriverServiceRolePolicy-${CLUSTER_NAME} \
- --approve \
- --override-existing-serviceaccounts
+ --name=ebs-csi-controller-sa \
+ --namespace=kube-system \
+ --cluster=${CLUSTER_NAME} \
+ --attach-policy-arn=arn:aws:iam::${AWS_ACCOUNT_ID}:policy/Karpenter-AmazonEBSCSIDriverServiceRolePolicy-${CLUSTER_NAME} \
+ --approve \
+ --override-existing-serviceaccounts

helm repo add aws-ebs-csi-driver https://kubernetes-sigs.github.io/aws-ebs-csi-driver
helm repo update
helm upgrade --install aws-ebs-csi-driver \
- --namespace kube-system \
- --set controller.replicaCount=1 \
- --set controller.serviceAccount.create=false \
- aws-ebs-csi-driver/aws-ebs-csi-driver
+ --namespace kube-system \
+ --set controller.replicaCount=1 \
+ --set controller.serviceAccount.create=false \
+ --set tolerations[0].key="CriticalAddonsOnly" \
+ --set tolerations[0].operator="Exists" \
+ aws-ebs-csi-driver/aws-ebs-csi-driver
21 changes: 11 additions & 10 deletions test/infrastructure/scripts/step-06-karpenter.sh
@@ -1,13 +1,13 @@
echo "Installing Karpenter for Guest Clusters"
echo "Installing Karpenter"

aws cloudformation deploy \
--stack-name "KarpenterTesting-${CLUSTER_NAME}" \
--stack-name "KarpenterTestInfrastructure-${CLUSTER_NAME}" \
--template-file ${SCRIPTPATH}/management-cluster.cloudformation.yaml \
--capabilities CAPABILITY_NAMED_IAM \
--parameter-overrides "ClusterName=${CLUSTER_NAME}"

ROLE=" - rolearn: arn:aws:iam::${AWS_ACCOUNT_ID}:role/KarpenterGuestClusterNodeRole-${CLUSTER_NAME}\n username: system:node:{{EC2PrivateDNSName}}\n groups:\n - system:nodes\n - system:bootstrappers"
kubectl get -n kube-system configmap/aws-auth -o yaml | awk "/mapRoles: \|/{print;print \"${ROLE}\";next}1" > /tmp/aws-auth-patch.yml
ROLE=" - rolearn: arn:aws:iam::${AWS_ACCOUNT_ID}:role/KarpenterNodeRole-${CLUSTER_NAME}\n username: system:node:{{EC2PrivateDNSName}}\n groups:\n - system:nodes\n - system:bootstrappers"
kubectl get -n kube-system configmap/aws-auth -o yaml | awk "/mapRoles: \|/{print;print \"${ROLE}\";next}1" >/tmp/aws-auth-patch.yml
kubectl patch configmap/aws-auth -n kube-system --patch "$(cat /tmp/aws-auth-patch.yml)"

eksctl create iamserviceaccount \
@@ -21,15 +21,16 @@ export KARPENTER_IAM_ROLE_ARN="arn:aws:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAM

aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true

if [ -z "$(helm repo list | grep karpenter)" ] ; then
if [ -z "$(helm repo list | grep karpenter)" ]; then
helm repo add karpenter https://charts.karpenter.sh
fi
helm repo update
- helm upgrade --install --namespace karpenter --create-namespace \
- karpenter karpenter/karpenter \
- --version ${KARPENTER_VERSION} \
+ helm upgrade --install --namespace karpenter --create-namespace karpenter --version v0.9.0 \
--set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAME}-karpenter" \
--set clusterName=${CLUSTER_NAME} \
- --set clusterEndpoint=$(aws eks describe-cluster --name ${CLUSTER_NAME} --query "cluster.endpoint" --output json) \
- --set aws.defaultInstanceProfile=KarpenterGuestClusterNodeInstanceProfile-${CLUSTER_NAME} \
+ --set clusterEndpoint=$(aws eks describe-cluster --name ${CLUSTER_NAME} --query "cluster.endpoint" --output json) \
+ --set aws.defaultInstanceProfile=KarpenterNodeInstanceProfile-${CLUSTER_NAME} \
+ --set tolerations[0].key="CriticalAddonsOnly" \
+ --set tolerations[0].operator="Exists" \
+ karpenter/karpenter \
--wait
