diff --git a/cmd/broker/main.go b/cmd/broker/main.go
index 1d14157fe..e7f46d786 100644
--- a/cmd/broker/main.go
+++ b/cmd/broker/main.go
@@ -337,7 +337,7 @@ func main() {
 	fatalOnError(err, logs)
 
 	// create kubeconfig builder
-	kcBuilder := kubeconfig.NewBuilder(provisionerClient, skrK8sClientProvider)
+	kcBuilder := kubeconfig.NewBuilder(provisionerClient, cli, skrK8sClientProvider)
 
 	// create server
 	router := mux.NewRouter()
diff --git a/internal/kubeconfig/builder.go b/internal/kubeconfig/builder.go
index eb6e35626..5a5ad91a3 100644
--- a/internal/kubeconfig/builder.go
+++ b/internal/kubeconfig/builder.go
@@ -2,13 +2,17 @@ package kubeconfig
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"text/template"
 
-	"github.com/kyma-project/kyma-environment-broker/internal"
+	imv1 "github.com/kyma-project/infrastructure-manager/api/v1"
 	"github.com/kyma-project/kyma-environment-broker/internal/provisioner"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 
+	"github.com/kyma-project/kyma-environment-broker/internal"
 	"gopkg.in/yaml.v2"
+	"k8s.io/apimachinery/pkg/api/errors"
 )
 
 type Config struct {
@@ -16,18 +20,20 @@ type Config struct {
 }
 
 type Builder struct {
-	provisionerClient  provisioner.Client
 	kubeconfigProvider kubeconfigProvider
+	kcpClient          client.Client
+	provisionerClient  provisioner.Client
 }
 
 type kubeconfigProvider interface {
 	KubeconfigForRuntimeID(runtimeID string) ([]byte, error)
 }
 
-func NewBuilder(provisionerClient provisioner.Client, provider kubeconfigProvider) *Builder {
+func NewBuilder(provisionerClient provisioner.Client, kcpClient client.Client, provider kubeconfigProvider) *Builder {
 	return &Builder{
-		provisionerClient:  provisionerClient,
+		kcpClient:          kcpClient,
 		kubeconfigProvider: provider,
+		provisionerClient:  provisionerClient,
 	}
 }
 
@@ -43,9 +49,12 @@ func (b *Builder) BuildFromAdminKubeconfig(instance *internal.Instance, adminKub
 	if instance.RuntimeID == "" {
 		return "", fmt.Errorf("RuntimeID must not be empty")
 	}
-	status, err := b.provisionerClient.RuntimeStatus(instance.GlobalAccountID, instance.RuntimeID)
+	issuerURL, clientID, err := b.getOidcDataFromRuntimeResource(instance.RuntimeID)
+	if errors.IsNotFound(err) {
+		issuerURL, clientID, err = b.getOidcDataFromProvisioner(instance)
+	}
 	if err != nil {
-		return "", fmt.Errorf("while fetching runtime status from provisioner: %w", err)
+		return "", fmt.Errorf("while fetching oidc data: %w", err)
 	}
 
 	var kubeCfg kubeconfig
@@ -70,8 +79,8 @@
 		ContextName:   kubeCfg.CurrentContext,
 		CAData:        kubeCfg.Clusters[0].Cluster.CertificateAuthorityData,
 		ServerURL:     kubeCfg.Clusters[0].Cluster.Server,
-		OIDCIssuerURL: status.RuntimeConfiguration.ClusterConfig.OidcConfig.IssuerURL,
-		OIDCClientID:  status.RuntimeConfiguration.ClusterConfig.OidcConfig.ClientID,
+		OIDCIssuerURL: issuerURL,
+		OIDCClientID:  clientID,
 	})
 }
 
@@ -126,3 +135,26 @@ func (b *Builder) validKubeconfig(kc kubeconfig) error {
 
 	return nil
 }
+
+func (b *Builder) getOidcDataFromRuntimeResource(id string) (string, string, error) {
+	var runtime imv1.Runtime
+	err := b.kcpClient.Get(context.Background(), client.ObjectKey{Name: id, Namespace: kcpNamespace}, &runtime)
+	if err != nil {
+		return "", "", err
+	}
+	if runtime.Spec.Shoot.Kubernetes.KubeAPIServer.OidcConfig.IssuerURL == nil {
+		return "", "", fmt.Errorf("Runtime Resource contains an empty OIDC issuer URL")
+	}
+	if runtime.Spec.Shoot.Kubernetes.KubeAPIServer.OidcConfig.ClientID == nil {
+		return "", "", fmt.Errorf("Runtime Resource contains an empty OIDC client ID")
+	}
+	return *runtime.Spec.Shoot.Kubernetes.KubeAPIServer.OidcConfig.IssuerURL, *runtime.Spec.Shoot.Kubernetes.KubeAPIServer.OidcConfig.ClientID, nil
+}
+
+func (b *Builder) getOidcDataFromProvisioner(instance *internal.Instance) (string, string, error) {
+	status, err := b.provisionerClient.RuntimeStatus(instance.GlobalAccountID, instance.RuntimeID)
+	if err != nil {
+		return "", "", err
+	}
+	return status.RuntimeConfiguration.ClusterConfig.OidcConfig.IssuerURL, status.RuntimeConfiguration.ClusterConfig.OidcConfig.ClientID, nil
+}
diff --git a/internal/kubeconfig/builder_test.go b/internal/kubeconfig/builder_test.go
index d2e605ca3..3d6f2cc13 100644
--- a/internal/kubeconfig/builder_test.go
+++ b/internal/kubeconfig/builder_test.go
@@ -4,6 +4,11 @@ import (
 	"fmt"
 	"testing"
 
+	imv1 "github.com/kyma-project/infrastructure-manager/api/v1"
+	"github.com/stretchr/testify/assert"
+	"k8s.io/client-go/kubernetes/scheme"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
 	schema "github.com/kyma-project/control-plane/components/provisioner/pkg/gqlschema"
 	"github.com/kyma-project/kyma-environment-broker/internal"
 	"github.com/kyma-project/kyma-environment-broker/internal/provisioner/automock"
@@ -19,7 +24,11 @@ const (
 	clientID = "c1id"
 )
 
-func TestBuilder_Build(t *testing.T) {
+func TestBuilder_BuildFromProvisioner(t *testing.T) {
+	err := imv1.AddToScheme(scheme.Scheme)
+	assert.NoError(t, err)
+	kcpClient := fake.NewClientBuilder().Build()
+
 	t.Run("new kubeconfig was build properly", func(t *testing.T) {
 		// given
 		provisionerClient := &automock.Client{}
@@ -40,7 +49,7 @@ func TestBuilder_Build(t *testing.T) {
 		}, nil)
 		defer provisionerClient.AssertExpectations(t)
 
-		builder := NewBuilder(provisionerClient, NewFakeKubeconfigProvider(skrKubeconfig()))
+		builder := NewBuilder(provisionerClient, kcpClient, NewFakeKubeconfigProvider(skrKubeconfig()))
 
 		instance := &internal.Instance{
 			RuntimeID:       runtimeID,
@@ -61,7 +70,7 @@
 		provisionerClient.On("RuntimeStatus", globalAccountID, runtimeID).Return(schema.RuntimeStatus{}, fmt.Errorf("cannot return kubeconfig"))
 		defer provisionerClient.AssertExpectations(t)
 
-		builder := NewBuilder(provisionerClient, NewFakeKubeconfigProvider(skrKubeconfig()))
+		builder := NewBuilder(provisionerClient, kcpClient, NewFakeKubeconfigProvider(skrKubeconfig()))
 		instance := &internal.Instance{
 			RuntimeID:       runtimeID,
 			GlobalAccountID: globalAccountID,
@@ -72,11 +81,55 @@
 
 		//then
 		require.Error(t, err)
-		require.Contains(t, err.Error(), "while fetching runtime status from provisioner: cannot return kubeconfig")
+		require.Contains(t, err.Error(), "while fetching oidc data")
+	})
+}
+
+func TestBuilder_BuildFromRuntimeResource(t *testing.T) {
+	err := imv1.AddToScheme(scheme.Scheme)
+	assert.NoError(t, err)
+	kcpClient := fake.NewClientBuilder().Build()
+
+	t.Run("new kubeconfig was built properly", func(t *testing.T) {
+		// given
+		provisionerClient := &automock.Client{}
+		provisionerClient.On("RuntimeStatus", globalAccountID, runtimeID).Return(schema.RuntimeStatus{
+			RuntimeConfiguration: &schema.RuntimeConfig{
+				Kubeconfig: skrKubeconfig(),
+				ClusterConfig: &schema.GardenerConfig{
+					OidcConfig: &schema.OIDCConfig{
+						ClientID:       clientID,
+						GroupsClaim:    "gclaim",
+						IssuerURL:      issuerURL,
+						SigningAlgs:    nil,
+						UsernameClaim:  "uclaim",
+						UsernamePrefix: "-",
+					},
+				},
+			},
+		}, nil)
+		defer provisionerClient.AssertExpectations(t)
+
+		builder := NewBuilder(provisionerClient, kcpClient, NewFakeKubeconfigProvider(skrKubeconfig()))
+
+		instance := &internal.Instance{
+			RuntimeID:       runtimeID,
+			GlobalAccountID: globalAccountID,
+		}
+
+		// when
+		kubeconfig, err := builder.Build(instance)
+
+		//then
+		require.NoError(t, err)
+		require.Equal(t, kubeconfig, newKubeconfig())
 	})
 }
 
 func TestBuilder_BuildFromAdminKubeconfig(t *testing.T) {
+	err := imv1.AddToScheme(scheme.Scheme)
+	assert.NoError(t, err)
+	kcpClient := fake.NewClientBuilder().Build()
 	t.Run("new kubeconfig was build properly", func(t *testing.T) {
 		// given
 		provisionerClient := &automock.Client{}
@@ -97,7 +150,7 @@ func TestBuilder_BuildFromAdminKubeconfig(t *testing.T) {
 		}, nil)
 		defer provisionerClient.AssertExpectations(t)
 
-		builder := NewBuilder(provisionerClient, NewFakeKubeconfigProvider(skrKubeconfig()))
+		builder := NewBuilder(provisionerClient, kcpClient, NewFakeKubeconfigProvider(skrKubeconfig()))
 		instance := &internal.Instance{
 			RuntimeID:       runtimeID,