From 931c7c5fcb14dc54683539d87c060d69f80be7d5 Mon Sep 17 00:00:00 2001 From: Hussein Galal Date: Thu, 23 Jan 2025 01:55:05 +0200 Subject: [PATCH] Fix secret tokens and DNS translation (#200) * Include init containers in token translation Signed-off-by: galal-hussein * Fix kubernetes.default service DNS translation Signed-off-by: galal-hussein * Add skip test var to dapper Signed-off-by: galal-hussein * Add kubelet version and image pull policy to the shared agent Signed-off-by: galal-hussein * fixes Signed-off-by: galal-hussein --------- Signed-off-by: galal-hussein --- Dockerfile.dapper | 2 +- charts/k3k/templates/deployment.yaml | 2 + charts/k3k/values.yaml | 1 + k3k-kubelet/config.go | 4 ++ k3k-kubelet/controller/handler.go | 4 ++ k3k-kubelet/kubelet.go | 8 ++-- k3k-kubelet/main.go | 8 +++- k3k-kubelet/provider/configure.go | 6 ++- k3k-kubelet/provider/provider.go | 36 +++++++++++++++++- k3k-kubelet/provider/token.go | 32 ++++++++++++++++ main.go | 39 ++++++++++++++++---- ops/test | 6 ++- pkg/controller/cluster/agent/agent.go | 4 +- pkg/controller/cluster/agent/shared.go | 34 ++++++++++------- pkg/controller/cluster/cluster.go | 26 +++++++------ pkg/controller/cluster/cluster_suite_test.go | 2 +- pkg/controller/cluster/server/ingress.go | 5 ++- pkg/controller/cluster/server/service.go | 13 +++++++ 18 files changed, 183 insertions(+), 49 deletions(-) diff --git a/Dockerfile.dapper b/Dockerfile.dapper index 4d830b88..8e156c60 100644 --- a/Dockerfile.dapper +++ b/Dockerfile.dapper @@ -23,7 +23,7 @@ RUN go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest && \ cp $ENVTEST_BIN/* /usr/local/kubebuilder/bin ENV GO111MODULE on -ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS GITHUB_TOKEN +ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS GITHUB_TOKEN SKIP_TESTS ENV DAPPER_SOURCE /go/src/github.com/rancher/k3k/ ENV DAPPER_OUTPUT ./bin ./dist ./deploy ./charts ENV DAPPER_DOCKER_SOCKET true diff --git a/charts/k3k/templates/deployment.yaml
b/charts/k3k/templates/deployment.yaml index e0df1dd7..2b983fa3 100644 --- a/charts/k3k/templates/deployment.yaml +++ b/charts/k3k/templates/deployment.yaml @@ -24,6 +24,8 @@ spec: value: {{ .Values.host.clusterCIDR }} - name: SHARED_AGENT_IMAGE value: "{{ .Values.sharedAgent.image.repository }}:{{ default .Chart.AppVersion .Values.sharedAgent.image.tag }}" + - name: SHARED_AGENT_PULL_POLICY + value: {{ .Values.sharedAgent.image.pullPolicy }} ports: - containerPort: 8080 name: https diff --git a/charts/k3k/values.yaml b/charts/k3k/values.yaml index 26a6bad1..71f6ca54 100644 --- a/charts/k3k/values.yaml +++ b/charts/k3k/values.yaml @@ -26,3 +26,4 @@ sharedAgent: image: repository: "rancher/k3k-kubelet" tag: "" + pullPolicy: "" diff --git a/k3k-kubelet/config.go b/k3k-kubelet/config.go index 73c40e5f..0bb030c6 100644 --- a/k3k-kubelet/config.go +++ b/k3k-kubelet/config.go @@ -18,6 +18,7 @@ type config struct { VirtualConfigPath string `yaml:"virtualConfigPath,omitempty"` KubeletPort string `yaml:"kubeletPort,omitempty"` ServerIP string `yaml:"serverIP,omitempty"` + Version string `yaml:"version,omitempty"` } func (c *config) unmarshalYAML(data []byte) error { @@ -54,6 +55,9 @@ func (c *config) unmarshalYAML(data []byte) error { if c.ServerIP == "" { c.ServerIP = conf.ServerIP } + if c.Version == "" { + c.Version = conf.Version + } return nil } diff --git a/k3k-kubelet/controller/handler.go b/k3k-kubelet/controller/handler.go index acf25a6f..29827022 100644 --- a/k3k-kubelet/controller/handler.go +++ b/k3k-kubelet/controller/handler.go @@ -66,6 +66,10 @@ func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object) // note that this doesn't do any type safety - fix this // when generics work c.Translater.TranslateTo(s) + // Remove service-account-token types when synced to the host + if s.Type == v1.SecretTypeServiceAccountToken { + s.Type = v1.SecretTypeOpaque + } return s, nil }, Logger: c.Logger, diff --git a/k3k-kubelet/kubelet.go 
b/k3k-kubelet/kubelet.go index 3ffad12e..fac1719a 100644 --- a/k3k-kubelet/kubelet.go +++ b/k3k-kubelet/kubelet.go @@ -187,8 +187,8 @@ func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostCl return service.Spec.ClusterIP, nil } -func (k *kubelet) registerNode(ctx context.Context, agentIP, srvPort, namespace, name, hostname, serverIP, dnsIP string) error { - providerFunc := k.newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP) +func (k *kubelet) registerNode(ctx context.Context, agentIP, srvPort, namespace, name, hostname, serverIP, dnsIP, version string) error { + providerFunc := k.newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version) nodeOpts := k.nodeOpts(ctx, srvPort, namespace, name, hostname, agentIP) var err error @@ -235,14 +235,14 @@ func (k *kubelet) start(ctx context.Context) { k.logger.Info("node exited successfully") } -func (k *kubelet) newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP string) nodeutil.NewProviderFunc { +func (k *kubelet) newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version string) nodeutil.NewProviderFunc { return func(pc nodeutil.ProviderConfig) (nodeutil.Provider, node.NodeProvider, error) { utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, namespace, name, serverIP, dnsIP) if err != nil { return nil, nil, errors.New("unable to make nodeutil provider: " + err.Error()) } - provider.ConfigureNode(k.logger, pc.Node, hostname, k.port, agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster) + provider.ConfigureNode(k.logger, pc.Node, hostname, k.port, agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster, version) return utilProvider, &provider.Node{}, nil } diff --git a/k3k-kubelet/main.go b/k3k-kubelet/main.go index 3cd153d7..314072ae 100644 --- a/k3k-kubelet/main.go +++ b/k3k-kubelet/main.go @@ -73,6 +73,12 @@ func main() { Destination: 
&cfg.ServerIP, EnvVar: "SERVER_IP", }, + cli.StringFlag{ + Name: "version", + Usage: "Version of kubernetes server", + Destination: &cfg.Version, + EnvVar: "VERSION", + }, cli.StringFlag{ Name: "config", Usage: "Path to k3k-kubelet config file", @@ -112,7 +118,7 @@ func run(clx *cli.Context) { logger.Fatalw("failed to create new virtual kubelet instance", zap.Error(err)) } - if err := k.registerNode(ctx, k.agentIP, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, cfg.ServerIP, k.dnsIP); err != nil { + if err := k.registerNode(ctx, k.agentIP, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, cfg.ServerIP, k.dnsIP, cfg.Version); err != nil { logger.Fatalw("failed to register new node", zap.Error(err)) } diff --git a/k3k-kubelet/provider/configure.go b/k3k-kubelet/provider/configure.go index 669465e6..85ac5295 100644 --- a/k3k-kubelet/provider/configure.go +++ b/k3k-kubelet/provider/configure.go @@ -15,7 +15,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func ConfigureNode(logger *k3klog.Logger, node *v1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster) { +func ConfigureNode(logger *k3klog.Logger, node *v1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string) { node.Status.Conditions = nodeConditions() node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort) node.Status.Addresses = []v1.NodeAddress{ @@ -32,6 +32,10 @@ func ConfigureNode(logger *k3klog.Logger, node *v1.Node, hostname string, servic node.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true" node.Labels["kubernetes.io/os"] = "linux" + // configure versions + node.Status.NodeInfo.KubeletVersion = version + node.Status.NodeInfo.KubeProxyVersion = version + updateNodeCapacityInterval := 10 * time.Second 
ticker := time.NewTicker(updateNodeCapacityInterval) diff --git a/k3k-kubelet/provider/provider.go b/k3k-kubelet/provider/provider.go index ee59721b..09f9d5a1 100644 --- a/k3k-kubelet/provider/provider.go +++ b/k3k-kubelet/provider/provider.go @@ -369,7 +369,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error { return fmt.Errorf("unable to transform tokens for pod %s/%s: %w", pod.Namespace, pod.Name, err) } // inject networking information to the pod including the virtual cluster controlplane endpoint - p.configureNetworking(pod.Name, pod.Namespace, tPod) + p.configureNetworking(pod.Name, pod.Namespace, tPod, p.serverIP) p.logger.Infow("Creating pod", "Host Namespace", tPod.Namespace, "Host Name", tPod.Name, "Virtual Namespace", pod.Namespace, "Virtual Name", "env", pod.Name, pod.Spec.Containers[0].Env) @@ -475,6 +475,7 @@ func (p *Provider) syncConfigmap(ctx context.Context, podNamespace string, confi // syncSecret will add the secret object to the queue of the syncer controller to be synced to the host cluster func (p *Provider) syncSecret(ctx context.Context, podNamespace string, secretName string, optional bool) error { + p.logger.Infow("Syncing secret", "Name", secretName, "Namespace", podNamespace, "optional", optional) var secret corev1.Secret nsName := types.NamespacedName{ Namespace: podNamespace, @@ -707,7 +708,13 @@ func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) { // configureNetworking will inject network information to each pod to connect them to the // virtual cluster api server, as well as confiugre DNS information to connect them to the // synced coredns on the host cluster. 
-func (p *Provider) configureNetworking(podName, podNamespace string, pod *corev1.Pod) { +func (p *Provider) configureNetworking(podName, podNamespace string, pod *corev1.Pod, serverIP string) { + // inject serverIP to hostalias for the pod + KubernetesHostAlias := corev1.HostAlias{ + IP: serverIP, + Hostnames: []string{"kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local"}, + } + pod.Spec.HostAliases = append(pod.Spec.HostAliases, KubernetesHostAlias) // inject networking information to the pod's environment variables for i := range pod.Spec.Containers { pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, @@ -733,6 +740,31 @@ func (p *Provider) configureNetworking(podName, podNamespace string, pod *corev1 }, ) } + // handle init contianers as well + for i := range pod.Spec.InitContainers { + pod.Spec.InitContainers[i].Env = append(pod.Spec.InitContainers[i].Env, + corev1.EnvVar{ + Name: "KUBERNETES_PORT_443_TCP", + Value: "tcp://" + p.serverIP + ":6443", + }, + corev1.EnvVar{ + Name: "KUBERNETES_PORT", + Value: "tcp://" + p.serverIP + ":6443", + }, + corev1.EnvVar{ + Name: "KUBERNETES_PORT_443_TCP_ADDR", + Value: p.serverIP, + }, + corev1.EnvVar{ + Name: "KUBERNETES_SERVICE_HOST", + Value: p.serverIP, + }, + corev1.EnvVar{ + Name: "KUBERNETES_SERVICE_PORT", + Value: "6443", + }, + ) + } // injecting cluster DNS IP to the pods except for coredns pod if !strings.HasPrefix(podName, "coredns") { pod.Spec.DNSPolicy = corev1.DNSNone diff --git a/k3k-kubelet/provider/token.go b/k3k-kubelet/provider/token.go index 4379b89d..29d9a06f 100644 --- a/k3k-kubelet/provider/token.go +++ b/k3k-kubelet/provider/token.go @@ -23,6 +23,12 @@ const ( func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) error { p.logger.Infow("transforming token", "Pod", pod.Name, "Namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName) + // skip this process if 
the kube-api-access is already removed from the pod + // this is needed in case users already adds their own custom tokens like in rancher imported clusters + if !isKubeAccessVolumeFound(pod) { + return nil + } + virtualSecretName := k3kcontroller.SafeConcatNameWithPrefix(pod.Spec.ServiceAccountName, "token") virtualSecret := virtualSecret(virtualSecretName, pod.Namespace, pod.Spec.ServiceAccountName) if err := p.VirtualClient.Create(ctx, virtualSecret); err != nil { @@ -84,12 +90,30 @@ func (p *Provider) translateToken(pod *corev1.Pod, hostSecretName string) { addKubeAccessVolume(pod, hostSecretName) } +func isKubeAccessVolumeFound(pod *corev1.Pod) bool { + for _, volume := range pod.Spec.Volumes { + if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) { + return true + } + } + return false +} + func removeKubeAccessVolume(pod *corev1.Pod) { for i, volume := range pod.Spec.Volumes { if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) { pod.Spec.Volumes = append(pod.Spec.Volumes[:i], pod.Spec.Volumes[i+1:]...) } } + // init containers + for i, container := range pod.Spec.InitContainers { + for j, mountPath := range container.VolumeMounts { + if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) { + pod.Spec.InitContainers[i].VolumeMounts = append(pod.Spec.InitContainers[i].VolumeMounts[:j], pod.Spec.InitContainers[i].VolumeMounts[j+1:]...) 
+ } + } + } + for i, container := range pod.Spec.Containers { for j, mountPath := range container.VolumeMounts { if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) { @@ -109,6 +133,14 @@ func addKubeAccessVolume(pod *corev1.Pod, hostSecretName string) { }, }, }) + + for i := range pod.Spec.InitContainers { + pod.Spec.InitContainers[i].VolumeMounts = append(pod.Spec.InitContainers[i].VolumeMounts, corev1.VolumeMount{ + Name: tokenVolumeName, + MountPath: serviceAccountTokenMountPath, + }) + } + for i := range pod.Spec.Containers { pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts, corev1.VolumeMount{ Name: tokenVolumeName, diff --git a/main.go b/main.go index f3bb6029..cd99322c 100644 --- a/main.go +++ b/main.go @@ -3,6 +3,7 @@ package main import ( "context" + "errors" "fmt" "os" @@ -15,6 +16,7 @@ import ( "github.com/rancher/k3k/pkg/log" "github.com/urfave/cli" "go.uber.org/zap" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/clientcmd" @@ -24,13 +26,14 @@ import ( ) var ( - scheme = runtime.NewScheme() - clusterCIDR string - sharedAgentImage string - kubeconfig string - debug bool - logger *log.Logger - flags = []cli.Flag{ + scheme = runtime.NewScheme() + clusterCIDR string + sharedAgentImage string + sharedAgentImagePullPolicy string + kubeconfig string + debug bool + logger *log.Logger + flags = []cli.Flag{ cli.StringFlag{ Name: "kubeconfig", EnvVar: "KUBECONFIG", @@ -50,6 +53,12 @@ var ( Value: "rancher/k3k:k3k-kubelet-dev", Destination: &sharedAgentImage, }, + cli.StringFlag{ + Name: "shared-agent-pull-policy", + EnvVar: "SHARED_AGENT_PULL_POLICY", + Usage: "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never", + Destination: &sharedAgentImagePullPolicy, + }, cli.BoolFlag{ Name: "debug", EnvVar: "DEBUG", @@ -70,6 +79,9 @@ func main() { app.Action = run app.Version = buildinfo.Version app.Before = func(clx 
*cli.Context) error { + if err := validate(); err != nil { + return err + } logger = log.New(debug) return nil } @@ -98,7 +110,7 @@ func run(clx *cli.Context) error { ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1)))) logger.Info("adding cluster controller") - if err := cluster.Add(ctx, mgr, sharedAgentImage, logger); err != nil { + if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy, logger); err != nil { return fmt.Errorf("failed to add the new cluster controller: %v", err) } @@ -125,3 +137,14 @@ func run(clx *cli.Context) error { return nil } + +func validate() error { + if sharedAgentImagePullPolicy != "" { + if sharedAgentImagePullPolicy != string(v1.PullAlways) && + sharedAgentImagePullPolicy != string(v1.PullIfNotPresent) && + sharedAgentImagePullPolicy != string(v1.PullNever) { + return errors.New("invalid value for shared agent image policy") + } + } + return nil +} diff --git a/ops/test b/ops/test index 08662432..19b2d792 100755 --- a/ops/test +++ b/ops/test @@ -3,5 +3,7 @@ set -e cd $(dirname $0)/.. -echo Running tests -go test -cover -tags=test ./... +if [ -z ${SKIP_TESTS} ]; then + echo Running tests + go test -cover -tags=test ./... 
+fi diff --git a/pkg/controller/cluster/agent/agent.go b/pkg/controller/cluster/agent/agent.go index b81df880..545f34af 100644 --- a/pkg/controller/cluster/agent/agent.go +++ b/pkg/controller/cluster/agent/agent.go @@ -16,11 +16,11 @@ type Agent interface { Resources() ([]ctrlruntimeclient.Object, error) } -func New(cluster *v1alpha1.Cluster, serviceIP, sharedAgentImage, token string) Agent { +func New(cluster *v1alpha1.Cluster, serviceIP, sharedAgentImage, sharedAgentImagePullPolicy, token string) Agent { if cluster.Spec.Mode == VirtualNodeMode { return NewVirtualAgent(cluster, serviceIP, token) } - return NewSharedAgent(cluster, serviceIP, sharedAgentImage, token) + return NewSharedAgent(cluster, serviceIP, sharedAgentImage, sharedAgentImagePullPolicy, token) } func configSecretName(clusterName string) string { diff --git a/pkg/controller/cluster/agent/shared.go b/pkg/controller/cluster/agent/shared.go index 76b0743e..588bb259 100644 --- a/pkg/controller/cluster/agent/shared.go +++ b/pkg/controller/cluster/agent/shared.go @@ -26,18 +26,20 @@ const ( ) type SharedAgent struct { - cluster *v1alpha1.Cluster - serviceIP string - sharedAgentImage string - token string + cluster *v1alpha1.Cluster + serviceIP string + image string + imagePullPolicy string + token string } -func NewSharedAgent(cluster *v1alpha1.Cluster, serviceIP, sharedAgentImage, token string) Agent { +func NewSharedAgent(cluster *v1alpha1.Cluster, serviceIP, image, imagePullPolicy, token string) Agent { return &SharedAgent{ - cluster: cluster, - serviceIP: serviceIP, - sharedAgentImage: sharedAgentImage, - token: token, + cluster: cluster, + serviceIP: serviceIP, + image: image, + imagePullPolicy: imagePullPolicy, + token: token, } } @@ -60,13 +62,18 @@ func (s *SharedAgent) Config() ctrlruntimeclient.Object { } func sharedAgentData(cluster *v1alpha1.Cluster, token, nodeName, ip string) string { + version := cluster.Spec.Version + if cluster.Spec.Version == "" { + version = cluster.Status.HostVersion 
+ } return fmt.Sprintf(`clusterName: %s clusterNamespace: %s nodeName: %s agentHostname: %s serverIP: %s -token: %s`, - cluster.Name, cluster.Namespace, nodeName, nodeName, ip, token) +token: %s +version: %s`, + cluster.Name, cluster.Namespace, nodeName, nodeName, ip, token, version) } func (s *SharedAgent) Resources() ([]ctrlruntimeclient.Object, error) { @@ -161,8 +168,9 @@ func (s *SharedAgent) podSpec() v1.PodSpec { }, Containers: []v1.Container{ { - Name: s.Name(), - Image: s.sharedAgentImage, + Name: s.Name(), + Image: s.image, + ImagePullPolicy: v1.PullPolicy(s.imagePullPolicy), Resources: v1.ResourceRequirements{ Limits: limit, }, diff --git a/pkg/controller/cluster/cluster.go b/pkg/controller/cluster/cluster.go index 08adab1e..787d6f13 100644 --- a/pkg/controller/cluster/cluster.go +++ b/pkg/controller/cluster/cluster.go @@ -45,15 +45,16 @@ const ( ) type ClusterReconciler struct { - DiscoveryClient *discovery.DiscoveryClient - Client ctrlruntimeclient.Client - Scheme *runtime.Scheme - SharedAgentImage string - logger *log.Logger + DiscoveryClient *discovery.DiscoveryClient + Client ctrlruntimeclient.Client + Scheme *runtime.Scheme + SharedAgentImage string + SharedAgentImagePullPolicy string + logger *log.Logger } // Add adds a new controller to the manager -func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage string, logger *log.Logger) error { +func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgentImagePullPolicy string, logger *log.Logger) error { discoveryClient, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig()) if err != nil { @@ -62,11 +63,12 @@ func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage string, logg // initialize a new Reconciler reconciler := ClusterReconciler{ - DiscoveryClient: discoveryClient, - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - SharedAgentImage: sharedAgentImage, - logger: logger.Named(clusterController), + DiscoveryClient: discoveryClient, + 
Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + SharedAgentImage: sharedAgentImage, + SharedAgentImagePullPolicy: sharedAgentImagePullPolicy, + logger: logger.Named(clusterController), } return ctrl.NewControllerManagedBy(mgr). @@ -364,7 +366,7 @@ func (c *ClusterReconciler) unbindNodeProxyClusterRole(ctx context.Context, clus } func (c *ClusterReconciler) agent(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error { - agent := agent.New(cluster, serviceIP, c.SharedAgentImage, token) + agent := agent.New(cluster, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token) agentsConfig := agent.Config() agentResources, err := agent.Resources() if err != nil { diff --git a/pkg/controller/cluster/cluster_suite_test.go b/pkg/controller/cluster/cluster_suite_test.go index 94ff9bd1..076ed7e8 100644 --- a/pkg/controller/cluster/cluster_suite_test.go +++ b/pkg/controller/cluster/cluster_suite_test.go @@ -57,7 +57,7 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) ctx, cancel = context.WithCancel(context.Background()) - err = cluster.Add(ctx, mgr, "", &log.Logger{SugaredLogger: zap.NewNop().Sugar()}) + err = cluster.Add(ctx, mgr, "", "", &log.Logger{SugaredLogger: zap.NewNop().Sugar()}) Expect(err).NotTo(HaveOccurred()) go func() { diff --git a/pkg/controller/cluster/server/ingress.go b/pkg/controller/cluster/server/ingress.go index c29de26f..ce526963 100644 --- a/pkg/controller/cluster/server/ingress.go +++ b/pkg/controller/cluster/server/ingress.go @@ -16,8 +16,9 @@ const ( nginxBackendProtocolAnnotation = "nginx.ingress.kubernetes.io/backend-protocol" nginxSSLRedirectAnnotation = "nginx.ingress.kubernetes.io/ssl-redirect" - serverPort = 6443 - etcdPort = 2379 + servicePort = 443 + serverPort = 6443 + etcdPort = 2379 ) func (s *Server) Ingress(ctx context.Context, client client.Client) (*networkingv1.Ingress, error) { diff --git a/pkg/controller/cluster/server/service.go b/pkg/controller/cluster/server/service.go 
index 98a05605..a25b371d 100644 --- a/pkg/controller/cluster/server/service.go +++ b/pkg/controller/cluster/server/service.go @@ -5,6 +5,7 @@ import ( "github.com/rancher/k3k/pkg/controller" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) func (s *Server) Service(cluster *v1alpha1.Cluster) *v1.Service { @@ -38,6 +39,12 @@ func (s *Server) Service(cluster *v1alpha1.Cluster) *v1.Service { Protocol: v1.ProtocolTCP, Port: serverPort, }, + { + Name: "k3s-service-port", + Protocol: v1.ProtocolTCP, + Port: servicePort, + TargetPort: intstr.FromInt(serverPort), + }, { Name: "k3s-etcd-port", Protocol: v1.ProtocolTCP, @@ -71,6 +78,12 @@ func (s *Server) StatefulServerService() *v1.Service { Protocol: v1.ProtocolTCP, Port: serverPort, }, + { + Name: "k3s-service-port", + Protocol: v1.ProtocolTCP, + Port: servicePort, + TargetPort: intstr.FromInt(serverPort), + }, { Name: "k3s-etcd-port", Protocol: v1.ProtocolTCP,