diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 69d738c0..834bde0e 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -99,3 +99,10 @@ jobs:
with:
name: k3s-logs
path: /tmp/k3s.log
+
+ - name: Archive k3k logs
+ uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: k3k-logs
+ path: /tmp/k3k.log
diff --git a/charts/k3k/crds/k3k.io_clusters.yaml b/charts/k3k/crds/k3k.io_clusters.yaml
index 8a9e2d95..69267f1e 100644
--- a/charts/k3k/crds/k3k.io_clusters.yaml
+++ b/charts/k3k/crds/k3k.io_clusters.yaml
@@ -172,6 +172,8 @@ spec:
In "shared" mode the node selector will be applied also to the workloads.
type: object
persistence:
+ default:
+ type: dynamic
description: |-
Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data
persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field.
@@ -181,8 +183,8 @@ spec:
storageRequestSize:
type: string
type:
- default: ephemeral
- description: Type can be ephemeral, static, dynamic
+ default: dynamic
+ description: PersistenceMode is the storage mode of a Cluster.
type: string
required:
- type
@@ -255,8 +257,8 @@ spec:
storageRequestSize:
type: string
type:
- default: ephemeral
- description: Type can be ephemeral, static, dynamic
+ default: dynamic
+ description: PersistenceMode is the storage mode of a Cluster.
type: string
required:
- type
diff --git a/cli/cmds/cluster/create.go b/cli/cmds/cluster/create.go
index 5425303e..7a51889d 100644
--- a/cli/cmds/cluster/create.go
+++ b/cli/cmds/cluster/create.go
@@ -182,13 +182,15 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
AgentArgs: config.agentArgs.Value(),
Version: config.version,
Mode: v1alpha1.ClusterMode(config.mode),
- Persistence: &v1alpha1.PersistenceConfig{
- Type: config.persistenceType,
- StorageClassName: config.storageClassName,
+ Persistence: v1alpha1.PersistenceConfig{
+ Type: v1alpha1.PersistenceMode(config.persistenceType),
+ StorageClassName: ptr.To(config.storageClassName),
},
},
}
-
+ if config.storageClassName == "" {
+ cluster.Spec.Persistence.StorageClassName = nil
+ }
if config.token != "" {
cluster.Spec.TokenSecretRef = &v1.SecretReference{
Name: k3kcluster.TokenSecretName(name),
diff --git a/cli/cmds/cluster/create_flags.go b/cli/cmds/cluster/create_flags.go
index abbc6f0f..e7bbb012 100644
--- a/cli/cmds/cluster/create_flags.go
+++ b/cli/cmds/cluster/create_flags.go
@@ -4,7 +4,6 @@ import (
"errors"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
- "github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/urfave/cli/v2"
)
@@ -45,11 +44,11 @@ func NewCreateFlags(config *CreateConfig) []cli.Flag {
&cli.StringFlag{
Name: "persistence-type",
Usage: "persistence mode for the nodes (ephemeral, static, dynamic)",
- Value: server.EphemeralNodesType,
+ Value: string(v1alpha1.DynamicNodesType),
Destination: &config.persistenceType,
Action: func(ctx *cli.Context, value string) error {
- switch value {
- case server.EphemeralNodesType, server.DynamicNodesType:
+ switch v1alpha1.PersistenceMode(value) {
+ case v1alpha1.EphemeralNodeType, v1alpha1.DynamicNodesType:
return nil
default:
return errors.New(`persistence-type should be one of "ephemeral", "static" or "dynamic"`)
diff --git a/docs/crds/crd-docs.md b/docs/crds/crd-docs.md
index 7e8e428a..6d05534b 100644
--- a/docs/crds/crd-docs.md
+++ b/docs/crds/crd-docs.md
@@ -126,7 +126,7 @@ _Appears in:_
| `tlsSANs` _string array_ | TLSSANs are the subjectAlternativeNames for the certificate the K3s server will use. | | |
| `addons` _[Addon](#addon) array_ | Addons is a list of secrets containing raw YAML which will be deployed in the virtual K3k cluster on startup. | | |
| `mode` _[ClusterMode](#clustermode)_ | Mode is the cluster provisioning mode which can be either "shared" or "virtual". Defaults to "shared" | shared | Enum: [shared virtual]
|
-| `persistence` _[PersistenceConfig](#persistenceconfig)_ | Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data
persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field. | | |
+| `persistence` _[PersistenceConfig](#persistenceconfig)_ | Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data
persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field. | \{ type:dynamic \} | |
| `expose` _[ExposeConfig](#exposeconfig)_ | Expose contains options for exposing the apiserver inside/outside of the cluster. By default, this is only exposed as a
clusterIP which is relatively secure, but difficult to access outside of the cluster. | | |
@@ -215,10 +215,23 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `type` _string_ | Type can be ephemeral, static, dynamic | ephemeral | |
+| `type` _[PersistenceMode](#persistencemode)_ | | dynamic | |
| `storageClassName` _string_ | | | |
| `storageRequestSize` _string_ | | | |
+#### PersistenceMode
+
+_Underlying type:_ _string_
+
+PersistenceMode is the storage mode of a Cluster.
+
+
+
+_Appears in:_
+- [PersistenceConfig](#persistenceconfig)
+
+
+
diff --git a/go.mod b/go.mod
index bc48048e..9c4ca902 100644
--- a/go.mod
+++ b/go.mod
@@ -22,9 +22,9 @@ require (
github.com/testcontainers/testcontainers-go/modules/k3s v0.35.0
github.com/urfave/cli/v2 v2.27.5
github.com/virtual-kubelet/virtual-kubelet v1.11.0
- go.etcd.io/etcd/api/v3 v3.5.14
- go.etcd.io/etcd/client/v3 v3.5.14
- go.uber.org/zap v1.26.0
+ go.etcd.io/etcd/api/v3 v3.5.16
+ go.etcd.io/etcd/client/v3 v3.5.16
+ go.uber.org/zap v1.27.0
gopkg.in/yaml.v2 v2.4.0
helm.sh/helm/v3 v3.14.4
k8s.io/api v0.29.11
@@ -32,7 +32,7 @@ require (
k8s.io/apiserver v0.29.11
k8s.io/client-go v0.29.11
k8s.io/component-base v0.29.11
- k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
+ k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/controller-runtime v0.17.5
)
@@ -93,7 +93,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
- github.com/google/btree v1.0.1 // indirect
+ github.com/google/btree v1.1.3 // indirect
github.com/google/cel-go v0.22.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
@@ -110,7 +110,7 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
- github.com/imdario/mergo v0.3.16 // indirect
+ github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmoiron/sqlx v1.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
@@ -204,12 +204,12 @@ require (
k8s.io/apiextensions-apiserver v0.29.11 // indirect
k8s.io/cli-runtime v0.29.11 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
- k8s.io/kms v0.30.3 // indirect
+ k8s.io/kms v0.29.11 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
k8s.io/kubectl v0.29.11 // indirect
oras.land/oras-go v1.2.5 // indirect
- sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.4 // indirect
- sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
+ sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/kustomize/api v0.18.0 // indirect
sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.3 // indirect
diff --git a/go.sum b/go.sum
index 54654247..9e3d9a25 100644
--- a/go.sum
+++ b/go.sum
@@ -876,8 +876,8 @@ github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k
github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ=
github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
@@ -990,8 +990,8 @@ github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
-github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
@@ -1245,14 +1245,14 @@ github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
-go.etcd.io/etcd/api/v3 v3.5.14 h1:vHObSCxyB9zlF60w7qzAdTcGaglbJOpSj1Xj9+WGxq0=
-go.etcd.io/etcd/api/v3 v3.5.14/go.mod h1:BmtWcRlQvwa1h3G2jvKYwIQy4PkHlDej5t7uLMUdJUU=
+go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0=
+go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q=
go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=
go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
-go.etcd.io/etcd/client/v3 v3.5.14 h1:CWfRs4FDaDoSz81giL7zPpZH2Z35tbOrAJkkjMqOupg=
-go.etcd.io/etcd/client/v3 v3.5.14/go.mod h1:k3XfdV/VIHy/97rqWjoUzrj9tk7GgJGH9J8L4dNXmAk=
+go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE=
+go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs=
go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
@@ -1295,8 +1295,8 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
-go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -1988,6 +1988,7 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
@@ -2020,14 +2021,14 @@ k8s.io/component-base v0.29.11 h1:H3GJIyDNPrscvXGP6wx+9gApcwwmrUd0YtCGp5BcHBA=
k8s.io/component-base v0.29.11/go.mod h1:0qu1WStER4wu5o8RMRndZUWPVcPH1XBy/QQiDcD6lew=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kms v0.30.3 h1:NLg+oN45S2Y3U0WiLRzbS61AY/XrS5JBMZp531Z+Pho=
-k8s.io/kms v0.30.3/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4=
+k8s.io/kms v0.29.11 h1:pylaiDJhgfqczvcjMDPI89+VH0OVoGQhscPH1VbBzQE=
+k8s.io/kms v0.29.11/go.mod h1:vWVImKkJd+1BQY4tBwdfSwjQBiLrnbNtHADcDEDQFtk=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
k8s.io/kubectl v0.29.11 h1:rxflwYQ1kmeEUVPWNevKLTtWjNfLrFSzLRZJoPolguU=
k8s.io/kubectl v0.29.11/go.mod h1:b6IhZyA/zp7q6kbiYfm5B3xwVPodVUvpfN6VG0LwA30=
-k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
-k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
@@ -2068,12 +2069,12 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.4 h1:T1wCDiawfsKNO9v3H/IwVWICPBe6VGifZvWoUuxckUA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.4/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/controller-runtime v0.17.5 h1:1FI9Lm7NiOOmBsgTV36/s2XrEFXnO2C4sbg/Zme72Rw=
sigs.k8s.io/controller-runtime v0.17.5/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo=
sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U=
sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E=
diff --git a/pkg/apis/k3k.io/v1alpha1/types.go b/pkg/apis/k3k.io/v1alpha1/types.go
index 3562e8b2..f9024c3a 100644
--- a/pkg/apis/k3k.io/v1alpha1/types.go
+++ b/pkg/apis/k3k.io/v1alpha1/types.go
@@ -93,7 +93,8 @@ type ClusterSpec struct {
// Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data
// persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field.
- Persistence *PersistenceConfig `json:"persistence,omitempty"`
+ // +kubebuilder:default={type: "dynamic"}
+ Persistence PersistenceConfig `json:"persistence,omitempty"`
// Expose contains options for exposing the apiserver inside/outside of the cluster. By default, this is only exposed as a
// clusterIP which is relatively secure, but difficult to access outside of the cluster.
@@ -107,9 +108,16 @@ type ClusterSpec struct {
// ClusterMode is the possible provisioning mode of a Cluster.
type ClusterMode string
+// PersistenceMode is the storage mode of a Cluster.
+//
+// +kubebuilder:default="dynamic"
+type PersistenceMode string
+
const (
SharedClusterMode = ClusterMode("shared")
VirtualClusterMode = ClusterMode("virtual")
+ EphemeralNodeType = PersistenceMode("ephemeral")
+ DynamicNodesType = PersistenceMode("dynamic")
)
type ClusterLimit struct {
@@ -134,11 +142,10 @@ type ClusterList struct {
}
type PersistenceConfig struct {
- // Type can be ephemeral, static, dynamic
- // +kubebuilder:default="ephemeral"
- Type string `json:"type"`
- StorageClassName string `json:"storageClassName,omitempty"`
- StorageRequestSize string `json:"storageRequestSize,omitempty"`
+ // +kubebuilder:default="dynamic"
+ Type PersistenceMode `json:"type"`
+ StorageClassName *string `json:"storageClassName,omitempty"`
+ StorageRequestSize string `json:"storageRequestSize,omitempty"`
}
type ExposeConfig struct {
@@ -177,10 +184,10 @@ type NodePortConfig struct {
}
type ClusterStatus struct {
- HostVersion string `json:"hostVersion,omitempty"`
- ClusterCIDR string `json:"clusterCIDR,omitempty"`
- ServiceCIDR string `json:"serviceCIDR,omitempty"`
- ClusterDNS string `json:"clusterDNS,omitempty"`
- TLSSANs []string `json:"tlsSANs,omitempty"`
- Persistence *PersistenceConfig `json:"persistence,omitempty"`
+ HostVersion string `json:"hostVersion,omitempty"`
+ ClusterCIDR string `json:"clusterCIDR,omitempty"`
+ ServiceCIDR string `json:"serviceCIDR,omitempty"`
+ ClusterDNS string `json:"clusterDNS,omitempty"`
+ TLSSANs []string `json:"tlsSANs,omitempty"`
+ Persistence PersistenceConfig `json:"persistence,omitempty"`
}
diff --git a/pkg/apis/k3k.io/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/k3k.io/v1alpha1/zz_generated.deepcopy.go
index c11f54a4..658d6a40 100644
--- a/pkg/apis/k3k.io/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/k3k.io/v1alpha1/zz_generated.deepcopy.go
@@ -201,6 +201,16 @@ func (in *ClusterSetSpec) DeepCopyInto(out *ClusterSetSpec) {
(*out)[key] = val
}
}
+ if in.AllowedNodeTypes != nil {
+ in, out := &in.AllowedNodeTypes, &out.AllowedNodeTypes
+ *out = make([]ClusterMode, len(*in))
+ copy(*out, *in)
+ }
+ if in.PodSecurityAdmissionLevel != nil {
+ in, out := &in.PodSecurityAdmissionLevel, &out.PodSecurityAdmissionLevel
+ *out = new(PodSecurityAdmissionLevel)
+ **out = **in
+ }
return
}
@@ -262,6 +272,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(ClusterLimit)
(*in).DeepCopyInto(*out)
}
+ if in.TokenSecretRef != nil {
+ in, out := &in.TokenSecretRef, &out.TokenSecretRef
+ *out = new(v1.SecretReference)
+ **out = **in
+ }
if in.ServerArgs != nil {
in, out := &in.ServerArgs, &out.ServerArgs
*out = make([]string, len(*in))
@@ -282,11 +297,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = make([]Addon, len(*in))
copy(*out, *in)
}
- if in.Persistence != nil {
- in, out := &in.Persistence, &out.Persistence
- *out = new(PersistenceConfig)
- **out = **in
- }
+ in.Persistence.DeepCopyInto(&out.Persistence)
if in.Expose != nil {
in, out := &in.Expose, &out.Expose
*out = new(ExposeConfig)
@@ -313,11 +324,7 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = make([]string, len(*in))
copy(*out, *in)
}
- if in.Persistence != nil {
- in, out := &in.Persistence, &out.Persistence
- *out = new(PersistenceConfig)
- **out = **in
- }
+ in.Persistence.DeepCopyInto(&out.Persistence)
return
}
@@ -337,7 +344,7 @@ func (in *ExposeConfig) DeepCopyInto(out *ExposeConfig) {
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = new(IngressConfig)
- **out = **in
+ (*in).DeepCopyInto(*out)
}
if in.LoadBalancer != nil {
in, out := &in.LoadBalancer, &out.LoadBalancer
@@ -347,7 +354,7 @@ func (in *ExposeConfig) DeepCopyInto(out *ExposeConfig) {
if in.NodePort != nil {
in, out := &in.NodePort, &out.NodePort
*out = new(NodePortConfig)
- **out = **in
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -365,6 +372,13 @@ func (in *ExposeConfig) DeepCopy() *ExposeConfig {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressConfig) DeepCopyInto(out *IngressConfig) {
*out = *in
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
return
}
@@ -397,6 +411,21 @@ func (in *LoadBalancerConfig) DeepCopy() *LoadBalancerConfig {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodePortConfig) DeepCopyInto(out *NodePortConfig) {
*out = *in
+ if in.ServerPort != nil {
+ in, out := &in.ServerPort, &out.ServerPort
+ *out = new(int32)
+ **out = **in
+ }
+ if in.ServicePort != nil {
+ in, out := &in.ServicePort, &out.ServicePort
+ *out = new(int32)
+ **out = **in
+ }
+ if in.ETCDPort != nil {
+ in, out := &in.ETCDPort, &out.ETCDPort
+ *out = new(int32)
+ **out = **in
+ }
return
}
@@ -413,6 +442,11 @@ func (in *NodePortConfig) DeepCopy() *NodePortConfig {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistenceConfig) DeepCopyInto(out *PersistenceConfig) {
*out = *in
+ if in.StorageClassName != nil {
+ in, out := &in.StorageClassName, &out.StorageClassName
+ *out = new(string)
+ **out = **in
+ }
return
}
diff --git a/pkg/controller/cluster/cluster.go b/pkg/controller/cluster/cluster.go
index dcefbecc..f6f3dad4 100644
--- a/pkg/controller/cluster/cluster.go
+++ b/pkg/controller/cluster/cluster.go
@@ -162,12 +162,10 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
s := server.New(cluster, c.Client, token, string(cluster.Spec.Mode))
- if cluster.Spec.Persistence != nil {
- cluster.Status.Persistence = cluster.Spec.Persistence
- if cluster.Spec.Persistence.StorageRequestSize == "" {
- // default to 1G of request size
- cluster.Status.Persistence.StorageRequestSize = defaultStoragePersistentSize
- }
+ cluster.Status.Persistence = cluster.Spec.Persistence
+ if cluster.Spec.Persistence.StorageRequestSize == "" {
+ // default to 1G of request size
+ cluster.Status.Persistence.StorageRequestSize = defaultStoragePersistentSize
}
cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
@@ -190,7 +188,6 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
return err
}
- // creating statefulsets in case the user chose a persistence type other than ephemeral
if err := c.server(ctx, cluster, s); err != nil {
return err
}
@@ -207,11 +204,7 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
return err
}
- if err := c.bindNodeProxyClusterRole(ctx, cluster); err != nil {
- return err
- }
-
- return nil
+ return c.bindNodeProxyClusterRole(ctx, cluster)
}
// ensureBootstrapSecret will create or update the Secret containing the bootstrap data from the k3s server
diff --git a/pkg/controller/cluster/cluster_test.go b/pkg/controller/cluster/cluster_test.go
index 133c9e34..14196d0f 100644
--- a/pkg/controller/cluster/cluster_test.go
+++ b/pkg/controller/cluster/cluster_test.go
@@ -40,7 +40,7 @@ var _ = Describe("Cluster Controller", func() {
BeforeEach(func() {
cluster = &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
- GenerateName: "clusterset-",
+ GenerateName: "cluster-",
Namespace: namespace,
},
}
@@ -54,6 +54,8 @@ var _ = Describe("Cluster Controller", func() {
Expect(cluster.Spec.Agents).To(Equal(ptr.To[int32](0)))
Expect(cluster.Spec.Servers).To(Equal(ptr.To[int32](1)))
Expect(cluster.Spec.Version).To(BeEmpty())
+ // FIXME: re-enable this assertion
+ //Expect(cluster.Spec.Persistence.Type).To(Equal(v1alpha1.DynamicNodesType))
serverVersion, err := k8s.DiscoveryClient.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
diff --git a/pkg/controller/cluster/server/server.go b/pkg/controller/cluster/server/server.go
index b1fe2c9a..a3763aff 100644
--- a/pkg/controller/cluster/server/server.go
+++ b/pkg/controller/cluster/server/server.go
@@ -1,8 +1,10 @@
package server
import (
+ "bytes"
"context"
"strings"
+ "text/template"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller"
@@ -23,9 +25,7 @@ const (
configName = "server-config"
initConfigName = "init-server-config"
- ServerPort = 6443
- EphemeralNodesType = "ephemeral"
- DynamicNodesType = "dynamic"
+ ServerPort = 6443
)
// Server
@@ -45,7 +45,7 @@ func New(cluster *v1alpha1.Cluster, client client.Client, token, mode string) *S
}
}
-func (s *Server) podSpec(image, name string, persistent bool) v1.PodSpec {
+func (s *Server) podSpec(image, name string, persistent bool, startupCmd string) v1.PodSpec {
var limit v1.ResourceList
if s.cluster.Spec.Limit != nil && s.cluster.Spec.Limit.ServerLimit != nil {
limit = s.cluster.Spec.Limit.ServerLimit
@@ -106,6 +106,12 @@ func (s *Server) podSpec(image, name string, persistent bool) v1.PodSpec {
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
+ {
+ Name: "varlibkubelet",
+ VolumeSource: v1.VolumeSource{
+ EmptyDir: &v1.EmptyDirVolumeSource{},
+ },
+ },
},
Containers: []v1.Container{
{
@@ -123,17 +129,14 @@ func (s *Server) podSpec(image, name string, persistent bool) v1.PodSpec {
},
},
},
- },
- Command: []string{
- "/bin/sh",
- "-c",
- `
- if [ ${POD_NAME: -1} == 0 ]; then
- /bin/k3s server --config /opt/rancher/k3s/init/config.yaml ` + strings.Join(s.cluster.Spec.ServerArgs, " ") + `
- else
- /bin/k3s server --config /opt/rancher/k3s/server/config.yaml ` + strings.Join(s.cluster.Spec.ServerArgs, " ") + `
- fi
- `,
+ {
+ Name: "POD_IP",
+ ValueFrom: &v1.EnvVarSource{
+ FieldRef: &v1.ObjectFieldSelector{
+ FieldPath: "status.podIP",
+ },
+ },
+ },
},
VolumeMounts: []v1.VolumeMount{
{
@@ -181,15 +184,15 @@ func (s *Server) podSpec(image, name string, persistent bool) v1.PodSpec {
},
}
+ cmd := []string{
+ "/bin/sh",
+ "-c",
+ startupCmd,
+ }
+
+ podSpec.Containers[0].Command = cmd
if !persistent {
podSpec.Volumes = append(podSpec.Volumes, v1.Volume{
-
- Name: "varlibkubelet",
- VolumeSource: v1.VolumeSource{
- EmptyDir: &v1.EmptyDirVolumeSource{},
- },
- }, v1.Volume{
-
Name: "varlibrancherk3s",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
@@ -221,7 +224,8 @@ func (s *Server) podSpec(image, name string, persistent bool) v1.PodSpec {
func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error) {
var (
replicas int32
- pvClaims []v1.PersistentVolumeClaim
+ pvClaim v1.PersistentVolumeClaim
+ err error
persistent bool
)
image := controller.K3SImage(s.cluster)
@@ -229,48 +233,9 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
replicas = *s.cluster.Spec.Servers
- if s.cluster.Spec.Persistence != nil && s.cluster.Spec.Persistence.Type != EphemeralNodesType {
+ if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicNodesType {
persistent = true
- pvClaims = []v1.PersistentVolumeClaim{
- {
- TypeMeta: metav1.TypeMeta{
- Kind: "PersistentVolumeClaim",
- APIVersion: "v1",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "varlibrancherk3s",
- Namespace: s.cluster.Namespace,
- },
- Spec: v1.PersistentVolumeClaimSpec{
- AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
- StorageClassName: &s.cluster.Spec.Persistence.StorageClassName,
- Resources: v1.VolumeResourceRequirements{
- Requests: v1.ResourceList{
- "storage": resource.MustParse(s.cluster.Spec.Persistence.StorageRequestSize),
- },
- },
- },
- },
- {
- TypeMeta: metav1.TypeMeta{
- Kind: "PersistentVolumeClaim",
- APIVersion: "v1",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "varlibkubelet",
- Namespace: s.cluster.Namespace,
- },
- Spec: v1.PersistentVolumeClaimSpec{
- Resources: v1.VolumeResourceRequirements{
- Requests: v1.ResourceList{
- "storage": resource.MustParse(s.cluster.Spec.Persistence.StorageRequestSize),
- },
- },
- AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
- StorageClassName: &s.cluster.Spec.Persistence.StorageClassName,
- },
- },
- }
+ pvClaim = s.setupDynamicPersistence()
}
var volumes []v1.Volume
@@ -337,11 +302,15 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
},
}
- podSpec := s.podSpec(image, name, persistent)
+ startupCommand, err := s.setupStartCommand()
+ if err != nil {
+ return nil, err
+ }
+ podSpec := s.podSpec(image, name, persistent, startupCommand)
podSpec.Volumes = append(podSpec.Volumes, volumes...)
podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, volumeMounts...)
- return &apps.StatefulSet{
+ ss := &apps.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
APIVersion: "apps/v1",
@@ -352,10 +321,9 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
Labels: selector.MatchLabels,
},
Spec: apps.StatefulSetSpec{
- Replicas: &replicas,
- ServiceName: headlessServiceName(s.cluster.Name),
- Selector: &selector,
- VolumeClaimTemplates: pvClaims,
+ Replicas: &replicas,
+ ServiceName: headlessServiceName(s.cluster.Name),
+ Selector: &selector,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: selector.MatchLabels,
@@ -363,5 +331,55 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
Spec: podSpec,
},
},
- }, nil
+ }
+ if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicNodesType {
+ ss.Spec.VolumeClaimTemplates = []v1.PersistentVolumeClaim{pvClaim}
+ }
+
+ return ss, nil
+}
+
+func (s *Server) setupDynamicPersistence() v1.PersistentVolumeClaim {
+ return v1.PersistentVolumeClaim{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "PersistentVolumeClaim",
+ APIVersion: "v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "varlibrancherk3s",
+ Namespace: s.cluster.Namespace,
+ },
+ Spec: v1.PersistentVolumeClaimSpec{
+ AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
+ StorageClassName: s.cluster.Spec.Persistence.StorageClassName,
+ Resources: v1.VolumeResourceRequirements{
+ Requests: v1.ResourceList{
+ "storage": resource.MustParse(s.cluster.Status.Persistence.StorageRequestSize),
+ },
+ },
+ },
+ }
+
+}
+
+func (s *Server) setupStartCommand() (string, error) {
+ var output bytes.Buffer
+
+ tmpl := singleServerTemplate
+ if *s.cluster.Spec.Servers > 1 {
+ tmpl = HAServerTemplate
+ }
+ tmplCmd, err := template.New("").Parse(tmpl)
+ if err != nil {
+ return "", err
+ }
+ if err := tmplCmd.Execute(&output, map[string]string{
+ "ETCD_DIR": "/var/lib/rancher/k3s/server/db/etcd",
+ "INIT_CONFIG": "/opt/rancher/k3s/init/config.yaml",
+ "SERVER_CONFIG": "/opt/rancher/k3s/server/config.yaml",
+ "EXTRA_ARGS": strings.Join(s.cluster.Spec.ServerArgs, " "),
+ }); err != nil {
+ return "", err
+ }
+ return output.String(), nil
}
diff --git a/pkg/controller/cluster/server/template.go b/pkg/controller/cluster/server/template.go
new file mode 100644
index 00000000..42fae023
--- /dev/null
+++ b/pkg/controller/cluster/server/template.go
@@ -0,0 +1,16 @@
+package server
+
+var singleServerTemplate string = `
+if [ -d "{{.ETCD_DIR}}" ]; then
+ # if the etcd directory already exists, this is not an initial run, so reset the cluster first
+ /bin/k3s server --cluster-reset --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}}
+fi
+rm -f /var/lib/rancher/k3s/server/db/reset-flag
+/bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}}`
+
+var HAServerTemplate string = `
+if [ ${POD_NAME: -1} == 0 ] && [ ! -d "{{.ETCD_DIR}}" ]; then
+ /bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}}
+else
+ /bin/k3s server --config {{.SERVER_CONFIG}} {{.EXTRA_ARGS}}
+fi`
diff --git a/tests/cluster_test.go b/tests/cluster_test.go
index 75cfa527..06296196 100644
--- a/tests/cluster_test.go
+++ b/tests/cluster_test.go
@@ -42,7 +42,7 @@ var _ = When("k3k is installed", func() {
})
})
-var _ = When("a cluster is installed", func() {
+var _ = When("an ephemeral cluster is installed", func() {
var namespace string
@@ -66,6 +66,9 @@ var _ = When("a cluster is installed", func() {
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
},
+ Persistence: v1alpha1.PersistenceConfig{
+ Type: v1alpha1.EphemeralNodeType,
+ },
},
}
@@ -130,6 +133,9 @@ var _ = When("a cluster is installed", func() {
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
},
+ Persistence: v1alpha1.PersistenceConfig{
+ Type: v1alpha1.EphemeralNodeType,
+ },
},
}
@@ -191,3 +197,148 @@ var _ = When("a cluster is installed", func() {
Should(BeNil())
})
})
+
+var _ = When("a dynamic cluster is installed", func() {
+
+ var namespace string
+
+ BeforeEach(func() {
+ createdNS := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
+ createdNS, err := k8s.CoreV1().Namespaces().Create(context.Background(), createdNS, v1.CreateOptions{})
+ Expect(err).To(Not(HaveOccurred()))
+ namespace = createdNS.Name
+ })
+
+ It("can create an nginx pod", func() {
+ ctx := context.Background()
+
+ cluster := v1alpha1.Cluster{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "mycluster",
+ Namespace: namespace,
+ },
+ Spec: v1alpha1.ClusterSpec{
+ TLSSANs: []string{hostIP},
+ Expose: &v1alpha1.ExposeConfig{
+ NodePort: &v1alpha1.NodePortConfig{},
+ },
+ Persistence: v1alpha1.PersistenceConfig{
+ Type: v1alpha1.DynamicNodesType,
+ },
+ },
+ }
+
+ By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
+ NewVirtualCluster(cluster)
+
+ By("Waiting to get a kubernetes client for the virtual cluster")
+ virtualK8sClient := NewVirtualK8sClient(cluster)
+
+ nginxPod := &corev1.Pod{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "nginx",
+ Namespace: "default",
+ },
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{{
+ Name: "nginx",
+ Image: "nginx",
+ }},
+ },
+ }
+ nginxPod, err := virtualK8sClient.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, v1.CreateOptions{})
+ Expect(err).To(Not(HaveOccurred()))
+
+ // check that the nginx Pod is up and running in the host cluster
+ Eventually(func() bool {
+ // find the host-cluster pod backing the virtual nginx pod via its translation annotations
+ podList, err := k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{})
+ Expect(err).To(Not(HaveOccurred()))
+
+ for _, pod := range podList.Items {
+ resourceName := pod.Annotations[translate.ResourceNameAnnotation]
+ resourceNamespace := pod.Annotations[translate.ResourceNamespaceAnnotation]
+
+ if resourceName == nginxPod.Name && resourceNamespace == nginxPod.Namespace {
+ fmt.Fprintf(GinkgoWriter,
+ "pod=%s resource=%s/%s status=%s\n",
+ pod.Name, resourceNamespace, resourceName, pod.Status.Phase,
+ )
+
+ return pod.Status.Phase == corev1.PodRunning
+ }
+ }
+
+ return false
+ }).
+ WithTimeout(time.Minute).
+ WithPolling(time.Second * 5).
+ Should(BeTrue())
+ })
+
+ It("uses the same bootstrap secret after a restart", func() {
+ ctx := context.Background()
+
+ cluster := v1alpha1.Cluster{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "mycluster",
+ Namespace: namespace,
+ },
+ Spec: v1alpha1.ClusterSpec{
+ TLSSANs: []string{hostIP},
+ Expose: &v1alpha1.ExposeConfig{
+ NodePort: &v1alpha1.NodePortConfig{},
+ },
+ Persistence: v1alpha1.PersistenceConfig{
+ Type: v1alpha1.DynamicNodesType,
+ },
+ },
+ }
+
+ By(fmt.Sprintf("Creating virtual cluster %s/%s", cluster.Namespace, cluster.Name))
+ NewVirtualCluster(cluster)
+
+ By("Waiting to get a kubernetes client for the virtual cluster")
+ virtualK8sClient := NewVirtualK8sClient(cluster)
+
+ _, err := virtualK8sClient.DiscoveryClient.ServerVersion()
+ Expect(err).To(Not(HaveOccurred()))
+
+ labelSelector := "cluster=" + cluster.Name + ",role=server"
+ serverPods, err := k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
+ Expect(err).To(Not(HaveOccurred()))
+
+ Expect(len(serverPods.Items)).To(Equal(1))
+ serverPod := serverPods.Items[0]
+
+ By("Deleting server pod")
+
+ fmt.Fprintf(GinkgoWriter, "deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
+ err = k8s.CoreV1().Pods(namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
+ Expect(err).To(Not(HaveOccurred()))
+
+ // wait until the server pod has been recreated (no deletion timestamp)
+ Eventually(func() any {
+ serverPods, err = k8s.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
+ Expect(err).To(Not(HaveOccurred()))
+ Expect(len(serverPods.Items)).To(Equal(1))
+ return serverPods.Items[0].DeletionTimestamp
+ }).
+ WithTimeout(30 * time.Second).
+ WithPolling(time.Second * 5).
+ Should(BeNil())
+
+ By("Server pod is up and running again")
+
+ By("Checking that the old k8s client configuration still works")
+
+ Eventually(func() error {
+ virtualK8sClient = NewVirtualK8sClient(cluster)
+ _, err = virtualK8sClient.DiscoveryClient.ServerVersion()
+ return err
+ }).
+ WithTimeout(2 * time.Minute).
+ WithPolling(time.Second * 5).
+ Should(BeNil())
+ })
+})
diff --git a/tests/tests_suite_test.go b/tests/tests_suite_test.go
index 24a2790c..4f49f536 100644
--- a/tests/tests_suite_test.go
+++ b/tests/tests_suite_test.go
@@ -20,6 +20,7 @@ import (
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart/loader"
corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
@@ -137,6 +138,14 @@ var _ = AfterSuite(func() {
fmt.Fprintln(GinkgoWriter, "k3s logs written to: "+logfile)
+ // dump k3s logs
+ readCloser, err = k3sContainer.Logs(context.Background())
+ Expect(err).To(Not(HaveOccurred()))
+ writeLogs("k3s.log", readCloser)
+
+ // dump k3k controller logs
+ writeK3kLogs()
+
testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
})
@@ -150,3 +159,28 @@ func buildScheme() *runtime.Scheme {
return scheme
}
+
+func writeK3kLogs() {
+ var err error
+ var podList v1.PodList
+
+ ctx := context.Background()
+ err = k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: "k3k-system"})
+ Expect(err).To(Not(HaveOccurred()))
+
+ k3kPod := podList.Items[0]
+ req := k8s.CoreV1().Pods(k3kPod.Namespace).GetLogs(k3kPod.Name, &corev1.PodLogOptions{})
+ podLogs, err := req.Stream(ctx)
+ Expect(err).To(Not(HaveOccurred()))
+ writeLogs("k3k.log", podLogs)
+}
+
+func writeLogs(filename string, logs io.ReadCloser) {
+ defer logs.Close()
+ content, err := io.ReadAll(logs)
+ Expect(err).To(Not(HaveOccurred()))
+ tempfile := path.Join(os.TempDir(), filename)
+ err = os.WriteFile(tempfile, content, 0644)
+ Expect(err).To(Not(HaveOccurred()))
+ fmt.Fprintln(GinkgoWriter, "logs written to: "+tempfile)
+}