
Commit 87992f4

Merge pull request #1972 from sairameshv/bump-1.29.5
OCPBUGS-33710: Bump k8s API to 1.29.5
2 parents: 6e2f7c0 + 0e89f73 (commit 87992f4)

36 files changed: +552 -130 lines


CHANGELOG/CHANGELOG-1.29.md

+197 -59 (large diff not rendered)

cluster/gce/config-default.sh

+1 -1

@@ -90,7 +90,7 @@ fi
 # By default, the latest image from the image family will be used unless an
 # explicit image will be set.
 GCI_VERSION=${KUBE_GCI_VERSION:-}
-IMAGE_FAMILY=${KUBE_IMAGE_FAMILY:-cos-97-lts}
+IMAGE_FAMILY=${KUBE_IMAGE_FAMILY:-cos-109-lts}
 export MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
 export MASTER_IMAGE_FAMILY=${KUBE_GCE_MASTER_IMAGE_FAMILY:-${IMAGE_FAMILY}}
 export MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}

cluster/gce/config-test.sh

+1 -1

@@ -103,7 +103,7 @@ ALLOWED_NOTREADY_NODES=${ALLOWED_NOTREADY_NODES:-$(($(get-num-nodes) / 100))}
 # By default, the latest image from the image family will be used unless an
 # explicit image will be set.
 GCI_VERSION=${KUBE_GCI_VERSION:-}
-IMAGE_FAMILY=${KUBE_IMAGE_FAMILY:-cos-97-lts}
+IMAGE_FAMILY=${KUBE_IMAGE_FAMILY:-cos-109-lts}
 export MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
 export MASTER_IMAGE_FAMILY=${KUBE_GCE_MASTER_IMAGE_FAMILY:-${IMAGE_FAMILY}}
 export MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}

cmd/kube-controller-manager/app/controllermanager.go

+3 -1

@@ -619,7 +619,9 @@ func CreateControllerContext(logger klog.Logger, s *config.CompletedConfig, root
     // Informer transform to trim ManagedFields for memory efficiency.
     trim := func(obj interface{}) (interface{}, error) {
         if accessor, err := meta.Accessor(obj); err == nil {
-            accessor.SetManagedFields(nil)
+            if accessor.GetManagedFields() != nil {
+                accessor.SetManagedFields(nil)
+            }
         }
         return obj, nil
     }
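
For context, a minimal sketch (not part of this diff) of how a trim function like the one above is typically attached to a client-go shared informer factory via informers.WithTransform; the added GetManagedFields check simply skips the setter on objects that carry no managed fields.

package example

import (
    "time"

    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
)

// newTrimmingFactory returns a shared informer factory whose cached objects
// have their ManagedFields stripped, mirroring the transform shown above.
func newTrimmingFactory(cs kubernetes.Interface) informers.SharedInformerFactory {
    trim := func(obj interface{}) (interface{}, error) {
        if accessor, err := meta.Accessor(obj); err == nil {
            // Only clear the field when it is actually set.
            if accessor.GetManagedFields() != nil {
                accessor.SetManagedFields(nil)
            }
        }
        return obj, nil
    }
    return informers.NewSharedInformerFactoryWithOptions(cs, 12*time.Hour, informers.WithTransform(trim))
}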

cmd/kubeadm/app/phases/upgrade/postupgrade.go

+29

@@ -46,6 +46,8 @@ import (
     "k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig"
     kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
     dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun"
+    kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
+    "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil"
 )
 
 // PerformPostUpgradeTasks runs nearly the same functions as 'kubeadm init' would do
@@ -335,6 +337,33 @@ func createSuperAdminKubeConfig(cfg *kubeadmapi.InitConfiguration, outDir string
         superAdminBackupPath = superAdminPath + ".backup"
     )
 
+    // Check if the CA is missing on disk. This would mean a cluster with external CA is upgraded.
+    // Show a warning, apply the new admin RBAC and return without generating a separate 'super-admin.conf'.
+    _, _, err = pkiutil.TryLoadCertAndKeyFromDisk(cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
+    if os.IsNotExist(errors.Cause(err)) {
+        klog.Warningf("The CA files do not exist in %q, assuming this is an external CA cluster. "+
+            "Skipping the generating of a 'super-admin.conf' file. Please read the release notes for 1.29 "+
+            "and manually migrate to the separate 'admin.conf' and 'super-admin.conf' files. "+
+            "To generate them you can use 'kubeadm init phase kubeconfig ...' on a host that has the CA, or alternatively "+
+            "you can use 'kubeadm certs generate-csr' to get the new kubeconfig specs and CSRs.",
+            cfg.CertificatesDir)
+
+        // Still apply the RBAC for the regular admin. If 'admin.conf' becomes non-elevated in the future
+        // after a manual interaction by the user the RBAC will be needed.
+        adminClient, err := kubeconfigutil.ClientSetFromFile(filepath.Join(outDir, kubeadmconstants.AdminKubeConfigFileName))
+        if err != nil {
+            return err
+        }
+        // The 'superAdminClient' argument is intentionally nil, so that the function fails if creating the RBAC
+        // with 'adminClient' fails.
+        _, err = ensureRBACFunc(context.Background(), adminClient, nil,
+            kubeadmconstants.APICallRetryInterval, kubeadmconstants.APICallWithWriteTimeout)
+        if err != nil {
+            return err
+        }
+        return nil
+    }
+
 	// Create new admin.conf and super-admin.conf.
 	// If something goes wrong, old existing files will be restored from backup as a best effort.

openshift-hack/e2e/annotate/generated/zz_generated.annotations.go

+2 (generated file; diff not rendered)

openshift-hack/images/hyperkube/Dockerfile.rhel

+1 -1

@@ -13,4 +13,4 @@ COPY --from=builder /tmp/build/* /usr/bin/
 LABEL io.k8s.display-name="OpenShift Kubernetes Server Commands" \
       io.k8s.description="OpenShift is a platform for developing, building, and deploying containerized applications." \
       io.openshift.tags="openshift,hyperkube" \
-      io.openshift.build.versions="kubernetes=1.29.4"
+      io.openshift.build.versions="kubernetes=1.29.5"

pkg/proxy/topology.go

+3 -3

@@ -156,7 +156,7 @@ func canUseTopology(endpoints []Endpoint, svcInfo ServicePort, nodeLabels map[st
 
     zone, ok := nodeLabels[v1.LabelTopologyZone]
     if !ok || zone == "" {
-        klog.InfoS("Skipping topology aware endpoint filtering since node is missing label", "label", v1.LabelTopologyZone)
+        klog.V(2).InfoS("Skipping topology aware endpoint filtering since node is missing label", "label", v1.LabelTopologyZone)
         return false
     }
 
@@ -166,7 +166,7 @@ func canUseTopology(endpoints []Endpoint, svcInfo ServicePort, nodeLabels map[st
             continue
         }
         if endpoint.ZoneHints().Len() == 0 {
-            klog.InfoS("Skipping topology aware endpoint filtering since one or more endpoints is missing a zone hint", "endpoint", endpoint)
+            klog.V(2).InfoS("Skipping topology aware endpoint filtering since one or more endpoints is missing a zone hint", "endpoint", endpoint)
             return false
         }
 
@@ -176,7 +176,7 @@ func canUseTopology(endpoints []Endpoint, svcInfo ServicePort, nodeLabels map[st
     }
 
     if !hasEndpointForZone {
-        klog.InfoS("Skipping topology aware endpoint filtering since no hints were provided for zone", "zone", zone)
+        klog.V(2).InfoS("Skipping topology aware endpoint filtering since no hints were provided for zone", "zone", zone)
         return false
     }
 
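
As a side note, a small self-contained sketch (not from this commit) of what the move to klog.V(2) means in practice: these messages are now emitted only when kube-proxy runs with verbosity 2 or higher, rather than at the default level.

package main

import (
    "flag"

    "k8s.io/klog/v2"
)

func main() {
    // Register klog flags and force verbosity 1 to mimic a default setup.
    klog.InitFlags(nil)
    _ = flag.Set("v", "1")
    flag.Parse()
    defer klog.Flush()

    klog.InfoS("logged at any verbosity")
    klog.V(2).InfoS("suppressed unless the process is started with -v=2 or higher")
}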

pkg/scheduler/schedule_one.go

+14 -9

@@ -439,13 +439,16 @@ func (sched *Scheduler) schedulePod(ctx context.Context, fwk framework.Framework
 // filter plugins and filter extenders.
 func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, framework.Diagnosis, error) {
     logger := klog.FromContext(ctx)
-    diagnosis := framework.Diagnosis{
-        NodeToStatusMap: make(framework.NodeToStatusMap),
-    }
 
     allNodes, err := sched.nodeInfoSnapshot.NodeInfos().List()
     if err != nil {
-        return nil, diagnosis, err
+        return nil, framework.Diagnosis{
+            NodeToStatusMap: make(framework.NodeToStatusMap),
+        }, err
+    }
+
+    diagnosis := framework.Diagnosis{
+        NodeToStatusMap: make(framework.NodeToStatusMap, len(allNodes)),
     }
     // Run "prefilter" plugins.
     preRes, s := fwk.RunPreFilterPlugins(ctx, state, pod)
@@ -483,12 +486,14 @@ func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.F
     nodes := allNodes
     if !preRes.AllNodes() {
         nodes = make([]*framework.NodeInfo, 0, len(preRes.NodeNames))
-        for n := range preRes.NodeNames {
-            nInfo, err := sched.nodeInfoSnapshot.NodeInfos().Get(n)
-            if err != nil {
-                return nil, diagnosis, err
+        for _, n := range allNodes {
+            if !preRes.NodeNames.Has(n.Node().Name) {
+                // We consider Nodes that are filtered out by PreFilterResult as rejected via UnschedulableAndUnresolvable.
+                // We have to record them in NodeToStatusMap so that they won't be considered as candidates in the preemption.
+                diagnosis.NodeToStatusMap[n.Node().Name] = framework.NewStatus(framework.UnschedulableAndUnresolvable, "node is filtered out by the prefilter result")
+                continue
             }
-            nodes = append(nodes, nInfo)
+            nodes = append(nodes, n)
         }
     }
     feasibleNodes, err := sched.findNodesThatPassFilters(ctx, fwk, state, pod, &diagnosis, nodes)
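
To make the new control flow easier to follow, here is a self-contained sketch using plain strings and maps (hypothetical stand-ins, not the scheduler's real types): nodes excluded by the pre-filter result are recorded with an unschedulable-and-unresolvable status instead of being silently dropped, so later stages such as preemption can see why they were never evaluated. This also lines up with the test update below, where all three nodes are now accounted for rather than only the one that survives the pre-filter.

package main

import "fmt"

func main() {
    allNodes := []string{"node1", "node2", "node3"}
    preFiltered := map[string]bool{"node2": true} // nodes allowed by the pre-filter

    statuses := make(map[string]string, len(allNodes))
    candidates := make([]string, 0, len(preFiltered))
    for _, n := range allNodes {
        if !preFiltered[n] {
            // Record why the node was skipped instead of dropping it silently.
            statuses[n] = "UnschedulableAndUnresolvable: node is filtered out by the prefilter result"
            continue
        }
        candidates = append(candidates, n)
    }

    fmt.Println("candidates for filtering:", candidates)
    fmt.Println("recorded statuses:", statuses)
}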

pkg/scheduler/schedule_one_test.go

+1 -1

@@ -2181,7 +2181,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
             nodes: []string{"node1", "node2", "node3"},
             pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
             wantNodes: sets.New("node2"),
-            wantEvaluatedNodes: ptr.To[int32](1),
+            wantEvaluatedNodes: ptr.To[int32](3),
         },
         {
             name: "test prefilter plugin returning non-intersecting nodes",

pkg/scheduler/scheduler.go

+3 -1

@@ -512,7 +512,9 @@ func newPodInformer(cs clientset.Interface, resyncPeriod time.Duration) cache.Sh
     // The Extract workflow (i.e. `ExtractPod`) should be unused.
     trim := func(obj interface{}) (interface{}, error) {
         if accessor, err := meta.Accessor(obj); err == nil {
-            accessor.SetManagedFields(nil)
+            if accessor.GetManagedFields() != nil {
+                accessor.SetManagedFields(nil)
+            }
         }
         return obj, nil
     }

pkg/scheduler/testing/wrappers.go

+14

@@ -851,6 +851,20 @@ func (p *PersistentVolumeWrapper) HostPathVolumeSource(src *v1.HostPathVolumeSou
     return p
 }
 
+// NodeAffinityIn creates a HARD node affinity (with the operator In)
+// and injects into the pv.
+func (p *PersistentVolumeWrapper) NodeAffinityIn(key string, vals []string) *PersistentVolumeWrapper {
+    if p.Spec.NodeAffinity == nil {
+        p.Spec.NodeAffinity = &v1.VolumeNodeAffinity{}
+    }
+    if p.Spec.NodeAffinity.Required == nil {
+        p.Spec.NodeAffinity.Required = &v1.NodeSelector{}
+    }
+    nodeSelector := MakeNodeSelector().In(key, vals).Obj()
+    p.Spec.NodeAffinity.Required.NodeSelectorTerms = append(p.Spec.NodeAffinity.Required.NodeSelectorTerms, nodeSelector.NodeSelectorTerms...)
+    return p
+}
+
 // ResourceClaimWrapper wraps a ResourceClaim inside.
 type ResourceClaimWrapper struct{ resourcev1alpha2.ResourceClaim }
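
A hedged usage sketch of the new helper. MakePersistentVolume and Obj are assumed here from the package's usual Make*/Obj wrapper convention; only NodeAffinityIn comes from this diff.

package example

import (
    v1 "k8s.io/api/core/v1"

    st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// localPVPinnedTo builds a PersistentVolume whose required node affinity
// only matches the named node.
func localPVPinnedTo(nodeName string) *v1.PersistentVolume {
    return st.MakePersistentVolume().
        NodeAffinityIn("kubernetes.io/hostname", []string{nodeName}).
        Obj()
}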

staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go

+6 -6

@@ -84,7 +84,7 @@ var (
         },
         []string{"endpoint"},
     )
-    storageSizeDescription = compbasemetrics.NewDesc("apiserver_storage_size_bytes", "Size of the storage database file physically allocated in bytes.", []string{"cluster"}, nil, compbasemetrics.ALPHA, "")
+    storageSizeDescription = compbasemetrics.NewDesc("apiserver_storage_size_bytes", "Size of the storage database file physically allocated in bytes.", []string{"storage_cluster_id"}, nil, compbasemetrics.ALPHA, "")
     storageMonitor = &monitorCollector{monitorGetter: func() ([]Monitor, error) { return nil, nil }}
     etcdEventsReceivedCounts = compbasemetrics.NewCounterVec(
         &compbasemetrics.CounterOpts{
@@ -301,21 +301,21 @@ func (c *monitorCollector) CollectWithStability(ch chan<- compbasemetrics.Metric
     }
 
     for i, m := range monitors {
-        cluster := fmt.Sprintf("etcd-%d", i)
+        storageClusterID := fmt.Sprintf("etcd-%d", i)
 
-        klog.V(4).InfoS("Start collecting storage metrics", "cluster", cluster)
+        klog.V(4).InfoS("Start collecting storage metrics", "storage_cluster_id", storageClusterID)
         ctx, cancel := context.WithTimeout(context.Background(), time.Second)
         metrics, err := m.Monitor(ctx)
         cancel()
         m.Close()
         if err != nil {
-            klog.InfoS("Failed to get storage metrics", "cluster", cluster, "err", err)
+            klog.InfoS("Failed to get storage metrics", "storage_cluster_id", storageClusterID, "err", err)
             continue
         }
 
-        metric, err := compbasemetrics.NewConstMetric(storageSizeDescription, compbasemetrics.GaugeValue, float64(metrics.Size), cluster)
+        metric, err := compbasemetrics.NewConstMetric(storageSizeDescription, compbasemetrics.GaugeValue, float64(metrics.Size), storageClusterID)
         if err != nil {
-            klog.ErrorS(err, "Failed to create metric", "cluster", cluster)
+            klog.ErrorS(err, "Failed to create metric", "storage_cluster_id", storageClusterID)
         }
         ch <- metric
     }

staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics_test.go

+1 -1

@@ -199,7 +199,7 @@ func TestStorageSizeCollector(t *testing.T) {
             err: nil,
             want: `# HELP apiserver_storage_size_bytes [ALPHA] Size of the storage database file physically allocated in bytes.
 # TYPE apiserver_storage_size_bytes gauge
-apiserver_storage_size_bytes{cluster="etcd-0"} 1e+09
+apiserver_storage_size_bytes{storage_cluster_id="etcd-0"} 1e+09
 `,
         },
         {

test/e2e/scheduling/predicates.go

+73

@@ -39,6 +39,7 @@ import (
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2eruntimeclass "k8s.io/kubernetes/test/e2e/framework/node/runtimeclass"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+    e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
     e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
     e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
     testutils "k8s.io/kubernetes/test/utils"
@@ -857,6 +858,78 @@ var _ = SIGDescribe("SchedulerPredicates", framework.WithSerial(), func() {
         ginkgo.By("Expect all pods are scheduled and running")
         framework.ExpectNoError(e2epod.WaitForPodsRunning(cs, ns, replicas, time.Minute))
     })
+
+    // Regression test for an extended scenario for https://issues.k8s.io/123465
+    ginkgo.It("when PVC has node-affinity to non-existent/illegal nodes, the pod should be scheduled normally if suitable nodes exist", func(ctx context.Context) {
+        nodeName := GetNodeThatCanRunPod(ctx, f)
+        nonExistentNodeName1 := string(uuid.NewUUID())
+        nonExistentNodeName2 := string(uuid.NewUUID())
+        hostLabel := "kubernetes.io/hostname"
+        localPath := "/tmp"
+        podName := "bind-pv-with-non-existent-nodes"
+        pvcName := "pvc-" + string(uuid.NewUUID())
+        _, pvc, err := e2epv.CreatePVPVC(ctx, cs, f.Timeouts, e2epv.PersistentVolumeConfig{
+            PVSource: v1.PersistentVolumeSource{
+                Local: &v1.LocalVolumeSource{
+                    Path: localPath,
+                },
+            },
+            Prebind: &v1.PersistentVolumeClaim{
+                ObjectMeta: metav1.ObjectMeta{Name: pvcName, Namespace: ns},
+            },
+            NodeAffinity: &v1.VolumeNodeAffinity{
+                Required: &v1.NodeSelector{
+                    NodeSelectorTerms: []v1.NodeSelectorTerm{
+                        {
+                            MatchExpressions: []v1.NodeSelectorRequirement{
+                                {
+                                    Key:      hostLabel,
+                                    Operator: v1.NodeSelectorOpIn,
+                                    // add non-existent nodes to the list
+                                    Values: []string{nodeName, nonExistentNodeName1, nonExistentNodeName2},
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        }, e2epv.PersistentVolumeClaimConfig{
+            Name: pvcName,
+        }, ns, true)
+        framework.ExpectNoError(err)
+        bindPvPod := &v1.Pod{
+            ObjectMeta: metav1.ObjectMeta{
+                Name: podName,
+            },
+            Spec: v1.PodSpec{
+                Containers: []v1.Container{
+                    {
+                        Name:  "pause",
+                        Image: imageutils.GetE2EImage(imageutils.Pause),
+                        VolumeMounts: []v1.VolumeMount{
+                            {
+                                Name:      "data",
+                                MountPath: "/tmp",
+                            },
+                        },
+                    },
+                },
+                Volumes: []v1.Volume{
+                    {
+                        Name: "data",
+                        VolumeSource: v1.VolumeSource{
+                            PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+                                ClaimName: pvc.Name,
+                            },
+                        },
+                    },
+                },
+            },
+        }
+        _, err = f.ClientSet.CoreV1().Pods(ns).Create(ctx, bindPvPod, metav1.CreateOptions{})
+        framework.ExpectNoError(err)
+        framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, f.ClientSet, ns, podName))
+    })
 })
 
 func patchPod(cs clientset.Interface, old, new *v1.Pod) (*v1.Pod, error) {

test/e2e/storage/utils/create.go

+4

@@ -62,6 +62,10 @@ func LoadFromManifests(files ...string) ([]interface{}, error) {
         if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, &what); err != nil {
             return fmt.Errorf("decode TypeMeta: %w", err)
         }
+        // Ignore empty documents.
+        if what.Kind == "" {
+            return nil
+        }
 
         factory := factories[what]
         if factory == nil {
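
A standalone sketch of why the Kind == "" guard matters (it does not use the framework's real decoding path, which goes through runtime.DecodeInto and the scheme codecs): splitting a manifest on "---" can yield empty or comment-only documents, which decode to an object with no Kind and should simply be skipped.

package main

import (
    "fmt"
    "strings"

    "sigs.k8s.io/yaml"
)

// typeMeta mirrors the kind/apiVersion header decoded first for each document.
type typeMeta struct {
    Kind       string `json:"kind"`
    APIVersion string `json:"apiVersion"`
}

func main() {
    manifest := "apiVersion: v1\nkind: ConfigMap\n---\n# comment only\n---\n"
    for _, doc := range strings.Split(manifest, "---") {
        var tm typeMeta
        if err := yaml.Unmarshal([]byte(doc), &tm); err != nil {
            fmt.Println("decode error:", err)
            continue
        }
        if tm.Kind == "" {
            fmt.Println("ignoring empty document")
            continue
        }
        fmt.Println("decoding object of kind", tm.Kind)
    }
}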

test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml

+2 -2

@@ -1,5 +1,5 @@
-# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v3.4.0/deploy/kubernetes//rbac.yaml
-# for csi-driver-host-path v1.8.0
+# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v4.5.0/deploy/kubernetes//rbac.yaml
+# for csi-driver-host-path release-1.13
 # by ./update-hostpath.sh
 #
 # This YAML file contains all RBAC objects that are necessary to run external

test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml

+2 -2

@@ -1,5 +1,5 @@
-# Do not edit, downloaded from https://github.com/kubernetes-csi/external-health-monitor/raw/v0.4.0/deploy/kubernetes/external-health-monitor-controller/rbac.yaml
-# for csi-driver-host-path v1.8.0
+# Do not edit, downloaded from https://github.com/kubernetes-csi/external-health-monitor/raw/v0.11.0/deploy/kubernetes/external-health-monitor-controller/rbac.yaml
+# for csi-driver-host-path release-1.13
 # by ./update-hostpath.sh
 #
 # This YAML file contains all RBAC objects that are necessary to run external

test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml

+9 -5

@@ -1,5 +1,5 @@
-# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v3.1.0/deploy/kubernetes//rbac.yaml
-# for csi-driver-host-path v1.8.0
+# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v4.0.0/deploy/kubernetes//rbac.yaml
+# for csi-driver-host-path release-1.13
 # by ./update-hostpath.sh
 #
 # This YAML file contains all RBAC objects that are necessary to run external
@@ -61,6 +61,13 @@ rules:
   - apiGroups: ["storage.k8s.io"]
     resources: ["volumeattachments"]
     verbs: ["get", "list", "watch"]
+  # (Alpha) Access to referencegrants is only needed when the CSI driver
+  # has the CrossNamespaceVolumeDataSource controller capability.
+  # In that case, external-provisioner requires "get", "list", "watch"
+  # permissions for "referencegrants" on "gateway.networking.k8s.io".
+  #- apiGroups: ["gateway.networking.k8s.io"]
+  #  resources: ["referencegrants"]
+  #  verbs: ["get", "list", "watch"]
 
 ---
 kind: ClusterRoleBinding
@@ -89,9 +96,6 @@ metadata:
 rules:
 # Only one of the following rules for endpoints or leases is required based on
 # what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases.
-- apiGroups: [""]
-  resources: ["endpoints"]
-  verbs: ["get", "watch", "list", "delete", "update", "create"]
 - apiGroups: ["coordination.k8s.io"]
   resources: ["leases"]
   verbs: ["get", "watch", "list", "delete", "update", "create"]
