From 1dec65472ee3020639c1872d5e429d7d1b439c89 Mon Sep 17 00:00:00 2001
From: Periyasamy Palanisamy
Date: Thu, 19 Dec 2024 15:32:21 +0100
Subject: [PATCH 1/6] Cleanup ipsec state only when ipsec is not full mode

The check used while cleaning up IPsec state upon ipsec pod deletion is
incorrect: it flushes the state in all cases. This fix flushes the state
only when the IPsec mode is not Full.

Signed-off-by: Periyasamy Palanisamy
(cherry picked from commit 864bdc599ff0da2693b9d6969d87b5853ed71abc)
---
 bindata/network/ovn-kubernetes/common/ipsec-host.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bindata/network/ovn-kubernetes/common/ipsec-host.yaml b/bindata/network/ovn-kubernetes/common/ipsec-host.yaml
index f0ecd632e3..7771d7095c 100644
--- a/bindata/network/ovn-kubernetes/common/ipsec-host.yaml
+++ b/bindata/network/ovn-kubernetes/common/ipsec-host.yaml
@@ -407,7 +407,7 @@ spec:
           # When east-west ipsec is not disabled, then do not flush xfrm states and
           # policies in order to maintain traffic flows during container restart.
           ipsecflush() {
-            if [ "$(kubectl get networks.operator.openshift.io cluster -ojsonpath='{.spec.defaultNetwork.ovnKubernetesConfig.ipsecConfig.mode}')" != "Full" ] || \
+            if [ "$(kubectl get networks.operator.openshift.io cluster -ojsonpath='{.spec.defaultNetwork.ovnKubernetesConfig.ipsecConfig.mode}')" != "Full" ] && \
               [ "$(kubectl get networks.operator.openshift.io cluster -ojsonpath='{.spec.defaultNetwork.ovnKubernetesConfig.ipsecConfig}')" != "{}" ]; then
              ip x s flush
              ip x p flush

From eb50b6a638d1aba44778b0b5157a17694ebf4470 Mon Sep 17 00:00:00 2001
From: Periyasamy Palanisamy
Date: Thu, 19 Dec 2024 18:28:51 +0100
Subject: [PATCH 2/6] Revert "Configure narrowing=yes for IPsec connections"

This reverts commit e0bfa7eb290e70c1e938fefec6a5490b9c71a6db.

Signed-off-by: Periyasamy Palanisamy
(cherry picked from commit ece9fbb3eab09a1d962bd09d395be457275474a3)
---
 .../network/ovn-kubernetes/common/ipsec-host.yaml | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/bindata/network/ovn-kubernetes/common/ipsec-host.yaml b/bindata/network/ovn-kubernetes/common/ipsec-host.yaml
index 7771d7095c..a132e1cbd2 100644
--- a/bindata/network/ovn-kubernetes/common/ipsec-host.yaml
+++ b/bindata/network/ovn-kubernetes/common/ipsec-host.yaml
@@ -241,20 +241,6 @@ spec:
             sed -i "/${defaultcpinclude}/s/^/# /" /etc/ipsec.conf
           fi

-          # Use /etc/ipsec.d/cno.conf file to write our own default IPsec connection parameters.
-          # The /etc/ipsec.d/openshift.conf file can not be used because it is managed by openvswitch.
-          touch /etc/ipsec.d/cno.conf
-          if ! grep -q "narrowing=yes" /etc/ipsec.d/cno.conf; then
-            cat <<EOF > /etc/ipsec.d/cno.conf
-          # Default IPsec connection parameters rendered by network operator.
-          # The narrowing=yes is needed to narrow down the proposals exchanged
-          # by two peers to a mutually acceptable set, otherwise it sometimes
-          # have traffic hit between peer nodes.
-          conn %default
-            narrowing=yes
-          EOF
-          fi
-
           # since pluto is on the host, we need to restart it after changing connection
           # parameters.
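Before the diff resumes, the PATCH 1/6 gate above distills to a small truth table: flush xfrm state and policies only when the configured mode is not "Full" AND an ipsecConfig stanza is present at all. A self-contained Go sketch of that predicate (illustrative only — the function name is invented here, and the two inputs stand in for the two kubectl jsonpath outputs in the script):

    package main

    import "fmt"

    // shouldFlushXfrm mirrors the corrected ipsecflush() condition: with "&&",
    // state is flushed only when east-west IPsec is not Full AND ipsecConfig is
    // non-empty. The old "||" flushed whenever either test passed, wiping live
    // SAs on a Full-mode cluster each time the ipsec pod restarted.
    func shouldFlushXfrm(mode, rawConfig string) bool {
    	return mode != "Full" && rawConfig != "{}"
    }

    func main() {
    	cases := []struct{ mode, raw string }{
    		{"Full", `{"mode":"Full"}`},         // EW IPsec on: never flush
    		{"External", `{"mode":"External"}`}, // NS-only: safe to flush
    		{"", "{}"},                          // legacy empty EW config: do not flush
    	}
    	for _, c := range cases {
    		fmt.Printf("mode=%q config=%q flush=%v\n", c.mode, c.raw, shouldFlushXfrm(c.mode, c.raw))
    	}
    }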
          chroot /proc/1/root ipsec restart

          counter=0
          until [ -r /run/pluto/pluto.ctl ]; do
            counter=$((counter+1))
            sleep 1
            if [ $counter -gt 300 ];
            then
              echo "ipsec has not started after $counter seconds"
              exit 1
            fi
          done
          echo "ipsec service is restarted"

From 0a2bceba0f3c5cae7d323edf3dd53f65a4d34650 Mon Sep 17 00:00:00 2001
From: Periyasamy Palanisamy
Date: Thu, 19 Dec 2024 23:41:02 +0100
Subject: [PATCH 3/6] Restart IPsec service only when needed

Signed-off-by: Periyasamy Palanisamy
(cherry picked from commit ea1d48918943e4a10de3a7b7de3a029a82bb086e)
---
 .../common/80-ipsec-master-extensions.yaml    |  1 +
 .../common/80-ipsec-worker-extensions.yaml    |  1 +
 .../ovn-kubernetes/common/ipsec-host.yaml     | 31 +++++++++----------
 3 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/bindata/network/ovn-kubernetes/common/80-ipsec-master-extensions.yaml b/bindata/network/ovn-kubernetes/common/80-ipsec-master-extensions.yaml
index a432d8bae2..830248503b 100644
--- a/bindata/network/ovn-kubernetes/common/80-ipsec-master-extensions.yaml
+++ b/bindata/network/ovn-kubernetes/common/80-ipsec-master-extensions.yaml
@@ -20,6 +20,7 @@ spec:
           [Service]
           Type=oneshot
+          ExecStartPre=rm -f /etc/ipsec.d/cno.conf
           ExecStart=systemctl enable --now ipsec.service

           [Install]
diff --git a/bindata/network/ovn-kubernetes/common/80-ipsec-worker-extensions.yaml b/bindata/network/ovn-kubernetes/common/80-ipsec-worker-extensions.yaml
index 43f9a1bbeb..5acb18ef72 100644
--- a/bindata/network/ovn-kubernetes/common/80-ipsec-worker-extensions.yaml
+++ b/bindata/network/ovn-kubernetes/common/80-ipsec-worker-extensions.yaml
@@ -20,6 +20,7 @@ spec:
           [Service]
           Type=oneshot
+          ExecStartPre=rm -f /etc/ipsec.d/cno.conf
           ExecStart=systemctl enable --now ipsec.service

           [Install]
diff --git a/bindata/network/ovn-kubernetes/common/ipsec-host.yaml b/bindata/network/ovn-kubernetes/common/ipsec-host.yaml
index a132e1cbd2..4d9b882f86 100644
--- a/bindata/network/ovn-kubernetes/common/ipsec-host.yaml
+++ b/bindata/network/ovn-kubernetes/common/ipsec-host.yaml
@@ -239,23 +239,22 @@ spec:
           defaultcpinclude="include \/etc\/crypto-policies\/back-ends\/libreswan.config"
           if ! grep -q "# ${defaultcpinclude}" /etc/ipsec.conf; then
             sed -i "/${defaultcpinclude}/s/^/# /" /etc/ipsec.conf
-          fi
-
-          # since pluto is on the host, we need to restart it after changing connection
-          # parameters.
-          chroot /proc/1/root ipsec restart
+            # since pluto is on the host, we need to restart it after changing connection
+            # parameters.
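+            # Note that the restart block now lives inside this branch, i.e. pluto
+            # is only restarted when /etc/ipsec.conf was actually edited; an
+            # unconditional restart would churn established connections on every
+            # container start.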
+            chroot /proc/1/root ipsec restart

-          counter=0
-          until [ -r /run/pluto/pluto.ctl ]; do
-            counter=$((counter+1))
-            sleep 1
-            if [ $counter -gt 300 ];
-            then
-              echo "ipsec has not started after $counter seconds"
-              exit 1
-            fi
-          done
-          echo "ipsec service is restarted"
+            counter=0
+            until [ -r /run/pluto/pluto.ctl ]; do
+              counter=$((counter+1))
+              sleep 1
+              if [ $counter -gt 300 ];
+              then
+                echo "ipsec has not started after $counter seconds"
+                exit 1
+              fi
+            done
+            echo "ipsec service is restarted"
+          fi

           # Workaround for https://github.com/libreswan/libreswan/issues/373
           ulimit -n 1024

From 53232880c95fedf99c06dca84581b6d62e27b787 Mon Sep 17 00:00:00 2001
From: Periyasamy Palanisamy
Date: Fri, 17 Jan 2025 13:34:23 +0100
Subject: [PATCH 4/6] Keep ovn ipsec enabled during the upgrade
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The machine pool condition `status.MachineCount == status.UpdatedMachineCount
&& hasSourceInMachineConfigStatus(status, machineConfigs)` introduced with PR
https://github.com/openshift/cluster-network-operator/pull/2349 ensures the
IPsec machine config is always installed on all nodes in the cluster, so the
CNO state machine for IPsec deletes the IPsec daemonset when the condition is
not met. But this also accidentally disables IPsec in OVN, which is not the
expected behavior. It causes ovs-monitor-ipsec to refresh existing ipsec
connections unnecessarily when the IPsec pod comes up, as it is not able to
find remote_name from the tunnel. It may also trigger deletion of IPsec
connection entries from the openshift.conf file if ovs-monitor-ipsec is not
killed in time when the ipsec daemonset is removed. So this commit keeps the
ovn ipsec option enabled as long as the API is set to Full mode.

Signed-off-by: Periyasamy Palanisamy
(cherry picked from commit 4e57dcd195992dd6d19b2d1a731d6673c9ee2e4b)
---
 pkg/network/ovn_kubernetes.go | 97 ++++++++++++++++++-----------------
 1 file changed, 50 insertions(+), 47 deletions(-)

diff --git a/pkg/network/ovn_kubernetes.go b/pkg/network/ovn_kubernetes.go
index 651fb303a9..fe7ce86afc 100644
--- a/pkg/network/ovn_kubernetes.go
+++ b/pkg/network/ovn_kubernetes.go
@@ -612,69 +612,72 @@ func IsIPsecLegacyAPI(conf *operv1.OVNKubernetesConfig) bool {
 	return conf.IPsecConfig == nil || conf.IPsecConfig.Mode == ""
 }

-// shouldRenderIPsec method ensures the have following IPsec states for upgrade path from 4.14 to 4.15 or later versions:
-// When 4.14 cluster is already installed with MachineConfig for IPsec extension and ipsecConfig is set in network operator
-// config (i.e. IPsec for NS+EW), then render CNO's IPsec MC extension and ipsec-host daemonset.
-// When 4.14 cluster is just running with ipsecConfig set in network operator config (i.e. IPsec for EW only), then activate
-// IPsec MachineConfig and render ipsec-host daemonset.
-// When 4.14 cluster is just installed with MachineConfig for IPsec extension (i.e. IPsec for NS only), then just keep MachineConfig
-// to be in the same state without rendering IPsec daemonsets.
-// When 4.14 cluster is Hypershift cluster running with ipsecConfig set, then just render ovn-ipsec-containerized daemonset as
-// MachineConfig kind is not supported there.
-// For Upgrade path from pre-4.14 to 5.15 or later versions:
-// When pre-4.14 cluster is just running with ipsecConfig set in network operator config (i.e. IPsec for EW only), then activate
-// IPsec MachineConfig and render ipsec-host daemonset.
-// When pre-4.14 cluster is Hypershift cluster running with ipsecConfig set, then just render ovn-ipsec-containerized daemonset as
-// MachineConfig kind is not supported there.
-// All Other cases are not supported in pre-4.14 deployments.
+// shouldRenderIPsec method ensures the needed states when enabling, disabling
+// or upgrading IPsec
 func shouldRenderIPsec(conf *operv1.OVNKubernetesConfig, bootstrapResult *bootstrap.BootstrapResult) (renderCNOIPsecMachineConfig, renderIPsecDaemonSet,
 	renderIPsecOVN, renderIPsecHostDaemonSet, renderIPsecContainerizedDaemonSet, renderIPsecDaemonSetAsCreateWaitOnly bool) {
+
+	// Note on 4.14 to 4.15 legacy IPsec upgrade for self managed clusters:
+	// during this upgrade both host and containerized daemonsets are rendered.
+	// Internally, these daemonsets coordinate when they are active or dormant:
+	// before the IPsec MachineConfig extensions are active, the containerized
+	// daemonset is active and the host daemonset is dormant; after rebooting
+	// with the IPsec MachineConfig extensions active, the containerized
+	// daemonset is dormant and the host daemonset is active. When the upgrade
+	// finishes, the containerized daemonset is then not rendered.
+
 	isHypershiftHostedCluster := bootstrapResult.Infra.HostedControlPlane != nil
-	isIpsecUpgrade := bootstrapResult.OVN.IPsecUpdateStatus != nil && bootstrapResult.OVN.IPsecUpdateStatus.LegacyIPsecUpgrade
+	isIpsecLegacyUpgrade := bootstrapResult.OVN.IPsecUpdateStatus != nil && bootstrapResult.OVN.IPsecUpdateStatus.LegacyIPsecUpgrade
 	isOVNIPsecActive := bootstrapResult.OVN.IPsecUpdateStatus != nil && bootstrapResult.OVN.IPsecUpdateStatus.OVNIPsecActive
 	mode := GetIPsecMode(conf)

-	// On upgrade, we will just remove any existing ipsec deployment without making any
-	// change to them. So during upgrade, we must keep track if IPsec MachineConfigs are
-	// active or not for non Hybrid hosted cluster.
-	isIPsecMachineConfigActive := isIPsecMachineConfigActive(bootstrapResult.Infra)
-	isIPsecMachineConfigNotActiveOnUpgrade := isIpsecUpgrade && !isIPsecMachineConfigActive && !isHypershiftHostedCluster
-	isMachineConfigClusterOperatorReady := bootstrapResult.Infra.MachineConfigClusterOperatorReady
-	isCNOIPsecMachineConfigPresent := isCNOIPsecMachineConfigPresent(bootstrapResult.Infra)
-	isUserDefinedIPsecMachineConfigPresent := isUserDefinedIPsecMachineConfigPresent(bootstrapResult.Infra)
-
 	// We render the ipsec deployment if IPsec is already active in OVN
 	// or if EW IPsec config is enabled.
 	renderIPsecDaemonSet = isOVNIPsecActive || mode == operv1.IPsecModeFull

-	// If ipsec is enabled, we render the host ipsec deployment except for
-	// hypershift hosted clusters and we need to wait for the ipsec MachineConfig
-	// extensions to be active first. We must also render host ipsec deployment
-	// at the time of upgrade though user created IPsec Machine Config is not
-	// present/active.
-	renderIPsecHostDaemonSet = (renderIPsecDaemonSet && isIPsecMachineConfigActive && !isHypershiftHostedCluster) || isIPsecMachineConfigNotActiveOnUpgrade
-
-	// The containerized ipsec deployment is only rendered during upgrades or
-	// for hypershift hosted clusters.
-	renderIPsecContainerizedDaemonSet = (renderIPsecDaemonSet && isHypershiftHostedCluster) || isIPsecMachineConfigNotActiveOnUpgrade
-
-	// MachineConfig IPsec extensions rollout is needed for the ipsec enablement and are used in both External and Full modes.
-	// except when the containerized deployment is used in hypershift hosted clusters. Also do not render Machine Config if
-	// user already created their own machine config for IPsec.
+	// To enable IPsec, specific MachineConfig extensions need to be rolled out
+	// first with the following exceptions:
+	// - not needed when the containerized deployment is used in hypershift
+	//   hosted clusters
+	// - not needed if the user already created their own
+	isMachineConfigClusterOperatorReady := bootstrapResult.Infra.MachineConfigClusterOperatorReady
+	isCNOIPsecMachineConfigPresent := isCNOIPsecMachineConfigPresent(bootstrapResult.Infra)
+	isUserDefinedIPsecMachineConfigPresent := isUserDefinedIPsecMachineConfigPresent(bootstrapResult.Infra)
 	renderCNOIPsecMachineConfig = (mode != operv1.IPsecModeDisabled || renderIPsecDaemonSet) &&
 		!isHypershiftHostedCluster && !isUserDefinedIPsecMachineConfigPresent
 	// Wait for MCO to be ready unless we had already rendered the IPsec MachineConfig.
 	renderCNOIPsecMachineConfig = renderCNOIPsecMachineConfig && (isCNOIPsecMachineConfigPresent || isMachineConfigClusterOperatorReady)

-	// We render OVN IPsec if East-West IPsec is enabled or it's upgrade is in progress.
-	// If NS IPsec is enabled as well, we need to wait to IPsec MachineConfig
-	// to be active if it's not an upgrade and not a hypershift hosted cluster.
-	renderIPsecOVN = (renderIPsecHostDaemonSet || renderIPsecContainerizedDaemonSet) && mode == operv1.IPsecModeFull
-
-	// While OVN ipsec is being upgraded and IPsec MachineConfigs deployment is in progress
-	// (or) IPsec config in OVN is being disabled, then ipsec deployment is not updated.
-	renderIPsecDaemonSetAsCreateWaitOnly = isIPsecMachineConfigNotActiveOnUpgrade || (isOVNIPsecActive && !renderIPsecOVN)
+	// As a general rule, we need to wait until the IPsec MachineConfig
+	// extensions are active before rendering the IPsec daemonsets. Note that
+	// during upgrades or node reboots there is a period of time where the IPsec
+	// machine configs are not active and the daemonset won't be rendered but
+	// that is fine since the IPsec configuration should persist. The exception
+	// is the 4.14 to 4.15 legacy IPsec upgrade as noted above.
+	isIPsecMachineConfigActive := isIPsecMachineConfigActive(bootstrapResult.Infra)
+	isIPsecMachineConfigNotActiveOnLegacyUpgrade := isIpsecLegacyUpgrade && !isIPsecMachineConfigActive && !isHypershiftHostedCluster
+
+	// We render the host ipsec deployment for self managed clusters after the
+	// ipsec MachineConfig extensions have been rolled out, except for the 4.14
+	// to 4.15 legacy IPsec upgrade as noted above.
+	renderIPsecHostDaemonSet = (renderIPsecDaemonSet && isIPsecMachineConfigActive && !isHypershiftHostedCluster) || isIPsecMachineConfigNotActiveOnLegacyUpgrade
+
+	// We render the containerized ipsec deployment for hosted clusters. It does
+	// not depend on any machine config extension; however, we also render it for
+	// the 4.14 to 4.15 legacy IPsec upgrade as noted above.
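+	// (Hosted clusters cannot take the MachineConfig path at all, since the
+	// MachineConfig kind is not supported there.)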
+	renderIPsecContainerizedDaemonSet = (renderIPsecDaemonSet && isHypershiftHostedCluster) || isIPsecMachineConfigNotActiveOnLegacyUpgrade
+
+	// We render OVN IPsec if EW IPsec is enabled not before the daemon sets are
+	// rendered. If it is already rendered, keep it rendered unless disabled.
+	renderIPsecOVN = (renderIPsecHostDaemonSet || renderIPsecContainerizedDaemonSet || isOVNIPsecActive) && mode == operv1.IPsecModeFull
+
+	// Keep IPsec daemonsets updated (but avoid creating) in the following circumstances:
+	// - on the 4.14 to 4.15 legacy IPsec upgrade, where we just want to update
+	//   them as noted above
+	// - when disabling OVN IPsec, we want to keep the daemonsets until after
+	//   OVN IPsec is disabled
+	renderIPsecDaemonSetAsCreateWaitOnly = isIPsecMachineConfigNotActiveOnLegacyUpgrade || (isOVNIPsecActive && !renderIPsecOVN)

 	return
 }

From 5370430df6157326e28e963ea0eac7a908539dbe Mon Sep 17 00:00:00 2001
From: Periyasamy Palanisamy
Date: Mon, 27 Jan 2025 17:06:08 +0100
Subject: [PATCH 5/6] Remove 4.13 IPsec upgrade handling

This removes stale 4.13 IPsec upgrade handling code, which is no longer
valid for >=4.15 upgrade scenarios.

Signed-off-by: Periyasamy Palanisamy
(cherry picked from commit 3c99d4ff65343bb66d6c6c214d3e5fb0bdcc5ea9)
---
 pkg/bootstrap/types.go             |   2 +-
 pkg/network/ovn_kubernetes.go      | 101 ++++++++---------------------
 pkg/network/ovn_kubernetes_test.go |  43 ------------
 3 files changed, 28 insertions(+), 118 deletions(-)

diff --git a/pkg/bootstrap/types.go b/pkg/bootstrap/types.go
index 321b9ed543..6634495c37 100644
--- a/pkg/bootstrap/types.go
+++ b/pkg/bootstrap/types.go
@@ -49,7 +49,7 @@ type OVNUpdateStatus struct {
 // OVNIPsecStatus contains status of current IPsec configuration
 // in the cluster.
 type OVNIPsecStatus struct {
-	LegacyIPsecUpgrade bool // true if IPsec in 4.14 or Pre-4.14 cluster is upgraded to latest version
+	LegacyIPsecUpgrade bool // true if IPsec in 4.14 cluster is upgraded to latest version
 	OVNIPsecActive     bool // set to true unless we are sure it is not.
 }

diff --git a/pkg/network/ovn_kubernetes.go b/pkg/network/ovn_kubernetes.go
index fe7ce86afc..a4ad764c20 100644
--- a/pkg/network/ovn_kubernetes.go
+++ b/pkg/network/ovn_kubernetes.go
@@ -522,25 +522,6 @@ func renderOVNKubernetes(conf *operv1.NetworkSpec, bootstrapResult *bootstrap.Bo
 			anno[names.CreateWaitAnnotation] = "true"
 			o.SetAnnotations(anno)
 		})
-		// The legacy ovn-ipsec deployment is only rendered during upgrades until we
-		// are ready to remove it.
-		ovnIPsecLegacyDS := &appsv1.DaemonSet{
-			TypeMeta: metav1.TypeMeta{
-				Kind:       "DaemonSet",
-				APIVersion: appsv1.SchemeGroupVersion.String(),
-			},
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      "ovn-ipsec",
-				Namespace: util.OVN_NAMESPACE,
-				// We never update the legacy ovn-ipsec daemonset.
-				Annotations: map[string]string{names.CreateWaitAnnotation: "true"},
-			},
-		}
-		obj, err := k8s.ToUnstructured(ovnIPsecLegacyDS)
-		if err != nil {
-			return nil, progressing, fmt.Errorf("unable to render legacy ovn-ipsec daemonset: %w", err)
-		}
-		objs = append(objs, obj)
 	}

 	klog.Infof("ovnk components: ovnkube-node: isRunning=%t, update=%t; ovnkube-control-plane: isRunning=%t, update=%t",
@@ -1213,77 +1194,49 @@ func bootstrapOVN(conf *operv1.Network, kubeClient cnoclient.Client, infraStatus
 		prepullerStatus.Progressing = daemonSetProgressing(prePullerDaemonSet, true)
 	}

-	ipsecDaemonSet := &appsv1.DaemonSet{
+	ipsecContainerizedDaemonSet := &appsv1.DaemonSet{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "DaemonSet",
 			APIVersion: appsv1.SchemeGroupVersion.String(),
 		},
 	}
-
-	ipsecStatus := &bootstrap.OVNIPsecStatus{}
-
-	// The IPsec daemonset name is ovn-ipsec if we are upgrading from <= 4.13.
-	nsn = types.NamespacedName{Namespace: util.OVN_NAMESPACE, Name: "ovn-ipsec"}
-	if err := kubeClient.ClientFor("").CRClient().Get(context.TODO(), nsn, ipsecDaemonSet); err != nil {
+	ipsecHostDaemonSet := &appsv1.DaemonSet{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "DaemonSet",
+			APIVersion: appsv1.SchemeGroupVersion.String(),
+		},
+	}
+	// Retrieve container based IPsec daemonset with name ovn-ipsec-containerized.
+	nsn = types.NamespacedName{Namespace: util.OVN_NAMESPACE, Name: "ovn-ipsec-containerized"}
+	if err := kubeClient.ClientFor("").CRClient().Get(context.TODO(), nsn, ipsecContainerizedDaemonSet); err != nil {
 		if !apierrors.IsNotFound(err) {
-			return nil, fmt.Errorf("Failed to retrieve existing pre-4.14 ipsec DaemonSet: %w", err)
+			return nil, fmt.Errorf("Failed to retrieve existing ipsec containerized DaemonSet: %w", err)
 		} else {
-			ipsecStatus = nil
-		}
-	} else {
-		ipsecStatus.LegacyIPsecUpgrade = true
-	}
-
-	if ipsecStatus == nil {
-		ipsecStatus = &bootstrap.OVNIPsecStatus{}
-		ipsecContainerizedDaemonSet := &appsv1.DaemonSet{
-			TypeMeta: metav1.TypeMeta{
-				Kind:       "DaemonSet",
-				APIVersion: appsv1.SchemeGroupVersion.String(),
-			},
-		}
-		ipsecHostDaemonSet := &appsv1.DaemonSet{
-			TypeMeta: metav1.TypeMeta{
-				Kind:       "DaemonSet",
-				APIVersion: appsv1.SchemeGroupVersion.String(),
-			},
-		}
-		// Retrieve container based IPsec daemonset with name ovn-ipsec-containerized.
-		nsn = types.NamespacedName{Namespace: util.OVN_NAMESPACE, Name: "ovn-ipsec-containerized"}
-		if err := kubeClient.ClientFor("").CRClient().Get(context.TODO(), nsn, ipsecContainerizedDaemonSet); err != nil {
-			if !apierrors.IsNotFound(err) {
-				return nil, fmt.Errorf("Failed to retrieve existing ipsec containerized DaemonSet: %w", err)
-			} else {
-				ipsecContainerizedDaemonSet = nil
-			}
-		}
-		// Retrieve host based IPsec daemonset with name ovn-ipsec-host
-		nsn = types.NamespacedName{Namespace: util.OVN_NAMESPACE, Name: "ovn-ipsec-host"}
-		if err := kubeClient.ClientFor("").CRClient().Get(context.TODO(), nsn, ipsecHostDaemonSet); err != nil {
-			if !apierrors.IsNotFound(err) {
-				return nil, fmt.Errorf("Failed to retrieve existing ipsec host DaemonSet: %w", err)
-			} else {
-				ipsecHostDaemonSet = nil
-			}
+			ipsecContainerizedDaemonSet = nil
 		}
-		if ipsecContainerizedDaemonSet != nil && ipsecHostDaemonSet != nil {
-			// Both IPsec daemonset versions exist, so this is an upgrade from 4.14.
-			ipsecStatus.LegacyIPsecUpgrade = true
-		} else if ipsecContainerizedDaemonSet == nil && ipsecHostDaemonSet == nil {
-			ipsecStatus = nil
+	}
+	// Retrieve host based IPsec daemonset with name ovn-ipsec-host
+	nsn = types.NamespacedName{Namespace: util.OVN_NAMESPACE, Name: "ovn-ipsec-host"}
+	if err := kubeClient.ClientFor("").CRClient().Get(context.TODO(), nsn, ipsecHostDaemonSet); err != nil {
+		if !apierrors.IsNotFound(err) {
+			return nil, fmt.Errorf("Failed to retrieve existing ipsec host DaemonSet: %w", err)
+		} else {
+			ipsecHostDaemonSet = nil
 		}
 	}
-
-	// set OVN IPsec status into ipsecStatus only when IPsec daemonset(s) exists in the cluster.
-	if ipsecStatus != nil {
-		ipsecStatus.OVNIPsecActive = ovnIPsecStatus.OVNIPsecActive
+	if ipsecContainerizedDaemonSet != nil && ipsecHostDaemonSet != nil {
+		// Both IPsec daemonset versions exist, so this is an upgrade from 4.14.
+		ovnIPsecStatus.LegacyIPsecUpgrade = true
+	} else if ipsecContainerizedDaemonSet == nil && ipsecHostDaemonSet == nil {
+		// set OVN IPsec status to nil since none of the IPsec daemonset(s) exists in the cluster.
+		ovnIPsecStatus = nil
 	}

 	res := bootstrap.OVNBootstrapResult{
 		ControlPlaneReplicaCount: controlPlaneReplicaCount,
 		ControlPlaneUpdateStatus: controlPlaneStatus,
 		NodeUpdateStatus:         nodeStatus,
-		IPsecUpdateStatus:        ipsecStatus,
+		IPsecUpdateStatus:        ovnIPsecStatus,
 		PrePullerUpdateStatus:    prepullerStatus,
 		OVNKubernetesConfig:      ovnConfigResult,
 		FlowsConfig:              bootstrapFlowsConfig(kubeClient.ClientFor("").CRClient()),

diff --git a/pkg/network/ovn_kubernetes_test.go b/pkg/network/ovn_kubernetes_test.go
index e0b026351d..f06fdf8bc9 100644
--- a/pkg/network/ovn_kubernetes_test.go
+++ b/pkg/network/ovn_kubernetes_test.go
@@ -2450,10 +2450,6 @@ func TestRenderOVNKubernetesEnableIPsec(t *testing.T) {
 	if renderedIPsec != nil {
 		t.Errorf("ovn-ipsec-containerized DaemonSet must not exist, but it's available")
 	}
-	renderedIPsec = findInObjs("apps", "DaemonSet", "ovn-ipsec", "openshift-ovn-kubernetes", objs)
-	if renderedIPsec != nil {
-		t.Errorf("ovn-ipsec DaemonSet must not exist, but it's available")
-	}
 	renderedMasterIPsecExtension := findInObjs("machineconfiguration.openshift.io", "MachineConfig", masterMachineConfigIPsecExtName, "", objs)
 	if renderedMasterIPsecExtension != nil {
 		t.Errorf("The MachineConfig %s must not exist, but it's available", masterMachineConfigIPsecExtName)
 	}
@@ -2478,10 +2474,6 @@ func TestRenderOVNKubernetesEnableIPsec(t *testing.T) {
 	if renderedIPsec != nil {
 		t.Errorf("ovn-ipsec-containerized DaemonSet must not exist, but it's available")
 	}
-	renderedIPsec = findInObjs("apps", "DaemonSet", "ovn-ipsec", "openshift-ovn-kubernetes", objs)
-	if renderedIPsec != nil {
-		t.Errorf("ovn-ipsec DaemonSet must not exist, but it's available")
-	}
 	renderedMasterIPsecExtension = findInObjs("machineconfiguration.openshift.io", "MachineConfig",
 		masterMachineConfigIPsecExtName, "", objs)
 	if renderedMasterIPsecExtension == nil {
 		t.Errorf("The MachineConfig %s must exist, but it's not available", masterMachineConfigIPsecExtName)
 	}
@@ -2510,10 +2502,6 @@ func TestRenderOVNKubernetesEnableIPsec(t *testing.T) {
 	if renderedIPsec != nil {
 		t.Errorf("ovn-ipsec-containerized DaemonSet must not exist, but it's available")
 	}
-	renderedIPsec = findInObjs("apps", "DaemonSet", "ovn-ipsec", "openshift-ovn-kubernetes", objs)
-	if renderedIPsec != nil {
-		t.Errorf("ovn-ipsec DaemonSet must not exist, but it's available")
-	}
 	renderedMasterIPsecExtension = findInObjs("machineconfiguration.openshift.io", "MachineConfig", masterMachineConfigIPsecExtName, "", objs)
 	if renderedMasterIPsecExtension == nil {
 		t.Errorf("The MachineConfig %s must exist, but it's not available", masterMachineConfigIPsecExtName)
 	}
@@ -2542,10 +2530,6 @@ func TestRenderOVNKubernetesEnableIPsec(t *testing.T) {
 	if renderedIPsec != nil {
 		t.Errorf("ovn-ipsec-containerized DaemonSet must not exist, but it's available")
 	}
-	renderedIPsec = findInObjs("apps", "DaemonSet", "ovn-ipsec", "openshift-ovn-kubernetes", objs)
-	if renderedIPsec != nil {
-		t.Errorf("ovn-ipsec DaemonSet must not exist, but it's available")
-	}
 	renderedMasterIPsecExtension = findInObjs("machineconfiguration.openshift.io", "MachineConfig", masterMachineConfigIPsecExtName, "", objs)
 	if renderedMasterIPsecExtension == nil {
 		t.Errorf("The MachineConfig %s must exist, but it's not available", masterMachineConfigIPsecExtName)
 	}
@@ -2586,10 +2570,6 @@ func TestRenderOVNKubernetesEnableIPsec(t *testing.T) {
 	if renderedIPsec != nil {
 		t.Errorf("ovn-ipsec-containerized DaemonSet must not exist, but it's available")
 	}
-	renderedIPsec = findInObjs("apps", "DaemonSet", "ovn-ipsec", "openshift-ovn-kubernetes", objs)
-	if renderedIPsec != nil {
-		t.Errorf("ovn-ipsec DaemonSet must not exist, but it's available")
-	}
 	renderedNode := findInObjs("apps", "DaemonSet", "ovnkube-node", "openshift-ovn-kubernetes", objs)
 	if renderedNode == nil {
 		t.Errorf("ovnkube-node DaemonSet must exist, but it's not available")
 	}
@@ -2681,10 +2661,6 @@ func TestRenderOVNKubernetesEnableIPsecForHostedControlPlane(t *testing.T) {
 	if renderedIPsec != nil {
 		t.Errorf("ovn-ipsec-host DaemonSet must not exist, but it's available")
 	}
-	renderedIPsec = findInObjs("apps", "DaemonSet", "ovn-ipsec", "openshift-ovn-kubernetes", objs)
-	if renderedIPsec != nil {
-		t.Errorf("ovn-ipsec DaemonSet must not exist, but it's available")
-	}
 	renderedNode := findInObjs("apps", "DaemonSet", "ovnkube-node", "openshift-ovn-kubernetes", objs)
 	if renderedNode == nil {
 		t.Errorf("ovnkube-node DaemonSet must exist, but it's not available")
 	}
@@ -2807,10 +2783,6 @@ func TestRenderOVNKubernetesIPsecUpgradeWithMachineConfig(t *testing.T) {
 	if renderedIPsec != nil {
 		t.Errorf("ovn-ipsec-containerized DaemonSet must not exist, but it's available")
 	}
-	renderedIPsec = findInObjs("apps", "DaemonSet", "ovn-ipsec", "openshift-ovn-kubernetes", objs)
-	if renderedIPsec != nil {
-		t.Errorf("ovn-ipsec DaemonSet must not exist, but it's available")
-	}
 	renderedNode := findInObjs("apps", "DaemonSet", "ovnkube-node", "openshift-ovn-kubernetes", objs)
 	if renderedNode == nil {
 		t.Errorf("ovnkube-node DaemonSet must exist, but it's not available")
 	}
@@ -2918,13 +2890,6 @@ func TestRenderOVNKubernetesIPsecUpgradeWithNoMachineConfig(t *testing.T) {
 	if _, ok := renderedIPsec.GetAnnotations()[names.CreateWaitAnnotation]; !ok {
 		t.Errorf("ovn-ipsec-containerized DaemonSet should have create-wait annotation, does not")
 	}
-	renderedIPsec = findInObjs("apps", "DaemonSet", "ovn-ipsec", "openshift-ovn-kubernetes", objs)
-	if renderedIPsec == nil {
-		t.Errorf("ovn-ipsec DaemonSet must exist, but it's not available")
-	}
-	if _, ok := renderedIPsec.GetAnnotations()[names.CreateWaitAnnotation]; !ok {
-		t.Errorf("ovn-ipsec DaemonSet should have create-wait annotation, does not")
-	}
 	// The ovnkube-node must set with ipsec-enabled annotation.
 	renderedNode := findInObjs("apps", "DaemonSet", "ovnkube-node", "openshift-ovn-kubernetes", objs)
 	if renderedNode == nil {
 		t.Errorf("ovnkube-node DaemonSet must exist, but it's not available")
 	}
@@ -2971,10 +2936,6 @@ func TestRenderOVNKubernetesIPsecUpgradeWithNoMachineConfig(t *testing.T) {
 	if renderedIPsec != nil {
 		t.Errorf("ovn-ipsec-containerized DaemonSet must not exist, but it's available")
 	}
-	renderedIPsec = findInObjs("apps", "DaemonSet", "ovn-ipsec", "openshift-ovn-kubernetes", objs)
-	if renderedIPsec != nil {
-		t.Errorf("ovn-ipsec DaemonSet must not exist, but it's available")
-	}
 	renderedNode = findInObjs("apps", "DaemonSet", "ovnkube-node", "openshift-ovn-kubernetes", objs)
 	if renderedNode == nil {
 		t.Errorf("ovnkube-node DaemonSet must exist, but it's not available")
 	}
@@ -3081,10 +3042,6 @@ func TestRenderOVNKubernetesIPsecUpgradeWithHypershiftHostedCluster(t *testing.T
 	if renderedIPsec != nil {
 		t.Errorf("ovn-ipsec-host DaemonSet must not exist, but it's available")
 	}
-	renderedIPsec = findInObjs("apps", "DaemonSet", "ovn-ipsec", "openshift-ovn-kubernetes", objs)
-	if renderedIPsec != nil {
-		t.Errorf("ovn-ipsec DaemonSet must not exist, but it's available")
-	}
 	renderedNode := findInObjs("apps", "DaemonSet", "ovnkube-node", "openshift-ovn-kubernetes", objs)
 	if renderedNode == nil {
 		t.Errorf("ovnkube-node DaemonSet must exist, but it's not available")
 	}

From 813104e8fb8c43b514d0caee8ef995592141275a Mon Sep 17 00:00:00 2001
From: Periyasamy Palanisamy
Date: Tue, 28 Jan 2025 19:03:50 +0100
Subject: [PATCH 6/6] Keep rendering OVN IPsec when its daemonset is not
 available

Commit 4e57dcd is not complete because OVNIPsecStatus is still not set when
none of the IPsec daemonsets exist on the cluster while machine config pools
are updating or a node is rebooted. Fix this by always setting OVNIPsecStatus
to reflect the IPsec deployment state of the cluster, and by updating the
render pipeline to render ovn ipsec for the scenarios mentioned above. This
renders ovn ipsec even before the ipsec daemonsets are deployed when IPsec is
freshly enabled on the cluster. That is OK: the setting only takes effect
once the ovs-monitor-ipsec script is started, which happens only when the
ipsec pod is running, so we are safe to ignore it.

When IPsec is disabled from the API, ovn ipsec is disabled first, and then
the ipsec machine config and ipsec daemonset stop being rendered. While the
ipsec machine configs are being removed, the ovnkube-node daemonset goes into
a progressing state and the OVNIPsecActive condition would become true again.
Hence this commit also considers machine config status, so that the rendering
pipeline does not render the IPsec machine configs again.

Signed-off-by: Periyasamy Palanisamy
(cherry picked from commit ff0b1474d2874e79c71b9bb6ac7252b4bc267642)
---
 pkg/bootstrap/types.go             |  9 +++-
 pkg/network/ovn_kubernetes.go      | 19 +++----
 pkg/network/ovn_kubernetes_test.go | 80 ++++++++++++++++++++++++++----
 3 files changed, 87 insertions(+), 21 deletions(-)

diff --git a/pkg/bootstrap/types.go b/pkg/bootstrap/types.go
index 6634495c37..b27a72a011 100644
--- a/pkg/bootstrap/types.go
+++ b/pkg/bootstrap/types.go
@@ -49,8 +49,13 @@ type OVNUpdateStatus struct {
 // OVNIPsecStatus contains status of current IPsec configuration
 // in the cluster.
 type OVNIPsecStatus struct {
-	LegacyIPsecUpgrade bool // true if IPsec in 4.14 cluster is upgraded to latest version
-	OVNIPsecActive     bool // set to true unless we are sure it is not.
+	// LegacyIPsecUpgrade is true if IPsec in a 4.14.x cluster is upgraded to a 4.15.x version.
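+	// Such an upgrade is detected in bootstrapOVN by the containerized and
+	// host IPsec daemonsets existing at the same time.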
+	LegacyIPsecUpgrade bool
+	// IsOVNIPsecActiveOrRollingOut is set to true unless we are sure it is not. Note that it is
+	// also set to true when the ovnkube-node daemonset is in a progressing state, which does not
+	// reflect the actual ovn ipsec state, so we must be cautious when making decisions during
+	// machine config rollouts and node reboot scenarios.
+	IsOVNIPsecActiveOrRollingOut bool
 }

 type OVNBootstrapResult struct {

diff --git a/pkg/network/ovn_kubernetes.go b/pkg/network/ovn_kubernetes.go
index a4ad764c20..1a0e2600f8 100644
--- a/pkg/network/ovn_kubernetes.go
+++ b/pkg/network/ovn_kubernetes.go
@@ -609,10 +609,17 @@ func shouldRenderIPsec(conf *operv1.OVNKubernetesConfig, bootstrapResult *bootst
 	isHypershiftHostedCluster := bootstrapResult.Infra.HostedControlPlane != nil
 	isIpsecLegacyUpgrade := bootstrapResult.OVN.IPsecUpdateStatus != nil && bootstrapResult.OVN.IPsecUpdateStatus.LegacyIPsecUpgrade
-	isOVNIPsecActive := bootstrapResult.OVN.IPsecUpdateStatus != nil && bootstrapResult.OVN.IPsecUpdateStatus.OVNIPsecActive
+	isOVNIPsecActiveOrRollingOut := bootstrapResult.OVN.IPsecUpdateStatus != nil && bootstrapResult.OVN.IPsecUpdateStatus.IsOVNIPsecActiveOrRollingOut
+	isCNOIPsecMachineConfigPresent := isCNOIPsecMachineConfigPresent(bootstrapResult.Infra)
+	isUserDefinedIPsecMachineConfigPresent := isUserDefinedIPsecMachineConfigPresent(bootstrapResult.Infra)
+	isMachineConfigClusterOperatorReady := bootstrapResult.Infra.MachineConfigClusterOperatorReady
 	mode := GetIPsecMode(conf)

+	// When OVN is rolling out, OVN IPsec might be fully or partially active or inactive.
+	// If MachineConfigs are not present, we know it's inactive since we only stop rendering them once inactive.
+	isOVNIPsecActive := isOVNIPsecActiveOrRollingOut && (isCNOIPsecMachineConfigPresent || isUserDefinedIPsecMachineConfigPresent || isHypershiftHostedCluster)
+
 	// We render the ipsec deployment if IPsec is already active in OVN
 	// or if EW IPsec config is enabled.
 	renderIPsecDaemonSet = isOVNIPsecActive || mode == operv1.IPsecModeFull
@@ -622,9 +629,6 @@ func shouldRenderIPsec(conf *operv1.OVNKubernetesConfig, bootstrapResult *bootst
 	// - not needed when the containerized deployment is used in hypershift
 	//   hosted clusters
 	// - not needed if the user already created their own
-	isMachineConfigClusterOperatorReady := bootstrapResult.Infra.MachineConfigClusterOperatorReady
-	isCNOIPsecMachineConfigPresent := isCNOIPsecMachineConfigPresent(bootstrapResult.Infra)
-	isUserDefinedIPsecMachineConfigPresent := isUserDefinedIPsecMachineConfigPresent(bootstrapResult.Infra)
 	renderCNOIPsecMachineConfig = (mode != operv1.IPsecModeDisabled || renderIPsecDaemonSet) &&
 		!isHypershiftHostedCluster && !isUserDefinedIPsecMachineConfigPresent
 	// Wait for MCO to be ready unless we had already rendered the IPsec MachineConfig.
@@ -649,7 +653,7 @@ func shouldRenderIPsec(conf *operv1.OVNKubernetesConfig, bootstrapResult *bootst
 	renderIPsecContainerizedDaemonSet = (renderIPsecDaemonSet && isHypershiftHostedCluster) || isIPsecMachineConfigNotActiveOnLegacyUpgrade

-	// We render OVN IPsec if EW IPsec is enabled not before the daemon sets are
+	// We render OVN IPsec if EW IPsec is enabled and before the daemon sets are
 	// rendered. If it is already rendered, keep it rendered unless disabled.
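To see the pieces of this hunk together before the diff continues: after PATCH 6/6, OVN IPsec stays rendered whenever the API still says Full and either a daemonset is being rendered or IPsec is believed active — where "active" now additionally requires that the IPsec MachineConfigs are still present (or the cluster is hosted). A simplified, self-contained Go sketch of that gate (illustrative names, not the operator's actual types):

    package main

    import "fmt"

    // inputs mirrors the bootstrap-derived flags used by shouldRenderIPsec.
    type inputs struct {
    	modeFull             bool // ipsecConfig.mode == "Full" in the API
    	activeOrRollingOut   bool // ovnkube-node reports IPsec active or is progressing
    	machineConfigPresent bool // CNO- or user-managed IPsec MachineConfig exists
    	hypershift           bool // hosted control plane: no MachineConfig API
    	hostDS, ctrDS        bool // daemonsets selected for rendering
    }

    // renderIPsecOVN sketches the patched rule: a progressing rollout alone is
    // not proof that IPsec is active; trust it only while the MachineConfigs
    // are still rendered (they are removed only after OVN IPsec is disabled).
    func renderIPsecOVN(in inputs) bool {
    	isOVNIPsecActive := in.activeOrRollingOut && (in.machineConfigPresent || in.hypershift)
    	return (in.hostDS || in.ctrDS || isOVNIPsecActive) && in.modeFull
    }

    func main() {
    	// Disabling: machine configs already gone, so the rollout signal is ignored.
    	fmt.Println(renderIPsecOVN(inputs{modeFull: false, activeOrRollingOut: true})) // false
    	// Node reboot in Full mode with machine configs present: stays enabled.
    	fmt.Println(renderIPsecOVN(inputs{modeFull: true, activeOrRollingOut: true, machineConfigPresent: true})) // true
    }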
 	renderIPsecOVN = (renderIPsecHostDaemonSet || renderIPsecContainerizedDaemonSet || isOVNIPsecActive) && mode == operv1.IPsecModeFull
@@ -1168,7 +1172,7 @@ func bootstrapOVN(conf *operv1.Network, kubeClient cnoclient.Client, infraStatus
 		nodeStatus.Progressing = daemonSetProgressing(nodeDaemonSet, true)
 		// Retrieve OVN IPsec status from ovnkube-node daemonset as this is being used to rollout IPsec
 		// config from 4.14.
-		ovnIPsecStatus.OVNIPsecActive = !isOVNIPsecNotActiveInDaemonSet(nodeDaemonSet)
+		ovnIPsecStatus.IsOVNIPsecActiveOrRollingOut = !isOVNIPsecNotActiveInDaemonSet(nodeDaemonSet)

 		klog.Infof("ovnkube-node DaemonSet status: progressing=%t", nodeStatus.Progressing)
 	}
@@ -1227,9 +1231,6 @@ func bootstrapOVN(conf *operv1.Network, kubeClient cnoclient.Client, infraStatus
 	if ipsecContainerizedDaemonSet != nil && ipsecHostDaemonSet != nil {
 		// Both IPsec daemonset versions exist, so this is an upgrade from 4.14.
 		ovnIPsecStatus.LegacyIPsecUpgrade = true
-	} else if ipsecContainerizedDaemonSet == nil && ipsecHostDaemonSet == nil {
-		// set OVN IPsec status to nil since none of the IPsec daemonset(s) exists in the cluster.
-		ovnIPsecStatus = nil
 	}

 	res := bootstrap.OVNBootstrapResult{

diff --git a/pkg/network/ovn_kubernetes_test.go b/pkg/network/ovn_kubernetes_test.go
index f06fdf8bc9..995be91bde 100644
--- a/pkg/network/ovn_kubernetes_test.go
+++ b/pkg/network/ovn_kubernetes_test.go
@@ -2729,8 +2729,8 @@ func TestRenderOVNKubernetesIPsecUpgradeWithMachineConfig(t *testing.T) {
 			IPFamilyMode: names.IPFamilySingleStack,
 		},
 		IPsecUpdateStatus: &bootstrap.OVNIPsecStatus{
-			LegacyIPsecUpgrade: true,
-			OVNIPsecActive:     true,
+			LegacyIPsecUpgrade:           true,
+			IsOVNIPsecActiveOrRollingOut: true,
 		},
 		OVNKubernetesConfig: &bootstrap.OVNConfigBoostrapResult{
 			DpuHostModeLabel: OVN_NODE_SELECTOR_DEFAULT_DPU_HOST,
@@ -2844,8 +2844,8 @@ func TestRenderOVNKubernetesIPsecUpgradeWithNoMachineConfig(t *testing.T) {
 			IPFamilyMode: names.IPFamilySingleStack,
 		},
 		IPsecUpdateStatus: &bootstrap.OVNIPsecStatus{
-			LegacyIPsecUpgrade: true,
-			OVNIPsecActive:     true,
+			LegacyIPsecUpgrade:           true,
+			IsOVNIPsecActiveOrRollingOut: true,
 		},
 		OVNKubernetesConfig: &bootstrap.OVNConfigBoostrapResult{
 			DpuHostModeLabel: OVN_NODE_SELECTOR_DEFAULT_DPU_HOST,
@@ -2995,8 +2995,8 @@ func TestRenderOVNKubernetesIPsecUpgradeWithHypershiftHostedCluster(t *testing.T
 			IPFamilyMode: names.IPFamilySingleStack,
 		},
 		IPsecUpdateStatus: &bootstrap.OVNIPsecStatus{
-			LegacyIPsecUpgrade: true,
-			OVNIPsecActive:     true,
+			LegacyIPsecUpgrade:           true,
+			IsOVNIPsecActiveOrRollingOut: true,
 		},
 		OVNKubernetesConfig: &bootstrap.OVNConfigBoostrapResult{
 			DpuHostModeLabel: OVN_NODE_SELECTOR_DEFAULT_DPU_HOST,
@@ -3101,7 +3101,7 @@ func TestRenderOVNKubernetesDisableIPsec(t *testing.T) {
 			Progressing: false,
 		},
 		IPsecUpdateStatus: &bootstrap.OVNIPsecStatus{
-			OVNIPsecActive: true,
+			IsOVNIPsecActiveOrRollingOut: true,
 		},
 		OVNKubernetesConfig: &bootstrap.OVNConfigBoostrapResult{
 			DpuHostModeLabel: OVN_NODE_SELECTOR_DEFAULT_DPU_HOST,
@@ -3160,7 +3160,7 @@ func TestRenderOVNKubernetesDisableIPsec(t *testing.T) {
 	}

 	// Ensure renderOVNKubernetes removes MachineConfigs and IPsec daemonset.
-	bootstrapResult.OVN.IPsecUpdateStatus.OVNIPsecActive = false
+	bootstrapResult.OVN.IPsecUpdateStatus.IsOVNIPsecActiveOrRollingOut = false
 	objs, progressing, err = renderOVNKubernetes(config, bootstrapResult, manifestDirOvn, fakeClient, featureGatesCNO)
 	if err != nil {
 		t.Errorf("Unexpected error: %v", err)
 	}
@@ -3186,6 +3186,66 @@ func TestRenderOVNKubernetesDisableIPsec(t *testing.T) {
 	if _, ok := renderedNode.GetAnnotations()[names.IPsecEnableAnnotation]; ok {
 		t.Errorf("ovnkube-node DaemonSet shouldn't have ipsec-enabled annotation, but it does")
 	}
+
+	// While IPsec machine config removal is in progress, ensure MachineConfigs and IPsec daemonset are not rendered.
+	bootstrapResult.OVN.IPsecUpdateStatus.IsOVNIPsecActiveOrRollingOut = true
+	bootstrapResult.Infra.MasterMCPStatuses = []mcfgv1.MachineConfigPoolStatus{{MachineCount: 1, ReadyMachineCount: 0, UpdatedMachineCount: 0,
+		Configuration: mcfgv1.MachineConfigPoolStatusConfiguration{}}}
+	bootstrapResult.Infra.WorkerMCPStatuses = []mcfgv1.MachineConfigPoolStatus{{MachineCount: 1, ReadyMachineCount: 1, UpdatedMachineCount: 1,
+		Configuration: mcfgv1.MachineConfigPoolStatusConfiguration{}}}
+	bootstrapResult.Infra.MasterIPsecMachineConfigs = []*mcfgv1.MachineConfig{{}}
+	bootstrapResult.Infra.WorkerIPsecMachineConfigs = []*mcfgv1.MachineConfig{{}}
+	objs, progressing, err = renderOVNKubernetes(config, bootstrapResult, manifestDirOvn, fakeClient, featureGatesCNO)
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	g.Expect(progressing).To(BeFalse())
+	renderedMasterIPsecExtension = findInObjs("machineconfiguration.openshift.io", "MachineConfig", masterMachineConfigIPsecExtName, "", objs)
+	if renderedMasterIPsecExtension != nil {
+		t.Errorf("The MachineConfig %s must not exist, but it's available", masterMachineConfigIPsecExtName)
+	}
+	renderedWorkerIPsecExtension = findInObjs("machineconfiguration.openshift.io", "MachineConfig", workerMachineConfigIPsecExtName, "", objs)
+	if renderedWorkerIPsecExtension != nil {
+		t.Errorf("The MachineConfig %s must not exist, but it's available", workerMachineConfigIPsecExtName)
+	}
+	// Ensure ovn-ipsec-host daemonset is removed and ovnkube-node doesn't contain ipsec-enabled annotation.
+	renderedIPsec = findInObjs("apps", "DaemonSet", "ovn-ipsec-host", "openshift-ovn-kubernetes", objs)
+	if renderedIPsec != nil {
+		t.Errorf("ovn-ipsec-host DaemonSet must not exist, but it's available")
+	}
+	renderedNode = findInObjs("apps", "DaemonSet", "ovnkube-node", "openshift-ovn-kubernetes", objs)
+	if renderedNode == nil {
+		t.Errorf("ovnkube-node DaemonSet must exist, but it's not available")
+	}
+	if _, ok := renderedNode.GetAnnotations()[names.IPsecEnableAnnotation]; ok {
+		t.Errorf("ovnkube-node DaemonSet shouldn't have ipsec-enabled annotation, but it does")
+	}
+
+	// Ensure MachineConfigs and IPsec daemonset are not rendered once machine config pools settle.
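+	// (A pool counts as settled when status.MachineCount equals status.UpdatedMachineCount,
+	// the machine pool condition quoted in the PATCH 4/6 commit message.)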
+	bootstrapResult.OVN.IPsecUpdateStatus.IsOVNIPsecActiveOrRollingOut = false
+	bootstrapResult.Infra.MasterMCPStatuses = []mcfgv1.MachineConfigPoolStatus{{MachineCount: 1, ReadyMachineCount: 1, UpdatedMachineCount: 1,
+		Configuration: mcfgv1.MachineConfigPoolStatusConfiguration{}}}
+	g.Expect(progressing).To(BeFalse())
+	renderedMasterIPsecExtension = findInObjs("machineconfiguration.openshift.io", "MachineConfig", masterMachineConfigIPsecExtName, "", objs)
+	if renderedMasterIPsecExtension != nil {
+		t.Errorf("The MachineConfig %s must not exist, but it's available", masterMachineConfigIPsecExtName)
+	}
+	renderedWorkerIPsecExtension = findInObjs("machineconfiguration.openshift.io", "MachineConfig", workerMachineConfigIPsecExtName, "", objs)
+	if renderedWorkerIPsecExtension != nil {
+		t.Errorf("The MachineConfig %s must not exist, but it's available", workerMachineConfigIPsecExtName)
+	}
+	// Ensure ovn-ipsec-host daemonset is removed and ovnkube-node doesn't contain ipsec-enabled annotation.
+	renderedIPsec = findInObjs("apps", "DaemonSet", "ovn-ipsec-host", "openshift-ovn-kubernetes", objs)
+	if renderedIPsec != nil {
+		t.Errorf("ovn-ipsec-host DaemonSet must not exist, but it's available")
+	}
+	renderedNode = findInObjs("apps", "DaemonSet", "ovnkube-node", "openshift-ovn-kubernetes", objs)
+	if renderedNode == nil {
+		t.Errorf("ovnkube-node DaemonSet must exist, but it's not available")
+	}
+	if _, ok := renderedNode.GetAnnotations()[names.IPsecEnableAnnotation]; ok {
+		t.Errorf("ovnkube-node DaemonSet shouldn't have ipsec-enabled annotation, but it does")
+	}
 }

 func TestRenderOVNKubernetesEnableIPsecWithUserInstalledIPsecMachineConfigs(t *testing.T) {
@@ -3377,7 +3437,7 @@ func TestRenderOVNKubernetesDisableIPsecWithUserInstalledIPsecMachineConfigs(t *
 			Progressing: false,
 		},
 		IPsecUpdateStatus: &bootstrap.OVNIPsecStatus{
-			OVNIPsecActive: true,
+			IsOVNIPsecActiveOrRollingOut: true,
 		},
 		OVNKubernetesConfig: &bootstrap.OVNConfigBoostrapResult{
 			DpuHostModeLabel: OVN_NODE_SELECTOR_DEFAULT_DPU_HOST,
@@ -3435,7 +3495,7 @@ func TestRenderOVNKubernetesDisableIPsecWithUserInstalledIPsecMachineConfigs(t *
 	}

 	// Ensure renderOVNKubernetes removes IPsec daemonset.
-	bootstrapResult.OVN.IPsecUpdateStatus.OVNIPsecActive = false
+	bootstrapResult.OVN.IPsecUpdateStatus.IsOVNIPsecActiveOrRollingOut = false
 	objs, progressing, err = renderOVNKubernetes(config, bootstrapResult, manifestDirOvn, fakeClient, featureGatesCNO)
 	if err != nil {
 		t.Errorf("Unexpected error: %v", err)
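Closing illustration: PATCH 5/6 and 6/6 converge on one bootstrap pattern — probe each IPsec daemonset, treat IsNotFound as absence rather than failure, and flag the legacy upgrade only when both variants coexist. A compilable Go sketch of that pattern, assuming a controller-runtime client (this is not the operator's actual helper; bootstrapOVN inlines the equivalent logic):

    package sketch

    import (
    	"context"
    	"fmt"

    	appsv1 "k8s.io/api/apps/v1"
    	apierrors "k8s.io/apimachinery/pkg/api/errors"
    	"k8s.io/apimachinery/pkg/types"
    	client "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // probeDaemonSet returns the daemonset if present, nil if absent, and an
    // error only for real API failures.
    func probeDaemonSet(c client.Client, namespace, name string) (*appsv1.DaemonSet, error) {
    	ds := &appsv1.DaemonSet{}
    	err := c.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: name}, ds)
    	if apierrors.IsNotFound(err) {
    		return nil, nil
    	}
    	if err != nil {
    		return nil, fmt.Errorf("failed to retrieve %s DaemonSet: %w", name, err)
    	}
    	return ds, nil
    }

    // isLegacyUpgrade marks the 4.14 upgrade window: both daemonset variants
    // exist at once. When neither exists, there is simply no IPsec deployment,
    // which after PATCH 6/6 no longer nils out the whole status.
    func isLegacyUpgrade(containerized, host *appsv1.DaemonSet) bool {
    	return containerized != nil && host != nil
    }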