diff --git a/clients/rancher/generated/management/v3/zz_generated_active_directory_config.go b/clients/rancher/generated/management/v3/zz_generated_active_directory_config.go index d72dd760..2966043a 100644 --- a/clients/rancher/generated/management/v3/zz_generated_active_directory_config.go +++ b/clients/rancher/generated/management/v3/zz_generated_active_directory_config.go @@ -21,7 +21,6 @@ const ( ActiveDirectoryConfigFieldGroupSearchFilter = "groupSearchFilter" ActiveDirectoryConfigFieldGroupUniqueIDAttribute = "groupUniqueIdAttribute" ActiveDirectoryConfigFieldLabels = "labels" - ActiveDirectoryConfigFieldLogoutAllSupported = "logoutAllSupported" ActiveDirectoryConfigFieldName = "name" ActiveDirectoryConfigFieldNestedGroupMembershipEnabled = "nestedGroupMembershipEnabled" ActiveDirectoryConfigFieldOwnerReferences = "ownerReferences" @@ -66,7 +65,6 @@ type ActiveDirectoryConfig struct { GroupSearchFilter string `json:"groupSearchFilter,omitempty" yaml:"groupSearchFilter,omitempty"` GroupUniqueIDAttribute string `json:"groupUniqueIdAttribute,omitempty" yaml:"groupUniqueIdAttribute,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` NestedGroupMembershipEnabled *bool `json:"nestedGroupMembershipEnabled,omitempty" yaml:"nestedGroupMembershipEnabled,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_adfs_config.go b/clients/rancher/generated/management/v3/zz_generated_adfs_config.go index c2a68881..5170ec83 100644 --- a/clients/rancher/generated/management/v3/zz_generated_adfs_config.go +++ b/clients/rancher/generated/management/v3/zz_generated_adfs_config.go @@ -13,9 +13,6 @@ const ( ADFSConfigFieldGroupsField = "groupsField" ADFSConfigFieldIDPMetadataContent = "idpMetadataContent" ADFSConfigFieldLabels = "labels" - ADFSConfigFieldLogoutAllEnabled = "logoutAllEnabled" - ADFSConfigFieldLogoutAllForced = "logoutAllForced" - ADFSConfigFieldLogoutAllSupported = "logoutAllSupported" ADFSConfigFieldName = "name" ADFSConfigFieldOwnerReferences = "ownerReferences" ADFSConfigFieldRancherAPIHost = "rancherApiHost" @@ -41,9 +38,6 @@ type ADFSConfig struct { GroupsField string `json:"groupsField,omitempty" yaml:"groupsField,omitempty"` IDPMetadataContent string `json:"idpMetadataContent,omitempty" yaml:"idpMetadataContent,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllEnabled bool `json:"logoutAllEnabled,omitempty" yaml:"logoutAllEnabled,omitempty"` - LogoutAllForced bool `json:"logoutAllForced,omitempty" yaml:"logoutAllForced,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` RancherAPIHost string `json:"rancherApiHost,omitempty" yaml:"rancherApiHost,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_aliyun_smsconfig.go b/clients/rancher/generated/management/v3/zz_generated_aliyun_smsconfig.go deleted file mode 100644 index b3549c50..00000000 --- a/clients/rancher/generated/management/v3/zz_generated_aliyun_smsconfig.go +++ /dev/null @@ -1,18 +0,0 @@ -package client - -const ( - 
AliyunSMSConfigType = "aliyunSMSConfig" - AliyunSMSConfigFieldAccessKeyID = "accessKeyID" - AliyunSMSConfigFieldAccessKeySecret = "accessKeySecret" - AliyunSMSConfigFieldSignName = "signName" - AliyunSMSConfigFieldTemplateCode = "templateCode" - AliyunSMSConfigFieldTo = "to" -) - -type AliyunSMSConfig struct { - AccessKeyID string `json:"accessKeyID,omitempty" yaml:"accessKeyID,omitempty"` - AccessKeySecret string `json:"accessKeySecret,omitempty" yaml:"accessKeySecret,omitempty"` - SignName string `json:"signName,omitempty" yaml:"signName,omitempty"` - TemplateCode string `json:"templateCode,omitempty" yaml:"templateCode,omitempty"` - To []string `json:"to,omitempty" yaml:"to,omitempty"` -} diff --git a/clients/rancher/generated/management/v3/zz_generated_auth_config.go b/clients/rancher/generated/management/v3/zz_generated_auth_config.go index 04206bae..ac6c47bb 100644 --- a/clients/rancher/generated/management/v3/zz_generated_auth_config.go +++ b/clients/rancher/generated/management/v3/zz_generated_auth_config.go @@ -13,7 +13,6 @@ const ( AuthConfigFieldCreatorID = "creatorId" AuthConfigFieldEnabled = "enabled" AuthConfigFieldLabels = "labels" - AuthConfigFieldLogoutAllSupported = "logoutAllSupported" AuthConfigFieldName = "name" AuthConfigFieldOwnerReferences = "ownerReferences" AuthConfigFieldRemoved = "removed" @@ -31,7 +30,6 @@ type AuthConfig struct { CreatorID string `json:"creatorId,omitempty" yaml:"creatorId,omitempty"` Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` Removed string `json:"removed,omitempty" yaml:"removed,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_azure_adconfig.go b/clients/rancher/generated/management/v3/zz_generated_azure_adconfig.go index a332a1c3..4de1b3f6 100644 --- a/clients/rancher/generated/management/v3/zz_generated_azure_adconfig.go +++ b/clients/rancher/generated/management/v3/zz_generated_azure_adconfig.go @@ -16,7 +16,6 @@ const ( AzureADConfigFieldGraphEndpoint = "graphEndpoint" AzureADConfigFieldGroupMembershipFilter = "groupMembershipFilter" AzureADConfigFieldLabels = "labels" - AzureADConfigFieldLogoutAllSupported = "logoutAllSupported" AzureADConfigFieldName = "name" AzureADConfigFieldOwnerReferences = "ownerReferences" AzureADConfigFieldRancherURL = "rancherUrl" @@ -43,7 +42,6 @@ type AzureADConfig struct { GraphEndpoint string `json:"graphEndpoint,omitempty" yaml:"graphEndpoint,omitempty"` GroupMembershipFilter string `json:"groupMembershipFilter,omitempty" yaml:"groupMembershipFilter,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` RancherURL string `json:"rancherUrl,omitempty" yaml:"rancherUrl,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_basic_auth.go b/clients/rancher/generated/management/v3/zz_generated_basic_auth.go deleted file mode 100644 index 8d97d8cc..00000000 --- 
a/clients/rancher/generated/management/v3/zz_generated_basic_auth.go +++ /dev/null @@ -1,12 +0,0 @@ -package client - -const ( - BasicAuthType = "basicAuth" - BasicAuthFieldPassword = "password" - BasicAuthFieldUsername = "username" -) - -type BasicAuth struct { - Password string `json:"password,omitempty" yaml:"password,omitempty"` - Username string `json:"username,omitempty" yaml:"username,omitempty"` -} diff --git a/clients/rancher/generated/management/v3/zz_generated_cluster.go b/clients/rancher/generated/management/v3/zz_generated_cluster.go index af2b5500..52df221b 100644 --- a/clients/rancher/generated/management/v3/zz_generated_cluster.go +++ b/clients/rancher/generated/management/v3/zz_generated_cluster.go @@ -50,8 +50,6 @@ const ( ClusterFieldDriver = "driver" ClusterFieldEKSConfig = "eksConfig" ClusterFieldEKSStatus = "eksStatus" - ClusterFieldEnableClusterAlerting = "enableClusterAlerting" - ClusterFieldEnableClusterMonitoring = "enableClusterMonitoring" ClusterFieldEnableGPUManagement = "enableGPUManagement" ClusterFieldEnableNetworkPolicy = "enableNetworkPolicy" ClusterFieldExternalFlags = "externalFlags" @@ -143,8 +141,6 @@ type Cluster struct { Driver string `json:"driver,omitempty" yaml:"driver,omitempty"` EKSConfig *EKSClusterConfigSpec `json:"eksConfig,omitempty" yaml:"eksConfig,omitempty"` EKSStatus *EKSStatus `json:"eksStatus,omitempty" yaml:"eksStatus,omitempty"` - EnableClusterAlerting bool `json:"enableClusterAlerting,omitempty" yaml:"enableClusterAlerting,omitempty"` - EnableClusterMonitoring bool `json:"enableClusterMonitoring,omitempty" yaml:"enableClusterMonitoring,omitempty"` EnableGPUManagement bool `json:"enableGPUManagement,omitempty" yaml:"enableGPUManagement,omitempty"` EnableNetworkPolicy *bool `json:"enableNetworkPolicy,omitempty" yaml:"enableNetworkPolicy,omitempty"` ExternalFlags *ExternalFlags `json:"externalFlags,omitempty" yaml:"externalFlags,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_cluster_spec.go b/clients/rancher/generated/management/v3/zz_generated_cluster_spec.go index 491eaea0..f7f4b289 100644 --- a/clients/rancher/generated/management/v3/zz_generated_cluster_spec.go +++ b/clients/rancher/generated/management/v3/zz_generated_cluster_spec.go @@ -23,8 +23,6 @@ const ( ClusterSpecFieldDisplayName = "displayName" ClusterSpecFieldDockerRootDir = "dockerRootDir" ClusterSpecFieldEKSConfig = "eksConfig" - ClusterSpecFieldEnableClusterAlerting = "enableClusterAlerting" - ClusterSpecFieldEnableClusterMonitoring = "enableClusterMonitoring" ClusterSpecFieldEnableGPUManagement = "enableGPUManagement" ClusterSpecFieldEnableNetworkPolicy = "enableNetworkPolicy" ClusterSpecFieldExternalFlags = "externalFlags" @@ -67,8 +65,6 @@ type ClusterSpec struct { DisplayName string `json:"displayName,omitempty" yaml:"displayName,omitempty"` DockerRootDir string `json:"dockerRootDir,omitempty" yaml:"dockerRootDir,omitempty"` EKSConfig *EKSClusterConfigSpec `json:"eksConfig,omitempty" yaml:"eksConfig,omitempty"` - EnableClusterAlerting bool `json:"enableClusterAlerting,omitempty" yaml:"enableClusterAlerting,omitempty"` - EnableClusterMonitoring bool `json:"enableClusterMonitoring,omitempty" yaml:"enableClusterMonitoring,omitempty"` EnableGPUManagement bool `json:"enableGPUManagement,omitempty" yaml:"enableGPUManagement,omitempty"` EnableNetworkPolicy *bool `json:"enableNetworkPolicy,omitempty" yaml:"enableNetworkPolicy,omitempty"` ExternalFlags *ExternalFlags `json:"externalFlags,omitempty" yaml:"externalFlags,omitempty"` diff --git 
a/clients/rancher/generated/management/v3/zz_generated_cluster_spec_base.go b/clients/rancher/generated/management/v3/zz_generated_cluster_spec_base.go index afcb1780..3a323166 100644 --- a/clients/rancher/generated/management/v3/zz_generated_cluster_spec_base.go +++ b/clients/rancher/generated/management/v3/zz_generated_cluster_spec_base.go @@ -11,8 +11,6 @@ const ( ClusterSpecBaseFieldDesiredAgentImage = "desiredAgentImage" ClusterSpecBaseFieldDesiredAuthImage = "desiredAuthImage" ClusterSpecBaseFieldDockerRootDir = "dockerRootDir" - ClusterSpecBaseFieldEnableClusterAlerting = "enableClusterAlerting" - ClusterSpecBaseFieldEnableClusterMonitoring = "enableClusterMonitoring" ClusterSpecBaseFieldEnableGPUManagement = "enableGPUManagement" ClusterSpecBaseFieldEnableNetworkPolicy = "enableNetworkPolicy" ClusterSpecBaseFieldFleetAgentDeploymentCustomization = "fleetAgentDeploymentCustomization" @@ -33,8 +31,6 @@ type ClusterSpecBase struct { DesiredAgentImage string `json:"desiredAgentImage,omitempty" yaml:"desiredAgentImage,omitempty"` DesiredAuthImage string `json:"desiredAuthImage,omitempty" yaml:"desiredAuthImage,omitempty"` DockerRootDir string `json:"dockerRootDir,omitempty" yaml:"dockerRootDir,omitempty"` - EnableClusterAlerting bool `json:"enableClusterAlerting,omitempty" yaml:"enableClusterAlerting,omitempty"` - EnableClusterMonitoring bool `json:"enableClusterMonitoring,omitempty" yaml:"enableClusterMonitoring,omitempty"` EnableGPUManagement bool `json:"enableGPUManagement,omitempty" yaml:"enableGPUManagement,omitempty"` EnableNetworkPolicy *bool `json:"enableNetworkPolicy,omitempty" yaml:"enableNetworkPolicy,omitempty"` FleetAgentDeploymentCustomization *AgentDeploymentCustomization `json:"fleetAgentDeploymentCustomization,omitempty" yaml:"fleetAgentDeploymentCustomization,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_extra_alert_data.go b/clients/rancher/generated/management/v3/zz_generated_extra_alert_data.go deleted file mode 100644 index 0592aa73..00000000 --- a/clients/rancher/generated/management/v3/zz_generated_extra_alert_data.go +++ /dev/null @@ -1,16 +0,0 @@ -package client - -const ( - ExtraAlertDataType = "extraAlertData" - ExtraAlertDataFieldSourceType = "sourceType" - ExtraAlertDataFieldSourceValue = "sourceValue" - ExtraAlertDataFieldTargetKey = "targetKey" - ExtraAlertDataFieldTargetType = "targetType" -) - -type ExtraAlertData struct { - SourceType string `json:"sourceType,omitempty" yaml:"sourceType,omitempty"` - SourceValue string `json:"sourceValue,omitempty" yaml:"sourceValue,omitempty"` - TargetKey string `json:"targetKey,omitempty" yaml:"targetKey,omitempty"` - TargetType string `json:"targetType,omitempty" yaml:"targetType,omitempty"` -} diff --git a/clients/rancher/generated/management/v3/zz_generated_free_ipa_config.go b/clients/rancher/generated/management/v3/zz_generated_free_ipa_config.go index 805189d8..09dba452 100644 --- a/clients/rancher/generated/management/v3/zz_generated_free_ipa_config.go +++ b/clients/rancher/generated/management/v3/zz_generated_free_ipa_config.go @@ -20,7 +20,6 @@ const ( FreeIpaConfigFieldGroupSearchFilter = "groupSearchFilter" FreeIpaConfigFieldGroupUniqueIDAttribute = "groupUniqueIdAttribute" FreeIpaConfigFieldLabels = "labels" - FreeIpaConfigFieldLogoutAllSupported = "logoutAllSupported" FreeIpaConfigFieldName = "name" FreeIpaConfigFieldOwnerReferences = "ownerReferences" FreeIpaConfigFieldPort = "port" @@ -64,7 +63,6 @@ type FreeIpaConfig struct { GroupSearchFilter string 
`json:"groupSearchFilter,omitempty" yaml:"groupSearchFilter,omitempty"` GroupUniqueIDAttribute string `json:"groupUniqueIdAttribute,omitempty" yaml:"groupUniqueIdAttribute,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` Port int64 `json:"port,omitempty" yaml:"port,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_github_config.go b/clients/rancher/generated/management/v3/zz_generated_github_config.go index fda235ac..04dc4531 100644 --- a/clients/rancher/generated/management/v3/zz_generated_github_config.go +++ b/clients/rancher/generated/management/v3/zz_generated_github_config.go @@ -14,7 +14,6 @@ const ( GithubConfigFieldHostname = "hostname" GithubConfigFieldHostnameToClientID = "hostnameToClientId" GithubConfigFieldLabels = "labels" - GithubConfigFieldLogoutAllSupported = "logoutAllSupported" GithubConfigFieldName = "name" GithubConfigFieldOwnerReferences = "ownerReferences" GithubConfigFieldRemoved = "removed" @@ -37,7 +36,6 @@ type GithubConfig struct { Hostname string `json:"hostname,omitempty" yaml:"hostname,omitempty"` HostnameToClientID map[string]string `json:"hostnameToClientId,omitempty" yaml:"hostnameToClientId,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` Removed string `json:"removed,omitempty" yaml:"removed,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_google_oauth_config.go b/clients/rancher/generated/management/v3/zz_generated_google_oauth_config.go index 8939cd9f..f4414229 100644 --- a/clients/rancher/generated/management/v3/zz_generated_google_oauth_config.go +++ b/clients/rancher/generated/management/v3/zz_generated_google_oauth_config.go @@ -11,7 +11,6 @@ const ( GoogleOauthConfigFieldEnabled = "enabled" GoogleOauthConfigFieldHostname = "hostname" GoogleOauthConfigFieldLabels = "labels" - GoogleOauthConfigFieldLogoutAllSupported = "logoutAllSupported" GoogleOauthConfigFieldName = "name" GoogleOauthConfigFieldNestedGroupMembershipEnabled = "nestedGroupMembershipEnabled" GoogleOauthConfigFieldOauthCredential = "oauthCredential" @@ -34,7 +33,6 @@ type GoogleOauthConfig struct { Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` Hostname string `json:"hostname,omitempty" yaml:"hostname,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` NestedGroupMembershipEnabled bool `json:"nestedGroupMembershipEnabled,omitempty" yaml:"nestedGroupMembershipEnabled,omitempty"` OauthCredential string `json:"oauthCredential,omitempty" yaml:"oauthCredential,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_key_cloak_config.go b/clients/rancher/generated/management/v3/zz_generated_key_cloak_config.go index e67d0920..7a2b3925 100644 --- a/clients/rancher/generated/management/v3/zz_generated_key_cloak_config.go +++ 
b/clients/rancher/generated/management/v3/zz_generated_key_cloak_config.go @@ -13,9 +13,6 @@ const ( KeyCloakConfigFieldGroupsField = "groupsField" KeyCloakConfigFieldIDPMetadataContent = "idpMetadataContent" KeyCloakConfigFieldLabels = "labels" - KeyCloakConfigFieldLogoutAllEnabled = "logoutAllEnabled" - KeyCloakConfigFieldLogoutAllForced = "logoutAllForced" - KeyCloakConfigFieldLogoutAllSupported = "logoutAllSupported" KeyCloakConfigFieldName = "name" KeyCloakConfigFieldOwnerReferences = "ownerReferences" KeyCloakConfigFieldRancherAPIHost = "rancherApiHost" @@ -41,9 +38,6 @@ type KeyCloakConfig struct { GroupsField string `json:"groupsField,omitempty" yaml:"groupsField,omitempty"` IDPMetadataContent string `json:"idpMetadataContent,omitempty" yaml:"idpMetadataContent,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllEnabled bool `json:"logoutAllEnabled,omitempty" yaml:"logoutAllEnabled,omitempty"` - LogoutAllForced bool `json:"logoutAllForced,omitempty" yaml:"logoutAllForced,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` RancherAPIHost string `json:"rancherApiHost,omitempty" yaml:"rancherApiHost,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_key_cloak_oidcconfig.go b/clients/rancher/generated/management/v3/zz_generated_key_cloak_oidcconfig.go index 05cd3142..a0ed6436 100644 --- a/clients/rancher/generated/management/v3/zz_generated_key_cloak_oidcconfig.go +++ b/clients/rancher/generated/management/v3/zz_generated_key_cloak_oidcconfig.go @@ -3,7 +3,6 @@ package client const ( KeyCloakOIDCConfigType = "keyCloakOIDCConfig" KeyCloakOIDCConfigFieldAccessMode = "accessMode" - KeyCloakOIDCConfigFieldAcrValue = "acrValue" KeyCloakOIDCConfigFieldAllowedPrincipalIDs = "allowedPrincipalIds" KeyCloakOIDCConfigFieldAnnotations = "annotations" KeyCloakOIDCConfigFieldAuthEndpoint = "authEndpoint" @@ -18,7 +17,6 @@ const ( KeyCloakOIDCConfigFieldIssuer = "issuer" KeyCloakOIDCConfigFieldJWKSUrl = "jwksUrl" KeyCloakOIDCConfigFieldLabels = "labels" - KeyCloakOIDCConfigFieldLogoutAllSupported = "logoutAllSupported" KeyCloakOIDCConfigFieldName = "name" KeyCloakOIDCConfigFieldOwnerReferences = "ownerReferences" KeyCloakOIDCConfigFieldPrivateKey = "privateKey" @@ -34,7 +32,6 @@ const ( type KeyCloakOIDCConfig struct { AccessMode string `json:"accessMode,omitempty" yaml:"accessMode,omitempty"` - AcrValue string `json:"acrValue,omitempty" yaml:"acrValue,omitempty"` AllowedPrincipalIDs []string `json:"allowedPrincipalIds,omitempty" yaml:"allowedPrincipalIds,omitempty"` Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` AuthEndpoint string `json:"authEndpoint,omitempty" yaml:"authEndpoint,omitempty"` @@ -49,7 +46,6 @@ type KeyCloakOIDCConfig struct { Issuer string `json:"issuer,omitempty" yaml:"issuer,omitempty"` JWKSUrl string `json:"jwksUrl,omitempty" yaml:"jwksUrl,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` PrivateKey string `json:"privateKey,omitempty" 
yaml:"privateKey,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_ldap_config.go b/clients/rancher/generated/management/v3/zz_generated_ldap_config.go index 6e270c91..0e1b312e 100644 --- a/clients/rancher/generated/management/v3/zz_generated_ldap_config.go +++ b/clients/rancher/generated/management/v3/zz_generated_ldap_config.go @@ -24,7 +24,6 @@ const ( LdapConfigFieldGroupSearchFilter = "groupSearchFilter" LdapConfigFieldGroupUniqueIDAttribute = "groupUniqueIdAttribute" LdapConfigFieldLabels = "labels" - LdapConfigFieldLogoutAllSupported = "logoutAllSupported" LdapConfigFieldName = "name" LdapConfigFieldNestedGroupMembershipEnabled = "nestedGroupMembershipEnabled" LdapConfigFieldOwnerReferences = "ownerReferences" @@ -70,7 +69,6 @@ type LdapConfig struct { GroupSearchFilter string `json:"groupSearchFilter,omitempty" yaml:"groupSearchFilter,omitempty"` GroupUniqueIDAttribute string `json:"groupUniqueIdAttribute,omitempty" yaml:"groupUniqueIdAttribute,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` NestedGroupMembershipEnabled bool `json:"nestedGroupMembershipEnabled,omitempty" yaml:"nestedGroupMembershipEnabled,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_local_config.go b/clients/rancher/generated/management/v3/zz_generated_local_config.go index 1b0933e9..ca4d5123 100644 --- a/clients/rancher/generated/management/v3/zz_generated_local_config.go +++ b/clients/rancher/generated/management/v3/zz_generated_local_config.go @@ -9,7 +9,6 @@ const ( LocalConfigFieldCreatorID = "creatorId" LocalConfigFieldEnabled = "enabled" LocalConfigFieldLabels = "labels" - LocalConfigFieldLogoutAllSupported = "logoutAllSupported" LocalConfigFieldName = "name" LocalConfigFieldOwnerReferences = "ownerReferences" LocalConfigFieldRemoved = "removed" @@ -26,7 +25,6 @@ type LocalConfig struct { CreatorID string `json:"creatorId,omitempty" yaml:"creatorId,omitempty"` Enabled bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` Removed string `json:"removed,omitempty" yaml:"removed,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_oidc_config.go b/clients/rancher/generated/management/v3/zz_generated_oidc_config.go index f6bc09bf..cd6e9d64 100644 --- a/clients/rancher/generated/management/v3/zz_generated_oidc_config.go +++ b/clients/rancher/generated/management/v3/zz_generated_oidc_config.go @@ -3,7 +3,6 @@ package client const ( OIDCConfigType = "oidcConfig" OIDCConfigFieldAccessMode = "accessMode" - OIDCConfigFieldAcrValue = "acrValue" OIDCConfigFieldAllowedPrincipalIDs = "allowedPrincipalIds" OIDCConfigFieldAnnotations = "annotations" OIDCConfigFieldAuthEndpoint = "authEndpoint" @@ -18,7 +17,6 @@ const ( OIDCConfigFieldIssuer = "issuer" OIDCConfigFieldJWKSUrl = "jwksUrl" OIDCConfigFieldLabels = "labels" - OIDCConfigFieldLogoutAllSupported = "logoutAllSupported" OIDCConfigFieldName = "name" 
OIDCConfigFieldOwnerReferences = "ownerReferences" OIDCConfigFieldPrivateKey = "privateKey" @@ -34,7 +32,6 @@ const ( type OIDCConfig struct { AccessMode string `json:"accessMode,omitempty" yaml:"accessMode,omitempty"` - AcrValue string `json:"acrValue,omitempty" yaml:"acrValue,omitempty"` AllowedPrincipalIDs []string `json:"allowedPrincipalIds,omitempty" yaml:"allowedPrincipalIds,omitempty"` Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` AuthEndpoint string `json:"authEndpoint,omitempty" yaml:"authEndpoint,omitempty"` @@ -49,7 +46,6 @@ type OIDCConfig struct { Issuer string `json:"issuer,omitempty" yaml:"issuer,omitempty"` JWKSUrl string `json:"jwksUrl,omitempty" yaml:"jwksUrl,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` PrivateKey string `json:"privateKey,omitempty" yaml:"privateKey,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_okta_config.go b/clients/rancher/generated/management/v3/zz_generated_okta_config.go index 1fb77e7a..4279acbc 100644 --- a/clients/rancher/generated/management/v3/zz_generated_okta_config.go +++ b/clients/rancher/generated/management/v3/zz_generated_okta_config.go @@ -13,9 +13,6 @@ const ( OKTAConfigFieldGroupsField = "groupsField" OKTAConfigFieldIDPMetadataContent = "idpMetadataContent" OKTAConfigFieldLabels = "labels" - OKTAConfigFieldLogoutAllEnabled = "logoutAllEnabled" - OKTAConfigFieldLogoutAllForced = "logoutAllForced" - OKTAConfigFieldLogoutAllSupported = "logoutAllSupported" OKTAConfigFieldName = "name" OKTAConfigFieldOpenLdapConfig = "openLdapConfig" OKTAConfigFieldOwnerReferences = "ownerReferences" @@ -42,9 +39,6 @@ type OKTAConfig struct { GroupsField string `json:"groupsField,omitempty" yaml:"groupsField,omitempty"` IDPMetadataContent string `json:"idpMetadataContent,omitempty" yaml:"idpMetadataContent,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllEnabled bool `json:"logoutAllEnabled,omitempty" yaml:"logoutAllEnabled,omitempty"` - LogoutAllForced bool `json:"logoutAllForced,omitempty" yaml:"logoutAllForced,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` OpenLdapConfig *LdapFields `json:"openLdapConfig,omitempty" yaml:"openLdapConfig,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_open_ldap_config.go b/clients/rancher/generated/management/v3/zz_generated_open_ldap_config.go index bb339b8e..48ee16f0 100644 --- a/clients/rancher/generated/management/v3/zz_generated_open_ldap_config.go +++ b/clients/rancher/generated/management/v3/zz_generated_open_ldap_config.go @@ -20,7 +20,6 @@ const ( OpenLdapConfigFieldGroupSearchFilter = "groupSearchFilter" OpenLdapConfigFieldGroupUniqueIDAttribute = "groupUniqueIdAttribute" OpenLdapConfigFieldLabels = "labels" - OpenLdapConfigFieldLogoutAllSupported = "logoutAllSupported" OpenLdapConfigFieldName = "name" OpenLdapConfigFieldNestedGroupMembershipEnabled = "nestedGroupMembershipEnabled" OpenLdapConfigFieldOwnerReferences = "ownerReferences" @@ -65,7 
+64,6 @@ type OpenLdapConfig struct { GroupSearchFilter string `json:"groupSearchFilter,omitempty" yaml:"groupSearchFilter,omitempty"` GroupUniqueIDAttribute string `json:"groupUniqueIdAttribute,omitempty" yaml:"groupUniqueIdAttribute,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` NestedGroupMembershipEnabled bool `json:"nestedGroupMembershipEnabled,omitempty" yaml:"nestedGroupMembershipEnabled,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_ping_config.go b/clients/rancher/generated/management/v3/zz_generated_ping_config.go index 42494b6b..3905905b 100644 --- a/clients/rancher/generated/management/v3/zz_generated_ping_config.go +++ b/clients/rancher/generated/management/v3/zz_generated_ping_config.go @@ -13,9 +13,6 @@ const ( PingConfigFieldGroupsField = "groupsField" PingConfigFieldIDPMetadataContent = "idpMetadataContent" PingConfigFieldLabels = "labels" - PingConfigFieldLogoutAllEnabled = "logoutAllEnabled" - PingConfigFieldLogoutAllForced = "logoutAllForced" - PingConfigFieldLogoutAllSupported = "logoutAllSupported" PingConfigFieldName = "name" PingConfigFieldOwnerReferences = "ownerReferences" PingConfigFieldRancherAPIHost = "rancherApiHost" @@ -41,9 +38,6 @@ type PingConfig struct { GroupsField string `json:"groupsField,omitempty" yaml:"groupsField,omitempty"` IDPMetadataContent string `json:"idpMetadataContent,omitempty" yaml:"idpMetadataContent,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllEnabled bool `json:"logoutAllEnabled,omitempty" yaml:"logoutAllEnabled,omitempty"` - LogoutAllForced bool `json:"logoutAllForced,omitempty" yaml:"logoutAllForced,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` RancherAPIHost string `json:"rancherApiHost,omitempty" yaml:"rancherApiHost,omitempty"` diff --git a/clients/rancher/generated/management/v3/zz_generated_project.go b/clients/rancher/generated/management/v3/zz_generated_project.go index 96bece29..c2deec71 100644 --- a/clients/rancher/generated/management/v3/zz_generated_project.go +++ b/clients/rancher/generated/management/v3/zz_generated_project.go @@ -7,7 +7,6 @@ import ( const ( ProjectType = "project" ProjectFieldAnnotations = "annotations" - ProjectFieldBackingNamespace = "backingNamespace" ProjectFieldClusterID = "clusterId" ProjectFieldConditions = "conditions" ProjectFieldContainerDefaultResourceLimit = "containerDefaultResourceLimit" @@ -30,7 +29,6 @@ const ( type Project struct { types.Resource Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` - BackingNamespace string `json:"backingNamespace,omitempty" yaml:"backingNamespace,omitempty"` ClusterID string `json:"clusterId,omitempty" yaml:"clusterId,omitempty"` Conditions []ProjectCondition `json:"conditions,omitempty" yaml:"conditions,omitempty"` ContainerDefaultResourceLimit *ContainerResourceLimit `json:"containerDefaultResourceLimit,omitempty" yaml:"containerDefaultResourceLimit,omitempty"` diff --git 
a/clients/rancher/generated/management/v3/zz_generated_project_status.go b/clients/rancher/generated/management/v3/zz_generated_project_status.go index 05d8ed48..7806c91b 100644 --- a/clients/rancher/generated/management/v3/zz_generated_project_status.go +++ b/clients/rancher/generated/management/v3/zz_generated_project_status.go @@ -1,12 +1,10 @@ package client const ( - ProjectStatusType = "projectStatus" - ProjectStatusFieldBackingNamespace = "backingNamespace" - ProjectStatusFieldConditions = "conditions" + ProjectStatusType = "projectStatus" + ProjectStatusFieldConditions = "conditions" ) type ProjectStatus struct { - BackingNamespace string `json:"backingNamespace,omitempty" yaml:"backingNamespace,omitempty"` - Conditions []ProjectCondition `json:"conditions,omitempty" yaml:"conditions,omitempty"` + Conditions []ProjectCondition `json:"conditions,omitempty" yaml:"conditions,omitempty"` } diff --git a/clients/rancher/generated/management/v3/zz_generated_service_now_config.go b/clients/rancher/generated/management/v3/zz_generated_service_now_config.go deleted file mode 100644 index 0590df4e..00000000 --- a/clients/rancher/generated/management/v3/zz_generated_service_now_config.go +++ /dev/null @@ -1,16 +0,0 @@ -package client - -const ( - ServiceNowConfigType = "serviceNowConfig" - ServiceNowConfigFieldBasicAuth = "basic_auth" - ServiceNowConfigFieldBearerToken = "bearer_token" - ServiceNowConfigFieldProxyURL = "proxyUrl" - ServiceNowConfigFieldURL = "url" -) - -type ServiceNowConfig struct { - BasicAuth *BasicAuth `json:"basic_auth,omitempty" yaml:"basic_auth,omitempty"` - BearerToken string `json:"bearer_token,omitempty" yaml:"bearer_token,omitempty"` - ProxyURL string `json:"proxyUrl,omitempty" yaml:"proxyUrl,omitempty"` - URL string `json:"url,omitempty" yaml:"url,omitempty"` -} diff --git a/clients/rancher/generated/management/v3/zz_generated_shibboleth_config.go b/clients/rancher/generated/management/v3/zz_generated_shibboleth_config.go index c1d87be5..8ab4b89a 100644 --- a/clients/rancher/generated/management/v3/zz_generated_shibboleth_config.go +++ b/clients/rancher/generated/management/v3/zz_generated_shibboleth_config.go @@ -13,9 +13,6 @@ const ( ShibbolethConfigFieldGroupsField = "groupsField" ShibbolethConfigFieldIDPMetadataContent = "idpMetadataContent" ShibbolethConfigFieldLabels = "labels" - ShibbolethConfigFieldLogoutAllEnabled = "logoutAllEnabled" - ShibbolethConfigFieldLogoutAllForced = "logoutAllForced" - ShibbolethConfigFieldLogoutAllSupported = "logoutAllSupported" ShibbolethConfigFieldName = "name" ShibbolethConfigFieldOpenLdapConfig = "openLdapConfig" ShibbolethConfigFieldOwnerReferences = "ownerReferences" @@ -42,9 +39,6 @@ type ShibbolethConfig struct { GroupsField string `json:"groupsField,omitempty" yaml:"groupsField,omitempty"` IDPMetadataContent string `json:"idpMetadataContent,omitempty" yaml:"idpMetadataContent,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LogoutAllEnabled bool `json:"logoutAllEnabled,omitempty" yaml:"logoutAllEnabled,omitempty"` - LogoutAllForced bool `json:"logoutAllForced,omitempty" yaml:"logoutAllForced,omitempty"` - LogoutAllSupported bool `json:"logoutAllSupported,omitempty" yaml:"logoutAllSupported,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` OpenLdapConfig *LdapFields `json:"openLdapConfig,omitempty" yaml:"openLdapConfig,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` diff --git 
a/clients/rancher/generated/management/v3/zz_generated_token.go b/clients/rancher/generated/management/v3/zz_generated_token.go index 7c1bfd8f..40c591dd 100644 --- a/clients/rancher/generated/management/v3/zz_generated_token.go +++ b/clients/rancher/generated/management/v3/zz_generated_token.go @@ -19,7 +19,7 @@ const ( TokenFieldGroupPrincipals = "groupPrincipals" TokenFieldIsDerived = "isDerived" TokenFieldLabels = "labels" - TokenFieldLastUsedAt = "lastUsedAt" + TokenFieldLastUpdateTime = "lastUpdateTime" TokenFieldName = "name" TokenFieldOwnerReferences = "ownerReferences" TokenFieldProviderInfo = "providerInfo" @@ -46,7 +46,7 @@ type Token struct { GroupPrincipals []string `json:"groupPrincipals,omitempty" yaml:"groupPrincipals,omitempty"` IsDerived bool `json:"isDerived,omitempty" yaml:"isDerived,omitempty"` Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - LastUsedAt string `json:"lastUsedAt,omitempty" yaml:"lastUsedAt,omitempty"` + LastUpdateTime string `json:"lastUpdateTime,omitempty" yaml:"lastUpdateTime,omitempty"` Name string `json:"name,omitempty" yaml:"name,omitempty"` OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" yaml:"ownerReferences,omitempty"` ProviderInfo map[string]string `json:"providerInfo,omitempty" yaml:"providerInfo,omitempty"` diff --git a/extensions/clusters/ack/create.go b/extensions/clusters/ack/create.go index 5f721757..7d244540 100644 --- a/extensions/clusters/ack/create.go +++ b/extensions/clusters/ack/create.go @@ -9,14 +9,12 @@ import ( func CreateACKHostedCluster(client *rancher.Client, displayName, cloudCredentialID string, ackClusterConfig ClusterConfig, enableClusterAlerting, enableClusterMonitoring, enableNetworkPolicy, windowsPreferedCluster bool, labels map[string]string) (*management.Cluster, error) { ackHostCluster := HostClusterConfig(displayName, cloudCredentialID, ackClusterConfig) cluster := &management.Cluster{ - ACKConfig: ackHostCluster, - DockerRootDir: "/var/lib/docker", - EnableClusterAlerting: enableClusterAlerting, - EnableClusterMonitoring: enableClusterMonitoring, - EnableNetworkPolicy: &enableNetworkPolicy, - Labels: labels, - Name: displayName, - WindowsPreferedCluster: windowsPreferedCluster, + ACKConfig: ackHostCluster, + DockerRootDir: "/var/lib/docker", + EnableNetworkPolicy: &enableNetworkPolicy, + Labels: labels, + Name: displayName, + WindowsPreferedCluster: windowsPreferedCluster, } clusterResp, err := client.Management.Cluster.Create(cluster) diff --git a/extensions/clusters/ack/nodepools.go b/extensions/clusters/ack/nodepools.go index d8f8a502..62eae44f 100644 --- a/extensions/clusters/ack/nodepools.go +++ b/extensions/clusters/ack/nodepools.go @@ -35,14 +35,12 @@ func updateNodePoolQuantity(client *rancher.Client, cluster *management.Cluster, ackConfig.NodePoolList[0].InstancesNum += nodePool.InstancesNum ackHostCluster := &management.Cluster{ - DockerRootDir: "/var/lib/docker", - ACKConfig: ackConfig, - EnableClusterAlerting: clusterResp.EnableClusterAlerting, - EnableClusterMonitoring: clusterResp.EnableClusterMonitoring, - EnableNetworkPolicy: clusterResp.EnableNetworkPolicy, - Labels: clusterResp.Labels, - Name: clusterResp.Name, - WindowsPreferedCluster: clusterResp.WindowsPreferedCluster, + DockerRootDir: "/var/lib/docker", + ACKConfig: ackConfig, + EnableNetworkPolicy: clusterResp.EnableNetworkPolicy, + Labels: clusterResp.Labels, + Name: clusterResp.Name, + WindowsPreferedCluster: clusterResp.WindowsPreferedCluster, } logrus.Infof("Scaling the node pool to %v total 
nodes", ackConfig.NodePoolList[0].InstancesNum) diff --git a/extensions/clusters/cce/create.go b/extensions/clusters/cce/create.go index 85471ba8..63ddfabc 100644 --- a/extensions/clusters/cce/create.go +++ b/extensions/clusters/cce/create.go @@ -27,14 +27,12 @@ import ( func CreateCCEHostedCluster(client *rancher.Client, displayName, cloudCredentialID string, enableClusterAlerting, enableClusterMonitoring, enableNetworkPolicy, windowsPreferedCluster bool, labels map[string]string) (*management.Cluster, error) { cceHostCluster := HostClusterConfig(displayName, cloudCredentialID) cluster := &management.Cluster{ - DockerRootDir: "/var/lib/docker", - CCEConfig: cceHostCluster, - Name: displayName, - EnableClusterAlerting: enableClusterAlerting, - EnableClusterMonitoring: enableClusterMonitoring, - EnableNetworkPolicy: &enableNetworkPolicy, - Labels: labels, - WindowsPreferedCluster: windowsPreferedCluster, + DockerRootDir: "/var/lib/docker", + CCEConfig: cceHostCluster, + Name: displayName, + EnableNetworkPolicy: &enableNetworkPolicy, + Labels: labels, + WindowsPreferedCluster: windowsPreferedCluster, } clusterResp, err := client.Management.Cluster.Create(cluster) @@ -165,7 +163,7 @@ func UpdateNodePublicIP(client *rancher.Client, ID string) (bool, error) { } logrus.Infof("successfully bind EIP [%v] for node [%v]", eipAddr, utils.Value(node.Metadata.Name)) - time.Sleep(5 * time.Second) + <-time.After(time.Second) count++ } if count == len(*nodesRes.Items) { diff --git a/extensions/clusters/cce/nodepools.go b/extensions/clusters/cce/nodepools.go index 0597024f..2adb8c53 100644 --- a/extensions/clusters/cce/nodepools.go +++ b/extensions/clusters/cce/nodepools.go @@ -24,14 +24,12 @@ func updateNodePoolQuantity(client *rancher.Client, cluster *management.Cluster, cceConfig.NodePools[0].InitialNodeCount += nodePool.InitialNodeCount cceHostCluster := &management.Cluster{ - DockerRootDir: "/var/lib/docker", - CCEConfig: cceConfig, - EnableClusterAlerting: clusterResp.EnableClusterAlerting, - EnableClusterMonitoring: clusterResp.EnableClusterMonitoring, - EnableNetworkPolicy: clusterResp.EnableNetworkPolicy, - Labels: clusterResp.Labels, - Name: clusterResp.Name, - WindowsPreferedCluster: clusterResp.WindowsPreferedCluster, + DockerRootDir: "/var/lib/docker", + CCEConfig: cceConfig, + EnableNetworkPolicy: clusterResp.EnableNetworkPolicy, + Labels: clusterResp.Labels, + Name: clusterResp.Name, + WindowsPreferedCluster: clusterResp.WindowsPreferedCluster, } logrus.Infof("Scaling the node pool to %v total nodes", cceConfig.NodePools[0].InitialNodeCount) diff --git a/extensions/clusters/tke/create.go b/extensions/clusters/tke/create.go index ae89e741..c2f0a06c 100644 --- a/extensions/clusters/tke/create.go +++ b/extensions/clusters/tke/create.go @@ -9,14 +9,12 @@ import ( func CreateTKEHostedCluster(client *rancher.Client, displayName, cloudCredentialID string, enableClusterAlerting, enableClusterMonitoring, enableNetworkPolicy, windowsPreferedCluster bool, labels map[string]string) (*management.Cluster, error) { tkeHostCluster := HostClusterConfig(displayName, cloudCredentialID) cluster := &management.Cluster{ - DockerRootDir: "/var/lib/docker", - TKEConfig: tkeHostCluster, - Name: displayName, - EnableClusterAlerting: enableClusterAlerting, - EnableClusterMonitoring: enableClusterMonitoring, - EnableNetworkPolicy: &enableNetworkPolicy, - Labels: labels, - WindowsPreferedCluster: windowsPreferedCluster, + DockerRootDir: "/var/lib/docker", + TKEConfig: tkeHostCluster, + Name: displayName, + 
EnableNetworkPolicy: &enableNetworkPolicy, + Labels: labels, + WindowsPreferedCluster: windowsPreferedCluster, } clusterResp, err := client.Management.Cluster.Create(cluster) diff --git a/extensions/clusters/tke/nodepools.go b/extensions/clusters/tke/nodepools.go index a7d72d89..b41a3dde 100644 --- a/extensions/clusters/tke/nodepools.go +++ b/extensions/clusters/tke/nodepools.go @@ -24,14 +24,12 @@ func updateNodePoolQuantity(client *rancher.Client, cluster *management.Cluster, tkeConfig.NodePoolList[0].AutoScalingGroupPara.DesiredCapacity += nodePool.AutoScalingGroupPara.DesiredCapacity tkeHostCluster := &management.Cluster{ - DockerRootDir: "/var/lib/docker", - TKEConfig: tkeConfig, - EnableClusterAlerting: clusterResp.EnableClusterAlerting, - EnableClusterMonitoring: clusterResp.EnableClusterMonitoring, - EnableNetworkPolicy: clusterResp.EnableNetworkPolicy, - Labels: clusterResp.Labels, - Name: clusterResp.Name, - WindowsPreferedCluster: clusterResp.WindowsPreferedCluster, + DockerRootDir: "/var/lib/docker", + TKEConfig: tkeConfig, + EnableNetworkPolicy: clusterResp.EnableNetworkPolicy, + Labels: clusterResp.Labels, + Name: clusterResp.Name, + WindowsPreferedCluster: clusterResp.WindowsPreferedCluster, } logrus.Infof("Scaling the node pool to %v total nodes", tkeConfig.NodePoolList[0].AutoScalingGroupPara.DesiredCapacity) diff --git a/extensions/users/users.go b/extensions/users/users.go index 89288123..ebd4e8d0 100644 --- a/extensions/users/users.go +++ b/extensions/users/users.go @@ -26,15 +26,6 @@ import ( namegen "github.com/rancher/shepherd/pkg/namegenerator" "github.com/rancher/shepherd/pkg/ref" "github.com/rancher/shepherd/pkg/wait" - authzv1 "k8s.io/api/authorization/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/selection" - kwait "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" ) const ( @@ -96,9 +87,6 @@ func AddProjectMember(rancherClient *rancher.Client, project *management.Project projectID := strings.Split(project.ID, ":") namespace := string(projectID[0]) - if project.BackingNamespace != "" { - namespace = project.BackingNamespace - } name := string(projectID[1]) adminClient, err := rancher.NewClient(rancherClient.RancherConfig.AdminToken, rancherClient.Session) diff --git a/pkg/generated/controllers/management.cattle.io/v3/casprovider.go b/pkg/generated/controllers/management.cattle.io/v3/casprovider.go new file mode 100644 index 00000000..898a63aa --- /dev/null +++ b/pkg/generated/controllers/management.cattle.io/v3/casprovider.go @@ -0,0 +1,39 @@ +/* +Copyright 2024 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. 
+ +package v3 + +import ( + v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" + "github.com/rancher/shepherd/pkg/wrangler/pkg/generic" +) + +// CASProviderController interface for managing CASProvider resources. +type CASProviderController interface { + generic.NonNamespacedControllerInterface[*v3.CASProvider, *v3.CASProviderList] +} + +// CASProviderClient interface for managing CASProvider resources in Kubernetes. +type CASProviderClient interface { + generic.NonNamespacedClientInterface[*v3.CASProvider, *v3.CASProviderList] +} + +// CASProviderCache interface for retrieving CASProvider resources in memory. +type CASProviderCache interface { + generic.NonNamespacedCacheInterface[*v3.CASProvider] +} diff --git a/pkg/generated/controllers/management.cattle.io/v3/clusterlogging.go b/pkg/generated/controllers/management.cattle.io/v3/clusterlogging.go new file mode 100644 index 00000000..9bb46463 --- /dev/null +++ b/pkg/generated/controllers/management.cattle.io/v3/clusterlogging.go @@ -0,0 +1,208 @@ +/* +Copyright 2024 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v3 + +import ( + "context" + "sync" + "time" + + v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" + "github.com/rancher/shepherd/pkg/wrangler/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ClusterLoggingController interface for managing ClusterLogging resources. +type ClusterLoggingController interface { + generic.ControllerInterface[*v3.ClusterLogging, *v3.ClusterLoggingList] +} + +// ClusterLoggingClient interface for managing ClusterLogging resources in Kubernetes. +type ClusterLoggingClient interface { + generic.ClientInterface[*v3.ClusterLogging, *v3.ClusterLoggingList] +} + +// ClusterLoggingCache interface for retrieving ClusterLogging resources in memory. +type ClusterLoggingCache interface { + generic.CacheInterface[*v3.ClusterLogging] +} + +// ClusterLoggingStatusHandler is executed for every added or modified ClusterLogging. Should return the new status to be updated +type ClusterLoggingStatusHandler func(obj *v3.ClusterLogging, status v3.ClusterLoggingStatus) (v3.ClusterLoggingStatus, error) + +// ClusterLoggingGeneratingHandler is the top-level handler that is executed for every ClusterLogging event. It extends ClusterLoggingStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type ClusterLoggingGeneratingHandler func(obj *v3.ClusterLogging, status v3.ClusterLoggingStatus) ([]runtime.Object, v3.ClusterLoggingStatus, error) + +// RegisterClusterLoggingStatusHandler configures a ClusterLoggingController to execute a ClusterLoggingStatusHandler for every events observed. 
+// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterClusterLoggingStatusHandler(ctx context.Context, controller ClusterLoggingController, condition condition.Cond, name string, handler ClusterLoggingStatusHandler) { + statusHandler := &clusterLoggingStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterClusterLoggingGeneratingHandler configures a ClusterLoggingController to execute a ClusterLoggingGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterClusterLoggingGeneratingHandler(ctx context.Context, controller ClusterLoggingController, apply apply.Apply, + condition condition.Cond, name string, handler ClusterLoggingGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &clusterLoggingGeneratingHandler{ + ClusterLoggingGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterClusterLoggingStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type clusterLoggingStatusHandler struct { + client ClusterLoggingClient + condition condition.Cond + handler ClusterLoggingStatusHandler +} + +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *clusterLoggingStatusHandler) sync(key string, obj *v3.ClusterLogging) (*v3.ClusterLogging, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type clusterLoggingGeneratingHandler struct { + ClusterLoggingGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *clusterLoggingGeneratingHandler) Remove(key string, obj *v3.ClusterLogging) (*v3.ClusterLogging, error) { + if obj != nil { + return obj, nil + } + + obj = &v3.ClusterLogging{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). 
+ ApplyObjects() +} + +// Handle executes the configured ClusterLoggingGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *clusterLoggingGeneratingHandler) Handle(obj *v3.ClusterLogging, status v3.ClusterLoggingStatus) (v3.ClusterLoggingStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.ClusterLoggingGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *clusterLoggingGeneratingHandler) isNewResourceVersion(obj *v3.ClusterLogging) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *clusterLoggingGeneratingHandler) storeResourceVersion(obj *v3.ClusterLogging) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/pkg/generated/controllers/management.cattle.io/v3/globaldns.go b/pkg/generated/controllers/management.cattle.io/v3/globaldns.go new file mode 100644 index 00000000..7b03aec2 --- /dev/null +++ b/pkg/generated/controllers/management.cattle.io/v3/globaldns.go @@ -0,0 +1,208 @@ +/* +Copyright 2024 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v3 + +import ( + "context" + "sync" + "time" + + v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" + "github.com/rancher/shepherd/pkg/wrangler/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GlobalDnsController interface for managing GlobalDns resources. +type GlobalDnsController interface { + generic.ControllerInterface[*v3.GlobalDns, *v3.GlobalDnsList] +} + +// GlobalDnsClient interface for managing GlobalDns resources in Kubernetes. 
+type GlobalDnsClient interface { + generic.ClientInterface[*v3.GlobalDns, *v3.GlobalDnsList] +} + +// GlobalDnsCache interface for retrieving GlobalDns resources in memory. +type GlobalDnsCache interface { + generic.CacheInterface[*v3.GlobalDns] +} + +// GlobalDnsStatusHandler is executed for every added or modified GlobalDns. Should return the new status to be updated +type GlobalDnsStatusHandler func(obj *v3.GlobalDns, status v3.GlobalDNSStatus) (v3.GlobalDNSStatus, error) + +// GlobalDnsGeneratingHandler is the top-level handler that is executed for every GlobalDns event. It extends GlobalDnsStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type GlobalDnsGeneratingHandler func(obj *v3.GlobalDns, status v3.GlobalDNSStatus) ([]runtime.Object, v3.GlobalDNSStatus, error) + +// RegisterGlobalDnsStatusHandler configures a GlobalDnsController to execute a GlobalDnsStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterGlobalDnsStatusHandler(ctx context.Context, controller GlobalDnsController, condition condition.Cond, name string, handler GlobalDnsStatusHandler) { + statusHandler := &globalDnsStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterGlobalDnsGeneratingHandler configures a GlobalDnsController to execute a GlobalDnsGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterGlobalDnsGeneratingHandler(ctx context.Context, controller GlobalDnsController, apply apply.Apply, + condition condition.Cond, name string, handler GlobalDnsGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &globalDnsGeneratingHandler{ + GlobalDnsGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterGlobalDnsStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type globalDnsStatusHandler struct { + client GlobalDnsClient + condition condition.Cond + handler GlobalDnsStatusHandler +} + +// sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *globalDnsStatusHandler) sync(key string, obj *v3.GlobalDns) (*v3.GlobalDns, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type globalDnsGeneratingHandler struct { + GlobalDnsGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *globalDnsGeneratingHandler) Remove(key string, obj *v3.GlobalDns) (*v3.GlobalDns, error) { + if obj != nil { + return obj, nil + } + + obj = &v3.GlobalDns{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects() +} + +// Handle executes the configured GlobalDnsGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *globalDnsGeneratingHandler) Handle(obj *v3.GlobalDns, status v3.GlobalDNSStatus) (v3.GlobalDNSStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.GlobalDnsGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. 
+// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *globalDnsGeneratingHandler) isNewResourceVersion(obj *v3.GlobalDns) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *globalDnsGeneratingHandler) storeResourceVersion(obj *v3.GlobalDns) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/pkg/generated/controllers/management.cattle.io/v3/globaldnsprovider.go b/pkg/generated/controllers/management.cattle.io/v3/globaldnsprovider.go new file mode 100644 index 00000000..5714a5f7 --- /dev/null +++ b/pkg/generated/controllers/management.cattle.io/v3/globaldnsprovider.go @@ -0,0 +1,39 @@ +/* +Copyright 2024 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v3 + +import ( + v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" + "github.com/rancher/shepherd/pkg/wrangler/pkg/generic" +) + +// GlobalDnsProviderController interface for managing GlobalDnsProvider resources. +type GlobalDnsProviderController interface { + generic.ControllerInterface[*v3.GlobalDnsProvider, *v3.GlobalDnsProviderList] +} + +// GlobalDnsProviderClient interface for managing GlobalDnsProvider resources in Kubernetes. +type GlobalDnsProviderClient interface { + generic.ClientInterface[*v3.GlobalDnsProvider, *v3.GlobalDnsProviderList] +} + +// GlobalDnsProviderCache interface for retrieving GlobalDnsProvider resources in memory. 
+type GlobalDnsProviderCache interface { + generic.CacheInterface[*v3.GlobalDnsProvider] +} diff --git a/pkg/generated/controllers/management.cattle.io/v3/interface.go b/pkg/generated/controllers/management.cattle.io/v3/interface.go index 66ff8fc3..c2de84f5 100644 --- a/pkg/generated/controllers/management.cattle.io/v3/interface.go +++ b/pkg/generated/controllers/management.cattle.io/v3/interface.go @@ -38,12 +38,14 @@ type Interface interface { AuthProvider() AuthProviderController AuthToken() AuthTokenController AzureADProvider() AzureADProviderController + CASProvider() CASProviderController Catalog() CatalogController CatalogTemplate() CatalogTemplateController CatalogTemplateVersion() CatalogTemplateVersionController CloudCredential() CloudCredentialController Cluster() ClusterController ClusterCatalog() ClusterCatalogController + ClusterLogging() ClusterLoggingController ClusterProxyConfig() ClusterProxyConfigController ClusterRegistrationToken() ClusterRegistrationTokenController ClusterRoleTemplateBinding() ClusterRoleTemplateBindingController @@ -57,6 +59,8 @@ type Interface interface { FreeIpaProvider() FreeIpaProviderController GenericOIDCProvider() GenericOIDCProviderController GithubProvider() GithubProviderController + GlobalDns() GlobalDnsController + GlobalDnsProvider() GlobalDnsProviderController GlobalRole() GlobalRoleController GlobalRoleBinding() GlobalRoleBindingController GoogleOAuthProvider() GoogleOAuthProviderController @@ -73,12 +77,15 @@ type Interface interface { NodeTemplate() NodeTemplateController OIDCProvider() OIDCProviderController OpenLdapProvider() OpenLdapProviderController + OperatorSetting() OperatorSettingController PodSecurityAdmissionConfigurationTemplate() PodSecurityAdmissionConfigurationTemplateController Preference() PreferenceController Principal() PrincipalController Project() ProjectController ProjectCatalog() ProjectCatalogController + ProjectLogging() ProjectLoggingController ProjectNetworkPolicy() ProjectNetworkPolicyController + ProjectResourceQuotaUsage() ProjectResourceQuotaUsageController ProjectRoleTemplateBinding() ProjectRoleTemplateBindingController RancherUserNotification() RancherUserNotificationController RkeAddon() RkeAddonController @@ -132,6 +139,10 @@ func (v *version) AzureADProvider() AzureADProviderController { return generic.NewNonNamespacedController[*v3.AzureADProvider, *v3.AzureADProviderList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "AzureADProvider"}, "azureadproviders", v.controllerFactory, v.ts) } +func (v *version) CASProvider() CASProviderController { + return generic.NewNonNamespacedController[*v3.CASProvider, *v3.CASProviderList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "CASProvider"}, "casproviders", v.controllerFactory, v.ts) +} + func (v *version) Catalog() CatalogController { return generic.NewNonNamespacedController[*v3.Catalog, *v3.CatalogList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "Catalog"}, "catalogs", v.controllerFactory, v.ts) } @@ -156,6 +167,10 @@ func (v *version) ClusterCatalog() ClusterCatalogController { return generic.NewController[*v3.ClusterCatalog, *v3.ClusterCatalogList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ClusterCatalog"}, "clustercatalogs", true, v.controllerFactory, v.ts) } +func (v *version) ClusterLogging() ClusterLoggingController { + return generic.NewController[*v3.ClusterLogging, 
*v3.ClusterLoggingList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ClusterLogging"}, "clusterloggings", true, v.controllerFactory, v.ts) +} + func (v *version) ClusterProxyConfig() ClusterProxyConfigController { return generic.NewController[*v3.ClusterProxyConfig, *v3.ClusterProxyConfigList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ClusterProxyConfig"}, "clusterproxyconfigs", true, v.controllerFactory, v.ts) } @@ -208,6 +223,14 @@ func (v *version) GithubProvider() GithubProviderController { return generic.NewNonNamespacedController[*v3.GithubProvider, *v3.GithubProviderList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "GithubProvider"}, "githubproviders", v.controllerFactory, v.ts) } +func (v *version) GlobalDns() GlobalDnsController { + return generic.NewController[*v3.GlobalDns, *v3.GlobalDnsList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "GlobalDns"}, "globaldnses", true, v.controllerFactory, v.ts) +} + +func (v *version) GlobalDnsProvider() GlobalDnsProviderController { + return generic.NewController[*v3.GlobalDnsProvider, *v3.GlobalDnsProviderList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "GlobalDnsProvider"}, "globaldnsproviders", true, v.controllerFactory, v.ts) +} + func (v *version) GlobalRole() GlobalRoleController { return generic.NewNonNamespacedController[*v3.GlobalRole, *v3.GlobalRoleList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "GlobalRole"}, "globalroles", v.controllerFactory, v.ts) } @@ -272,6 +295,10 @@ func (v *version) OpenLdapProvider() OpenLdapProviderController { return generic.NewNonNamespacedController[*v3.OpenLdapProvider, *v3.OpenLdapProviderList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "OpenLdapProvider"}, "openldapproviders", v.controllerFactory, v.ts) } +func (v *version) OperatorSetting() OperatorSettingController { + return generic.NewNonNamespacedController[*v3.OperatorSetting, *v3.OperatorSettingList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "OperatorSetting"}, "operatorsettings", v.controllerFactory, v.ts) +} + func (v *version) PodSecurityAdmissionConfigurationTemplate() PodSecurityAdmissionConfigurationTemplateController { return generic.NewNonNamespacedController[*v3.PodSecurityAdmissionConfigurationTemplate, *v3.PodSecurityAdmissionConfigurationTemplateList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "PodSecurityAdmissionConfigurationTemplate"}, "podsecurityadmissionconfigurationtemplates", v.controllerFactory, v.ts) } @@ -292,10 +319,18 @@ func (v *version) ProjectCatalog() ProjectCatalogController { return generic.NewController[*v3.ProjectCatalog, *v3.ProjectCatalogList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ProjectCatalog"}, "projectcatalogs", true, v.controllerFactory, v.ts) } +func (v *version) ProjectLogging() ProjectLoggingController { + return generic.NewController[*v3.ProjectLogging, *v3.ProjectLoggingList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ProjectLogging"}, "projectloggings", true, v.controllerFactory, v.ts) +} + func (v *version) ProjectNetworkPolicy() ProjectNetworkPolicyController { return generic.NewController[*v3.ProjectNetworkPolicy, *v3.ProjectNetworkPolicyList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: 
"ProjectNetworkPolicy"}, "projectnetworkpolicies", true, v.controllerFactory, v.ts) } +func (v *version) ProjectResourceQuotaUsage() ProjectResourceQuotaUsageController { + return generic.NewController[*v3.ProjectResourceQuotaUsage, *v3.ProjectResourceQuotaUsageList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ProjectResourceQuotaUsage"}, "projectresourcequotausages", true, v.controllerFactory, v.ts) +} + func (v *version) ProjectRoleTemplateBinding() ProjectRoleTemplateBindingController { return generic.NewController[*v3.ProjectRoleTemplateBinding, *v3.ProjectRoleTemplateBindingList](schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ProjectRoleTemplateBinding"}, "projectroletemplatebindings", true, v.controllerFactory, v.ts) } diff --git a/pkg/generated/controllers/management.cattle.io/v3/operatorsetting.go b/pkg/generated/controllers/management.cattle.io/v3/operatorsetting.go new file mode 100644 index 00000000..f43b1b9d --- /dev/null +++ b/pkg/generated/controllers/management.cattle.io/v3/operatorsetting.go @@ -0,0 +1,208 @@ +/* +Copyright 2024 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v3 + +import ( + "context" + "sync" + "time" + + v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" + "github.com/rancher/shepherd/pkg/wrangler/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// OperatorSettingController interface for managing OperatorSetting resources. +type OperatorSettingController interface { + generic.NonNamespacedControllerInterface[*v3.OperatorSetting, *v3.OperatorSettingList] +} + +// OperatorSettingClient interface for managing OperatorSetting resources in Kubernetes. +type OperatorSettingClient interface { + generic.NonNamespacedClientInterface[*v3.OperatorSetting, *v3.OperatorSettingList] +} + +// OperatorSettingCache interface for retrieving OperatorSetting resources in memory. +type OperatorSettingCache interface { + generic.NonNamespacedCacheInterface[*v3.OperatorSetting] +} + +// OperatorSettingStatusHandler is executed for every added or modified OperatorSetting. Should return the new status to be updated +type OperatorSettingStatusHandler func(obj *v3.OperatorSetting, status v3.OperatorSettingStatus) (v3.OperatorSettingStatus, error) + +// OperatorSettingGeneratingHandler is the top-level handler that is executed for every OperatorSetting event. 
It extends OperatorSettingStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type OperatorSettingGeneratingHandler func(obj *v3.OperatorSetting, status v3.OperatorSettingStatus) ([]runtime.Object, v3.OperatorSettingStatus, error) + +// RegisterOperatorSettingStatusHandler configures a OperatorSettingController to execute a OperatorSettingStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterOperatorSettingStatusHandler(ctx context.Context, controller OperatorSettingController, condition condition.Cond, name string, handler OperatorSettingStatusHandler) { + statusHandler := &operatorSettingStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterOperatorSettingGeneratingHandler configures a OperatorSettingController to execute a OperatorSettingGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterOperatorSettingGeneratingHandler(ctx context.Context, controller OperatorSettingController, apply apply.Apply, + condition condition.Cond, name string, handler OperatorSettingGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &operatorSettingGeneratingHandler{ + OperatorSettingGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterOperatorSettingStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type operatorSettingStatusHandler struct { + client OperatorSettingClient + condition condition.Cond + handler OperatorSettingStatusHandler +} + +// sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *operatorSettingStatusHandler) sync(key string, obj *v3.OperatorSetting) (*v3.OperatorSetting, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type operatorSettingGeneratingHandler struct { + OperatorSettingGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *operatorSettingGeneratingHandler) Remove(key string, obj *v3.OperatorSetting) (*v3.OperatorSetting, error) { + if obj != nil { + return obj, nil + } + + obj = &v3.OperatorSetting{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects() +} + +// Handle executes the configured OperatorSettingGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *operatorSettingGeneratingHandler) Handle(obj *v3.OperatorSetting, status v3.OperatorSettingStatus) (v3.OperatorSettingStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.OperatorSettingGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. 
+// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *operatorSettingGeneratingHandler) isNewResourceVersion(obj *v3.OperatorSetting) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *operatorSettingGeneratingHandler) storeResourceVersion(obj *v3.OperatorSetting) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/pkg/generated/controllers/management.cattle.io/v3/projectlogging.go b/pkg/generated/controllers/management.cattle.io/v3/projectlogging.go new file mode 100644 index 00000000..72771d1e --- /dev/null +++ b/pkg/generated/controllers/management.cattle.io/v3/projectlogging.go @@ -0,0 +1,208 @@ +/* +Copyright 2024 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v3 + +import ( + "context" + "sync" + "time" + + v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" + "github.com/rancher/shepherd/pkg/wrangler/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ProjectLoggingController interface for managing ProjectLogging resources. +type ProjectLoggingController interface { + generic.ControllerInterface[*v3.ProjectLogging, *v3.ProjectLoggingList] +} + +// ProjectLoggingClient interface for managing ProjectLogging resources in Kubernetes. +type ProjectLoggingClient interface { + generic.ClientInterface[*v3.ProjectLogging, *v3.ProjectLoggingList] +} + +// ProjectLoggingCache interface for retrieving ProjectLogging resources in memory. +type ProjectLoggingCache interface { + generic.CacheInterface[*v3.ProjectLogging] +} + +// ProjectLoggingStatusHandler is executed for every added or modified ProjectLogging. Should return the new status to be updated +type ProjectLoggingStatusHandler func(obj *v3.ProjectLogging, status v3.ProjectLoggingStatus) (v3.ProjectLoggingStatus, error) + +// ProjectLoggingGeneratingHandler is the top-level handler that is executed for every ProjectLogging event. 
It extends ProjectLoggingStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type ProjectLoggingGeneratingHandler func(obj *v3.ProjectLogging, status v3.ProjectLoggingStatus) ([]runtime.Object, v3.ProjectLoggingStatus, error) + +// RegisterProjectLoggingStatusHandler configures a ProjectLoggingController to execute a ProjectLoggingStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterProjectLoggingStatusHandler(ctx context.Context, controller ProjectLoggingController, condition condition.Cond, name string, handler ProjectLoggingStatusHandler) { + statusHandler := &projectLoggingStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterProjectLoggingGeneratingHandler configures a ProjectLoggingController to execute a ProjectLoggingGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterProjectLoggingGeneratingHandler(ctx context.Context, controller ProjectLoggingController, apply apply.Apply, + condition condition.Cond, name string, handler ProjectLoggingGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &projectLoggingGeneratingHandler{ + ProjectLoggingGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterProjectLoggingStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type projectLoggingStatusHandler struct { + client ProjectLoggingClient + condition condition.Cond + handler ProjectLoggingStatusHandler +} + +// sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *projectLoggingStatusHandler) sync(key string, obj *v3.ProjectLogging) (*v3.ProjectLogging, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type projectLoggingGeneratingHandler struct { + ProjectLoggingGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *projectLoggingGeneratingHandler) Remove(key string, obj *v3.ProjectLogging) (*v3.ProjectLogging, error) { + if obj != nil { + return obj, nil + } + + obj = &v3.ProjectLogging{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects() +} + +// Handle executes the configured ProjectLoggingGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *projectLoggingGeneratingHandler) Handle(obj *v3.ProjectLogging, status v3.ProjectLoggingStatus) (v3.ProjectLoggingStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.ProjectLoggingGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. 
+// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *projectLoggingGeneratingHandler) isNewResourceVersion(obj *v3.ProjectLogging) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *projectLoggingGeneratingHandler) storeResourceVersion(obj *v3.ProjectLogging) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/pkg/generated/controllers/management.cattle.io/v3/projectresourcequotausage.go b/pkg/generated/controllers/management.cattle.io/v3/projectresourcequotausage.go new file mode 100644 index 00000000..1c5dded9 --- /dev/null +++ b/pkg/generated/controllers/management.cattle.io/v3/projectresourcequotausage.go @@ -0,0 +1,208 @@ +/* +Copyright 2024 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v3 + +import ( + "context" + "sync" + "time" + + v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" + "github.com/rancher/shepherd/pkg/wrangler/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ProjectResourceQuotaUsageController interface for managing ProjectResourceQuotaUsage resources. +type ProjectResourceQuotaUsageController interface { + generic.ControllerInterface[*v3.ProjectResourceQuotaUsage, *v3.ProjectResourceQuotaUsageList] +} + +// ProjectResourceQuotaUsageClient interface for managing ProjectResourceQuotaUsage resources in Kubernetes. +type ProjectResourceQuotaUsageClient interface { + generic.ClientInterface[*v3.ProjectResourceQuotaUsage, *v3.ProjectResourceQuotaUsageList] +} + +// ProjectResourceQuotaUsageCache interface for retrieving ProjectResourceQuotaUsage resources in memory. +type ProjectResourceQuotaUsageCache interface { + generic.CacheInterface[*v3.ProjectResourceQuotaUsage] +} + +// ProjectResourceQuotaUsageStatusHandler is executed for every added or modified ProjectResourceQuotaUsage. Should return the new status to be updated +type ProjectResourceQuotaUsageStatusHandler func(obj *v3.ProjectResourceQuotaUsage, status v3.ProjectResourceQuotaUsageAmount) (v3.ProjectResourceQuotaUsageAmount, error) + +// ProjectResourceQuotaUsageGeneratingHandler is the top-level handler that is executed for every ProjectResourceQuotaUsage event. 
It extends ProjectResourceQuotaUsageStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type ProjectResourceQuotaUsageGeneratingHandler func(obj *v3.ProjectResourceQuotaUsage, status v3.ProjectResourceQuotaUsageAmount) ([]runtime.Object, v3.ProjectResourceQuotaUsageAmount, error) + +// RegisterProjectResourceQuotaUsageStatusHandler configures a ProjectResourceQuotaUsageController to execute a ProjectResourceQuotaUsageStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterProjectResourceQuotaUsageStatusHandler(ctx context.Context, controller ProjectResourceQuotaUsageController, condition condition.Cond, name string, handler ProjectResourceQuotaUsageStatusHandler) { + statusHandler := &projectResourceQuotaUsageStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterProjectResourceQuotaUsageGeneratingHandler configures a ProjectResourceQuotaUsageController to execute a ProjectResourceQuotaUsageGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterProjectResourceQuotaUsageGeneratingHandler(ctx context.Context, controller ProjectResourceQuotaUsageController, apply apply.Apply, + condition condition.Cond, name string, handler ProjectResourceQuotaUsageGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &projectResourceQuotaUsageGeneratingHandler{ + ProjectResourceQuotaUsageGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterProjectResourceQuotaUsageStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type projectResourceQuotaUsageStatusHandler struct { + client ProjectResourceQuotaUsageClient + condition condition.Cond + handler ProjectResourceQuotaUsageStatusHandler +} + +// sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *projectResourceQuotaUsageStatusHandler) sync(key string, obj *v3.ProjectResourceQuotaUsage) (*v3.ProjectResourceQuotaUsage, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type projectResourceQuotaUsageGeneratingHandler struct { + ProjectResourceQuotaUsageGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *projectResourceQuotaUsageGeneratingHandler) Remove(key string, obj *v3.ProjectResourceQuotaUsage) (*v3.ProjectResourceQuotaUsage, error) { + if obj != nil { + return obj, nil + } + + obj = &v3.ProjectResourceQuotaUsage{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects() +} + +// Handle executes the configured ProjectResourceQuotaUsageGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *projectResourceQuotaUsageGeneratingHandler) Handle(obj *v3.ProjectResourceQuotaUsage, status v3.ProjectResourceQuotaUsageAmount) (v3.ProjectResourceQuotaUsageAmount, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.ProjectResourceQuotaUsageGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. 
+// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *projectResourceQuotaUsageGeneratingHandler) isNewResourceVersion(obj *v3.ProjectResourceQuotaUsage) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *projectResourceQuotaUsageGeneratingHandler) storeResourceVersion(obj *v3.ProjectResourceQuotaUsage) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +}
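
Reviewer note: the newly generated files above all follow the same wrangler generating-handler pattern, so a single usage sketch covers them. The snippet below shows, under stated assumptions, how caller code would typically wire up the new RegisterGlobalDnsGeneratingHandler. The import path and alias for the generated package (mgmtv3), the handler and condition names, and the child ConfigMap are illustrative assumptions only and are not part of this change.

package example

import (
	"context"

	v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
	mgmtv3 "github.com/rancher/shepherd/pkg/generated/controllers/management.cattle.io/v3" // assumed import path for the generated package
	"github.com/rancher/shepherd/pkg/wrangler/pkg/generic"
	"github.com/rancher/wrangler/v3/pkg/apply"
	"github.com/rancher/wrangler/v3/pkg/condition"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// registerGlobalDnsExample registers a generating handler on a GlobalDns
// controller. On every GlobalDns add or update the handler returns the set of
// child objects that should exist; apply.Apply then creates, updates, or
// prunes them, and the returned status is written back via UpdateStatus by the
// generated status-handler plumbing.
func registerGlobalDnsExample(ctx context.Context, controller mgmtv3.GlobalDnsController, apply apply.Apply) {
	opts := &generic.GeneratingHandlerOptions{
		// With this option set, isNewResourceVersion/storeResourceVersion in the
		// generated code skip re-applying child objects for a resource version
		// that was already processed successfully.
		UniqueApplyForResourceVersion: true,
	}

	mgmtv3.RegisterGlobalDnsGeneratingHandler(ctx, controller, apply,
		condition.Cond("ExampleConfigured"), // illustrative condition, updated on every handler run
		"example-globaldns-handler",
		func(obj *v3.GlobalDns, status v3.GlobalDNSStatus) ([]runtime.Object, v3.GlobalDNSStatus, error) {
			// One child object per GlobalDns; ownership, set ID, and cascade
			// deletion are handled by the generated Handle/Remove methods.
			child := &corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      obj.Name + "-dns",
					Namespace: obj.Namespace,
				},
				Data: map[string]string{"owner": obj.Namespace + "/" + obj.Name},
			}
			return []runtime.Object{child}, status, nil
		},
		opts)
}

The same shape applies to the ClusterLogging, OperatorSetting, ProjectLogging, and ProjectResourceQuotaUsage registration functions added in this change; only the object and status types differ, and OperatorSetting uses the non-namespaced controller interfaces.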