Merge pull request #34 from junior/v091-patch
V091 patch
junior authored May 21, 2023
2 parents f60f689 + 1e64cf5 commit 281bc7a
Showing 5 changed files with 25 additions and 34 deletions.
8 changes: 4 additions & 4 deletions examples/basic-with-existing-network/oke.tf
@@ -27,12 +27,12 @@ module "oke-quickstart" {
   # VCN for OKE arguments
   create_new_vcn = false
   existent_vcn_ocid = "<Existent VCN OCID>" # ocid1.vcn.oc1....
-  existent_vcn_compartment_ocid = "" # Optional. Specify if want to create terraform to create the subnets and the VCN is in a different compartment than the OKE cluster
+  existent_vcn_compartment_ocid = "" # Optional. Specify if want to create terraform to create the subnets and the VCN is in a different compartment than the OKE cluster
 
   # Subnet for OKE arguments
   create_subnets = false
   existent_oke_k8s_endpoint_subnet_ocid = "<Existent Kubernetes API Endpoint Subnet>" # ocid1.subnet....
-  existent_oke_nodes_subnet_ocid = "<Existent Worker Nodes Subnet>" # ocid1.subnet....
-  existent_oke_load_balancer_subnet_ocid = "<Existent Load Balancer Subnet>" # ocid1.subnet....
-  existent_oke_vcn_native_pod_networking_subnet_ocid = "" # Optional. Existent VCN Native POD Networking subnet if the CNI Type is "OCI_VCN_IP_NATIVE"
+  existent_oke_nodes_subnet_ocid = "<Existent Worker Nodes Subnet>" # ocid1.subnet....
+  existent_oke_load_balancer_subnet_ocid = "<Existent Load Balancer Subnet>" # ocid1.subnet....
+  existent_oke_vcn_native_pod_networking_subnet_ocid = "" # Optional. Existent VCN Native POD Networking subnet if the CNI Type is "OCI_VCN_IP_NATIVE"
 }
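
The placeholder strings above are meant to be replaced with real OCIDs before applying. As a minimal, hedged sketch (the variable names below are illustrative, not part of this repository), the same values can be supplied through input variables instead of being edited in place:

```hcl
# Illustrative wiring only; the variable names are hypothetical.
variable "existing_vcn_ocid" {
  type        = string
  description = "OCID of the existing VCN to reuse for OKE"
}

variable "existing_worker_subnet_ocid" {
  type        = string
  description = "OCID of the existing worker nodes subnet"
}

module "oke-quickstart" {
  source = "../.." # the quickstart root module, relative to this example

  create_new_vcn                 = false
  existent_vcn_ocid              = var.existing_vcn_ocid
  create_subnets                 = false
  existent_oke_nodes_subnet_ocid = var.existing_worker_subnet_ocid
  # the API endpoint and load balancer subnet OCIDs are passed the same way
}
```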
30 changes: 15 additions & 15 deletions main.tf
@@ -110,23 +110,23 @@ module "oke_node_pools" {
   create_new_node_pool = var.create_new_oke_cluster
 
   # OKE Worker Nodes (Compute)
-  node_pool_name = each.value.node_pool_name
-  node_pool_min_nodes = each.value.node_pool_min_nodes
-  node_pool_max_nodes = each.value.node_pool_max_nodes
-  node_k8s_version = each.value.node_k8s_version
+  node_pool_name = try(each.value.node_pool_name, "no_pool_name")
+  node_pool_min_nodes = try(each.value.node_pool_min_nodes, 1)
+  node_pool_max_nodes = try(each.value.node_pool_max_nodes, 3)
+  node_k8s_version = try(each.value.node_k8s_version, var.k8s_version)
   node_pool_shape = each.value.node_pool_shape
-  node_pool_shape_specific_ad = each.value.node_pool_shape_specific_ad
-  node_pool_node_shape_config_ocpus = each.value.node_pool_node_shape_config_ocpus
-  node_pool_node_shape_config_memory_in_gbs = each.value.node_pool_node_shape_config_memory_in_gbs
-  existent_oke_nodepool_id_for_autoscaler = each.value.existent_oke_nodepool_id_for_autoscaler
+  node_pool_shape_specific_ad = try(each.value.node_pool_shape_specific_ad, 0)
+  node_pool_node_shape_config_ocpus = try(each.value.node_pool_node_shape_config_ocpus, 4)
+  node_pool_node_shape_config_memory_in_gbs = try(each.value.node_pool_node_shape_config_memory_in_gbs, 48)
+  existent_oke_nodepool_id_for_autoscaler = try(each.value.existent_oke_nodepool_id_for_autoscaler, null)
   node_pool_autoscaler_enabled = try(each.value.node_pool_autoscaler_enabled, true)
-  node_pool_oke_init_params = each.value.node_pool_oke_init_params
-  node_pool_cloud_init_parts = each.value.node_pool_cloud_init_parts
-  public_ssh_key = local.workers_public_ssh_key
-  image_operating_system = each.value.image_operating_system
-  image_operating_system_version = each.value.image_operating_system_version
-  extra_initial_node_labels = each.value.extra_initial_node_labels
-  cni_type = each.value.cni_type
+  node_pool_oke_init_params = try(each.value.node_pool_oke_init_params, "")
+  node_pool_cloud_init_parts = try(each.value.node_pool_cloud_init_parts, [])
+  public_ssh_key = try(local.workers_public_ssh_key, "")
+  image_operating_system = try(each.value.image_operating_system, "Oracle Linux")
+  image_operating_system_version = try(each.value.image_operating_system_version, "8")
+  extra_initial_node_labels = try(each.value.extra_initial_node_labels, {})
+  cni_type = try(each.value.cni_type, "FLANNEL_OVERLAY")
 
   # OKE Network Details
   # nodes_subnet_id = local.create_subnets ? module.subnets["oke_nodes_subnet"].subnet_id : var.existent_oke_nodes_subnet_ocid
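Wrapping the per-pool lookups in try() lets a node pool entry omit any of these keys and still fall back to a default, rather than failing the plan. A small self-contained sketch of the behavior (standalone, not the module's actual variable handling):

```hcl
locals {
  # One pool overrides a couple of settings; the other relies entirely on defaults.
  node_pools = {
    pool1 = { node_pool_name = "app", node_pool_node_shape_config_ocpus = 8 }
    pool2 = {}
  }

  resolved = {
    for name, pool in local.node_pools : name => {
      node_pool_name = try(pool.node_pool_name, "no_pool_name")            # used when the key is absent
      ocpus          = try(pool.node_pool_node_shape_config_ocpus, 4)
      memory_in_gbs  = try(pool.node_pool_node_shape_config_memory_in_gbs, 48)
    }
  }
}

output "resolved_pools" {
  value = local.resolved
}
```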
10 changes: 5 additions & 5 deletions modules/cluster-tools/cluster-tools.tf
@@ -15,15 +15,15 @@ locals {
   # Helm repos
   helm_repository = {
     ingress_nginx = "https://kubernetes.github.io/ingress-nginx"
-    ingress_nginx_version = "4.4.0"
+    ingress_nginx_version = "4.6.1"
     jetstack = "https://charts.jetstack.io" # cert-manager
-    jetstack_version = "1.10.1" # cert-manager
+    jetstack_version = "1.12.0" # cert-manager
     grafana = "https://grafana.github.io/helm-charts"
-    grafana_version = "6.47.1"
+    grafana_version = "6.56.5"
     prometheus = "https://prometheus-community.github.io/helm-charts"
-    prometheus_version = "19.0.1"
+    prometheus_version = "22.6.2"
     metrics_server = "https://kubernetes-sigs.github.io/metrics-server"
-    metrics_server_version = "3.8.3"
+    metrics_server_version = "3.10.0"
   }
   use_cluster_tools_namespace = anytrue([var.grafana_enabled, var.ingress_nginx_enabled, var.cert_manager_enabled, var.prometheus_enabled]) ? true : false
 }
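The bumped values are Helm chart versions (not application versions) that the cluster-tools module pins. A hedged sketch of how one pinned entry from helm_repository is typically consumed through the hashicorp/helm provider; the resource below is illustrative rather than a copy of this module's code:

```hcl
resource "helm_release" "ingress_nginx" {
  count = var.ingress_nginx_enabled ? 1 : 0

  name             = "ingress-nginx"
  repository       = local.helm_repository.ingress_nginx         # chart repo URL from the locals above
  chart            = "ingress-nginx"
  version          = local.helm_repository.ingress_nginx_version # pinned chart version (4.6.1 after this change)
  namespace        = "cluster-tools"
  create_namespace = true
}
```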
1 change: 1 addition & 0 deletions modules/oke-node-pool/datasources.tf
@@ -40,6 +40,7 @@ data "cloudinit_config" "nodes" {
 #!/bin/bash
 curl --fail -H "Authorization: Bearer Oracle" -L0 http://169.254.169.254/opc/v2/instance/metadata/oke_init_script | base64 --decode >/var/run/oke-init.sh
 bash /var/run/oke-init.sh ${var.node_pool_oke_init_params}
+/usr/libexec/oci-growfs -y
 EOF
 }
 
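The added /usr/libexec/oci-growfs -y call runs after the standard OKE bootstrap and grows the root filesystem to use the full boot volume, which matters when worker nodes are provisioned with a boot volume larger than the image default. For context, a minimal sketch of a cloudinit_config data source carrying such a script, assuming the hashicorp/cloudinit provider; this is not the module's full definition:

```hcl
data "cloudinit_config" "nodes_example" {
  gzip          = true
  base64_encode = true

  part {
    content_type = "text/x-shellscript"
    content      = <<-EOF
      #!/bin/bash
      curl --fail -H "Authorization: Bearer Oracle" -L0 http://169.254.169.254/opc/v2/instance/metadata/oke_init_script | base64 --decode >/var/run/oke-init.sh
      bash /var/run/oke-init.sh
      /usr/libexec/oci-growfs -y   # expand the root filesystem to fill the boot volume
    EOF
  }
}
```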
10 changes: 0 additions & 10 deletions providers.tf.example
@@ -17,16 +17,6 @@ provider "oci" {
   private_key_path = var.private_key_path
 }
 
-# provider "oci" {
-#   alias = "current_region"
-#   tenancy_ocid = var.tenancy_ocid
-#   region = var.region
-
-#   user_ocid = var.user_ocid
-#   fingerprint = var.fingerprint
-#   private_key_path = var.private_key_path
-# }
-
 # New configuration to avoid Terraform Kubernetes provider interpolation. https://registry.terraform.io/providers/hashicorp/kubernetes/2.2.0/docs#stacking-with-managed-kubernetes-cluster-resources
 # Currently need to uncheck to refresh (--refresh=false) when destroying or else the terraform destroy will fail
 
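The surviving comments point at the hashicorp/kubernetes provider's "stacking with managed Kubernetes cluster resources" guidance: configure the provider from cluster attributes rather than a kubeconfig interpolated at plan time. A hedged sketch of that pattern for OKE; the module output names are hypothetical, and the exec block simply shells out to the OCI CLI for a short-lived token:

```hcl
provider "kubernetes" {
  # Output names are hypothetical; use whatever the root module actually exposes.
  host                   = module.oke-quickstart.cluster_endpoint
  cluster_ca_certificate = base64decode(module.oke-quickstart.cluster_ca_certificate)

  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "oci"
    args        = ["ce", "cluster", "generate-token", "--cluster-id", module.oke-quickstart.cluster_id, "--region", var.region]
  }
}
```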
