Terraform: Azure Kubernetes node pool is recreated on every terraform apply

wfsdck30 asked on 2022-12-14 in Kubernetes

I am trying to create an Azure Kubernetes cluster with 1 node pool. Please find the code snippet below:

resource "azurerm_kubernetes_cluster" "k8s_cluster" {
  lifecycle {
    ignore_changes = [
      default_node_pool
    ]
    prevent_destroy = false
  }

  key_vault_secrets_provider {
    secret_rotation_enabled = true
  }

  private_cluster_enabled = true
  name                    = var.cluster_name
  location                = var.location
  resource_group_name     = var.rg_name
  dns_prefix              = var.dns_prefix
  kubernetes_version      = var.kubernetes_version
  # node_resource_group = var.resource_group_name

  default_node_pool {
    name                  = var.default_node_pool.name
    node_count            = var.default_node_pool.node_count
    max_count             = var.default_node_pool.max_count
    min_count             = var.default_node_pool.min_count
    vm_size               = var.default_node_pool.vm_size
    os_disk_size_gb       = var.default_node_pool.os_disk_size_gb
    vnet_subnet_id        = var.vnet_subnet_id
    max_pods              = var.default_node_pool.max_pods
    type                  = var.default_node_pool.agent_pool_type
    enable_node_public_ip = var.default_node_pool.enable_node_public_ip
    enable_auto_scaling   = var.default_node_pool.enable_auto_scaling

    tags = merge(var.common_tags)
  }

  linux_profile {
    admin_username = var.admin_username
    ssh_key {
      key_data = file("${path.module}/${var.ssh_public_key}")

    }
  }

  identity {
    type = var.identity
  }

  network_profile {
    network_plugin    = var.network_plugin    #azure
    network_policy    = var.network_policy    #"azure"
    load_balancer_sku = var.load_balancer_sku #"standard"
    # pod_cidr  = var.pod_cidr | When network_plugin is set to azure - the vnet_subnet_id field in the default_node_pool block must be set and pod_cidr must not be set.
  }

  tags = merge({ "Name" : var.cluster_name }, var.common_tags)
}

data "azurerm_kubernetes_service_versions" "current" {
  location = "Germany West Central"
}

# Node Pool
resource "azurerm_kubernetes_cluster_node_pool" "gp_nodes" {
  zones                 = var.np_availability_zones  #[1, 2, 3]
  enable_auto_scaling   = var.np_enable_auto_scaling #true
  kubernetes_cluster_id = azurerm_kubernetes_cluster.k8s_cluster.id
  node_count            = var.np_node_count
  max_count             = var.np_max_count
  min_count             = var.np_min_count
  mode                  = var.np_mode 
  name                  = var.np_name 
  orchestrator_version  = "1.22.15"  
  os_disk_size_gb       = 30
  os_type               = var.np_os_type  
  vm_size               = var.np_vm_size  
  priority              = var.np_priority
  node_labels           = merge({ "Name" : var.np_name }, var.common_tags)

  lifecycle {
    ignore_changes = [
      kubernetes_cluster_id
    ]
  }

  #pod_subnet_id = ""
  tags = merge(
    { "Name" : var.np_name },
  var.common_tags)
}

But on every terraform plan/apply I get the following changes, and they force the node pool to be recreated. Can someone help me understand why this is happening?

# module.aks_cluster.azurerm_kubernetes_cluster_node_pool.gp_nodes must be replaced
-/+ resource "azurerm_kubernetes_cluster_node_pool" "gp_nodes" {
      - custom_ca_trust_enabled = false -> null
      - enable_host_encryption  = false -> null
      - enable_node_public_ip   = false -> null
      - fips_enabled            = false -> null
      ~ id                      = "/subscriptions/<SOME-VALUE>/resourceGroups/shared-rg/providers/Microsoft.ContainerService/managedClusters/test-cluster/agentPools/gpnodes" -> (known after apply)
      ~ kubelet_disk_type       = "OS" -> (known after apply)
      - max_count               = 0 -> null
      ~ max_pods                = 30 -> (known after apply)
      - min_count               = 0 -> null
        name                    = "gpnodes"
      - node_taints             = [] -> null
      ~ os_sku                  = "Ubuntu" -> (known after apply)
        tags                    = {
            "Name"        = "test-cluster"
            "developedby" = "jana"
            "environment" = "test"
            "managedby"   = "devops"
        }
      - vnet_subnet_id          = "/subscriptions/<SOME-VALUE>/resourceGroups/shared-rg/providers/Microsoft.Network/virtualNetworks/shared-network/subnets/aks-subnet-test" -> null # forces replacement
        # (15 unchanged attributes hidden)
    }
Answer 1 (brqmpdu1):

I was able to fix this by setting vnet_subnet_id in azurerm_kubernetes_cluster_node_pool as follows:

# Node Pool
resource "azurerm_kubernetes_cluster_node_pool" "gp_nodes" {
  . . .
  vnet_subnet_id        = var.vnet_subnet_id
  . . .
}
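
For context on why this helps: the plan output in the question shows vnet_subnet_id going from the cluster's subnet to null and flags it with "# forces replacement". Leaving the argument unset makes Terraform try to clear the subnet value that Azure reports back, and since vnet_subnet_id can only be set at creation time, the pool gets replaced. Below is a fuller sketch of the fixed resource, reusing the variables from the question; only the vnet_subnet_id line is new, everything else is unchanged from the original resource:

# Node Pool (sketch: question's resource plus an explicit vnet_subnet_id)
resource "azurerm_kubernetes_cluster_node_pool" "gp_nodes" {
  kubernetes_cluster_id = azurerm_kubernetes_cluster.k8s_cluster.id
  vnet_subnet_id        = var.vnet_subnet_id # same subnet the default_node_pool uses

  zones                = var.np_availability_zones
  enable_auto_scaling  = var.np_enable_auto_scaling
  node_count           = var.np_node_count
  max_count            = var.np_max_count
  min_count            = var.np_min_count
  mode                 = var.np_mode
  name                 = var.np_name
  orchestrator_version = "1.22.15"
  os_disk_size_gb      = 30
  os_type              = var.np_os_type
  vm_size              = var.np_vm_size
  priority             = var.np_priority
  node_labels          = merge({ "Name" : var.np_name }, var.common_tags)

  lifecycle {
    ignore_changes = [
      kubernetes_cluster_id
    ]
  }

  tags = merge({ "Name" : var.np_name }, var.common_tags)
}

With the subnet declared explicitly, the configuration matches what Azure returns, so subsequent plans no longer show a "subnet -> null" diff for this attribute.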
Answer 2 (1zmg4dgp):

azurerm_kubernetes_cluster_node_pool typically manages a node pool inside a Kubernetes cluster via the ID of a subnet under a VNet.
Below is the codebase I used to reproduce this; we can use it together with a vnet and subnet (see the sketch after the node pool resource for wiring the subnet ID in explicitly).

data "azurerm_resource_group" "example" {
  name     = "v-swarna-mindtree"
}
data "azuread_client_config" "current" {}

resource "azurerm_virtual_network" "example" {
  name                = "example-network"
  location            = data.azurerm_resource_group.example.location
  resource_group_name = data.azurerm_resource_group.example.name
  address_space       = ["10.0.0.0/16"]
  dns_servers         = ["10.0.0.4", "10.0.0.5"]

  subnet {
    name           = "subnet1"
    address_prefix = "10.0.1.0/24"
  }

  tags = {
    environment = "Production"
  }
}

resource "azurerm_kubernetes_cluster" "cluster" {
   name                    = "swarnademo-cluster"
  location                = data.azurerm_resource_group.example.location
  resource_group_name     = data.azurerm_resource_group.example.name
  dns_prefix              = "swarnaexampleaks"
  kubernetes_version      = "1.22.11"

  default_node_pool {
        name       = "linux"
        node_count = 6
        vm_size    = "Standard_D4s_v3"
        max_pods   = 200
    }
    network_profile {
        network_plugin = "azure"
    }

    service_principal {
        client_id     = "8f12c4f7-5250-4454-90ba-654ac9ead9d2"
        client_secret = "EAX8Q~HXyRRR38q75bOHZpOjyEsQmMmoPx_DJbb7"
    }

}

resource "azurerm_kubernetes_cluster_node_pool" "system" {
    name                  = "sys"
    kubernetes_cluster_id = azurerm_kubernetes_cluster.cluster.id
    vm_size               = "Standard_D2s_v3"
    node_count            = 2
    os_type = "Linux"
}
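
For the question's scenario, the same idea can be taken one step further by pinning the node pool to an explicit subnet ID. This is only a sketch on top of the repro above, not something I applied: it assumes the inline subnet block in the virtual network is replaced by a standalone azurerm_subnet resource (named "aks" here purely for illustration) so its ID can be referenced.

# Sketch only: manage the subnet as a standalone resource (instead of the
# inline subnet block above) so its ID can be referenced from the node pools.
resource "azurerm_subnet" "aks" {
  name                 = "subnet1"
  resource_group_name  = data.azurerm_resource_group.example.name
  virtual_network_name = azurerm_virtual_network.example.name
  address_prefixes     = ["10.0.1.0/24"]
}

# Variant of the node pool above with the subnet pinned explicitly, so Terraform
# does not plan "subnet -> null" and force a replacement on the next apply.
resource "azurerm_kubernetes_cluster_node_pool" "system" {
  name                  = "sys"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.cluster.id
  vm_size               = "Standard_D2s_v3"
  node_count            = 2
  os_type               = "Linux"
  vnet_subnet_id        = azurerm_subnet.aks.id
}

As the comment in the question's network_profile block notes, with network_plugin = "azure" the default_node_pool would also need vnet_subnet_id set to the same subnet ID.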

The provider file is as follows:

terraform {
  required_version = "~>1.3.3"
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = ">=3.0.0"
    }
  }
}

provider "azurerm" {
  features {}
  skip_provider_registration = true
}

Step 2:
Run the commands below and deploy the corresponding infrastructure.

terraform plan 
terraform apply -auto-approve

Step 3:
Run the above commands again; we can observe that only the subnet is refreshed, not the cluster node pool.

Plan reference:

Apply reference:
