我有一个带有以下污点的EksNodeGroup:
const ssdEksNodeGroupPublicLargeSubnet = new aws.eks.EksNodeGroup(
this,
"ssdEksNodeGroupPublicLargeSubnet",
{
// ... other stuff...
taint: [
{
key: "app",
value: "strick",
effect: "NO_SCHEDULE",
},
],
}
);
在我的代码的其他地方,我试图迭代我的nodeGroup污点,以动态创建kubernetes pod tolerations。
const nodeGrouop = ssdEksNodeGroupPublicLargeSubnet
const tolerations: k8s.DeploymentSpecTemplateSpecToleration[] = [];
for (let i = 0; i < Fn.lengthOf(nodeGroup.taint); i++) {
const taint = nodeGroup.taint.get(i);
tolerations.push({
key: taint.key,
value: taint.value,
effect: taint.effect,
operator: "Equal"
});
}
console.log("##################", tolerations)
然而,当我尝试运行它时,我看到log语句打印了一个空数组,当我的pod/deployment被创建时,它是在没有tolerations的情况下创建的。
这里是我的kubernetes部署的完整声明
const pausePodDeployment = new k8s.Deployment(
this,
pausePodDeploymentName,
{
metadata: {
name: pausePodDeploymentName,
namespace: namespace.metadata.name,
},
spec: {
replicas: "1",
selector: {
matchLabels: {
app: pausePodDeploymentName,
},
},
template: {
metadata: {
labels: {
app: pausePodDeploymentName,
},
},
spec: {
priorityClassName: priorityClass.metadata.name,
terminationGracePeriodSeconds: 0,
container: [
{
name: "reserve-resources",
image: "k8s.gcr.io/pause",
resources: {
requests: {
cpu: "1",
},
},
},
],
toleration: tolerations,
nodeSelector: {
...nodeGroupLabels,
},
},
},
},
}
);
下面是CDK的完整输出(注意没有任何tolerations):
# kubernetes_deployment.overprovisioner_strick-overprovisioner-pause-pods_B5F26972 (overprovisioner/strick-overprovisioner-pause-pods) will be created
+ resource "kubernetes_deployment" "overprovisioner_strick-overprovisioner-pause-pods_B5F26972" {
+ id = (known after apply)
+ wait_for_rollout = true
+ metadata {
+ generation = (known after apply)
+ name = "strick-overprovisioner-pause-pods"
+ namespace = "overprovisioner"
+ resource_version = (known after apply)
+ uid = (known after apply)
}
+ spec {
+ min_ready_seconds = 0
+ paused = false
+ progress_deadline_seconds = 600
+ replicas = "1"
+ revision_history_limit = 10
+ selector {
+ match_labels = {
+ "app" = "strick-overprovisioner-pause-pods"
}
}
+ strategy {
+ type = (known after apply)
+ rolling_update {
+ max_surge = (known after apply)
+ max_unavailable = (known after apply)
}
}
+ template {
+ metadata {
+ generation = (known after apply)
+ labels = {
+ "app" = "strick-overprovisioner-pause-pods"
}
+ name = (known after apply)
+ resource_version = (known after apply)
+ uid = (known after apply)
}
+ spec {
+ automount_service_account_token = true
+ dns_policy = "ClusterFirst"
+ enable_service_links = true
+ host_ipc = false
+ host_network = false
+ host_pid = false
+ hostname = (known after apply)
+ node_name = (known after apply)
+ node_selector = {
+ "diskType" = "ssd"
}
+ priority_class_name = "overprovisioner"
+ restart_policy = "Always"
+ service_account_name = (known after apply)
+ share_process_namespace = false
+ termination_grace_period_seconds = 0
+ container {
+ image = "k8s.gcr.io/pause"
+ image_pull_policy = (known after apply)
+ name = "reserve-resources"
+ stdin = false
+ stdin_once = false
+ termination_message_path = "/dev/termination-log"
+ termination_message_policy = (known after apply)
+ tty = false
+ resources {
+ limits = (known after apply)
+ requests = {
+ "cpu" = "1"
}
}
}
+ image_pull_secrets {
+ name = (known after apply)
}
+ readiness_gate {
+ condition_type = (known after apply)
}
+ volume {
+ name = (known after apply)
+ aws_elastic_block_store {
+ fs_type = (known after apply)
+ partition = (known after apply)
+ read_only = (known after apply)
+ volume_id = (known after apply)
}
+ azure_disk {
+ caching_mode = (known after apply)
+ data_disk_uri = (known after apply)
+ disk_name = (known after apply)
+ fs_type = (known after apply)
+ kind = (known after apply)
+ read_only = (known after apply)
}
+ azure_file {
+ read_only = (known after apply)
+ secret_name = (known after apply)
+ secret_namespace = (known after apply)
+ share_name = (known after apply)
}
+ ceph_fs {
+ monitors = (known after apply)
+ path = (known after apply)
+ read_only = (known after apply)
+ secret_file = (known after apply)
+ user = (known after apply)
+ secret_ref {
+ name = (known after apply)
+ namespace = (known after apply)
}
}
+ cinder {
+ fs_type = (known after apply)
+ read_only = (known after apply)
+ volume_id = (known after apply)
}
+ config_map {
+ default_mode = (known after apply)
+ name = (known after apply)
+ optional = (known after apply)
+ items {
+ key = (known after apply)
+ mode = (known after apply)
+ path = (known after apply)
}
}
+ csi {
+ driver = (known after apply)
+ fs_type = (known after apply)
+ read_only = (known after apply)
+ volume_attributes = (known after apply)
+ node_publish_secret_ref {
+ name = (known after apply)
}
}
+ downward_api {
+ default_mode = (known after apply)
+ items {
+ mode = (known after apply)
+ path = (known after apply)
+ field_ref {
+ api_version = (known after apply)
+ field_path = (known after apply)
}
+ resource_field_ref {
+ container_name = (known after apply)
+ divisor = (known after apply)
+ resource = (known after apply)
}
}
}
+ empty_dir {
+ medium = (known after apply)
+ size_limit = (known after apply)
}
+ fc {
+ fs_type = (known after apply)
+ lun = (known after apply)
+ read_only = (known after apply)
+ target_ww_ns = (known after apply)
}
+ flex_volume {
+ driver = (known after apply)
+ fs_type = (known after apply)
+ options = (known after apply)
+ read_only = (known after apply)
+ secret_ref {
+ name = (known after apply)
+ namespace = (known after apply)
}
}
+ flocker {
+ dataset_name = (known after apply)
+ dataset_uuid = (known after apply)
}
+ gce_persistent_disk {
+ fs_type = (known after apply)
+ partition = (known after apply)
+ pd_name = (known after apply)
+ read_only = (known after apply)
}
+ git_repo {
+ directory = (known after apply)
+ repository = (known after apply)
+ revision = (known after apply)
}
+ glusterfs {
+ endpoints_name = (known after apply)
+ path = (known after apply)
+ read_only = (known after apply)
}
+ host_path {
+ path = (known after apply)
+ type = (known after apply)
}
+ iscsi {
+ fs_type = (known after apply)
+ iqn = (known after apply)
+ iscsi_interface = (known after apply)
+ lun = (known after apply)
+ read_only = (known after apply)
+ target_portal = (known after apply)
}
+ local {
+ path = (known after apply)
}
+ nfs {
+ path = (known after apply)
+ read_only = (known after apply)
+ server = (known after apply)
}
+ persistent_volume_claim {
+ claim_name = (known after apply)
+ read_only = (known after apply)
}
+ photon_persistent_disk {
+ fs_type = (known after apply)
+ pd_id = (known after apply)
}
+ projected {
+ default_mode = (known after apply)
+ sources {
+ config_map {
+ name = (known after apply)
+ optional = (known after apply)
+ items {
+ key = (known after apply)
+ mode = (known after apply)
+ path = (known after apply)
}
}
+ downward_api {
+ items {
+ mode = (known after apply)
+ path = (known after apply)
+ field_ref {
+ api_version = (known after apply)
+ field_path = (known after apply)
}
+ resource_field_ref {
+ container_name = (known after apply)
+ divisor = (known after apply)
+ resource = (known after apply)
}
}
}
+ secret {
+ name = (known after apply)
+ optional = (known after apply)
+ items {
+ key = (known after apply)
+ mode = (known after apply)
+ path = (known after apply)
}
}
+ service_account_token {
+ audience = (known after apply)
+ expiration_seconds = (known after apply)
+ path = (known after apply)
}
}
}
+ quobyte {
+ group = (known after apply)
+ read_only = (known after apply)
+ registry = (known after apply)
+ user = (known after apply)
+ volume = (known after apply)
}
+ rbd {
+ ceph_monitors = (known after apply)
+ fs_type = (known after apply)
+ keyring = (known after apply)
+ rados_user = (known after apply)
+ rbd_image = (known after apply)
+ rbd_pool = (known after apply)
+ read_only = (known after apply)
+ secret_ref {
+ name = (known after apply)
+ namespace = (known after apply)
}
}
+ secret {
+ default_mode = (known after apply)
+ optional = (known after apply)
+ secret_name = (known after apply)
+ items {
+ key = (known after apply)
+ mode = (known after apply)
+ path = (known after apply)
}
}
+ vsphere_volume {
+ fs_type = (known after apply)
+ volume_path = (known after apply)
}
}
}
}
}
如何动态地迭代节点组的污染,为该节点组构造正确的pod容忍度?
1条答案
按热度按时间wgx48brx1#
这是由于Token System为CDKTF提供了支持。访问
ssdEksNodeGroupPublicLargeSubnet.taint
会给你一个token,而不是真实的的运行时值,它是对污点的引用。这个引用后来被Terraform用于例如对单个资源的部署进行排序。如果你的输入是静态的,你可以通过访问
ssdEksNodeGroupPublicLargeSubnet.taintInput
直接使用输入。如果你想在地形侧而不是在合成期间循环污染,你可以使用动态块,例如。