@@ -15,26 +15,24 @@ limitations under the License.
 package integration_test
 
 import (
+	"math"
+
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"knative.dev/pkg/ptr"
 
 	"github.com/aws/karpenter/pkg/apis/awsnodetemplate/v1alpha1"
 	"github.com/aws/karpenter/pkg/apis/provisioning/v1alpha5"
 	awsv1alpha1 "github.com/aws/karpenter/pkg/cloudprovider/aws/apis/v1alpha1"
+	"github.com/aws/karpenter/pkg/scheduling"
 	"github.com/aws/karpenter/pkg/test"
 )
 
 var _ = Describe("KubeletConfiguration Overrides", func() {
 	It("should schedule pods onto separate nodes when maxPods is set", func() {
-		// Get the total number of daemonsets so that we see how many pods will be taken up
-		// by daemonset overhead
-		dsList := &appsv1.DaemonSetList{}
-		Expect(env.Client.List(env.Context, dsList)).To(Succeed())
-		dsCount := len(dsList.Items)
-
 		provider := test.AWSNodeTemplate(v1alpha1.AWSNodeTemplateSpec{AWS: awsv1alpha1.AWS{
 			SecurityGroupSelector: map[string]string{"karpenter.sh/discovery": env.ClusterName},
 			SubnetSelector:        map[string]string{"karpenter.sh/discovery": env.ClusterName},
@@ -43,26 +41,36 @@ var _ = Describe("KubeletConfiguration Overrides", func() {
 		// MaxPods needs to account for the daemonsets that will run on the nodes
 		provisioner := test.Provisioner(test.ProvisionerOptions{
 			ProviderRef: &v1alpha5.ProviderRef{Name: provider.Name},
-			Kubelet: &v1alpha5.KubeletConfiguration{
-				MaxPods: ptr.Int32(1 + int32(dsCount)),
+			Requirements: []v1.NodeSelectorRequirement{
+				{
+					Key:      v1.LabelOSStable,
+					Operator: v1.NodeSelectorOpIn,
+					Values:   []string{string(v1.Linux)},
+				},
 			},
 		})
 
+		// Get the DS pod count and use it to calculate the DS pod overhead
+		dsCount := getDaemonSetPodCount(provisioner)
+		provisioner.Spec.KubeletConfiguration = &v1alpha5.KubeletConfiguration{
+			MaxPods: ptr.Int32(1 + int32(dsCount)),
+		}
+
 		pods := []*v1.Pod{test.Pod(), test.Pod(), test.Pod()}
 		env.ExpectCreated(provisioner, provider)
 		for _, pod := range pods {
 			env.ExpectCreated(pod)
 		}
 		env.EventuallyExpectHealthy(pods...)
 		env.ExpectCreatedNodeCount("==", 3)
+
+		nodeNames := sets.NewString()
+		for _, pod := range pods {
+			nodeNames.Insert(pod.Spec.NodeName)
+		}
+		Expect(len(nodeNames)).To(BeNumerically("==", 3))
 	})
 	It("should schedule pods onto separate nodes when podsPerCore is set", func() {
-		// Get the total number of daemonsets so that we see how many pods will be taken up
-		// by daemonset overhead
-		dsList := &appsv1.DaemonSetList{}
-		Expect(env.Client.List(env.Context, dsList)).To(Succeed())
-		dsCount := len(dsList.Items)
-
 		provider := test.AWSNodeTemplate(v1alpha1.AWSNodeTemplateSpec{AWS: awsv1alpha1.AWS{
 			SecurityGroupSelector: map[string]string{"karpenter.sh/discovery": env.ClusterName},
 			SubnetSelector:        map[string]string{"karpenter.sh/discovery": env.ClusterName},
@@ -71,25 +79,46 @@ var _ = Describe("KubeletConfiguration Overrides", func() {
 		// This will have 4 pods available on each node (2 taken by daemonset pods)
 		provisioner := test.Provisioner(test.ProvisionerOptions{
 			ProviderRef: &v1alpha5.ProviderRef{Name: provider.Name},
-			Kubelet: &v1alpha5.KubeletConfiguration{
-				PodsPerCore: ptr.Int32(2 + int32(dsCount)),
-			},
 			Requirements: []v1.NodeSelectorRequirement{
 				{
 					Key:      awsv1alpha1.LabelInstanceCPU,
 					Operator: v1.NodeSelectorOpIn,
-					Values:   []string{"1"},
+					Values:   []string{"2"},
+				},
+				{
+					Key:      v1.LabelOSStable,
+					Operator: v1.NodeSelectorOpIn,
+					Values:   []string{string(v1.Linux)},
 				},
 			},
 		})
 
+		// Get the DS pod count and use it to calculate the DS pod overhead
+		// We calculate podsPerCore to split the test pods and the DS pods between two nodes:
+		//   1. If the # of DS pods is odd, e.g. ceil((3+2)/2) = 3
+		//      Since we restrict the node to two cores, we will allow 6 pods. One node will have 3
+		//      DS pods and 3 test pods. The other node will have 1 test pod and 3 DS pods.
+		//   2. If the # of DS pods is even, e.g. ceil((4+2)/2) = 3
+		//      Since we restrict the node to two cores, we will allow 6 pods. Both nodes will have
+		//      4 DS pods and 2 test pods.
+		dsCount := getDaemonSetPodCount(provisioner)
+		provisioner.Spec.KubeletConfiguration = &v1alpha5.KubeletConfiguration{
+			PodsPerCore: ptr.Int32(int32(math.Ceil(float64(2+dsCount) / 2))),
+		}
+
 		pods := []*v1.Pod{test.Pod(), test.Pod(), test.Pod(), test.Pod()}
 		env.ExpectCreated(provisioner, provider)
 		for _, pod := range pods {
 			env.ExpectCreated(pod)
 		}
 		env.EventuallyExpectHealthy(pods...)
 		env.ExpectCreatedNodeCount("==", 2)
+
+		nodeNames := sets.NewString()
+		for _, pod := range pods {
+			nodeNames.Insert(pod.Spec.NodeName)
+		}
+		Expect(len(nodeNames)).To(BeNumerically("==", 2))
 	})
 	It("should ignore podsPerCore value when Bottlerocket is used", func() {
 		provider := test.AWSNodeTemplate(v1alpha1.AWSNodeTemplateSpec{AWS: awsv1alpha1.AWS{
@@ -108,7 +137,7 @@ var _ = Describe("KubeletConfiguration Overrides", func() {
 				{
 					Key:      awsv1alpha1.LabelInstanceCPU,
 					Operator: v1.NodeSelectorOpIn,
-					Values:   []string{"1"},
+					Values:   []string{"2"},
 				},
 			},
 		})
@@ -122,3 +151,24 @@ var _ = Describe("KubeletConfiguration Overrides", func() {
 		env.ExpectCreatedNodeCount("==", 1)
 	})
 })
+
+// Performs the same logic as the scheduler to get the number of daemonset
+// pods that we estimate we will need to schedule as overhead to each node
+func getDaemonSetPodCount(provisioner *v1alpha5.Provisioner) int {
+	daemonSetList := &appsv1.DaemonSetList{}
+	Expect(env.Client.List(env.Context, daemonSetList)).To(Succeed())
+
+	count := 0
+	for _, daemonSet := range daemonSetList.Items {
+		p := &v1.Pod{Spec: daemonSet.Spec.Template.Spec}
+		nodeTemplate := scheduling.NewNodeTemplate(provisioner)
+		if err := nodeTemplate.Taints.Tolerates(p); err != nil {
+			continue
+		}
+		if err := nodeTemplate.Requirements.Compatible(scheduling.NewPodRequirements(p)); err != nil {
+			continue
+		}
+		count++
+	}
+	return count
+}
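
For reference, a minimal standalone sketch (not part of the diff above) of the podsPerCore arithmetic that the comment in the podsPerCore test describes; the helper name podsPerCore and the hard-coded assumption of two 2-vCPU nodes are illustrative only:

package main

import (
	"fmt"
	"math"
)

// podsPerCore mirrors the test's calculation: pick a per-core pod limit that
// splits the 4 test pods plus the daemonset pods across two 2-vCPU nodes.
func podsPerCore(dsCount int) int32 {
	return int32(math.Ceil(float64(2+dsCount) / 2))
}

func main() {
	// Odd DS count: ceil((2+3)/2) = 3 pods per core, i.e. 6 pods on a 2-vCPU node.
	fmt.Println(podsPerCore(3)) // 3
	// Even DS count: ceil((2+4)/2) = 3 as well.
	fmt.Println(podsPerCore(4)) // 3
}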