From cf2a6763704d0f3e2c3d842a972d8268b2e21f1e Mon Sep 17 00:00:00 2001
From: Yuichiro Ueno
Date: Sat, 13 Nov 2021 15:24:05 +0900
Subject: [PATCH] core: add context parameter to k8sutil node

This commit adds a context parameter to the k8sutil node functions. This
allows cancellation to be handled during API calls on node resources.

Signed-off-by: Yuichiro Ueno
---
 cmd/rook/ceph/osd.go | 10 ++++++----
 pkg/operator/ceph/cluster/cephstatus.go | 2 +-
 pkg/operator/ceph/cluster/cleanup.go | 2 +-
 pkg/operator/ceph/cluster/osd/create.go | 4 ++--
 pkg/operator/ceph/cluster/osd/osd.go | 25 +++++++++++--------------
 pkg/operator/discover/discover.go | 15 ++++++---------
 pkg/operator/discover/discover_test.go | 6 +++---
 pkg/operator/k8sutil/node.go | 19 +++++++------------
 pkg/operator/k8sutil/node_test.go | 18 +++++++++---------
 tests/integration/ceph_mgr_test.go | 2 +-
 10 files changed, 47 insertions(+), 56 deletions(-)

diff --git a/cmd/rook/ceph/osd.go b/cmd/rook/ceph/osd.go
index a47a0b4fb1e7d..1d96d9f28d221 100644
--- a/cmd/rook/ceph/osd.go
+++ b/cmd/rook/ceph/osd.go
@@ -214,9 +214,11 @@ func prepareOSD(cmd *cobra.Command, args []string) error {
 }
 }
+ backgroundCtx := ctx.Background()
+
 context := createContext()
 commonOSDInit(provisionCmd)
- crushLocation, topologyAffinity, err := getLocation(context.Clientset)
+ crushLocation, topologyAffinity, err := getLocation(backgroundCtx, context.Clientset)
 if err != nil {
 rook.TerminateFatal(err)
 }
@@ -227,7 +229,7 @@ func prepareOSD(cmd *cobra.Command, args []string) error {
 ownerRef := opcontroller.ClusterOwnerRef(clusterName, ownerRefID)
 ownerInfo := k8sutil.NewOwnerInfoWithOwnerRef(&ownerRef, clusterInfo.Namespace)
 clusterInfo.OwnerInfo = ownerInfo
- clusterInfo.Context = ctx.Background()
+ clusterInfo.Context = backgroundCtx
 kv := k8sutil.NewConfigMapKVStore(clusterInfo.Namespace, context.Clientset, ownerInfo)
 agent := osddaemon.NewAgent(context, dataDevices, cfg.metadataDevice, forceFormat, cfg.storeConfig, &clusterInfo, cfg.nodeName, kv, cfg.pvcBacked)
@@ -281,13 +283,13 @@ func commonOSDInit(cmd *cobra.Command) {
 }
 // use zone/region/hostname labels in the crushmap
-func getLocation(clientset kubernetes.Interface) (string, string, error) {
+func getLocation(ctx ctx.Context, clientset kubernetes.Interface) (string, string, error) {
 // get the value the operator instructed to use as the host name in the CRUSH map
 hostNameLabel := os.Getenv("ROOK_CRUSHMAP_HOSTNAME")
 rootLabel := os.Getenv(oposd.CrushRootVarName)
- loc, topologyAffinity, err := oposd.GetLocationWithNode(clientset, os.Getenv(k8sutil.NodeNameEnvVar), rootLabel, hostNameLabel)
+ loc, topologyAffinity, err := oposd.GetLocationWithNode(ctx, clientset, os.Getenv(k8sutil.NodeNameEnvVar), rootLabel, hostNameLabel)
 if err != nil {
 return "", "", err
 }
diff --git a/pkg/operator/ceph/cluster/cephstatus.go b/pkg/operator/ceph/cluster/cephstatus.go
index 4b2e210501ca5..40a8132e44518 100644
--- a/pkg/operator/ceph/cluster/cephstatus.go
+++ b/pkg/operator/ceph/cluster/cephstatus.go
@@ -286,7 +286,7 @@ func cephStatusOnError(errorMessage string) *cephclient.CephStatus {
 // forceDeleteStuckPodsOnNotReadyNodes lists all the nodes that are in NotReady state and
 // gets all the pods on the failed node and force delete the pods stuck in terminating state.
func (c *cephStatusChecker) forceDeleteStuckRookPodsOnNotReadyNodes(ctx context.Context) error { - nodes, err := k8sutil.GetNotReadyKubernetesNodes(c.context.Clientset) + nodes, err := k8sutil.GetNotReadyKubernetesNodes(ctx, c.context.Clientset) if err != nil { return errors.Wrap(err, "failed to get NotReady nodes") } diff --git a/pkg/operator/ceph/cluster/cleanup.go b/pkg/operator/ceph/cluster/cleanup.go index c86ccdee649e7..fe0cd1c6829fc 100644 --- a/pkg/operator/ceph/cluster/cleanup.go +++ b/pkg/operator/ceph/cluster/cleanup.go @@ -232,7 +232,7 @@ func (c *ClusterController) getCephHosts(namespace string) ([]string, error) { logger.Infof("existing ceph daemons in the namespace %q. %s", namespace, b.String()) for nodeName := range nodeNameList { - podHostName, err := k8sutil.GetNodeHostName(c.context.Clientset, nodeName) + podHostName, err := k8sutil.GetNodeHostName(c.OpManagerCtx, c.context.Clientset, nodeName) if err != nil { return nil, errors.Wrapf(err, "failed to get hostname from node %q", nodeName) } diff --git a/pkg/operator/ceph/cluster/osd/create.go b/pkg/operator/ceph/cluster/osd/create.go index 173cecab06990..b490dec51dc19 100644 --- a/pkg/operator/ceph/cluster/osd/create.go +++ b/pkg/operator/ceph/cluster/osd/create.go @@ -272,7 +272,7 @@ func (c *Cluster) startProvisioningOverNodes(config *provisionConfig, errs *prov } // Get the list of all nodes in the cluster. The placement settings will be applied below. - hostnameMap, err := k8sutil.GetNodeHostNames(c.context.Clientset) + hostnameMap, err := k8sutil.GetNodeHostNames(c.clusterInfo.Context, c.context.Clientset) if err != nil { errs.addError("failed to provision OSDs on nodes. failed to get node hostnames. %v", err) return sets.NewString(), nil @@ -287,7 +287,7 @@ func (c *Cluster) startProvisioningOverNodes(config *provisionConfig, errs *prov logger.Debugf("storage nodes: %+v", c.spec.Storage.Nodes) } // generally speaking, this finds nodes which are capable of running new osds - validNodes := k8sutil.GetValidNodes(c.spec.Storage, c.context.Clientset, cephv1.GetOSDPlacement(c.spec.Placement)) + validNodes := k8sutil.GetValidNodes(c.clusterInfo.Context, c.spec.Storage, c.context.Clientset, cephv1.GetOSDPlacement(c.spec.Placement)) logger.Infof("%d of the %d storage nodes are valid", len(validNodes), len(c.spec.Storage.Nodes)) diff --git a/pkg/operator/ceph/cluster/osd/osd.go b/pkg/operator/ceph/cluster/osd/osd.go index a0d619891c1a2..b9a9a008011d8 100644 --- a/pkg/operator/ceph/cluster/osd/osd.go +++ b/pkg/operator/ceph/cluster/osd/osd.go @@ -428,7 +428,7 @@ func (c *Cluster) getPVCHostName(pvcName string) (string, error) { return "", errors.Wrapf(err, "failed to get pod for osd with pvc %q", pvcName) } for _, pod := range pods.Items { - name, err := k8sutil.GetNodeHostName(c.context.Clientset, pod.Spec.NodeName) + name, err := k8sutil.GetNodeHostName(c.clusterInfo.Context, c.context.Clientset, pod.Spec.NodeName) if err != nil { logger.Warningf("falling back to node name %s since hostname not found for node", pod.Spec.NodeName) name = pod.Spec.NodeName @@ -515,7 +515,7 @@ func (c *Cluster) getOSDInfo(d *appsv1.Deployment) (OSDInfo, error) { // if the ROOK_TOPOLOGY_AFFINITY env var was not found in the loop above, detect it from the node if isPVC && osd.TopologyAffinity == "" { - osd.TopologyAffinity, err = getTopologyFromNode(c.context.Clientset, d, osd) + osd.TopologyAffinity, err = getTopologyFromNode(c.clusterInfo.Context, c.context.Clientset, d, osd) if err != nil { logger.Errorf("failed to get topology affinity for 
osd %d. %v", osd.ID, err) } @@ -533,7 +533,7 @@ func (c *Cluster) getOSDInfo(d *appsv1.Deployment) (OSDInfo, error) { } if !locationFound { - location, _, err := getLocationFromPod(c.context.Clientset, d, cephclient.GetCrushRootFromSpec(&c.spec)) + location, _, err := getLocationFromPod(c.clusterInfo.Context, c.context.Clientset, d, cephclient.GetCrushRootFromSpec(&c.spec)) if err != nil { logger.Errorf("failed to get location. %v", err) } else { @@ -594,14 +594,13 @@ func getBlockPathFromActivateInitContainer(d *appsv1.Deployment) (string, error) return "", errors.Errorf("failed to find activate init container") } -func getLocationFromPod(clientset kubernetes.Interface, d *appsv1.Deployment, crushRoot string) (string, string, error) { - ctx := context.TODO() +func getLocationFromPod(ctx context.Context, clientset kubernetes.Interface, d *appsv1.Deployment, crushRoot string) (string, string, error) { pods, err := clientset.CoreV1().Pods(d.Namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", OsdIdLabelKey, d.Labels[OsdIdLabelKey])}) if err != nil || len(pods.Items) == 0 { return "", "", err } nodeName := pods.Items[0].Spec.NodeName - hostName, err := k8sutil.GetNodeHostName(clientset, nodeName) + hostName, err := k8sutil.GetNodeHostName(ctx, clientset, nodeName) if err != nil { return "", "", err } @@ -612,10 +611,10 @@ func getLocationFromPod(clientset kubernetes.Interface, d *appsv1.Deployment, cr hostName = pvcName } } - return GetLocationWithNode(clientset, nodeName, crushRoot, hostName) + return GetLocationWithNode(ctx, clientset, nodeName, crushRoot, hostName) } -func getTopologyFromNode(clientset kubernetes.Interface, d *appsv1.Deployment, osd OSDInfo) (string, error) { +func getTopologyFromNode(ctx context.Context, clientset kubernetes.Interface, d *appsv1.Deployment, osd OSDInfo) (string, error) { portable, ok := d.GetLabels()[portableKey] if !ok || portable != "true" { // osd is not portable, no need to load the topology affinity @@ -624,7 +623,6 @@ func getTopologyFromNode(clientset kubernetes.Interface, d *appsv1.Deployment, o logger.Infof("detecting topology affinity for osd %d after upgrade", osd.ID) // Get the osd pod and its assigned node, then look up the node labels - ctx := context.TODO() pods, err := clientset.CoreV1().Pods(d.Namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", OsdIdLabelKey, d.Labels[OsdIdLabelKey])}) if err != nil { return "", errors.Wrap(err, "failed to get osd pod") @@ -636,7 +634,7 @@ func getTopologyFromNode(clientset kubernetes.Interface, d *appsv1.Deployment, o if nodeName == "" { return "", errors.Errorf("osd %d is not assigned to a node, cannot detect topology affinity", osd.ID) } - node, err := getNode(clientset, nodeName) + node, err := getNode(ctx, clientset, nodeName) if err != nil { return "", errors.Wrap(err, "failed to get the node for topology affinity") } @@ -649,8 +647,8 @@ func getTopologyFromNode(clientset kubernetes.Interface, d *appsv1.Deployment, o // location: The CRUSH properties for the OSD to apply // topologyAffinity: The label to be applied to the OSD daemon to guarantee it will start in the same // topology as the OSD prepare job. 
-func GetLocationWithNode(clientset kubernetes.Interface, nodeName string, crushRoot, crushHostname string) (string, string, error) { - node, err := getNode(clientset, nodeName) +func GetLocationWithNode(ctx context.Context, clientset kubernetes.Interface, nodeName string, crushRoot, crushHostname string) (string, string, error) { + node, err := getNode(ctx, clientset, nodeName) if err != nil { return "", "", errors.Wrap(err, "could not get the node for topology labels") } @@ -679,8 +677,7 @@ func GetLocationWithNode(clientset kubernetes.Interface, nodeName string, crushR // getNode will try to get the node object for the provided nodeName // it will try using the node's name it's hostname label -func getNode(clientset kubernetes.Interface, nodeName string) (*corev1.Node, error) { - ctx := context.TODO() +func getNode(ctx context.Context, clientset kubernetes.Interface, nodeName string) (*corev1.Node, error) { var node *corev1.Node var err error // try to find by the node by matching the provided nodeName diff --git a/pkg/operator/discover/discover.go b/pkg/operator/discover/discover.go index e0761205e8802..c77e0ce7ed8b7 100644 --- a/pkg/operator/discover/discover.go +++ b/pkg/operator/discover/discover.go @@ -250,12 +250,11 @@ func getEnvVar(varName string, defaultValue string) string { } // ListDevices lists all devices discovered on all nodes or specific node if node name is provided. -func ListDevices(clusterdContext *clusterd.Context, namespace, nodeName string) (map[string][]sys.LocalDisk, error) { - ctx := context.TODO() +func ListDevices(ctx context.Context, clusterdContext *clusterd.Context, namespace, nodeName string) (map[string][]sys.LocalDisk, error) { // convert the host name label to the k8s node name to look up the configmap with the devices if len(nodeName) > 0 { var err error - nodeName, err = k8sutil.GetNodeNameFromHostname(clusterdContext.Clientset, nodeName) + nodeName, err = k8sutil.GetNodeNameFromHostname(ctx, clusterdContext.Clientset, nodeName) if err != nil { logger.Warningf("failed to get node name from hostname. %+v", err) } @@ -314,8 +313,7 @@ func ListDevices(clusterdContext *clusterd.Context, namespace, nodeName string) } // ListDevicesInUse lists all devices on a node that are already used by existing clusters. -func ListDevicesInUse(clusterdContext *clusterd.Context, namespace, nodeName string) ([]sys.LocalDisk, error) { - ctx := context.TODO() +func ListDevicesInUse(ctx context.Context, clusterdContext *clusterd.Context, namespace, nodeName string) ([]sys.LocalDisk, error) { var devices []sys.LocalDisk if len(nodeName) == 0 { @@ -364,15 +362,14 @@ func matchDeviceFullPath(devLinks, fullpath string) bool { } // GetAvailableDevices conducts outer join using input filters with free devices that a node has. It marks the devices from join result as in-use. 
-func GetAvailableDevices(clusterdContext *clusterd.Context, nodeName, clusterName string, devices []cephv1.Device, filter string, useAllDevices bool) ([]cephv1.Device, error) { - ctx := context.TODO() +func GetAvailableDevices(ctx context.Context, clusterdContext *clusterd.Context, nodeName, clusterName string, devices []cephv1.Device, filter string, useAllDevices bool) ([]cephv1.Device, error) { results := []cephv1.Device{} if len(devices) == 0 && len(filter) == 0 && !useAllDevices { return results, nil } namespace := os.Getenv(k8sutil.PodNamespaceEnvVar) // find all devices - allDevices, err := ListDevices(clusterdContext, namespace, nodeName) + allDevices, err := ListDevices(ctx, clusterdContext, namespace, nodeName) if err != nil { return results, err } @@ -382,7 +379,7 @@ func GetAvailableDevices(clusterdContext *clusterd.Context, nodeName, clusterNam return results, fmt.Errorf("node %s has no devices", nodeName) } // find those in use on the node - devicesInUse, err := ListDevicesInUse(clusterdContext, namespace, nodeName) + devicesInUse, err := ListDevicesInUse(ctx, clusterdContext, namespace, nodeName) if err != nil { return results, err } diff --git a/pkg/operator/discover/discover_test.go b/pkg/operator/discover/discover_test.go index ba7a86e78c748..994575f95da78 100644 --- a/pkg/operator/discover/discover_test.go +++ b/pkg/operator/discover/discover_test.go @@ -129,15 +129,15 @@ func TestGetAvailableDevices(t *testing.T) { }, } - nodeDevices, err := ListDevices(context, ns, "" /* all nodes */) + nodeDevices, err := ListDevices(ctx, context, ns, "" /* all nodes */) assert.Nil(t, err) assert.Equal(t, 1, len(nodeDevices)) - devices, err := GetAvailableDevices(context, nodeName, ns, d, "^sd.", pvcBackedOSD) + devices, err := GetAvailableDevices(ctx, context, nodeName, ns, d, "^sd.", pvcBackedOSD) assert.Nil(t, err) assert.Equal(t, 1, len(devices)) // devices should be in use now, 2nd try gets the same list - devices, err = GetAvailableDevices(context, nodeName, ns, d, "^sd.", pvcBackedOSD) + devices, err = GetAvailableDevices(ctx, context, nodeName, ns, d, "^sd.", pvcBackedOSD) assert.Nil(t, err) assert.Equal(t, 1, len(devices)) } diff --git a/pkg/operator/k8sutil/node.go b/pkg/operator/k8sutil/node.go index 284171c84c259..c7be30551a008 100644 --- a/pkg/operator/k8sutil/node.go +++ b/pkg/operator/k8sutil/node.go @@ -62,8 +62,8 @@ func ValidNode(node v1.Node, placement cephv1.Placement) (bool, error) { // GetValidNodes returns all nodes that (1) are not cordoned, (2) meet Rook's placement terms, and // (3) are ready. -func GetValidNodes(rookStorage cephv1.StorageScopeSpec, clientset kubernetes.Interface, placement cephv1.Placement) []cephv1.Node { - matchingK8sNodes, err := GetKubernetesNodesMatchingRookNodes(rookStorage.Nodes, clientset) +func GetValidNodes(ctx context.Context, rookStorage cephv1.StorageScopeSpec, clientset kubernetes.Interface, placement cephv1.Placement) []cephv1.Node { + matchingK8sNodes, err := GetKubernetesNodesMatchingRookNodes(ctx, rookStorage.Nodes, clientset) if err != nil { // cannot list nodes, return empty nodes logger.Errorf("failed to list nodes: %+v", err) @@ -86,8 +86,7 @@ func GetValidNodes(rookStorage cephv1.StorageScopeSpec, clientset kubernetes.Int // GetNodeNameFromHostname returns the name of the node resource looked up by the hostname label // Typically these will be the same name, but sometimes they are not such as when nodes have a longer // dns name, but the hostname is short. 
-func GetNodeNameFromHostname(clientset kubernetes.Interface, hostName string) (string, error) { - ctx := context.TODO() +func GetNodeNameFromHostname(ctx context.Context, clientset kubernetes.Interface, hostName string) (string, error) { options := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", v1.LabelHostname, hostName)} nodes, err := clientset.CoreV1().Nodes().List(ctx, options) if err != nil { @@ -101,8 +100,7 @@ func GetNodeNameFromHostname(clientset kubernetes.Interface, hostName string) (s } // GetNodeHostName returns the hostname label given the node name. -func GetNodeHostName(clientset kubernetes.Interface, nodeName string) (string, error) { - ctx := context.TODO() +func GetNodeHostName(ctx context.Context, clientset kubernetes.Interface, nodeName string) (string, error) { node, err := clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { return "", err @@ -121,8 +119,7 @@ func GetNodeHostNameLabel(node *v1.Node) (string, error) { // GetNodeHostNames returns the name of the node resource mapped to their hostname label. // Typically these will be the same name, but sometimes they are not such as when nodes have a longer // dns name, but the hostname is short. -func GetNodeHostNames(clientset kubernetes.Interface) (map[string]string, error) { - ctx := context.TODO() +func GetNodeHostNames(ctx context.Context, clientset kubernetes.Interface) (map[string]string, error) { nodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { return nil, err @@ -269,8 +266,7 @@ func normalizeHostname(kubernetesNode v1.Node) string { // GetKubernetesNodesMatchingRookNodes lists all the nodes in Kubernetes and returns all the // Kubernetes nodes that have a corresponding match in the list of Rook nodes. 
-func GetKubernetesNodesMatchingRookNodes(rookNodes []cephv1.Node, clientset kubernetes.Interface) ([]v1.Node, error) { - ctx := context.TODO() +func GetKubernetesNodesMatchingRookNodes(ctx context.Context, rookNodes []cephv1.Node, clientset kubernetes.Interface) ([]v1.Node, error) { nodes := []v1.Node{} k8sNodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { @@ -287,8 +283,7 @@ func GetKubernetesNodesMatchingRookNodes(rookNodes []cephv1.Node, clientset kube } // GetNotReadyKubernetesNodes lists all the nodes that are in NotReady state -func GetNotReadyKubernetesNodes(clientset kubernetes.Interface) ([]v1.Node, error) { - ctx := context.TODO() +func GetNotReadyKubernetesNodes(ctx context.Context, clientset kubernetes.Interface) ([]v1.Node, error) { nodes := []v1.Node{} k8sNodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { diff --git a/pkg/operator/k8sutil/node_test.go b/pkg/operator/k8sutil/node_test.go index 9b98f6da4540c..ae00d37e17ce0 100644 --- a/pkg/operator/k8sutil/node_test.go +++ b/pkg/operator/k8sutil/node_test.go @@ -70,7 +70,7 @@ func TestValidNode(t *testing.T) { assert.Nil(t, nodeErr) nodeErr = createNode(nodeB, v1.NodeNetworkUnavailable, clientset) assert.Nil(t, nodeErr) - validNodes := GetValidNodes(storage, clientset, placement) + validNodes := GetValidNodes(context.TODO(), storage, clientset, placement) assert.Equal(t, len(validNodes), 1) } @@ -214,7 +214,7 @@ func TestGetRookNodesMatchingKubernetesNodes(t *testing.T) { } // no rook nodes specified - nodes, err := GetKubernetesNodesMatchingRookNodes(rookNodes, clientset) + nodes, err := GetKubernetesNodesMatchingRookNodes(ctx, rookNodes, clientset) assert.NoError(t, err) assert.Empty(t, nodes) @@ -223,7 +223,7 @@ func TestGetRookNodesMatchingKubernetesNodes(t *testing.T) { {Name: "node0"}, {Name: "node2"}, {Name: "node5"}} - nodes, err = GetKubernetesNodesMatchingRookNodes(rookNodes, clientset) + nodes, err = GetKubernetesNodesMatchingRookNodes(ctx, rookNodes, clientset) assert.NoError(t, err) assert.Len(t, nodes, 2) assert.Contains(t, nodes, getNode("node0")) @@ -234,7 +234,7 @@ func TestGetRookNodesMatchingKubernetesNodes(t *testing.T) { {Name: "node0"}, {Name: "node1"}, {Name: "node2"}} - nodes, err = GetKubernetesNodesMatchingRookNodes(rookNodes, clientset) + nodes, err = GetKubernetesNodesMatchingRookNodes(ctx, rookNodes, clientset) assert.NoError(t, err) assert.Len(t, nodes, 3) assert.Contains(t, nodes, getNode("node0")) @@ -243,7 +243,7 @@ func TestGetRookNodesMatchingKubernetesNodes(t *testing.T) { // no k8s nodes exist clientset = optest.New(t, 0) - nodes, err = GetKubernetesNodesMatchingRookNodes(rookNodes, clientset) + nodes, err = GetKubernetesNodesMatchingRookNodes(ctx, rookNodes, clientset) assert.NoError(t, err) assert.Len(t, nodes, 0) } @@ -379,13 +379,13 @@ func TestGetNotReadyKubernetesNodes(t *testing.T) { clientset := optest.New(t, 0) //when there is no node - nodes, err := GetNotReadyKubernetesNodes(clientset) + nodes, err := GetNotReadyKubernetesNodes(ctx, clientset) assert.NoError(t, err) assert.Equal(t, 0, len(nodes)) //when all the nodes are in ready state clientset = optest.New(t, 2) - nodes, err = GetNotReadyKubernetesNodes(clientset) + nodes, err = GetNotReadyKubernetesNodes(ctx, clientset) assert.NoError(t, err) assert.Equal(t, 0, len(nodes)) @@ -404,7 +404,7 @@ func TestGetNotReadyKubernetesNodes(t *testing.T) { } _, err = clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) assert.NoError(t, err) - 
nodes, err = GetNotReadyKubernetesNodes(clientset) + nodes, err = GetNotReadyKubernetesNodes(ctx, clientset) assert.NoError(t, err) assert.Equal(t, 1, len(nodes)) @@ -417,7 +417,7 @@ func TestGetNotReadyKubernetesNodes(t *testing.T) { _, err := clientset.CoreV1().Nodes().Update(ctx, &updateNode, metav1.UpdateOptions{}) assert.NoError(t, err) } - nodes, err = GetNotReadyKubernetesNodes(clientset) + nodes, err = GetNotReadyKubernetesNodes(ctx, clientset) assert.NoError(t, err) assert.Equal(t, 3, len(nodes)) } diff --git a/tests/integration/ceph_mgr_test.go b/tests/integration/ceph_mgr_test.go index 7dac1a98cdfdf..72a23dff28340 100644 --- a/tests/integration/ceph_mgr_test.go +++ b/tests/integration/ceph_mgr_test.go @@ -279,7 +279,7 @@ func (s *CephMgrSuite) TestHostLs() { sort.Strings(hostOutput) // get the k8s nodes - nodes, err := k8sutil.GetNodeHostNames(s.k8sh.Clientset) + nodes, err := k8sutil.GetNodeHostNames(context.TODO(), s.k8sh.Clientset) assert.Nil(s.T(), err) k8sNodes := make([]string, 0, len(nodes))
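
To illustrate what the new parameter enables, the sketch below shows a hypothetical caller that passes a cancellable context into one of the updated helpers. Only the GetNotReadyKubernetesNodes(ctx, clientset) signature comes from the diff above; the kubeconfig loading, the 30-second timeout, and the main wrapper are assumptions made for this example.

// Minimal sketch, not part of the patch: a caller that bounds the underlying
// node List call with a timeout by passing a context through the updated helper.
// The kubeconfig path and the 30-second timeout are assumptions for this example.
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"

	"github.com/rook/rook/pkg/operator/k8sutil"
)

func main() {
	// Build a clientset from the local kubeconfig (assumed to be ~/.kube/config).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// The context is cancelled automatically after 30 seconds, so a slow or hung
	// node List request no longer blocks the caller indefinitely.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	nodes, err := k8sutil.GetNotReadyKubernetesNodes(ctx, clientset)
	if err != nil {
		// If the context expired or was cancelled, the API call is aborted and
		// the error surfaces here.
		fmt.Printf("failed to list NotReady nodes: %v\n", err)
		return
	}
	fmt.Printf("%d node(s) are NotReady\n", len(nodes))
}

Because the context is now threaded down to the underlying Nodes().List and Nodes().Get calls instead of context.TODO(), a caller-side timeout or operator shutdown can interrupt a slow node lookup rather than leaving it running in the background.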