core: add context parameter to k8sutil node
This commit adds a context parameter to the k8sutil node functions. With this
change, cancellation can be handled during API calls on node resources.

Signed-off-by: Yuichiro Ueno <y1r.ueno@gmail.com>
y1r committed Nov 15, 2021
1 parent 7d87de8 commit cf2a676
Showing 10 changed files with 47 additions and 56 deletions.
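
The practical effect of this change is that callers can now bound or cancel the Kubernetes API calls these helpers make. A minimal caller-side sketch (the import path matches the files touched below; the timeout, node name, and wrapper function are illustrative assumptions, not code from this commit):

```go
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/rook/rook/pkg/operator/k8sutil"
	"k8s.io/client-go/kubernetes"
)

// lookupHostname bounds the node lookup to five seconds. If the API server does not
// answer in time, GetNodeHostName returns a context deadline error instead of
// blocking the caller indefinitely.
func lookupHostname(clientset kubernetes.Interface, nodeName string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	hostname, err := k8sutil.GetNodeHostName(ctx, clientset, nodeName)
	if err != nil {
		return "", fmt.Errorf("failed to get hostname for node %q: %w", nodeName, err)
	}
	return hostname, nil
}
```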
10 changes: 6 additions & 4 deletions cmd/rook/ceph/osd.go
@@ -214,9 +214,11 @@ func prepareOSD(cmd *cobra.Command, args []string) error {
}
}

+backgroundCtx := ctx.Background()

context := createContext()
commonOSDInit(provisionCmd)
-crushLocation, topologyAffinity, err := getLocation(context.Clientset)
+crushLocation, topologyAffinity, err := getLocation(backgroundCtx, context.Clientset)
if err != nil {
rook.TerminateFatal(err)
}
@@ -227,7 +229,7 @@ func prepareOSD(cmd *cobra.Command, args []string) error {
ownerRef := opcontroller.ClusterOwnerRef(clusterName, ownerRefID)
ownerInfo := k8sutil.NewOwnerInfoWithOwnerRef(&ownerRef, clusterInfo.Namespace)
clusterInfo.OwnerInfo = ownerInfo
-clusterInfo.Context = ctx.Background()
+clusterInfo.Context = backgroundCtx
kv := k8sutil.NewConfigMapKVStore(clusterInfo.Namespace, context.Clientset, ownerInfo)
agent := osddaemon.NewAgent(context, dataDevices, cfg.metadataDevice, forceFormat,
cfg.storeConfig, &clusterInfo, cfg.nodeName, kv, cfg.pvcBacked)
@@ -281,13 +283,13 @@ func commonOSDInit(cmd *cobra.Command) {
}

// use zone/region/hostname labels in the crushmap
-func getLocation(clientset kubernetes.Interface) (string, string, error) {
+func getLocation(ctx ctx.Context, clientset kubernetes.Interface) (string, string, error) {
// get the value the operator instructed to use as the host name in the CRUSH map
hostNameLabel := os.Getenv("ROOK_CRUSHMAP_HOSTNAME")

rootLabel := os.Getenv(oposd.CrushRootVarName)

-loc, topologyAffinity, err := oposd.GetLocationWithNode(clientset, os.Getenv(k8sutil.NodeNameEnvVar), rootLabel, hostNameLabel)
+loc, topologyAffinity, err := oposd.GetLocationWithNode(ctx, clientset, os.Getenv(k8sutil.NodeNameEnvVar), rootLabel, hostNameLabel)
if err != nil {
return "", "", err
}
2 changes: 1 addition & 1 deletion pkg/operator/ceph/cluster/cephstatus.go
@@ -286,7 +286,7 @@ func cephStatusOnError(errorMessage string) *cephclient.CephStatus {
// forceDeleteStuckPodsOnNotReadyNodes lists all the nodes that are in NotReady state and
// gets all the pods on the failed node and force delete the pods stuck in terminating state.
func (c *cephStatusChecker) forceDeleteStuckRookPodsOnNotReadyNodes(ctx context.Context) error {
-nodes, err := k8sutil.GetNotReadyKubernetesNodes(c.context.Clientset)
+nodes, err := k8sutil.GetNotReadyKubernetesNodes(ctx, c.context.Clientset)
if err != nil {
return errors.Wrap(err, "failed to get NotReady nodes")
}
2 changes: 1 addition & 1 deletion pkg/operator/ceph/cluster/cleanup.go
@@ -232,7 +232,7 @@ func (c *ClusterController) getCephHosts(namespace string) ([]string, error) {
logger.Infof("existing ceph daemons in the namespace %q. %s", namespace, b.String())

for nodeName := range nodeNameList {
-podHostName, err := k8sutil.GetNodeHostName(c.context.Clientset, nodeName)
+podHostName, err := k8sutil.GetNodeHostName(c.OpManagerCtx, c.context.Clientset, nodeName)
if err != nil {
return nil, errors.Wrapf(err, "failed to get hostname from node %q", nodeName)
}
4 changes: 2 additions & 2 deletions pkg/operator/ceph/cluster/osd/create.go
@@ -272,7 +272,7 @@ func (c *Cluster) startProvisioningOverNodes(config *provisionConfig, errs *prov
}

// Get the list of all nodes in the cluster. The placement settings will be applied below.
-hostnameMap, err := k8sutil.GetNodeHostNames(c.context.Clientset)
+hostnameMap, err := k8sutil.GetNodeHostNames(c.clusterInfo.Context, c.context.Clientset)
if err != nil {
errs.addError("failed to provision OSDs on nodes. failed to get node hostnames. %v", err)
return sets.NewString(), nil
@@ -287,7 +287,7 @@ func (c *Cluster) startProvisioningOverNodes(config *provisionConfig, errs *prov
logger.Debugf("storage nodes: %+v", c.spec.Storage.Nodes)
}
// generally speaking, this finds nodes which are capable of running new osds
-validNodes := k8sutil.GetValidNodes(c.spec.Storage, c.context.Clientset, cephv1.GetOSDPlacement(c.spec.Placement))
+validNodes := k8sutil.GetValidNodes(c.clusterInfo.Context, c.spec.Storage, c.context.Clientset, cephv1.GetOSDPlacement(c.spec.Placement))

logger.Infof("%d of the %d storage nodes are valid", len(validNodes), len(c.spec.Storage.Nodes))

25 changes: 11 additions & 14 deletions pkg/operator/ceph/cluster/osd/osd.go
@@ -428,7 +428,7 @@ func (c *Cluster) getPVCHostName(pvcName string) (string, error) {
return "", errors.Wrapf(err, "failed to get pod for osd with pvc %q", pvcName)
}
for _, pod := range pods.Items {
-name, err := k8sutil.GetNodeHostName(c.context.Clientset, pod.Spec.NodeName)
+name, err := k8sutil.GetNodeHostName(c.clusterInfo.Context, c.context.Clientset, pod.Spec.NodeName)
if err != nil {
logger.Warningf("falling back to node name %s since hostname not found for node", pod.Spec.NodeName)
name = pod.Spec.NodeName
@@ -515,7 +515,7 @@ func (c *Cluster) getOSDInfo(d *appsv1.Deployment) (OSDInfo, error) {

// if the ROOK_TOPOLOGY_AFFINITY env var was not found in the loop above, detect it from the node
if isPVC && osd.TopologyAffinity == "" {
-osd.TopologyAffinity, err = getTopologyFromNode(c.context.Clientset, d, osd)
+osd.TopologyAffinity, err = getTopologyFromNode(c.clusterInfo.Context, c.context.Clientset, d, osd)
if err != nil {
logger.Errorf("failed to get topology affinity for osd %d. %v", osd.ID, err)
}
@@ -533,7 +533,7 @@ func (c *Cluster) getOSDInfo(d *appsv1.Deployment) (OSDInfo, error) {
}

if !locationFound {
-location, _, err := getLocationFromPod(c.context.Clientset, d, cephclient.GetCrushRootFromSpec(&c.spec))
+location, _, err := getLocationFromPod(c.clusterInfo.Context, c.context.Clientset, d, cephclient.GetCrushRootFromSpec(&c.spec))
if err != nil {
logger.Errorf("failed to get location. %v", err)
} else {
@@ -594,14 +594,13 @@ func getBlockPathFromActivateInitContainer(d *appsv1.Deployment) (string, error)
return "", errors.Errorf("failed to find activate init container")
}

-func getLocationFromPod(clientset kubernetes.Interface, d *appsv1.Deployment, crushRoot string) (string, string, error) {
-ctx := context.TODO()
+func getLocationFromPod(ctx context.Context, clientset kubernetes.Interface, d *appsv1.Deployment, crushRoot string) (string, string, error) {
pods, err := clientset.CoreV1().Pods(d.Namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", OsdIdLabelKey, d.Labels[OsdIdLabelKey])})
if err != nil || len(pods.Items) == 0 {
return "", "", err
}
nodeName := pods.Items[0].Spec.NodeName
-hostName, err := k8sutil.GetNodeHostName(clientset, nodeName)
+hostName, err := k8sutil.GetNodeHostName(ctx, clientset, nodeName)
if err != nil {
return "", "", err
}
@@ -612,10 +611,10 @@ func getLocationFromPod(clientset kubernetes.Interface, d *appsv1.Deployment, cr
hostName = pvcName
}
}
-return GetLocationWithNode(clientset, nodeName, crushRoot, hostName)
+return GetLocationWithNode(ctx, clientset, nodeName, crushRoot, hostName)
}

-func getTopologyFromNode(clientset kubernetes.Interface, d *appsv1.Deployment, osd OSDInfo) (string, error) {
+func getTopologyFromNode(ctx context.Context, clientset kubernetes.Interface, d *appsv1.Deployment, osd OSDInfo) (string, error) {
portable, ok := d.GetLabels()[portableKey]
if !ok || portable != "true" {
// osd is not portable, no need to load the topology affinity
@@ -624,7 +623,6 @@ func getTopologyFromNode(clientset kubernetes.Interface, d *appsv1.Deployment, o
logger.Infof("detecting topology affinity for osd %d after upgrade", osd.ID)

// Get the osd pod and its assigned node, then look up the node labels
-ctx := context.TODO()
pods, err := clientset.CoreV1().Pods(d.Namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", OsdIdLabelKey, d.Labels[OsdIdLabelKey])})
if err != nil {
return "", errors.Wrap(err, "failed to get osd pod")
@@ -636,7 +634,7 @@ func getTopologyFromNode(clientset kubernetes.Interface, d *appsv1.Deployment, o
if nodeName == "" {
return "", errors.Errorf("osd %d is not assigned to a node, cannot detect topology affinity", osd.ID)
}
-node, err := getNode(clientset, nodeName)
+node, err := getNode(ctx, clientset, nodeName)
if err != nil {
return "", errors.Wrap(err, "failed to get the node for topology affinity")
}
@@ -649,8 +647,8 @@ func getTopologyFromNode(clientset kubernetes.Interface, d *appsv1.Deployment, o
// location: The CRUSH properties for the OSD to apply
// topologyAffinity: The label to be applied to the OSD daemon to guarantee it will start in the same
// topology as the OSD prepare job.
-func GetLocationWithNode(clientset kubernetes.Interface, nodeName string, crushRoot, crushHostname string) (string, string, error) {
-node, err := getNode(clientset, nodeName)
+func GetLocationWithNode(ctx context.Context, clientset kubernetes.Interface, nodeName string, crushRoot, crushHostname string) (string, string, error) {
+node, err := getNode(ctx, clientset, nodeName)
if err != nil {
return "", "", errors.Wrap(err, "could not get the node for topology labels")
}
@@ -679,8 +677,7 @@ func GetLocationWithNode(clientset kubernetes.Interface, nodeName string, crushR

// getNode will try to get the node object for the provided nodeName
// it will try using the node's name it's hostname label
-func getNode(clientset kubernetes.Interface, nodeName string) (*corev1.Node, error) {
-ctx := context.TODO()
+func getNode(ctx context.Context, clientset kubernetes.Interface, nodeName string) (*corev1.Node, error) {
var node *corev1.Node
var err error
// try to find by the node by matching the provided nodeName
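With the updated signature, GetLocationWithNode is driven by whatever context the caller already holds, so the node lookup it performs is cancelled together with it. A hedged usage sketch (the "default" CRUSH root and the empty hostname override are placeholder values for illustration):

```go
package example

import (
	"context"

	oposd "github.com/rook/rook/pkg/operator/ceph/cluster/osd"
	"k8s.io/client-go/kubernetes"
)

// crushLocationFor resolves the CRUSH location string and topology affinity label for
// a node, reusing the caller's context for the underlying node Get/List calls.
func crushLocationFor(ctx context.Context, clientset kubernetes.Interface, nodeName string) (string, string, error) {
	// Placeholder values: "default" as the CRUSH root and no explicit hostname override.
	return oposd.GetLocationWithNode(ctx, clientset, nodeName, "default", "")
}
```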
15 changes: 6 additions & 9 deletions pkg/operator/discover/discover.go
@@ -250,12 +250,11 @@ func getEnvVar(varName string, defaultValue string) string {
}

// ListDevices lists all devices discovered on all nodes or specific node if node name is provided.
-func ListDevices(clusterdContext *clusterd.Context, namespace, nodeName string) (map[string][]sys.LocalDisk, error) {
-ctx := context.TODO()
+func ListDevices(ctx context.Context, clusterdContext *clusterd.Context, namespace, nodeName string) (map[string][]sys.LocalDisk, error) {
// convert the host name label to the k8s node name to look up the configmap with the devices
if len(nodeName) > 0 {
var err error
-nodeName, err = k8sutil.GetNodeNameFromHostname(clusterdContext.Clientset, nodeName)
+nodeName, err = k8sutil.GetNodeNameFromHostname(ctx, clusterdContext.Clientset, nodeName)
if err != nil {
logger.Warningf("failed to get node name from hostname. %+v", err)
}
@@ -314,8 +313,7 @@ func ListDevices(clusterdContext *clusterd.Context, namespace, nodeName string)
}

// ListDevicesInUse lists all devices on a node that are already used by existing clusters.
-func ListDevicesInUse(clusterdContext *clusterd.Context, namespace, nodeName string) ([]sys.LocalDisk, error) {
-ctx := context.TODO()
+func ListDevicesInUse(ctx context.Context, clusterdContext *clusterd.Context, namespace, nodeName string) ([]sys.LocalDisk, error) {
var devices []sys.LocalDisk

if len(nodeName) == 0 {
@@ -364,15 +362,14 @@ func matchDeviceFullPath(devLinks, fullpath string) bool {
}

// GetAvailableDevices conducts outer join using input filters with free devices that a node has. It marks the devices from join result as in-use.
-func GetAvailableDevices(clusterdContext *clusterd.Context, nodeName, clusterName string, devices []cephv1.Device, filter string, useAllDevices bool) ([]cephv1.Device, error) {
-ctx := context.TODO()
+func GetAvailableDevices(ctx context.Context, clusterdContext *clusterd.Context, nodeName, clusterName string, devices []cephv1.Device, filter string, useAllDevices bool) ([]cephv1.Device, error) {
results := []cephv1.Device{}
if len(devices) == 0 && len(filter) == 0 && !useAllDevices {
return results, nil
}
namespace := os.Getenv(k8sutil.PodNamespaceEnvVar)
// find all devices
-allDevices, err := ListDevices(clusterdContext, namespace, nodeName)
+allDevices, err := ListDevices(ctx, clusterdContext, namespace, nodeName)
if err != nil {
return results, err
}
@@ -382,7 +379,7 @@ func GetAvailableDevices(clusterdContext *clusterd.Context, nodeName, clusterNam
return results, fmt.Errorf("node %s has no devices", nodeName)
}
// find those in use on the node
-devicesInUse, err := ListDevicesInUse(clusterdContext, namespace, nodeName)
+devicesInUse, err := ListDevicesInUse(ctx, clusterdContext, namespace, nodeName)
if err != nil {
return results, err
}
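The discovery helpers now follow the same pattern: the context passed to ListDevices (and, through it, GetAvailableDevices) reaches the configmap and node lookups they perform. A short sketch, assuming the usual rook import paths; the namespace value is a placeholder where the real code reads the pod namespace from the environment:

```go
package example

import (
	"context"

	"github.com/rook/rook/pkg/clusterd"
	"github.com/rook/rook/pkg/operator/discover"
	"github.com/rook/rook/pkg/util/sys"
	"k8s.io/client-go/kubernetes"
)

// discoveredDevices returns the disks reported by the discover daemon, keyed by node.
// An empty node name lists devices for all nodes, per the function's doc comment, and
// cancelling ctx aborts the underlying API calls.
func discoveredDevices(ctx context.Context, clientset kubernetes.Interface) (map[string][]sys.LocalDisk, error) {
	clusterdCtx := &clusterd.Context{Clientset: clientset}
	return discover.ListDevices(ctx, clusterdCtx, "rook-ceph", "")
}
```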
6 changes: 3 additions & 3 deletions pkg/operator/discover/discover_test.go
@@ -129,15 +129,15 @@ func TestGetAvailableDevices(t *testing.T) {
},
}

-nodeDevices, err := ListDevices(context, ns, "" /* all nodes */)
+nodeDevices, err := ListDevices(ctx, context, ns, "" /* all nodes */)
assert.Nil(t, err)
assert.Equal(t, 1, len(nodeDevices))

-devices, err := GetAvailableDevices(context, nodeName, ns, d, "^sd.", pvcBackedOSD)
+devices, err := GetAvailableDevices(ctx, context, nodeName, ns, d, "^sd.", pvcBackedOSD)
assert.Nil(t, err)
assert.Equal(t, 1, len(devices))
// devices should be in use now, 2nd try gets the same list
-devices, err = GetAvailableDevices(context, nodeName, ns, d, "^sd.", pvcBackedOSD)
+devices, err = GetAvailableDevices(ctx, context, nodeName, ns, d, "^sd.", pvcBackedOSD)
assert.Nil(t, err)
assert.Equal(t, 1, len(devices))
}
19 changes: 7 additions & 12 deletions pkg/operator/k8sutil/node.go
@@ -62,8 +62,8 @@ func ValidNode(node v1.Node, placement cephv1.Placement) (bool, error) {

// GetValidNodes returns all nodes that (1) are not cordoned, (2) meet Rook's placement terms, and
// (3) are ready.
-func GetValidNodes(rookStorage cephv1.StorageScopeSpec, clientset kubernetes.Interface, placement cephv1.Placement) []cephv1.Node {
-matchingK8sNodes, err := GetKubernetesNodesMatchingRookNodes(rookStorage.Nodes, clientset)
+func GetValidNodes(ctx context.Context, rookStorage cephv1.StorageScopeSpec, clientset kubernetes.Interface, placement cephv1.Placement) []cephv1.Node {
+matchingK8sNodes, err := GetKubernetesNodesMatchingRookNodes(ctx, rookStorage.Nodes, clientset)
if err != nil {
// cannot list nodes, return empty nodes
logger.Errorf("failed to list nodes: %+v", err)
@@ -86,8 +86,7 @@ func GetValidNodes(rookStorage cephv1.StorageScopeSpec, clientset kubernetes.Int
// GetNodeNameFromHostname returns the name of the node resource looked up by the hostname label
// Typically these will be the same name, but sometimes they are not such as when nodes have a longer
// dns name, but the hostname is short.
-func GetNodeNameFromHostname(clientset kubernetes.Interface, hostName string) (string, error) {
-ctx := context.TODO()
+func GetNodeNameFromHostname(ctx context.Context, clientset kubernetes.Interface, hostName string) (string, error) {
options := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", v1.LabelHostname, hostName)}
nodes, err := clientset.CoreV1().Nodes().List(ctx, options)
if err != nil {
@@ -101,8 +100,7 @@ func GetNodeNameFromHostname(clientset kubernetes.Interface, hostName string) (s
}

// GetNodeHostName returns the hostname label given the node name.
-func GetNodeHostName(clientset kubernetes.Interface, nodeName string) (string, error) {
-ctx := context.TODO()
+func GetNodeHostName(ctx context.Context, clientset kubernetes.Interface, nodeName string) (string, error) {
node, err := clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
return "", err
@@ -121,8 +119,7 @@ func GetNodeHostNameLabel(node *v1.Node) (string, error) {
// GetNodeHostNames returns the name of the node resource mapped to their hostname label.
// Typically these will be the same name, but sometimes they are not such as when nodes have a longer
// dns name, but the hostname is short.
-func GetNodeHostNames(clientset kubernetes.Interface) (map[string]string, error) {
-ctx := context.TODO()
+func GetNodeHostNames(ctx context.Context, clientset kubernetes.Interface) (map[string]string, error) {
nodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
@@ -269,8 +266,7 @@ func normalizeHostname(kubernetesNode v1.Node) string {

// GetKubernetesNodesMatchingRookNodes lists all the nodes in Kubernetes and returns all the
// Kubernetes nodes that have a corresponding match in the list of Rook nodes.
-func GetKubernetesNodesMatchingRookNodes(rookNodes []cephv1.Node, clientset kubernetes.Interface) ([]v1.Node, error) {
-ctx := context.TODO()
+func GetKubernetesNodesMatchingRookNodes(ctx context.Context, rookNodes []cephv1.Node, clientset kubernetes.Interface) ([]v1.Node, error) {
nodes := []v1.Node{}
k8sNodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
@@ -287,8 +283,7 @@ func GetKubernetesNodesMatchingRookNodes(rookNodes []cephv1.Node, clientset kube
}

// GetNotReadyKubernetesNodes lists all the nodes that are in NotReady state
-func GetNotReadyKubernetesNodes(clientset kubernetes.Interface) ([]v1.Node, error) {
-ctx := context.TODO()
+func GetNotReadyKubernetesNodes(ctx context.Context, clientset kubernetes.Interface) ([]v1.Node, error) {
nodes := []v1.Node{}
k8sNodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
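Because the k8sutil helpers accept a context now, long-running controllers can hand them the operator-manager context (as cephstatus.go and cleanup.go do above), so in-flight node List/Get calls are abandoned on shutdown. A minimal sketch; the wrapper name and the ten-second bound are illustrative:

```go
package example

import (
	"context"
	"time"

	"github.com/rook/rook/pkg/operator/k8sutil"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// notReadyNodes lists NotReady nodes without blocking a reconcile for more than ten
// seconds; cancelling opManagerCtx (operator shutdown) aborts the call immediately.
func notReadyNodes(opManagerCtx context.Context, clientset kubernetes.Interface) ([]v1.Node, error) {
	ctx, cancel := context.WithTimeout(opManagerCtx, 10*time.Second)
	defer cancel()
	return k8sutil.GetNotReadyKubernetesNodes(ctx, clientset)
}
```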
18 changes: 9 additions & 9 deletions pkg/operator/k8sutil/node_test.go
@@ -70,7 +70,7 @@ func TestValidNode(t *testing.T) {
assert.Nil(t, nodeErr)
nodeErr = createNode(nodeB, v1.NodeNetworkUnavailable, clientset)
assert.Nil(t, nodeErr)
-validNodes := GetValidNodes(storage, clientset, placement)
+validNodes := GetValidNodes(context.TODO(), storage, clientset, placement)
assert.Equal(t, len(validNodes), 1)
}

@@ -214,7 +214,7 @@ func TestGetRookNodesMatchingKubernetesNodes(t *testing.T) {
}

// no rook nodes specified
-nodes, err := GetKubernetesNodesMatchingRookNodes(rookNodes, clientset)
+nodes, err := GetKubernetesNodesMatchingRookNodes(ctx, rookNodes, clientset)
assert.NoError(t, err)
assert.Empty(t, nodes)

@@ -223,7 +223,7 @@ func TestGetRookNodesMatchingKubernetesNodes(t *testing.T) {
{Name: "node0"},
{Name: "node2"},
{Name: "node5"}}
-nodes, err = GetKubernetesNodesMatchingRookNodes(rookNodes, clientset)
+nodes, err = GetKubernetesNodesMatchingRookNodes(ctx, rookNodes, clientset)
assert.NoError(t, err)
assert.Len(t, nodes, 2)
assert.Contains(t, nodes, getNode("node0"))
@@ -234,7 +234,7 @@ func TestGetRookNodesMatchingKubernetesNodes(t *testing.T) {
{Name: "node0"},
{Name: "node1"},
{Name: "node2"}}
-nodes, err = GetKubernetesNodesMatchingRookNodes(rookNodes, clientset)
+nodes, err = GetKubernetesNodesMatchingRookNodes(ctx, rookNodes, clientset)
assert.NoError(t, err)
assert.Len(t, nodes, 3)
assert.Contains(t, nodes, getNode("node0"))
Expand All @@ -243,7 +243,7 @@ func TestGetRookNodesMatchingKubernetesNodes(t *testing.T) {

// no k8s nodes exist
clientset = optest.New(t, 0)
-nodes, err = GetKubernetesNodesMatchingRookNodes(rookNodes, clientset)
+nodes, err = GetKubernetesNodesMatchingRookNodes(ctx, rookNodes, clientset)
assert.NoError(t, err)
assert.Len(t, nodes, 0)
}
@@ -379,13 +379,13 @@ func TestGetNotReadyKubernetesNodes(t *testing.T) {
clientset := optest.New(t, 0)

//when there is no node
-nodes, err := GetNotReadyKubernetesNodes(clientset)
+nodes, err := GetNotReadyKubernetesNodes(ctx, clientset)
assert.NoError(t, err)
assert.Equal(t, 0, len(nodes))

//when all the nodes are in ready state
clientset = optest.New(t, 2)
-nodes, err = GetNotReadyKubernetesNodes(clientset)
+nodes, err = GetNotReadyKubernetesNodes(ctx, clientset)
assert.NoError(t, err)
assert.Equal(t, 0, len(nodes))

@@ -404,7 +404,7 @@ func TestGetNotReadyKubernetesNodes(t *testing.T) {
}
_, err = clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
assert.NoError(t, err)
-nodes, err = GetNotReadyKubernetesNodes(clientset)
+nodes, err = GetNotReadyKubernetesNodes(ctx, clientset)
assert.NoError(t, err)
assert.Equal(t, 1, len(nodes))

@@ -417,7 +417,7 @@ func TestGetNotReadyKubernetesNodes(t *testing.T) {
_, err := clientset.CoreV1().Nodes().Update(ctx, &updateNode, metav1.UpdateOptions{})
assert.NoError(t, err)
}
-nodes, err = GetNotReadyKubernetesNodes(clientset)
+nodes, err = GetNotReadyKubernetesNodes(ctx, clientset)
assert.NoError(t, err)
assert.Equal(t, 3, len(nodes))
}
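
On the test side, the updated calls simply pass context.TODO() or a shared ctx, as the hunks above show. A hedged example of the same pattern against the plain client-go fake clientset (standing in for the optest helper used in these tests):

```go
package example

import (
	"context"
	"testing"

	"github.com/rook/rook/pkg/operator/k8sutil"
	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestGetNodeHostNamesWithContext(t *testing.T) {
	ctx := context.TODO()
	clientset := fake.NewSimpleClientset(&v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "node0",
			Labels: map[string]string{v1.LabelHostname: "node0-hostname"},
		},
	})

	// The fake clientset ignores cancellation, but the call site is identical to
	// production code: the context is the first argument.
	hostnames, err := k8sutil.GetNodeHostNames(ctx, clientset)
	assert.NoError(t, err)
	assert.Len(t, hostnames, 1)
}
```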
2 changes: 1 addition & 1 deletion tests/integration/ceph_mgr_test.go
@@ -279,7 +279,7 @@ func (s *CephMgrSuite) TestHostLs() {
sort.Strings(hostOutput)

// get the k8s nodes
-nodes, err := k8sutil.GetNodeHostNames(s.k8sh.Clientset)
+nodes, err := k8sutil.GetNodeHostNames(context.TODO(), s.k8sh.Clientset)
assert.Nil(s.T(), err)

k8sNodes := make([]string, 0, len(nodes))