drpolicy controller define #200

Merged 1 commit on Aug 20, 2021
14 changes: 14 additions & 0 deletions config/hub/rbac/role.yaml
@@ -88,6 +88,20 @@ rules:
- patch
- update
- watch
- apiGroups:
- ramendr.openshift.io
resources:
- drpolicies/finalizers
verbs:
- update
- apiGroups:
- ramendr.openshift.io
resources:
- drpolicies/status
verbs:
- get
- patch
- update
- apiGroups:
- view.open-cluster-management.io
resources:
14 changes: 14 additions & 0 deletions config/rbac/role.yaml
@@ -118,6 +118,20 @@ rules:
- patch
- update
- watch
- apiGroups:
- ramendr.openshift.io
resources:
- drpolicies/finalizers
verbs:
- update
- apiGroups:
- ramendr.openshift.io
resources:
- drpolicies/status
verbs:
- get
- patch
- update
- apiGroups:
- ramendr.openshift.io
resources:
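Both role additions mirror the kubebuilder RBAC markers that ship with the new DRPolicy controller (see controllers/drpolicy_controller.go below). Assuming the repository follows the usual kubebuilder workflow, these YAML rules are regenerated from markers like the following rather than edited by hand:

//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies/finalizers,verbs=update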
17 changes: 5 additions & 12 deletions controllers/drplacementcontrol_controller.go
@@ -482,7 +482,7 @@ func (r *DRPlacementControlReconciler) finalizeDRPC(ctx context.Context, drpc *r
clustersToClean = append(clustersToClean, drpc.Spec.FailoverCluster)
}

// delete manifestworks (VRG and Roles)
// delete manifestworks (VRG)
for idx := range clustersToClean {
err := mwu.DeleteManifestWorksForCluster(clustersToClean[idx])
if err != nil {
@@ -797,7 +797,7 @@ func (d *DRPCInstance) runInitialDeployment() (bool, error) {
d.setDRState(rmn.Deploying)

// Create VRG first, to leverage user PlacementRule decision to skip placement and move to cleanup
err := d.createVRGAndRolesManifestWorks(homeCluster)
err := d.createVRGManifestWork(homeCluster)
if err != nil {
return false, err
}
Expand Down Expand Up @@ -1122,7 +1122,7 @@ func (d *DRPCInstance) createManifestWorks(targetCluster string) (bool, error) {
if err != nil {
if errors.IsNotFound(err) {
// Create VRG first, to leverage user PlacementRule decision to skip placement and move to cleanup
err := d.createVRGAndRolesManifestWorks(targetCluster)
err := d.createVRGManifestWork(targetCluster)
if err != nil {
return !created, err
}
@@ -1346,17 +1346,10 @@ func (d *DRPCInstance) updateUserPlacementRuleStatus(status plrv1.PlacementRuleS
return nil
}

func (d *DRPCInstance) createVRGAndRolesManifestWorks(homeCluster string) error {
d.log.Info("Creating VRG/Roles ManifestWorks",
func (d *DRPCInstance) createVRGManifestWork(homeCluster string) error {
d.log.Info("Creating VRG ManifestWork",
"Last State", d.getLastDRState(), "cluster", homeCluster)

if err := d.mwu.CreateOrUpdateVRGRolesManifestWork(homeCluster); err != nil {
d.log.Error(err, "failed to create or update VolumeReplicationGroup Roles manifest")

return fmt.Errorf("failed to create or update VolumeReplicationGroup Roles manifest in namespace %s (%w)",
homeCluster, err)
}

if err := d.mwu.CreateOrUpdateVRGManifestWork(
d.instance.Name, d.instance.Namespace,
homeCluster, d.drPolicy,
59 changes: 31 additions & 28 deletions controllers/drplacementcontrol_controller_test.go
@@ -87,6 +87,25 @@ var (
}

schedulingInterval = "1h"

drPolicy = &rmn.DRPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: DRPolicyName,
},
Spec: rmn.DRPolicySpec{
DRClusterSet: []rmn.ManagedCluster{
{
Name: EastManagedCluster,
S3ProfileName: "fakeS3Profile",
},
{
Name: WestManagedCluster,
S3ProfileName: "fakeS3Profile",
},
},
SchedulingInterval: schedulingInterval,
},
}
)

var drstate string
@@ -439,22 +458,15 @@ func createManagedClusters() {
}
}

func createDRPolicy(name, namespace string, drClusterSet []rmn.ManagedCluster) {
drPolicy := &rmn.DRPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: rmn.DRPolicySpec{
DRClusterSet: drClusterSet,
SchedulingInterval: schedulingInterval,
},
}

func createDRPolicy() {
err := k8sClient.Create(context.TODO(), drPolicy)
Expect(err).NotTo(HaveOccurred())
}

func deleteDRPolicy() {
Expect(k8sClient.Delete(context.TODO(), drPolicy)).To(Succeed())
}

func updateManifestWorkStatus(clusterNamespace, mwType, workType string) {
manifestLookupKey := types.NamespacedName{
Name: rmnutil.ManifestWorkName(DRPCName, DRPCNamespaceName, mwType),
@@ -529,17 +541,7 @@ func InitialDeployment(namespace, placementName, homeCluster string) (*plrv1.Pla
createNamespaces()

createManagedClusters()
createDRPolicy(DRPolicyName, DRPCNamespaceName,
[]rmn.ManagedCluster{
{
Name: EastManagedCluster,
S3ProfileName: "fakeS3Profile",
},
{
Name: WestManagedCluster,
S3ProfileName: "fakeS3Profile",
},
})
createDRPolicy()

placementRule := createPlacementRule(placementName, namespace)
drpc := createDRPC(DRPCName, DRPCNamespaceName)
@@ -549,7 +551,7 @@ func InitialDeployment(namespace, placementName, homeCluster string) (*plrv1.Pla

func verifyVRGManifestWorkCreatedAsPrimary(managedCluster string) {
vrgManifestLookupKey := types.NamespacedName{
Name: "ramendr-vrg-roles",
Name: rmnutil.ClusterRolesManifestWorkName,
Namespace: managedCluster,
}
createdVRGRolesManifest := &ocmworkv1.ManifestWork{}
@@ -765,7 +767,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() {

recoverToFailoverCluster(drpc, userPlacementRule)
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(2)) // MW for VRG+ROLES
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MWs
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MW

val, err := rmnutil.GetMetricValueSingle("ramen_failover_time", dto.MetricType_GAUGE)
Expect(err).NotTo(HaveOccurred())
@@ -796,7 +798,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() {

recoverToFailoverCluster(drpc, userPlacementRule)
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(2)) // MW for VRG+ROLES
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MWs
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MW

drpc := getLatestDRPC(DRPCName, DRPCNamespaceName)
// At this point expect the DRPC status condition to have 2 types
@@ -814,7 +816,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() {

relocateToPreferredCluster(drpc, userPlacementRule)
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(2)) // MWs for VRG+ROLES
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(1)) // Roles MWs
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(1)) // Roles MW

drpc = getLatestDRPC(DRPCName, DRPCNamespaceName)
// At this point expect the DRPC status condition to have 2 types
@@ -831,7 +833,8 @@ var _ = Describe("DRPlacementControl Reconciler", func() {
By("\n\n*** DELETE DRPC ***\n\n")
deleteDRPC()
waitForCompletion("deleted")
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MWs
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MW
deleteDRPolicy()
})
})
})
124 changes: 124 additions & 0 deletions controllers/drpolicy_controller.go
@@ -0,0 +1,124 @@
/*
Copyright 2021 The RamenDR authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
"context"
"fmt"

"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

ramen "github.com/ramendr/ramen/api/v1alpha1"
"github.com/ramendr/ramen/controllers/util"
)

// DRPolicyReconciler reconciles a DRPolicy object
type DRPolicyReconciler struct {
client.Client
Scheme *runtime.Scheme
}

//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies/finalizers,verbs=update

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the DRPolicy object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/reconcile
func (r *DRPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := ctrl.Log.WithName("controllers").WithName("drpolicy").WithValues("name", req.NamespacedName.Name)
log.Info("reconcile enter")

defer log.Info("reconcile exit")

drpolicy := &ramen.DRPolicy{}
if err := r.Client.Get(ctx, req.NamespacedName, drpolicy); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(fmt.Errorf("get: %w", err))
}

manifestWorkUtil := util.MWUtil{Client: r.Client, Ctx: ctx, Log: log, InstName: "", InstNamespace: ""}

switch drpolicy.ObjectMeta.DeletionTimestamp.IsZero() {
case true:
log.Info("create/update")

if err := finalizerAdd(ctx, drpolicy, r.Client, log); err != nil {
return ctrl.Result{}, fmt.Errorf("finalizer add update: %w", err)
}

if err := manifestWorkUtil.ClusterRolesCreate(drpolicy); err != nil {
return ctrl.Result{}, fmt.Errorf("cluster roles create: %w", err)
}
Collaborator:
As mentioned here (https://github.com/RamenDR/ramen/blob/main/controllers/drplacementcontrol_controller.go#L332-L335), it may be better to incorporate the task of validating the schedule in DRPolicy instead of DRPC.

Collaborator Author:
Thanks Raghu. #201 tracks this issue.
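
For context, a minimal sketch of how such a schedule check might live in the DRPolicy reconciler. The helper name is illustrative, and the assumption that the interval parses as a Go duration (for example "1h") may not match whatever format the project ultimately accepts; it also needs the "time" import in addition to the file's existing ones.

func validateSchedulingInterval(drpolicy *ramen.DRPolicy) error {
	// Hypothetical check: reject a DRPolicy whose scheduling interval does not
	// parse as a Go duration. The merged code does not perform this validation;
	// see #201.
	if _, err := time.ParseDuration(drpolicy.Spec.SchedulingInterval); err != nil {
		return fmt.Errorf("invalid scheduling interval %q: %w",
			drpolicy.Spec.SchedulingInterval, err)
	}

	return nil
}

A check like this would slot into the create/update branch above, before the cluster roles ManifestWorks are created.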

default:
log.Info("delete")

if err := manifestWorkUtil.ClusterRolesDelete(drpolicy); err != nil {
return ctrl.Result{}, fmt.Errorf("cluster roles delete: %w", err)
}

if err := finalizerRemove(ctx, drpolicy, r.Client, log); err != nil {
return ctrl.Result{}, fmt.Errorf("finalizer remove update: %w", err)
}
}

return ctrl.Result{}, nil
}

const finalizerName = "drpolicies.ramendr.openshift.io/ramen"

func finalizerAdd(ctx context.Context, drpolicy *ramen.DRPolicy, client client.Client, log logr.Logger) error {
finalizerCount := len(drpolicy.ObjectMeta.Finalizers)
controllerutil.AddFinalizer(drpolicy, finalizerName)

if len(drpolicy.ObjectMeta.Finalizers) != finalizerCount {
Member:
(This is kind of a nit.) Can we stick to one pattern when adding and checking finalizers? The following is roughly the pattern that the DRPC and the VRG are using:

if !controllerutil.ContainsFinalizer(drpolicy, finalizerName) {
    controllerutil.AddFinalizer(drpolicy, finalizerName)
    client.Update(ctx, drpolicy)
    ...
}

I am not saying this is the same pattern, but I am suggesting sticking to one.

Collaborator Author:
I agree. I chose this method because len() of a slice seems to be a constant-time operation, whereas AddFinalizer, RemoveFinalizer, and ContainsFinalizer are each linear. It'd be nice if AddFinalizer and RemoveFinalizer returned a boolean.

Member:
I don't think that performance is an issue here.

Member:
I am going to suggest we stick to the ContainsFinalizer pattern, not because this is incorrect, but because it deviates from the pattern, and any fixes/updates to controllerutil should retain the interface if we follow the pattern. (And as stated, this is a nit.)

Collaborator Author:

Pull request to address this: kubernetes-sigs/controller-runtime#1636

log.Info("finalizer add")

return client.Update(ctx, drpolicy)
}

return nil
}

func finalizerRemove(ctx context.Context, drpolicy *ramen.DRPolicy, client client.Client, log logr.Logger) error {
finalizerCount := len(drpolicy.ObjectMeta.Finalizers)
controllerutil.RemoveFinalizer(drpolicy, finalizerName)

if len(drpolicy.ObjectMeta.Finalizers) != finalizerCount {
log.Info("finalizer remove")

return client.Update(ctx, drpolicy)
}

return nil
}
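
For comparison with the discussion above, a sketch of the ContainsFinalizer pattern the reviewers suggested; the function names are illustrative, and this is the suggested alternative, not what this commit merges:

func finalizerAddUsingContains(ctx context.Context, drpolicy *ramen.DRPolicy, client client.Client, log logr.Logger) error {
	// Same effect as finalizerAdd above, written with the pattern the DRPC and
	// VRG controllers use.
	if !controllerutil.ContainsFinalizer(drpolicy, finalizerName) {
		controllerutil.AddFinalizer(drpolicy, finalizerName)
		log.Info("finalizer add")

		return client.Update(ctx, drpolicy)
	}

	return nil
}

func finalizerRemoveUsingContains(ctx context.Context, drpolicy *ramen.DRPolicy, client client.Client, log logr.Logger) error {
	// Mirror image for deletion: only issue an Update when the finalizer was
	// actually present and has been removed.
	if controllerutil.ContainsFinalizer(drpolicy, finalizerName) {
		controllerutil.RemoveFinalizer(drpolicy, finalizerName)
		log.Info("finalizer remove")

		return client.Update(ctx, drpolicy)
	}

	return nil
}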

// SetupWithManager sets up the controller with the Manager.
func (r *DRPolicyReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&ramen.DRPolicy{}).
Complete(r)
}