drpolicy controller define
	- vrg and pv cluster roles manifestworks unify
	- drpolicy create/update:
		- finalizer add if absent
		- cluster roles manifestwork create, if absent, for each cluster in drpolicy
	- drpolicy delete:
		- cluster roles manifestwork delete for each cluster not in any other drpolicy
		- finalizer remove if present
	- ginkgo version increase from 1.16.1 to 1.16.4
	- gomega version increase from 1.11.0 to 1.15.0

Signed-off-by: bhatfiel <bhatfiel@redhat.com>
hatfieldbrian committed Aug 17, 2021
1 parent dc36bc6 commit e4b57a2
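
The reconcile flow summarized above hinges on one rule: a managed cluster's roles ManifestWork is created when any DRPolicy references that cluster, and deleted only once no remaining DRPolicy does. The ClusterRolesCreate/ClusterRolesDelete helpers live in controllers/util and are not part of this excerpt; the sketch below, with hypothetical names, only illustrates how the delete-side membership check could be computed under that assumption.

package controllers

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	ramen "github.com/ramendr/ramen/api/v1alpha1"
)

// clustersToCleanUp is a hypothetical helper, sketched only to illustrate the
// "cluster roles manifestwork delete for each cluster not in any other drpolicy"
// rule above; the actual logic lives in controllers/util and is not shown here.
func clustersToCleanUp(ctx context.Context, c client.Client, deleted *ramen.DRPolicy) ([]string, error) {
	drpolicies := &ramen.DRPolicyList{}
	if err := c.List(ctx, drpolicies); err != nil {
		return nil, fmt.Errorf("drpolicy list: %w", err)
	}

	// Clusters still referenced by any other DRPolicy keep their roles ManifestWork.
	inUse := map[string]bool{}
	for i := range drpolicies.Items {
		other := &drpolicies.Items[i]
		if other.ObjectMeta.Name == deleted.ObjectMeta.Name {
			continue
		}

		for _, cluster := range other.Spec.DRClusterSet {
			inUse[cluster.Name] = true
		}
	}

	// Only clusters unique to the deleted DRPolicy are returned for cleanup.
	clusters := []string{}
	for _, cluster := range deleted.Spec.DRClusterSet {
		if !inUse[cluster.Name] {
			clusters = append(clusters, cluster.Name)
		}
	}

	return clusters, nil
}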
Showing 12 changed files with 420 additions and 105 deletions.
14 changes: 14 additions & 0 deletions config/rbac/role.yaml
@@ -108,6 +108,20 @@ rules:
- patch
- update
- watch
- apiGroups:
- ramendr.openshift.io
resources:
- drpolicies/finalizers
verbs:
- update
- apiGroups:
- ramendr.openshift.io
resources:
- drpolicies/status
verbs:
- get
- patch
- update
- apiGroups:
- ramendr.openshift.io
resources:
4 changes: 4 additions & 0 deletions config/samples/kustomization.yaml
@@ -0,0 +1,4 @@
## Append samples you want in your CSV to this file as resources ##
resources:
- ramendr_v1alpha1_drpolicy.yaml
#+kubebuilder:scaffold:manifestskustomizesamples
14 changes: 0 additions & 14 deletions controllers/drplacementcontrol_controller.go
@@ -1114,13 +1114,6 @@ func (d *DRPCInstance) updateUserPlRuleAndCreateVRGMW(homeCluster, homeClusterNa
}

func (d *DRPCInstance) createPVManifestWorkForRestore(newPrimary string) error {
if err := d.mwu.CreateOrUpdatePVRolesManifestWork(newPrimary); err != nil {
d.log.Error(err, "failed to create or update PersistentVolume Roles manifest")

return fmt.Errorf("failed to create or update PersistentVolume Roles manifest in namespace %s (%w)",
newPrimary, err)
}

pvMWName := d.mwu.BuildManifestWorkName(rmnutil.MWTypePV)

existAndApplied, err := d.mwu.ManifestExistAndApplied(pvMWName, newPrimary)
@@ -1518,13 +1511,6 @@ func (d *DRPCInstance) restorePVFromBackup(homeCluster string) error {
func (d *DRPCInstance) processVRGManifestWork(homeCluster string) error {
d.log.Info("Processing VRG ManifestWork", "cluster", homeCluster)

if err := d.mwu.CreateOrUpdateVRGRolesManifestWork(homeCluster); err != nil {
d.log.Error(err, "failed to create or update VolumeReplicationGroup Roles manifest")

return fmt.Errorf("failed to create or update VolumeReplicationGroup Roles manifest in namespace %s (%w)",
homeCluster, err)
}

if err := d.mwu.CreateOrUpdateVRGManifestWork(
d.instance.Name, d.instance.Namespace,
homeCluster, d.drPolicy, d.instance.Spec.PVCSelector); err != nil {
79 changes: 41 additions & 38 deletions controllers/drplacementcontrol_controller_test.go
@@ -87,6 +87,25 @@ var (
}

schedulingInterval = "1h"

drPolicy = &rmn.DRPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: DRPolicyName,
},
Spec: rmn.DRPolicySpec{
DRClusterSet: []rmn.ManagedCluster{
{
Name: EastManagedCluster,
S3ProfileName: "fakeS3Profile",
},
{
Name: WestManagedCluster,
S3ProfileName: "fakeS3Profile",
},
},
SchedulingInterval: schedulingInterval,
},
}
)

var safeToProceed bool
@@ -474,22 +493,15 @@ func createManagedClusters() {
}
}

func createDRPolicy(name, namespace string, drClusterSet []rmn.ManagedCluster) {
drPolicy := &rmn.DRPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: rmn.DRPolicySpec{
DRClusterSet: drClusterSet,
SchedulingInterval: schedulingInterval,
},
}

func createDRPolicy() {
err := k8sClient.Create(context.TODO(), drPolicy)
Expect(err).NotTo(HaveOccurred())
}

func deleteDRPolicy() {
Expect(k8sClient.Delete(context.TODO(), drPolicy)).To(Succeed())
}

func updateManifestWorkStatus(clusterNamespace, mwType, workType string) {
manifestLookupKey := types.NamespacedName{
Name: rmnutil.ManifestWorkName(DRPCName, DRPCNamespaceName, mwType),
@@ -564,17 +576,7 @@ func InitialDeployment(namespace, placementName, homeCluster string) (*plrv1.Pla
createNamespaces()

createManagedClusters()
createDRPolicy(DRPolicyName, DRPCNamespaceName,
[]rmn.ManagedCluster{
{
Name: EastManagedCluster,
S3ProfileName: "fakeS3Profile",
},
{
Name: WestManagedCluster,
S3ProfileName: "fakeS3Profile",
},
})
createDRPolicy()

placementRule := createPlacementRule(placementName, namespace)
drpc := createDRPC(DRPCName, DRPCNamespaceName)
@@ -584,7 +586,7 @@

func verifyVRGManifestWorkCreatedAsPrimary(managedCluster string) {
vrgManifestLookupKey := types.NamespacedName{
Name: "ramendr-vrg-roles",
Name: rmnutil.ClusterRolesManifestWorkName,
Namespace: managedCluster,
}
createdVRGRolesManifest := &ocmworkv1.ManifestWork{}
@@ -595,7 +597,7 @@ func verifyVRGManifestWorkCreatedAsPrimary(managedCluster string) {
return err == nil
}, timeout, interval).Should(BeTrue())

Expect(len(createdVRGRolesManifest.Spec.Workload.Manifests)).To(Equal(2))
Expect(len(createdVRGRolesManifest.Spec.Workload.Manifests)).To(BeNumerically(">=", 2))

vrgClusterRoleManifest := createdVRGRolesManifest.Spec.Workload.Manifests[0]
Expect(vrgClusterRoleManifest).ToNot(BeNil())
@@ -760,13 +762,13 @@ var _ = Describe("DRPlacementControl Reconciler", func() {
verifyUserPlacementRuleDecision(userPlacementRule.Name, userPlacementRule.Namespace, WestManagedCluster)
verifyDRPCStatusPreferredClusterExpectation(rmn.FailedOver)
verifyVRGManifestWorkCreatedAsPrimary(WestManagedCluster)
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(4)) // MWs for VRG+ROLES+PVs
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(3)) // MWs for VRG+ROLES+PVs
waitForVRGMWDeletion(EastManagedCluster)
updateManagedClusterViewStatusAsNotFound(mcvEast)
// tickle the DRPC reconciler, should be removed once we watch for MCV resource updates
touchDRPCToForceReconcile(drpc)
setDRPCSpecExpectationTo(drpc, rmn.ActionFailover, "")
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // MW for VRG ROLE only
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MW
waitForCompletion()

val, err := rmnutil.GetMetricValueSingle("ramen_failover_time", dto.MetricType_GAUGE)
@@ -786,10 +788,10 @@ var _ = Describe("DRPlacementControl Reconciler", func() {
updateClonedPlacementRuleStatus(userPlacementRule, drpc, WestManagedCluster)
verifyUserPlacementRuleDecision(userPlacementRule.Name, userPlacementRule.Namespace, WestManagedCluster)
verifyDRPCStatusPreferredClusterExpectation(rmn.FailedOver)
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(4)) // MWs for VRG+ROLES+PVs
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(3)) // MWs for VRG+ROLES+PVs
waitForVRGMWDeletion(EastManagedCluster)
updateManagedClusterViewStatusAsNotFound(mcvEast)
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // MWs for VRG ROLE only
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MW
touchDRPCToForceReconcile(drpc)
waitForCompletion()
})
@@ -820,11 +822,11 @@ var _ = Describe("DRPlacementControl Reconciler", func() {
verifyDRPCStatusPreferredClusterExpectation(rmn.FailedBack)
verifyVRGManifestWorkCreatedAsPrimary(EastManagedCluster)

Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(4)) // MWs for VRG+ROLES+PVs
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(3)) // MWs for VRG+ROLES+PVs
waitForVRGMWDeletion(WestManagedCluster)

updateManagedClusterViewStatusAsNotFound(mcvWest)
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(2)) // Roles MWs
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(1)) // Roles MW
waitForCompletion()

val, err := rmnutil.GetMetricValueSingle("ramen_failback_time", dto.MetricType_GAUGE)
@@ -848,8 +850,8 @@ var _ = Describe("DRPlacementControl Reconciler", func() {
verifyDRPCStatusPreferredClusterExpectation(rmn.FailedBack)
waitForVRGMWDeletion(WestManagedCluster)
updateManagedClusterViewStatusAsNotFound(mcvWest)
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(4)) // MWs for VRG+ROLES+PVs
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(2)) // Roles MWs
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(3)) // MWs for VRG+ROLES+PVs
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(1)) // Roles MW
waitForCompletion()
})
})
@@ -874,10 +876,10 @@ var _ = Describe("DRPlacementControl Reconciler", func() {
verifyDRPCStatusPreferredClusterExpectation(rmn.FailedOver)
verifyVRGManifestWorkCreatedAsPrimary(WestManagedCluster)

Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(4)) // MW for VRG+ROLES+PVs
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(3)) // MW for VRG+ROLES+PVs
waitForVRGMWDeletion(EastManagedCluster)
updateManagedClusterViewStatusAsNotFound(mcvEast)
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(2)) // Roles MWs
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MW
waitForCompletion()
})
})
@@ -904,10 +906,10 @@ var _ = Describe("DRPlacementControl Reconciler", func() {
verifyDRPCStatusPreferredClusterExpectation(rmn.Relocated)
verifyVRGManifestWorkCreatedAsPrimary(EastManagedCluster)

Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(4)) // MWs for VRG+ROLES+PVs
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(3)) // MWs for VRG+ROLES+PVs
waitForVRGMWDeletion(WestManagedCluster)
updateManagedClusterViewStatusAsNotFound(mcvWest)
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(2)) // Roles MWs
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(1)) // Roles MW
waitForCompletion()

val, err := rmnutil.GetMetricValueSingle("ramen_relocate_time", dto.MetricType_GAUGE)
@@ -921,7 +923,8 @@ var _ = Describe("DRPlacementControl Reconciler", func() {
safeToProceed = false
deleteDRPC()
waitForCompletion()
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(2)) // Roles MWs
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MW
deleteDRPolicy()
})
})
})
124 changes: 124 additions & 0 deletions controllers/drpolicy_controller.go
@@ -0,0 +1,124 @@
/*
Copyright 2021 The RamenDR authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
"context"
"fmt"

"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

ramen "github.com/ramendr/ramen/api/v1alpha1"
"github.com/ramendr/ramen/controllers/util"
)

// DRPolicyReconciler reconciles a DRPolicy object
type DRPolicyReconciler struct {
client.Client
Scheme *runtime.Scheme
}

//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies/finalizers,verbs=update

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the DRPolicy object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/reconcile
func (r *DRPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := ctrl.Log.WithName("controllers").WithName("drpolicy").WithValues("name", req.NamespacedName.Name)
log.Info("reconcile enter")

defer log.Info("reconcile exit")

drpolicy := &ramen.DRPolicy{}
if err := r.Client.Get(ctx, req.NamespacedName, drpolicy); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(fmt.Errorf("get: %w", err))
}

manifestWorkUtil := util.MWUtil{Client: r.Client, Ctx: ctx, Log: log, InstName: "", InstNamespace: ""}

switch drpolicy.ObjectMeta.DeletionTimestamp.IsZero() {
case true:
log.Info("create/update")

if err := finalizerAdd(ctx, drpolicy, r.Client, log); err != nil {
return ctrl.Result{}, fmt.Errorf("finalizer add update: %w", err)
}

if err := manifestWorkUtil.ClusterRolesCreate(drpolicy); err != nil {
return ctrl.Result{}, fmt.Errorf("cluster roles create: %w", err)
}
default:
log.Info("delete")

if err := manifestWorkUtil.ClusterRolesDelete(drpolicy); err != nil {
return ctrl.Result{}, fmt.Errorf("cluster roles delete: %w", err)
}

if err := finalizerRemove(ctx, drpolicy, r.Client, log); err != nil {
return ctrl.Result{}, fmt.Errorf("finalizer remove update: %w", err)
}
}

return ctrl.Result{}, nil
}

const finalizerName = "drpolicies.ramendr.openshift.io/ramen"

func finalizerAdd(ctx context.Context, drpolicy *ramen.DRPolicy, client client.Client, log logr.Logger) error {
finalizerCount := len(drpolicy.ObjectMeta.Finalizers)
controllerutil.AddFinalizer(drpolicy, finalizerName)

if len(drpolicy.ObjectMeta.Finalizers) != finalizerCount {
log.Info("finalizer add")

return client.Update(ctx, drpolicy)
}

return nil
}

func finalizerRemove(ctx context.Context, drpolicy *ramen.DRPolicy, client client.Client, log logr.Logger) error {
finalizerCount := len(drpolicy.ObjectMeta.Finalizers)
controllerutil.RemoveFinalizer(drpolicy, finalizerName)

if len(drpolicy.ObjectMeta.Finalizers) != finalizerCount {
log.Info("finalizer remove")

return client.Update(ctx, drpolicy)
}

return nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *DRPolicyReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&ramen.DRPolicy{}).
Complete(r)
}
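
SetupWithManager only builds the controller; it still has to be registered against the manager at startup. The main.go change is among the 12 files in this commit but is not shown in this excerpt; the fragment below is a minimal sketch of the usual kubebuilder wiring inside main(), assuming the standard setupLog scaffold, not the commit's actual main.go.

	// Sketch only: typical kubebuilder registration in main.go after the
	// manager is created; setupLog is the standard scaffold logger.
	if err := (&controllers.DRPolicyReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "DRPolicy")
		os.Exit(1)
	}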
