drpolicy controller define
	- drpolicy create/update:
		- finalizer add if absent
		- cluster roles manifestwork create, if absent, for each cluster in drpolicy
	- drpolicy delete:
		- cluster roles manifestwork delete for each cluster not in any other drpolicy
		- finalizer remove if present
	- ginkgo version increase from 1.16.1 to 1.16.4
	- gomega version increase from 1.11.0 to 1.15.0

Signed-off-by: bhatfiel <bhatfiel@redhat.com>
hatfieldbrian committed Aug 20, 2021
1 parent 63764ce commit 46b229f
Showing 12 changed files with 390 additions and 113 deletions.
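For orientation, the resource the new controller reconciles is shaped like the DRPolicy the updated tests construct. As a manifest it would look roughly as follows; this is a sketch, and the lowercase field names are assumed from the Go struct fields (DRClusterSet, S3ProfileName, SchedulingInterval), since the CRD's JSON tags are not part of this diff:

apiVersion: ramendr.openshift.io/v1alpha1
kind: DRPolicy
metadata:
  name: drpolicy-sample  # no namespace: the tests create it cluster-scoped
spec:
  schedulingInterval: "1h"
  drClusterSet:
  - name: east-cluster
    s3ProfileName: fakeS3Profile
  - name: west-cluster
    s3ProfileName: fakeS3Profile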
14 changes: 14 additions & 0 deletions config/hub/rbac/role.yaml
@@ -88,6 +88,20 @@ rules:
  - patch
  - update
  - watch
- apiGroups:
  - ramendr.openshift.io
  resources:
  - drpolicies/finalizers
  verbs:
  - update
- apiGroups:
  - ramendr.openshift.io
  resources:
  - drpolicies/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - view.open-cluster-management.io
  resources:
14 changes: 14 additions & 0 deletions config/rbac/role.yaml
@@ -118,6 +118,20 @@ rules:
  - patch
  - update
  - watch
- apiGroups:
  - ramendr.openshift.io
  resources:
  - drpolicies/finalizers
  verbs:
  - update
- apiGroups:
  - ramendr.openshift.io
  resources:
  - drpolicies/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - ramendr.openshift.io
  resources:
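These two role.yaml additions track the //+kubebuilder:rbac markers declared in the new controllers/drpolicy_controller.go below; in a standard kubebuilder layout such files are regenerated from the markers (typically via make manifests, assuming this repository follows that convention) rather than edited by hand:

//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies/finalizers,verbs=update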
17 changes: 5 additions & 12 deletions controllers/drplacementcontrol_controller.go
@@ -482,7 +482,7 @@ func (r *DRPlacementControlReconciler) finalizeDRPC(ctx context.Context, drpc *r
clustersToClean = append(clustersToClean, drpc.Spec.FailoverCluster)
}

-	// delete manifestworks (VRG and Roles)
+	// delete manifestworks (VRG)
for idx := range clustersToClean {
err := mwu.DeleteManifestWorksForCluster(clustersToClean[idx])
if err != nil {
@@ -797,7 +797,7 @@ func (d *DRPCInstance) runInitialDeployment() (bool, error) {
d.setDRState(rmn.Deploying)

// Create VRG first, to leverage user PlacementRule decision to skip placement and move to cleanup
-	err := d.createVRGAndRolesManifestWorks(homeCluster)
+	err := d.createVRGManifestWork(homeCluster)
if err != nil {
return false, err
}
@@ -1122,7 +1122,7 @@ func (d *DRPCInstance) createManifestWorks(targetCluster string) (bool, error) {
if err != nil {
if errors.IsNotFound(err) {
// Create VRG first, to leverage user PlacementRule decision to skip placement and move to cleanup
-			err := d.createVRGAndRolesManifestWorks(targetCluster)
+			err := d.createVRGManifestWork(targetCluster)
if err != nil {
return !created, err
}
@@ -1346,17 +1346,10 @@ func (d *DRPCInstance) updateUserPlacementRuleStatus(status plrv1.PlacementRuleS
return nil
}

-func (d *DRPCInstance) createVRGAndRolesManifestWorks(homeCluster string) error {
-	d.log.Info("Creating VRG/Roles ManifestWorks",
+func (d *DRPCInstance) createVRGManifestWork(homeCluster string) error {
+	d.log.Info("Creating VRG ManifestWork",
"Last State", d.getLastDRState(), "cluster", homeCluster)

-	if err := d.mwu.CreateOrUpdateVRGRolesManifestWork(homeCluster); err != nil {
-		d.log.Error(err, "failed to create or update VolumeReplicationGroup Roles manifest")
-
-		return fmt.Errorf("failed to create or update VolumeReplicationGroup Roles manifest in namespace %s (%w)",
-			homeCluster, err)
-	}
-
if err := d.mwu.CreateOrUpdateVRGManifestWork(
d.instance.Name, d.instance.Namespace,
homeCluster, d.drPolicy,
59 changes: 31 additions & 28 deletions controllers/drplacementcontrol_controller_test.go
@@ -87,6 +87,25 @@ var (
}

schedulingInterval = "1h"

+	drPolicy = &rmn.DRPolicy{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: DRPolicyName,
+		},
+		Spec: rmn.DRPolicySpec{
+			DRClusterSet: []rmn.ManagedCluster{
+				{
+					Name:          EastManagedCluster,
+					S3ProfileName: "fakeS3Profile",
+				},
+				{
+					Name:          WestManagedCluster,
+					S3ProfileName: "fakeS3Profile",
+				},
+			},
+			SchedulingInterval: schedulingInterval,
+		},
+	}
)

var drstate string
@@ -439,22 +458,15 @@ func createManagedClusters() {
}
}

-func createDRPolicy(name, namespace string, drClusterSet []rmn.ManagedCluster) {
-	drPolicy := &rmn.DRPolicy{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: namespace,
-		},
-		Spec: rmn.DRPolicySpec{
-			DRClusterSet:       drClusterSet,
-			SchedulingInterval: schedulingInterval,
-		},
-	}
-
+func createDRPolicy() {
err := k8sClient.Create(context.TODO(), drPolicy)
Expect(err).NotTo(HaveOccurred())
}

+func deleteDRPolicy() {
+	Expect(k8sClient.Delete(context.TODO(), drPolicy)).To(Succeed())
+}

func updateManifestWorkStatus(clusterNamespace, mwType, workType string) {
manifestLookupKey := types.NamespacedName{
Name: rmnutil.ManifestWorkName(DRPCName, DRPCNamespaceName, mwType),
@@ -529,17 +541,7 @@ func InitialDeployment(namespace, placementName, homeCluster string) (*plrv1.Pla
createNamespaces()

createManagedClusters()
-	createDRPolicy(DRPolicyName, DRPCNamespaceName,
-		[]rmn.ManagedCluster{
-			{
-				Name:          EastManagedCluster,
-				S3ProfileName: "fakeS3Profile",
-			},
-			{
-				Name:          WestManagedCluster,
-				S3ProfileName: "fakeS3Profile",
-			},
-		})
+	createDRPolicy()

placementRule := createPlacementRule(placementName, namespace)
drpc := createDRPC(DRPCName, DRPCNamespaceName)
@@ -549,7 +551,7 @@ func InitialDeployment(namespace, placementName, homeCluster string) (*plrv1.Pla

func verifyVRGManifestWorkCreatedAsPrimary(managedCluster string) {
vrgManifestLookupKey := types.NamespacedName{
-		Name:      "ramendr-vrg-roles",
+		Name:      rmnutil.ClusterRolesManifestWorkName,
Namespace: managedCluster,
}
createdVRGRolesManifest := &ocmworkv1.ManifestWork{}
@@ -765,7 +767,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() {

recoverToFailoverCluster(drpc, userPlacementRule)
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(2)) // MW for VRG+ROLES
-		Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MWs
+		Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MW

val, err := rmnutil.GetMetricValueSingle("ramen_failover_time", dto.MetricType_GAUGE)
Expect(err).NotTo(HaveOccurred())
@@ -796,7 +798,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() {

recoverToFailoverCluster(drpc, userPlacementRule)
Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(2)) // MW for VRG+ROLES
-		Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MWs
+		Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MW

drpc := getLatestDRPC(DRPCName, DRPCNamespaceName)
// At this point expect the DRPC status condition to have 2 types
@@ -814,7 +816,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() {

relocateToPreferredCluster(drpc, userPlacementRule)
Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(2)) // MWs for VRG+ROLES
-		Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(1)) // Roles MWs
+		Expect(getManifestWorkCount(WestManagedCluster)).Should(Equal(1)) // Roles MW

drpc = getLatestDRPC(DRPCName, DRPCNamespaceName)
// At this point expect the DRPC status condition to have 2 types
@@ -831,7 +833,8 @@ var _ = Describe("DRPlacementControl Reconciler", func() {
By("\n\n*** DELETE DRPC ***\n\n")
deleteDRPC()
waitForCompletion("deleted")
-		Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MWs
+		Expect(getManifestWorkCount(EastManagedCluster)).Should(Equal(1)) // Roles MW
+		deleteDRPolicy()
})
})
})
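The ManifestWork-count assertions above rely on the getManifestWorkCount helper, which this commit does not touch and the diff does not show. A minimal sketch of what such a helper presumably does, namely count the ManifestWorks in a managed cluster's hub namespace (client here is sigs.k8s.io/controller-runtime/pkg/client; this is an assumption, not the actual helper):

func getManifestWorkCount(homeClusterNamespace string) int {
	// List the ManifestWorks the hub holds in this managed cluster's namespace.
	manifestWorkList := &ocmworkv1.ManifestWorkList{}
	Expect(k8sClient.List(context.TODO(), manifestWorkList,
		client.InNamespace(homeClusterNamespace))).To(Succeed())

	return len(manifestWorkList.Items)
}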
124 changes: 124 additions & 0 deletions controllers/drpolicy_controller.go
@@ -0,0 +1,124 @@
/*
Copyright 2021 The RamenDR authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
"context"
"fmt"

"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

ramen "github.com/ramendr/ramen/api/v1alpha1"
"github.com/ramendr/ramen/controllers/util"
)

// DRPolicyReconciler reconciles a DRPolicy object
type DRPolicyReconciler struct {
client.Client
Scheme *runtime.Scheme
}

//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies/finalizers,verbs=update

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the DRPolicy object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/reconcile
func (r *DRPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := ctrl.Log.WithName("controllers").WithName("drpolicy").WithValues("name", req.NamespacedName.Name)
log.Info("reconcile enter")

defer log.Info("reconcile exit")

drpolicy := &ramen.DRPolicy{}
if err := r.Client.Get(ctx, req.NamespacedName, drpolicy); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(fmt.Errorf("get: %w", err))
}

manifestWorkUtil := util.MWUtil{Client: r.Client, Ctx: ctx, Log: log, InstName: "", InstNamespace: ""}

switch drpolicy.ObjectMeta.DeletionTimestamp.IsZero() {
case true:
log.Info("create/update")

if err := finalizerAdd(ctx, drpolicy, r.Client, log); err != nil {
return ctrl.Result{}, fmt.Errorf("finalizer add update: %w", err)
}

if err := manifestWorkUtil.ClusterRolesCreate(drpolicy); err != nil {
return ctrl.Result{}, fmt.Errorf("cluster roles create: %w", err)
}
default:
log.Info("delete")

if err := manifestWorkUtil.ClusterRolesDelete(drpolicy); err != nil {
return ctrl.Result{}, fmt.Errorf("cluster roles delete: %w", err)
}

if err := finalizerRemove(ctx, drpolicy, r.Client, log); err != nil {
return ctrl.Result{}, fmt.Errorf("finalizer remove update: %w", err)
}
}

return ctrl.Result{}, nil
}

const finalizerName = "drpolicies.ramendr.openshift.io/ramen"

func finalizerAdd(ctx context.Context, drpolicy *ramen.DRPolicy, client client.Client, log logr.Logger) error {
finalizerCount := len(drpolicy.ObjectMeta.Finalizers)
controllerutil.AddFinalizer(drpolicy, finalizerName)

if len(drpolicy.ObjectMeta.Finalizers) != finalizerCount {
log.Info("finalizer add")

return client.Update(ctx, drpolicy)
}

return nil
}

func finalizerRemove(ctx context.Context, drpolicy *ramen.DRPolicy, client client.Client, log logr.Logger) error {
finalizerCount := len(drpolicy.ObjectMeta.Finalizers)
controllerutil.RemoveFinalizer(drpolicy, finalizerName)

if len(drpolicy.ObjectMeta.Finalizers) != finalizerCount {
log.Info("finalizer remove")

return client.Update(ctx, drpolicy)
}

return nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *DRPolicyReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&ramen.DRPolicy{}).
Complete(r)
}
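ClusterRolesCreate and ClusterRolesDelete are added to util.MWUtil in controllers/util, one of the changed files this excerpt omits. Per the commit message, create installs the cluster-roles ManifestWork for every cluster in the policy if absent, and delete removes it only from clusters that no other DRPolicy still references. A rough sketch of the delete side under those assumptions; every identifier not visible elsewhere in this diff (DRPolicyList, clusterReferencedByAnotherDRPolicy, deleteManifestWork) is hypothetical:

func (mwu *MWUtil) ClusterRolesDelete(drpolicy *ramen.DRPolicy) error {
	drpolicies := ramen.DRPolicyList{}
	if err := mwu.Client.List(mwu.Ctx, &drpolicies); err != nil {
		return fmt.Errorf("drpolicies list: %w", err)
	}

	for _, cluster := range drpolicy.Spec.DRClusterSet {
		// Keep the roles ManifestWork if any other DRPolicy still targets this cluster.
		if clusterReferencedByAnotherDRPolicy(cluster.Name, drpolicy.Name, drpolicies.Items) { // hypothetical
			continue
		}

		if err := mwu.deleteManifestWork(ClusterRolesManifestWorkName, cluster.Name); err != nil { // hypothetical
			return err
		}
	}

	return nil
}

Also not shown is the hub manager's main.go; a kubebuilder scaffold would register the new reconciler roughly like this (assumed, standard boilerplate):

if err := (&controllers.DRPolicyReconciler{
	Client: mgr.GetClient(),
	Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
	setupLog.Error(err, "unable to create controller", "controller", "DRPolicy")
	os.Exit(1)
}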
