Skip to content

Commit

Permalink
drpolicy controller define
Browse files Browse the repository at this point in the history
	- vrg and pv cluster roles manifestworks unify
	- drpolicy create/update:
		- finalizer add if absent
		- cluster roles manifestwork create, if absent, for each cluster in drpolicy
	- drpolicy delete:
		- cluster roles manifestwork delete for each cluster not in any other drpolicy
		- finalizer remove if present
	- ginkgo version increase from 1.16.1 to 1.16.4
	- gomega version increase from 1.11.0 to 1.15.0

Signed-off-by: bhatfiel <bhatfiel@redhat.com>
  • Loading branch information
hatfieldbrian committed Aug 11, 2021
1 parent e61df22 commit ee4fc2a
Show file tree
Hide file tree
Showing 10 changed files with 348 additions and 67 deletions.
14 changes: 14 additions & 0 deletions config/rbac/role.yaml
Expand Up @@ -108,6 +108,20 @@ rules:
- patch
- update
- watch
- apiGroups:
- ramendr.openshift.io
resources:
- drpolicies/finalizers
verbs:
- update
- apiGroups:
- ramendr.openshift.io
resources:
- drpolicies/status
verbs:
- get
- patch
- update
- apiGroups:
- ramendr.openshift.io
resources:
Expand Down
4 changes: 4 additions & 0 deletions config/samples/kustomization.yaml
@@ -0,0 +1,4 @@
## Append samples you want in your CSV to this file as resources ##
resources:
- ramendr_v1alpha1_drpolicy.yaml
#+kubebuilder:scaffold:manifestskustomizesamples
14 changes: 0 additions & 14 deletions controllers/drplacementcontrol_controller.go
Expand Up @@ -992,13 +992,6 @@ func (d *DRPCInstance) updateUserPlRuleAndCreateVRGMW(homeCluster, homeClusterNa
}

func (d *DRPCInstance) createPVManifestWorkForRestore(newPrimary string) error {
if err := d.mwu.CreateOrUpdatePVRolesManifestWork(newPrimary); err != nil {
d.log.Error(err, "failed to create or update PersistentVolume Roles manifest")

return fmt.Errorf("failed to create or update PersistentVolume Roles manifest in namespace %s (%w)",
newPrimary, err)
}

pvMWName := d.mwu.BuildManifestWorkName(rmnutil.MWTypePV)

existAndApplied, err := d.mwu.ManifestExistAndApplied(pvMWName, newPrimary)
Expand Down Expand Up @@ -1395,13 +1388,6 @@ func (d *DRPCInstance) restorePVFromBackup(homeCluster string) error {
func (d *DRPCInstance) processVRGManifestWork(homeCluster string) error {
d.log.Info("Processing VRG ManifestWork", "cluster", homeCluster)

if err := d.mwu.CreateOrUpdateVRGRolesManifestWork(homeCluster); err != nil {
d.log.Error(err, "failed to create or update VolumeReplicationGroup Roles manifest")

return fmt.Errorf("failed to create or update VolumeReplicationGroup Roles manifest in namespace %s (%w)",
homeCluster, err)
}

if err := d.mwu.CreateOrUpdateVRGManifestWork(
d.instance.Name, d.instance.Namespace,
homeCluster, d.instance.Spec.S3Endpoint,
Expand Down
103 changes: 103 additions & 0 deletions controllers/drpolicy_controller.go
@@ -0,0 +1,103 @@
/*
Copyright 2021 The RamenDR authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
"context"
"fmt"

"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1"
"github.com/ramendr/ramen/controllers/util"
)

// DRPolicyReconciler reconciles a DRPolicy object
type DRPolicyReconciler struct {
	client.Client                 // API client used to get and update DRPolicy resources
	Log    logr.Logger            // base logger; Reconcile adds the DRPolicy NamespacedName to it
	Scheme *runtime.Scheme        // runtime scheme — assigned at setup; not read directly in this file
}

//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drpolicies/finalizers,verbs=update

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the DRPolicy object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/reconcile
func (r *DRPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := r.Log.WithValues("DRPolicy", req.NamespacedName)
	log.Info("reconcile enter")

	defer log.Info("reconcile exit")

	drpolicy := &ramendrv1alpha1.DRPolicy{}
	if err := r.Client.Get(ctx, req.NamespacedName, drpolicy); err != nil {
		// NotFound means the object is gone; nothing left to reconcile.
		return ctrl.Result{}, client.IgnoreNotFound(fmt.Errorf("get: %w", err))
	}

	manifestWorkUtil := util.MWUtil{Client: r.Client, Ctx: ctx, Log: log, InstName: "", InstNamespace: ""}

	const finalizerName = "drpolicies.ramendr.openshift.io/ramen"

	if drpolicy.ObjectMeta.DeletionTimestamp.IsZero() {
		log.Info("create/update")

		// Add the finalizer only when absent, so steady-state reconciles
		// do not issue a no-op Update on every pass.
		if !controllerutil.ContainsFinalizer(drpolicy, finalizerName) {
			controllerutil.AddFinalizer(drpolicy, finalizerName)

			if err := r.Update(ctx, drpolicy); err != nil {
				return ctrl.Result{}, fmt.Errorf("finalizer add update: %w", err)
			}
		}

		// Ensure cluster roles manifestworks exist for each cluster in the policy.
		if err := manifestWorkUtil.ClusterRolesCreate(drpolicy); err != nil {
			return ctrl.Result{}, fmt.Errorf("cluster roles create: %w", err)
		}

		return ctrl.Result{}, nil
	}

	log.Info("delete")

	// Finalizer already removed: cleanup completed on an earlier pass;
	// the object is only awaiting garbage collection.
	if !controllerutil.ContainsFinalizer(drpolicy, finalizerName) {
		return ctrl.Result{}, nil
	}

	// Delete cluster roles manifestworks for clusters no longer referenced
	// by any other drpolicy, then release the finalizer.
	if err := manifestWorkUtil.ClusterRolesDelete(drpolicy); err != nil {
		return ctrl.Result{}, fmt.Errorf("cluster roles delete: %w", err)
	}

	controllerutil.RemoveFinalizer(drpolicy, finalizerName)

	if err := r.Update(ctx, drpolicy); err != nil {
		return ctrl.Result{}, fmt.Errorf("finalizer remove update: %w", err)
	}

	return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *DRPolicyReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&ramendrv1alpha1.DRPolicy{}).
Complete(r)
}
105 changes: 105 additions & 0 deletions controllers/drpolicy_controller_test.go
@@ -0,0 +1,105 @@
package controllers_test

import (
"context"
"fmt"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"

ramen "github.com/ramendr/ramen/api/v1alpha1"
"github.com/ramendr/ramen/controllers/util"
)

// Exercises the DRPolicy controller end to end against the envtest API server:
// creating/deleting DRPolicy objects and asserting the resulting set of
// cluster roles manifestworks (one per referenced managed cluster).
var _ = Describe("DrpolicyController", func() {
	// clusterRolesExpect polls (up to 10s, every 0.25s) until the set of
	// cluster names that have a cluster roles manifestwork equals the
	// expected set.
	clusterRolesExpect := func(clusterNamesExpected *sets.String) {
		Eventually(
			func(g Gomega) {
				clusterNames := sets.String{}
				g.Expect(util.MWUtil{
					Client:        k8sClient,
					Ctx:           context.TODO(),
					Log:           nil,
					InstName:      "",
					InstNamespace: "",
				}.ClusterRolesList(&clusterNames)).To(Succeed())
				fmt.Fprintf(
					GinkgoWriter,
					"expect: %v\nactual: %v\n",
					*clusterNamesExpected,
					clusterNames,
				)
				g.Expect(clusterNamesExpected.Equal(clusterNames)).To(BeTrue())
			},
			10,
			0.25,
		).Should(Succeed())
	}
	// drpolicyCreate creates a namespace for each cluster in the policy that
	// is not already tracked, records it in clusterNames, creates the policy,
	// and waits for the matching cluster roles manifestworks to appear.
	drpolicyCreate := func(drpolicy *ramen.DRPolicy, clusterNames *sets.String) {
		for _, clusterName := range sets.NewString(drpolicy.Spec.ClusterNames...).Difference(*clusterNames).UnsortedList() {
			Expect(k8sClient.Create(
				context.TODO(),
				&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: clusterName}},
			)).To(Succeed())
			*clusterNames = clusterNames.Insert(clusterName)
		}
		Expect(k8sClient.Create(context.TODO(), drpolicy)).To(Succeed())
		clusterRolesExpect(clusterNames)
	}
	// drpolicyDelete deletes the policy and then waits for the cluster roles
	// manifestwork set to match the (unchanged?) expected set.
	//
	// NOTE(review): Difference(*clusterNames) is empty whenever clusterNames
	// already contains every cluster in this policy — which is the usual case
	// after drpolicyCreate ran — so this loop appears never to delete any
	// namespace nor shrink the expected set. Confirm whether the operands of
	// Difference were meant to be reversed, or the set restricted to clusters
	// not referenced by any remaining policy.
	drpolicyDelete := func(drpolicy *ramen.DRPolicy, clusterNames *sets.String) {
		Expect(k8sClient.Delete(context.TODO(), drpolicy)).To(Succeed())
		for _, clusterName := range sets.NewString(drpolicy.Spec.ClusterNames...).Difference(*clusterNames).UnsortedList() {
			Expect(k8sClient.Delete(
				context.TODO(),
				&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: clusterName}},
			)).To(Succeed())
			*clusterNames = clusterNames.Delete(clusterName)
		}
		clusterRolesExpect(clusterNames)
	}
	// Two policies sharing cluster-b, so deletion of one must not remove the
	// shared cluster's manifestwork.
	drpolicies := [...]ramen.DRPolicy{
		{
			ObjectMeta: metav1.ObjectMeta{Name: "drpolicy0"},
			Spec:       ramen.DRPolicySpec{ClusterNames: []string{"cluster-a", "cluster-b"}},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "drpolicy1"},
			Spec:       ramen.DRPolicySpec{ClusterNames: []string{"cluster-b", "cluster-c"}},
		},
	}
	// Running expected set of clusters with roles manifestworks; mutated by
	// the ordered Whens below, which therefore must execute in order.
	clusterNamesExpected := &sets.String{}
	When("a 1st drpolicy is created", func() {
		It("should create a cluster roles manifest work for each cluster specified in a 1st drpolicy", func() {
			drpolicyCreate(&drpolicies[0], clusterNamesExpected)
		})
	})
	// Placeholder specs: update behavior is not yet implemented/tested.
	When("a 1st drpolicy is updated to add some clusters and remove some other clusters", func() {
		It("should create a cluster roles manifest work for each cluster added", func() {
		})
		It("should delete a cluster roles manifest work for each cluster removed", func() {
		})
	})
	When("a 2nd drpolicy is created specifying some clusters in a 1st drpolicy and some not", func() {
		It("should create a cluster roles manifest work for each cluster specified in a 2nd drpolicy but not a 1st drpolicy",
			func() {
				drpolicyCreate(&drpolicies[1], clusterNamesExpected)
			},
		)
	})
	When("a 1st drpolicy is deleted", func() {
		It("should delete a cluster roles manifest work for each cluster specified in a 1st drpolicy but not a 2nd drpolicy",
			func() {
				drpolicyDelete(&drpolicies[0], clusterNamesExpected)
			},
		)
	})
	When("a 2nd drpolicy is deleted", func() {
		It("should delete a cluster roles manifest work for each cluster specified in a 2nd drpolicy", func() {
			drpolicyDelete(&drpolicies[1], clusterNamesExpected)
		})
	})
})
6 changes: 6 additions & 0 deletions controllers/suite_test.go
Expand Up @@ -104,6 +104,12 @@ var _ = BeforeSuite(func() {
})
Expect(err).ToNot(HaveOccurred())

Expect((&ramencontrollers.DRPolicyReconciler{
Client: k8sManager.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("DRPolicy"),
Scheme: k8sManager.GetScheme(),
}).SetupWithManager(k8sManager)).To(Succeed())

err = (&ramencontrollers.VolumeReplicationGroupReconciler{
Client: k8sManager.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("VolumeReplicationGroup"),
Expand Down

0 comments on commit ee4fc2a

Please sign in to comment.