Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

ceph: move scheme initialization to the same place #8482

Merged
merged 1 commit on Aug 6, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
8 changes: 1 addition & 7 deletions pkg/operator/ceph/client/controller.go
Expand Up @@ -78,15 +78,9 @@ func Add(mgr manager.Manager, context *clusterd.Context) error {

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler {
// Add the cephv1 scheme to the manager scheme so that the controller knows about it
mgrScheme := mgr.GetScheme()
if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil {
panic(err)
}

return &ReconcileCephClient{
client: mgr.GetClient(),
scheme: mgrScheme,
scheme: mgr.GetScheme(),
context: context,
}
}
Expand Down
8 changes: 1 addition & 7 deletions pkg/operator/ceph/cluster/controller.go
Expand Up @@ -113,20 +113,14 @@ func Add(mgr manager.Manager, ctx *clusterd.Context, clusterController *ClusterC

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, ctx *clusterd.Context, clusterController *ClusterController) reconcile.Reconciler {
// Add the cephv1 scheme to the manager scheme so that the controller knows about it
mgrScheme := mgr.GetScheme()
if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil {
panic(err)
}

// add "rook-" prefix to the controller name to make sure it is clear to all reading the events
// that they are coming from Rook. The controller name already has context that it is for Ceph
// and from the cluster controller.
clusterController.recorder = k8sutil.NewEventReporter(mgr.GetEventRecorderFor("rook-" + controllerName))

return &ReconcileCephCluster{
client: mgr.GetClient(),
scheme: mgrScheme,
scheme: mgr.GetScheme(),
context: ctx,
clusterController: clusterController,
}
Expand Down
7 changes: 1 addition & 6 deletions pkg/operator/ceph/cluster/rbd/controller.go
Expand Up @@ -92,14 +92,9 @@ func Add(mgr manager.Manager, context *clusterd.Context) error {

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler {
// Add the cephv1 scheme to the manager scheme so that the controller knows about it
mgrScheme := mgr.GetScheme()
if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil {
panic(err)
}
return &ReconcileCephRBDMirror{
client: mgr.GetClient(),
scheme: mgrScheme,
scheme: mgr.GetScheme(),
context: context,
peers: make(map[string]*peerSpec),
}
Expand Down
28 changes: 28 additions & 0 deletions pkg/operator/ceph/cr_manager.go
Expand Up @@ -20,18 +20,46 @@ import (
"context"

"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/apis/rook.io/v1alpha2"
"github.com/rook/rook/pkg/operator/ceph/cluster"
"github.com/rook/rook/pkg/operator/ceph/disruption/controllerconfig"
"k8s.io/apimachinery/pkg/runtime"

mapiv1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
healthchecking "github.com/openshift/machine-api-operator/pkg/apis/healthchecking/v1alpha1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
)

var (
resourcesSchemeFuncs = []func(*runtime.Scheme) error{
clientgoscheme.AddToScheme,
mapiv1.AddToScheme,
healthchecking.AddToScheme,
cephv1.AddToScheme,
v1alpha2.AddToScheme,
}
)

func (o *Operator) startManager(namespaceToWatch string, context context.Context, mgrErrorCh chan error) {
logger.Info("setting up schemes")
// Setup Scheme for all resources
scheme := runtime.NewScheme()
for _, f := range resourcesSchemeFuncs {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What is the benefit of creating the schemes in this central place? It seems better to keep it with each type, as it was before. If someone creates a new controller, they may not realize they need to add the scheme here.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It was useful with the previous version of this patch as the schemes must be defined when building our own cached resources.
I still believe it's nice to have that centralized. If you create a new controller you still need to add the new CRD to pkg/apis/ceph.rook.io/v1/register.go.
In the end, these lines will never be touched again, and fewer lines will be needed in new controllers.

Thoughts?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ok, the functionality is really the same. It's fine either way, we can go with this.

err := f(scheme)
if err != nil {
mgrErrorCh <- errors.Wrap(err, "failed to add to scheme")
return
}
}

// Set up a manager
mgrOpts := manager.Options{
LeaderElection: false,
Namespace: namespaceToWatch,
Scheme: scheme,
}

logger.Info("setting up the controller-runtime manager")
Expand Down
9 changes: 1 addition & 8 deletions pkg/operator/ceph/disruption/clusterdisruption/add.go
Expand Up @@ -42,19 +42,12 @@ import (
// Read more about how Managers, Controllers, and their Watches, Handlers, Predicates, etc work here:
// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg
func Add(mgr manager.Manager, context *controllerconfig.Context) error {

// Add the cephv1 scheme to the manager scheme
mgrScheme := mgr.GetScheme()
if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "failed to add ceph scheme to manager scheme")
}

// This will be used to associate namespaces and cephclusters.
sharedClusterMap := &ClusterMap{}

reconcileClusterDisruption := &ReconcileClusterDisruption{
client: mgr.GetClient(),
scheme: mgrScheme,
scheme: mgr.GetScheme(),
context: context,
clusterMap: sharedClusterMap,
}
Expand Down
11 changes: 1 addition & 10 deletions pkg/operator/ceph/disruption/machinedisruption/add.go
Expand Up @@ -18,7 +18,6 @@ package machinedisruption

import (
healthchecking "github.com/openshift/machine-api-operator/pkg/apis/healthchecking/v1alpha1"
"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/operator/ceph/disruption/controllerconfig"
"sigs.k8s.io/controller-runtime/pkg/controller"
Expand All @@ -32,17 +31,9 @@ import (
// Read more about how Managers, Controllers, and their Watches, Handlers, Predicates, etc work here:
// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg
func Add(mgr manager.Manager, ctx *controllerconfig.Context) error {
mgrScheme := mgr.GetScheme()
if err := healthchecking.AddToScheme(mgrScheme); err != nil {
return errors.Wrap(err, "failed to add to healthchecking scheme")
}
if err := cephv1.AddToScheme(mgrScheme); err != nil {
return errors.Wrap(err, "failed to add to ceph scheme")
}

reconcileMachineDisruption := &MachineDisruptionReconciler{
client: mgr.GetClient(),
scheme: mgrScheme,
scheme: mgr.GetScheme(),
context: ctx,
}

Expand Down
11 changes: 1 addition & 10 deletions pkg/operator/ceph/disruption/machinelabel/add.go
Expand Up @@ -19,7 +19,6 @@ package machinelabel
import (
mapiv1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/operator/ceph/disruption/controllerconfig"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
Expand All @@ -41,17 +40,9 @@ const (
// Read more about how Managers, Controllers, and their Watches, Handlers, Predicates, etc work here:
// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg
func Add(mgr manager.Manager, context *controllerconfig.Context) error {
mgrScheme := mgr.GetScheme()
if err := cephv1.AddToScheme(mgrScheme); err != nil {
return errors.Wrap(err, "failed to add scheme to ceph")
}
if err := mapiv1.AddToScheme(mgrScheme); err != nil {
return errors.Wrap(err, "failed to add scheme to map")
}

reconcileMachineLabel := &ReconcileMachineLabel{
client: mgr.GetClient(),
scheme: mgrScheme,
scheme: mgr.GetScheme(),
options: context,
}

Expand Down
7 changes: 1 addition & 6 deletions pkg/operator/ceph/file/controller.go
Expand Up @@ -90,14 +90,9 @@ func Add(mgr manager.Manager, context *clusterd.Context) error {

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler {
// Add the cephv1 scheme to the manager scheme so that the controller knows about it
mgrScheme := mgr.GetScheme()
if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil {
panic(err)
}
return &ReconcileCephFilesystem{
client: mgr.GetClient(),
scheme: mgrScheme,
scheme: mgr.GetScheme(),
context: context,
fsChannels: make(map[string]*fsHealth),
}
Expand Down
7 changes: 1 addition & 6 deletions pkg/operator/ceph/file/mirror/controller.go
Expand Up @@ -84,14 +84,9 @@ func Add(mgr manager.Manager, context *clusterd.Context) error {

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler {
// Add the cephv1 scheme to the manager scheme so that the controller knows about it
mgrScheme := mgr.GetScheme()
if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil {
panic(err)
}
return &ReconcileFilesystemMirror{
client: mgr.GetClient(),
scheme: mgrScheme,
scheme: mgr.GetScheme(),
context: context,
}
}
Expand Down
8 changes: 1 addition & 7 deletions pkg/operator/ceph/nfs/controller.go
Expand Up @@ -84,15 +84,9 @@ func Add(mgr manager.Manager, context *clusterd.Context) error {

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler {
// Add the cephv1 scheme to the manager scheme so that the controller knows about it
mgrScheme := mgr.GetScheme()
if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil {
panic(err)
}

return &ReconcileCephNFS{
client: mgr.GetClient(),
scheme: mgrScheme,
scheme: mgr.GetScheme(),
context: context,
}
}
Expand Down
8 changes: 1 addition & 7 deletions pkg/operator/ceph/object/controller.go
Expand Up @@ -99,16 +99,10 @@ func Add(mgr manager.Manager, context *clusterd.Context) error {

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler {
// Add the cephv1 scheme to the manager scheme so that the controller knows about it
mgrScheme := mgr.GetScheme()
if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil {
panic(err)
}

context.Client = mgr.GetClient()
return &ReconcileCephObjectStore{
client: mgr.GetClient(),
scheme: mgrScheme,
scheme: mgr.GetScheme(),
context: context,
bktclient: bktclient.NewForConfigOrDie(context.KubeConfig),
objectStoreChannels: make(map[string]*objectStoreHealth),
Expand Down
8 changes: 1 addition & 7 deletions pkg/operator/ceph/object/realm/controller.go
Expand Up @@ -86,15 +86,9 @@ func Add(mgr manager.Manager, context *clusterd.Context) error {

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler {
// Add the cephv1 scheme to the manager scheme so that the controller knows about it
mgrScheme := mgr.GetScheme()
if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil {
panic(err)
}

return &ReconcileObjectRealm{
client: mgr.GetClient(),
scheme: mgrScheme,
scheme: mgr.GetScheme(),
context: context,
}
}
Expand Down
8 changes: 1 addition & 7 deletions pkg/operator/ceph/object/user/controller.go
Expand Up @@ -85,15 +85,9 @@ func Add(mgr manager.Manager, context *clusterd.Context) error {

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler {
// Add the cephv1 scheme to the manager scheme so that the controller knows about it
mgrScheme := mgr.GetScheme()
if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil {
panic(err)
}

return &ReconcileObjectStoreUser{
client: mgr.GetClient(),
scheme: mgrScheme,
scheme: mgr.GetScheme(),
context: context,
}
}
Expand Down
7 changes: 1 addition & 6 deletions pkg/operator/ceph/object/zone/controller.go
Expand Up @@ -82,14 +82,9 @@ func Add(mgr manager.Manager, context *clusterd.Context) error {

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler {
// Add the cephv1 scheme to the manager scheme so that the controller knows about it
mgrScheme := mgr.GetScheme()
if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil {
panic(err)
}
return &ReconcileObjectZone{
client: mgr.GetClient(),
scheme: mgrScheme,
scheme: mgr.GetScheme(),
context: context,
}
}
Expand Down
7 changes: 1 addition & 6 deletions pkg/operator/ceph/object/zonegroup/controller.go
Expand Up @@ -80,14 +80,9 @@ func Add(mgr manager.Manager, context *clusterd.Context) error {

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler {
// Add the cephv1 scheme to the manager scheme so that the controller knows about it
mgrScheme := mgr.GetScheme()
if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil {
panic(err)
}
return &ReconcileObjectZoneGroup{
client: mgr.GetClient(),
scheme: mgrScheme,
scheme: mgr.GetScheme(),
context: context,
}
}
Expand Down
7 changes: 1 addition & 6 deletions pkg/operator/ceph/pool/controller.go
Expand Up @@ -84,14 +84,9 @@ func Add(mgr manager.Manager, context *clusterd.Context) error {

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, context *clusterd.Context) reconcile.Reconciler {
// Add the cephv1 scheme to the manager scheme so that the controller knows about it
mgrScheme := mgr.GetScheme()
if err := cephv1.AddToScheme(mgr.GetScheme()); err != nil {
panic(err)
}
return &ReconcileCephBlockPool{
client: mgr.GetClient(),
scheme: mgrScheme,
scheme: mgr.GetScheme(),
context: context,
blockPoolChannels: make(map[string]*blockPoolHealth),
}
Expand Down