Skip to content

Commit

Permalink
Merge pull request #14078 from rook/mergify/bp/release-1.14/pr-14049
Browse files Browse the repository at this point in the history
operator: make CephBlockPoolRadosNamespace and CephFilesystemSubVolumeGroup get outputs more verbose (backport #14049)
  • Loading branch information
travisn committed Apr 17, 2024
2 parents e62089f + b30d822 commit 0bdd66b
Show file tree
Hide file tree
Showing 7 changed files with 188 additions and 109 deletions.
187 changes: 84 additions & 103 deletions Documentation/CRDs/specification.md
Expand Up @@ -12,8 +12,6 @@ Resource Types:
<ul><li>
<a href="#ceph.rook.io/v1.CephBlockPool">CephBlockPool</a>
</li><li>
<a href="#ceph.rook.io/v1.CephBlockPoolRadosNamespace">CephBlockPoolRadosNamespace</a>
</li><li>
<a href="#ceph.rook.io/v1.CephBucketNotification">CephBucketNotification</a>
</li><li>
<a href="#ceph.rook.io/v1.CephBucketTopic">CephBucketTopic</a>
Expand Down Expand Up @@ -146,107 +144,6 @@ CephBlockPoolStatus
</tr>
</tbody>
</table>
<h3 id="ceph.rook.io/v1.CephBlockPoolRadosNamespace">CephBlockPoolRadosNamespace
</h3>
<div>
<p>CephBlockPoolRadosNamespace represents a Ceph BlockPool Rados Namespace</p>
</div>
<table>
<thead>
<tr>
<th>Field</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<code>apiVersion</code><br/>
string</td>
<td>
<code>
ceph.rook.io/v1
</code>
</td>
</tr>
<tr>
<td>
<code>kind</code><br/>
string
</td>
<td><code>CephBlockPoolRadosNamespace</code></td>
</tr>
<tr>
<td>
<code>metadata</code><br/>
<em>
<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta">
Kubernetes meta/v1.ObjectMeta
</a>
</em>
</td>
<td>
Refer to the Kubernetes API documentation for the fields of the
<code>metadata</code> field.
</td>
</tr>
<tr>
<td>
<code>spec</code><br/>
<em>
<a href="#ceph.rook.io/v1.CephBlockPoolRadosNamespaceSpec">
CephBlockPoolRadosNamespaceSpec
</a>
</em>
</td>
<td>
<p>Spec represents the specification of a Ceph BlockPool Rados Namespace</p>
<br/>
<br/>
<table>
<tr>
<td>
<code>name</code><br/>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>The name of the CephBlockPoolRadosNamespace. If not set, the default is the name of the CR.</p>
</td>
</tr>
<tr>
<td>
<code>blockPoolName</code><br/>
<em>
string
</em>
</td>
<td>
<p>BlockPoolName is the name of Ceph BlockPool. Typically it&rsquo;s the name of
the CephBlockPool CR.</p>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td>
<code>status</code><br/>
<em>
<a href="#ceph.rook.io/v1.CephBlockPoolRadosNamespaceStatus">
CephBlockPoolRadosNamespaceStatus
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>Status represents the status of a CephBlockPool Rados Namespace</p>
</td>
</tr>
</tbody>
</table>
<h3 id="ceph.rook.io/v1.CephBucketNotification">CephBucketNotification
</h3>
<div>
Expand Down Expand Up @@ -3086,6 +2983,90 @@ string
</tr>
</tbody>
</table>
<h3 id="ceph.rook.io/v1.CephBlockPoolRadosNamespace">CephBlockPoolRadosNamespace
</h3>
<div>
<p>CephBlockPoolRadosNamespace represents a Ceph BlockPool Rados Namespace</p>
</div>
<table>
<thead>
<tr>
<th>Field</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<code>metadata</code><br/>
<em>
<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta">
Kubernetes meta/v1.ObjectMeta
</a>
</em>
</td>
<td>
Refer to the Kubernetes API documentation for the fields of the
<code>metadata</code> field.
</td>
</tr>
<tr>
<td>
<code>spec</code><br/>
<em>
<a href="#ceph.rook.io/v1.CephBlockPoolRadosNamespaceSpec">
CephBlockPoolRadosNamespaceSpec
</a>
</em>
</td>
<td>
<p>Spec represents the specification of a Ceph BlockPool Rados Namespace</p>
<br/>
<br/>
<table>
<tr>
<td>
<code>name</code><br/>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>The name of the CephBlockPoolRadosNamespace. If not set, the default is the name of the CR.</p>
</td>
</tr>
<tr>
<td>
<code>blockPoolName</code><br/>
<em>
string
</em>
</td>
<td>
<p>BlockPoolName is the name of Ceph BlockPool. Typically it&rsquo;s the name of
the CephBlockPool CR.</p>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td>
<code>status</code><br/>
<em>
<a href="#ceph.rook.io/v1.CephBlockPoolRadosNamespaceStatus">
CephBlockPoolRadosNamespaceStatus
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>Status represents the status of a CephBlockPool Rados Namespace</p>
</td>
</tr>
</tbody>
</table>
<h3 id="ceph.rook.io/v1.CephBlockPoolRadosNamespaceSpec">CephBlockPoolRadosNamespaceSpec
</h3>
<p>
Expand Down
24 changes: 23 additions & 1 deletion deploy/charts/rook-ceph/templates/resources.yaml
Expand Up @@ -16,7 +16,18 @@ spec:
singular: cephblockpoolradosnamespace
scope: Namespaced
versions:
- name: v1
- additionalPrinterColumns:
- jsonPath: .status.phase
name: Phase
type: string
- description: Name of the Ceph BlockPool
jsonPath: .spec.blockPoolName
name: BlockPool
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
schema:
openAPIV3Schema:
description: CephBlockPoolRadosNamespace represents a Ceph BlockPool Rados Namespace
Expand Down Expand Up @@ -7955,6 +7966,17 @@ spec:
- jsonPath: .status.phase
name: Phase
type: string
- description: Name of the CephFileSystem
jsonPath: .spec.filesystemName
name: Filesystem
type: string
- jsonPath: .spec.quota
name: Quota
type: string
- jsonPath: .status.info.pinning
name: Pinning
priority: 1
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
Expand Down
24 changes: 23 additions & 1 deletion deploy/examples/crds.yaml
Expand Up @@ -19,7 +19,18 @@ spec:
singular: cephblockpoolradosnamespace
scope: Namespaced
versions:
- name: v1
- additionalPrinterColumns:
- jsonPath: .status.phase
name: Phase
type: string
- description: Name of the Ceph BlockPool
jsonPath: .spec.blockPoolName
name: BlockPool
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
schema:
openAPIV3Schema:
description: CephBlockPoolRadosNamespace represents a Ceph BlockPool Rados Namespace
Expand Down Expand Up @@ -7949,6 +7960,17 @@ spec:
- jsonPath: .status.phase
name: Phase
type: string
- description: Name of the CephFileSystem
jsonPath: .spec.filesystemName
name: Filesystem
type: string
- jsonPath: .spec.quota
name: Quota
type: string
- jsonPath: .status.info.pinning
name: Pinning
priority: 1
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
Expand Down
7 changes: 6 additions & 1 deletion pkg/apis/ceph.rook.io/v1/types.go
Expand Up @@ -2983,6 +2983,9 @@ type StorageClassDeviceSet struct {

// CephFilesystemSubVolumeGroup represents a Ceph Filesystem SubVolumeGroup
// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
// +kubebuilder:printcolumn:name="Filesystem",type=string,JSONPath=`.spec.filesystemName`,description="Name of the CephFileSystem"
// +kubebuilder:printcolumn:name="Quota",type=string,JSONPath=`.spec.quota`
// +kubebuilder:printcolumn:name="Pinning",type=string,JSONPath=`.status.info.pinning`,priority=1
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
// +kubebuilder:subresource:status
type CephFilesystemSubVolumeGroup struct {
Expand Down Expand Up @@ -3065,8 +3068,10 @@ type CephFilesystemSubVolumeGroupStatus struct {
// +genclient
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// CephBlockPoolRadosNamespace represents a Ceph BlockPool Rados Namespace
// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
// +kubebuilder:printcolumn:name="BlockPool",type=string,JSONPath=`.spec.blockPoolName`,description="Name of the Ceph BlockPool"
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
// +kubebuilder:subresource:status
type CephBlockPoolRadosNamespace struct {
metav1.TypeMeta `json:",inline"`
Expand Down
7 changes: 5 additions & 2 deletions pkg/daemon/ceph/client/subvolumegroup.go
Expand Up @@ -170,6 +170,9 @@ func PinCephFSSubVolumeGroup(context *clusterd.Context, clusterInfo *ClusterInfo
return nil
}

// validateConfiguration validates the provided pinning configuration.
// despite CRD validation, this ensures no duplicate values are set programmatically
// and to safeguard against potential internal changes of the configuration.
func validatePinningValues(pinning cephv1.CephFilesystemSubVolumeGroupSpecPinning) error {
numNils := 0
var err error
Expand All @@ -190,11 +193,11 @@ func validatePinningValues(pinning cephv1.CephFilesystemSubVolumeGroupSpecPinnin
if pinning.Random != nil {
numNils++
if (*pinning.Random < 0) || (*pinning.Random > 1.0) {
err = errors.Errorf("validate pinning type failed, Random: value %.2f is not between 0.0 and 1.1 (inclusive)", *pinning.Random)
err = errors.Errorf("validate pinning type failed, Random: value %.2f is not between 0.0 and 1.0 (inclusive)", *pinning.Random)
}
}
if numNils > 1 {
return fmt.Errorf("only one can be set")
return fmt.Errorf("only one pinning type can be set at a time")
}
if numNils == 0 {
return nil // pinning disabled
Expand Down
22 changes: 21 additions & 1 deletion pkg/operator/ceph/file/subvolumegroup/controller.go
Expand Up @@ -377,7 +377,11 @@ func (r *ReconcileCephFilesystemSubVolumeGroup) updateStatus(observedGeneration
}

cephFilesystemSubVolumeGroup.Status.Phase = status
cephFilesystemSubVolumeGroup.Status.Info = map[string]string{"clusterID": buildClusterID(cephFilesystemSubVolumeGroup)}
cephFilesystemSubVolumeGroup.Status.Info = map[string]string{
"clusterID": buildClusterID(cephFilesystemSubVolumeGroup),
"pinning": formatPinning(cephFilesystemSubVolumeGroup.Spec.Pinning),
}

if observedGeneration != k8sutil.ObservedGenerationNotAvailable {
cephFilesystemSubVolumeGroup.Status.ObservedGeneration = observedGeneration
}
Expand Down Expand Up @@ -409,3 +413,19 @@ func (r *ReconcileCephFilesystemSubVolumeGroup) cleanup(svg *cephv1.CephFilesyst
}
return nil
}

// formatPinning renders the subvolume group pinning configuration as a
// "type=value" string (e.g. "export=42", "random=0.31") for display in the
// CR status. When no pinning type is set in the spec, the default
// "distributed=1" is reported.
func formatPinning(pinning cephv1.CephFilesystemSubVolumeGroupSpecPinning) string {
	switch {
	case pinning.Export != nil:
		return fmt.Sprintf("export=%d", *pinning.Export)
	case pinning.Distributed != nil:
		return fmt.Sprintf("distributed=%d", *pinning.Distributed)
	case pinning.Random != nil:
		return fmt.Sprintf("random=%.2f", *pinning.Random)
	default:
		// no pinning configured: report the implicit default
		return "distributed=1"
	}
}
26 changes: 26 additions & 0 deletions pkg/operator/ceph/file/subvolumegroup/controller_test.go
Expand Up @@ -338,3 +338,29 @@ func Test_buildClusterID(t *testing.T) {
clusterID := buildClusterID(cephFilesystemSubVolumeGroup)
assert.Equal(t, "29e92135b7e8c014079b9f9f3566777d", clusterID)
}

// Test_formatPinning verifies the status string produced for each pinning
// type, including the implicit default when no pinning is configured.
func Test_formatPinning(t *testing.T) {
	distributedValue := 0
	exportValue := 42
	randomValue := 0.31

	// empty spec falls back to the default
	pinning := &cephv1.CephFilesystemSubVolumeGroupSpecPinning{}
	assert.Equal(t, "distributed=1", formatPinning(*pinning))

	// explicit distributed value, even zero, is reported as-is
	pinning.Distributed = &distributedValue
	assert.Equal(t, "distributed=0", formatPinning(*pinning))

	pinning.Distributed = nil
	pinning.Export = &exportValue
	assert.Equal(t, "export=42", formatPinning(*pinning))

	pinning.Export = nil
	pinning.Random = &randomValue
	assert.Equal(t, "random=0.31", formatPinning(*pinning))
}

0 comments on commit 0bdd66b

Please sign in to comment.