Skip to content

Commit 19d85aa

Browse files
authoredOct 9, 2024··
feat(health): Adding more health checks for Keycloak, Postgres, Grafana, SolrCloud (#20294)
* feat(health): Add healthcheck for acid.zalan.do/postgresql Signed-off-by: Remo Zellmer <rze@vizrt.com> * feat(health): Add healthcheck for grafana.integreatly.org/Grafana and GrafanaDatasource Signed-off-by: Remo Zellmer <rze@vizrt.com> * feat(health): Add healthcheck for k8s.keycloak.org/Keycloak Signed-off-by: Remo Zellmer <rze@vizrt.com> * feat(health): Add healthcheck for solr.apache.org/SolrCloud Signed-off-by: Remo Zellmer <rze@vizrt.com> --------- Signed-off-by: Remo Zellmer <rze@vizrt.com>
1 parent e1472f3 commit 19d85aa

28 files changed

+1230
-0
lines changed
 
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
-- Health assessment for acid.zalan.do/postgresql resources.
-- The operator's status.PostgresClusterStatus field drives the mapping:
--   (missing)           => Progressing
--   Running             => Healthy
--   Creating / Updating => Progressing
--   anything else       => Degraded
-- See https://github.com/zalando/postgres-operator/blob/0745ce7c/pkg/apis/acid.zalan.do/v1/const.go#L4-L13

-- Waiting for status info => Progressing
if obj.status == nil or obj.status.PostgresClusterStatus == nil then
  return {
    status = "Progressing",
    message = "Waiting for postgres cluster status...",
  }
end

local clusterStatus = obj.status.PostgresClusterStatus

-- Running => Healthy
if clusterStatus == "Running" then
  return {
    status = "Healthy",
    message = clusterStatus,
  }
end

-- Creating/Updating => Progressing
if clusterStatus == "Creating" or clusterStatus == "Updating" then
  return {
    status = "Progressing",
    message = clusterStatus,
  }
end

-- CreateFailed/UpdateFailed/SyncFailed/Invalid/etc => Degraded
return {
  status = "Degraded",
  message = clusterStatus,
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
tests:
2+
- healthStatus:
3+
status: Progressing
4+
message: "Waiting for postgres cluster status..."
5+
inputPath: testdata/provisioning.yaml
6+
- healthStatus:
7+
status: Progressing
8+
message: "Updating"
9+
inputPath: testdata/progressing.yaml
10+
- healthStatus:
11+
status: Healthy
12+
message: "Running"
13+
inputPath: testdata/healthy.yaml
14+
- healthStatus:
15+
status: Degraded
16+
message: "UpdateFailed"
17+
inputPath: testdata/degraded.yaml
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
apiVersion: acid.zalan.do/v1
2+
kind: postgresql
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/sync-wave: '1'
6+
argocd.argoproj.io/tracking-id: foobar-db:acid.zalan.do/postgresql:foo/foobar-db
7+
creationTimestamp: '2024-10-07T09:06:07Z'
8+
generation: 4
9+
name: foobar-db
10+
namespace: foo
11+
resourceVersion: '242244'
12+
uid: 741b63d5-8deb-45ef-af80-09d558d355a7
13+
spec:
14+
databases:
15+
foobar: root
16+
enableLogicalBackup: false
17+
numberOfInstances: 1
18+
postgresql:
19+
parameters:
20+
password_encryption: scram-sha-256
21+
version: '15'
22+
teamId: foobar
23+
users: {}
24+
volume:
25+
size: 1Gi
26+
status:
27+
PostgresClusterStatus: UpdateFailed
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
apiVersion: acid.zalan.do/v1
2+
kind: postgresql
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/sync-wave: '1'
6+
argocd.argoproj.io/tracking-id: foobar-db:acid.zalan.do/postgresql:foo/foobar-db
7+
creationTimestamp: '2024-10-07T09:06:07Z'
8+
generation: 4
9+
name: foobar-db
10+
namespace: foo
11+
resourceVersion: '242244'
12+
uid: 741b63d5-8deb-45ef-af80-09d558d355a7
13+
spec:
14+
databases:
15+
foobar: root
16+
enableLogicalBackup: false
17+
numberOfInstances: 1
18+
postgresql:
19+
parameters:
20+
password_encryption: scram-sha-256
21+
version: '15'
22+
teamId: foobar
23+
users: {}
24+
volume:
25+
size: 1Gi
26+
status:
27+
PostgresClusterStatus: Running
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
apiVersion: acid.zalan.do/v1
2+
kind: postgresql
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/sync-wave: '1'
6+
argocd.argoproj.io/tracking-id: foobar-db:acid.zalan.do/postgresql:foo/foobar-db
7+
creationTimestamp: '2024-10-07T09:06:07Z'
8+
generation: 4
9+
name: foobar-db
10+
namespace: foo
11+
resourceVersion: '242244'
12+
uid: 741b63d5-8deb-45ef-af80-09d558d355a7
13+
spec:
14+
databases:
15+
foobar: root
16+
enableLogicalBackup: false
17+
numberOfInstances: 1
18+
postgresql:
19+
parameters:
20+
password_encryption: scram-sha-256
21+
version: '15'
22+
teamId: foobar
23+
users: {}
24+
volume:
25+
size: 1Gi
26+
status:
27+
PostgresClusterStatus: Updating
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
apiVersion: acid.zalan.do/v1
2+
kind: postgresql
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/sync-wave: '1'
6+
argocd.argoproj.io/tracking-id: foobar-db:acid.zalan.do/postgresql:foo/foobar-db
7+
name: foobar-db
8+
namespace: foo
9+
spec:
10+
databases:
11+
foobar: root
12+
enableLogicalBackup: false
13+
numberOfInstances: 1
14+
postgresql:
15+
parameters:
16+
password_encryption: scram-sha-256
17+
version: '15'
18+
teamId: foobar
19+
users: {}
20+
volume:
21+
size: 1Gi
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
2+
-- Health assessment for grafana.integreatly.org/Grafana resources.
-- The operator reports reconciliation progress via status.stage and
-- status.stageStatus; "complete"/"success" is the only Healthy state.

-- if no status info available yet, assume progressing
if obj.status == nil or obj.status.stageStatus == nil then
  return {
    status = "Progressing",
    message = "Waiting for Grafana status info",
  }
end

-- if last stage failed, we are stuck here.
-- Guard against a missing stage name: the Lua '..' operator raises an
-- error on nil, which would make the whole health check blow up.
if obj.status.stageStatus == "failed" then
  return {
    status = "Degraded",
    message = "Failed at stage " .. (obj.status.stage or "unknown"),
  }
end

-- only if "complete" stage was successful, Grafana can be considered healthy
if obj.status.stage == "complete" and obj.status.stageStatus == "success" then
  return {
    status = "Healthy",
    message = "",
  }
end

-- no final status yet, assume progressing; report the current stage
-- (fall back to "" so the message is never nil)
return {
  status = "Progressing",
  message = obj.status.stage or "",
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
tests:
2+
- healthStatus:
3+
status: Progressing
4+
message: "Waiting for Grafana status info"
5+
inputPath: testdata/provisioning.yaml
6+
- healthStatus:
7+
status: Progressing
8+
message: "deployment"
9+
inputPath: testdata/progressing.yaml
10+
- healthStatus:
11+
status: Healthy
12+
message: ""
13+
inputPath: testdata/healthy.yaml
14+
- healthStatus:
15+
status: Degraded
16+
message: "Failed at stage ingress"
17+
inputPath: testdata/degraded.yaml
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
apiVersion: grafana.integreatly.org/v1beta1
2+
kind: Grafana
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/sync-wave: '1'
6+
argocd.argoproj.io/tracking-id: foobar-admin:grafana.integreatly.org/Grafana:foo/grafana
7+
creationTimestamp: '2024-10-07T08:46:00Z'
8+
generation: 3
9+
labels:
10+
dashboards: grafana
11+
folders: grafana
12+
name: grafana
13+
namespace: foo
14+
resourceVersion: '343511'
15+
uid: d2f0496d-cd5c-46bf-8630-de827b6d59b0
16+
spec:
17+
deployment:
18+
metadata: {}
19+
spec:
20+
template:
21+
metadata: {}
22+
spec:
23+
containers:
24+
- image: docker.io/grafana/grafana:11.1.4
25+
name: grafana
26+
volumeMounts:
27+
- mountPath: /etc/ssl/certs/ca-certificates.crt
28+
name: tls-ca-bundle
29+
readOnly: true
30+
subPath: tls-ca-bundle.pem
31+
volumes:
32+
- name: tls-ca-bundle
33+
secret:
34+
items:
35+
- key: tls-ca-bundle.pem
36+
path: tls-ca-bundle.pem
37+
secretName: tls-ca-bundle-secret
38+
version: 10.4.3
39+
status:
40+
adminUrl: http://grafana-service.foo:3000
41+
dashboards:
42+
- foo/dashboard-argocd/qPkgGHg7k
43+
datasources:
44+
- foo/cluster-local/927b3c23-e25f-4cbe-a82f-effbb0bbbf40
45+
stage: ingress
46+
stageStatus: failed
47+
version: 11.1.4
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
apiVersion: grafana.integreatly.org/v1beta1
2+
kind: Grafana
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/sync-wave: '1'
6+
argocd.argoproj.io/tracking-id: foobar-admin:grafana.integreatly.org/Grafana:foo/grafana
7+
creationTimestamp: '2024-10-07T08:46:00Z'
8+
generation: 3
9+
labels:
10+
dashboards: grafana
11+
folders: grafana
12+
name: grafana
13+
namespace: foo
14+
resourceVersion: '343511'
15+
uid: d2f0496d-cd5c-46bf-8630-de827b6d59b0
16+
spec:
17+
deployment:
18+
metadata: {}
19+
spec:
20+
template:
21+
metadata: {}
22+
spec:
23+
containers:
24+
- image: docker.io/grafana/grafana:11.1.4
25+
name: grafana
26+
volumeMounts:
27+
- mountPath: /etc/ssl/certs/ca-certificates.crt
28+
name: tls-ca-bundle
29+
readOnly: true
30+
subPath: tls-ca-bundle.pem
31+
volumes:
32+
- name: tls-ca-bundle
33+
secret:
34+
items:
35+
- key: tls-ca-bundle.pem
36+
path: tls-ca-bundle.pem
37+
secretName: tls-ca-bundle-secret
38+
version: 10.4.3
39+
status:
40+
adminUrl: http://grafana-service.foo:3000
41+
dashboards:
42+
- foo/dashboard-argocd/qPkgGHg7k
43+
datasources:
44+
- foo/cluster-local/927b3c23-e25f-4cbe-a82f-effbb0bbbf40
45+
stage: complete
46+
stageStatus: success
47+
version: 11.1.4
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
apiVersion: grafana.integreatly.org/v1beta1
2+
kind: Grafana
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/sync-wave: '1'
6+
argocd.argoproj.io/tracking-id: foobar-admin:grafana.integreatly.org/Grafana:foo/grafana
7+
creationTimestamp: '2024-10-07T08:46:00Z'
8+
generation: 3
9+
labels:
10+
dashboards: grafana
11+
folders: grafana
12+
name: grafana
13+
namespace: foo
14+
resourceVersion: '343511'
15+
uid: d2f0496d-cd5c-46bf-8630-de827b6d59b0
16+
spec:
17+
deployment:
18+
metadata: {}
19+
spec:
20+
template:
21+
metadata: {}
22+
spec:
23+
containers:
24+
- image: docker.io/grafana/grafana:11.1.4
25+
name: grafana
26+
volumeMounts:
27+
- mountPath: /etc/ssl/certs/ca-certificates.crt
28+
name: tls-ca-bundle
29+
readOnly: true
30+
subPath: tls-ca-bundle.pem
31+
volumes:
32+
- name: tls-ca-bundle
33+
secret:
34+
items:
35+
- key: tls-ca-bundle.pem
36+
path: tls-ca-bundle.pem
37+
secretName: tls-ca-bundle-secret
38+
version: 10.4.3
39+
status:
40+
adminUrl: http://grafana-service.foo:3000
41+
dashboards:
42+
- foo/dashboard-argocd/qPkgGHg7k
43+
datasources:
44+
- foo/cluster-local/927b3c23-e25f-4cbe-a82f-effbb0bbbf40
45+
stage: deployment
46+
stageStatus: success
47+
version: 11.1.4
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
apiVersion: grafana.integreatly.org/v1beta1
2+
kind: Grafana
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/sync-wave: '1'
6+
argocd.argoproj.io/tracking-id: foobar-admin:grafana.integreatly.org/Grafana:foo/grafana
7+
creationTimestamp: '2024-10-07T08:46:00Z'
8+
generation: 3
9+
labels:
10+
dashboards: grafana
11+
folders: grafana
12+
name: grafana
13+
namespace: foo
14+
resourceVersion: '343511'
15+
uid: d2f0496d-cd5c-46bf-8630-de827b6d59b0
16+
spec:
17+
deployment:
18+
metadata: {}
19+
spec:
20+
template:
21+
metadata: {}
22+
spec:
23+
containers:
24+
- image: docker.io/grafana/grafana:11.1.4
25+
name: grafana
26+
volumeMounts:
27+
- mountPath: /etc/ssl/certs/ca-certificates.crt
28+
name: tls-ca-bundle
29+
readOnly: true
30+
subPath: tls-ca-bundle.pem
31+
volumes:
32+
- name: tls-ca-bundle
33+
secret:
34+
items:
35+
- key: tls-ca-bundle.pem
36+
path: tls-ca-bundle.pem
37+
secretName: tls-ca-bundle-secret
38+
version: 10.4.3
39+
status:
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
2+
-- Health assessment for grafana.integreatly.org/GrafanaDatasource resources.
-- The operator sets status.uid once the datasource is provisioned.

-- if UID not yet created, we are progressing.
-- Also treat a missing uid field (nil) as progressing: the original check
-- only handled uid == "", so a status block without a uid field fell
-- through and was wrongly reported Healthy.
if obj.status == nil or obj.status.uid == nil or obj.status.uid == "" then
  return {
    status = "Progressing",
    message = "",
  }
end

-- NoMatchingInstances distinguishes if we are healthy or degraded
-- (the instanceSelector matched no Grafana instance)
if obj.status.NoMatchingInstances then
  return {
    status = "Degraded",
    message = "can't find matching grafana instance",
  }
end
return {
  status = "Healthy",
  message = "",
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
tests:
2+
- healthStatus:
3+
status: Progressing
4+
message: ""
5+
inputPath: testdata/progressing.yaml
6+
- healthStatus:
7+
status: Healthy
8+
message: ""
9+
inputPath: testdata/healthy.yaml
10+
- healthStatus:
11+
status: Degraded
12+
message: "can't find matching grafana instance"
13+
inputPath: testdata/degraded.yaml
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
apiVersion: grafana.integreatly.org/v1beta1
2+
kind: GrafanaDatasource
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/sync-wave: '3'
6+
argocd.argoproj.io/tracking-id: foobar-admin:grafana.integreatly.org/GrafanaDatasource:foo/cluster-local
7+
creationTimestamp: '2024-10-07T09:37:21Z'
8+
generation: 1
9+
name: cluster-local
10+
namespace: foo
11+
resourceVersion: '356565'
12+
uid: 927b3c23-e25f-4cbe-a82f-effbb0bbbf40
13+
spec:
14+
allowCrossNamespaceImport: true
15+
datasource:
16+
access: proxy
17+
editable: true
18+
isDefault: true
19+
jsonData:
20+
httpHeaderName1: Authorization
21+
timeInterval: 5s
22+
tlsSkipVerify: true
23+
name: cluster-local
24+
secureJsonData:
25+
httpHeaderValue1: Bearer ${token}
26+
type: prometheus
27+
url: https://thanos-querier.openshift-monitoring.svc.cluster.local:9091
28+
instanceSelector:
29+
matchLabels:
30+
dashboards: invalid-selector
31+
resyncPeriod: 5m
32+
valuesFrom:
33+
- targetPath: secureJsonData.httpHeaderValue1
34+
valueFrom:
35+
secretKeyRef:
36+
key: token
37+
name: grafana-token
38+
status:
39+
NoMatchingInstances: true
40+
hash: 56e40622b6a72563637b7c5f33c26d1ce87839dd5897a4a263fbd3d947f951cb
41+
lastResync: '2024-10-09T10:30:40Z'
42+
uid: 927b3c23-e25f-4cbe-a82f-effbb0bbbf40
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
apiVersion: grafana.integreatly.org/v1beta1
2+
kind: GrafanaDatasource
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/sync-wave: '3'
6+
argocd.argoproj.io/tracking-id: foobar-admin:grafana.integreatly.org/GrafanaDatasource:foo/cluster-local
7+
creationTimestamp: '2024-10-07T09:37:21Z'
8+
generation: 1
9+
name: cluster-local
10+
namespace: foo
11+
resourceVersion: '356565'
12+
uid: 927b3c23-e25f-4cbe-a82f-effbb0bbbf40
13+
spec:
14+
allowCrossNamespaceImport: true
15+
datasource:
16+
access: proxy
17+
editable: true
18+
isDefault: true
19+
jsonData:
20+
httpHeaderName1: Authorization
21+
timeInterval: 5s
22+
tlsSkipVerify: true
23+
name: cluster-local
24+
secureJsonData:
25+
httpHeaderValue1: Bearer ${token}
26+
type: prometheus
27+
url: https://thanos-querier.openshift-monitoring.svc.cluster.local:9091
28+
instanceSelector:
29+
matchLabels:
30+
dashboards: grafana
31+
resyncPeriod: 5m
32+
valuesFrom:
33+
- targetPath: secureJsonData.httpHeaderValue1
34+
valueFrom:
35+
secretKeyRef:
36+
key: token
37+
name: grafana-token
38+
status:
39+
hash: 56e40622b6a72563637b7c5f33c26d1ce87839dd5897a4a263fbd3d947f951cb
40+
lastResync: '2024-10-09T10:30:40Z'
41+
uid: 927b3c23-e25f-4cbe-a82f-effbb0bbbf40
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
apiVersion: grafana.integreatly.org/v1beta1
2+
kind: GrafanaDatasource
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/sync-wave: '3'
6+
argocd.argoproj.io/tracking-id: foobar-admin:grafana.integreatly.org/GrafanaDatasource:foo/cluster-local
7+
name: cluster-local
8+
namespace: foo
9+
spec:
10+
allowCrossNamespaceImport: true
11+
datasource:
12+
access: proxy
13+
editable: true
14+
isDefault: true
15+
jsonData:
16+
httpHeaderName1: Authorization
17+
timeInterval: 5s
18+
tlsSkipVerify: true
19+
name: cluster-local
20+
secureJsonData:
21+
httpHeaderValue1: Bearer ${token}
22+
type: prometheus
23+
url: https://thanos-querier.openshift-monitoring.svc.cluster.local:9091
24+
instanceSelector:
25+
matchLabels:
26+
dashboards: grafana
27+
resyncPeriod: 5m
28+
valuesFrom:
29+
- targetPath: secureJsonData.httpHeaderValue1
30+
valueFrom:
31+
secretKeyRef:
32+
key: token
33+
name: grafana-token
34+
status:
35+
uid: ""
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
-- Health assessment for k8s.keycloak.org/Keycloak resources, derived from
-- the operator's status conditions (Ready / HasErrors).

if obj.status == nil or obj.status.conditions == nil then
  -- no status info available yet
  return {
    status = "Progressing",
    message = "Waiting for Keycloak status conditions to exist",
  }
end

-- Sort conditions by lastTransitionTime, from old to new, so the first
-- matching condition below is the longest-standing one. Guard against a
-- missing timestamp: comparing nil with '<' raises a Lua error.
table.sort(obj.status.conditions, function(a, b)
  return (a.lastTransitionTime or "") < (b.lastTransitionTime or "")
end)

for _, condition in ipairs(obj.status.conditions) do
  if condition.type == "Ready" and condition.status == "True" then
    return {
      status = "Healthy",
      message = "",
    }
  elseif condition.type == "HasErrors" and condition.status == "True" then
    -- condition.message may be absent; '..' on nil would raise an error,
    -- so fall back to the empty string.
    return {
      status = "Degraded",
      message = "Has Errors: " .. (condition.message or ""),
    }
  end
end

-- We couldn't find matching conditions yet, so assume progressing
return {
  status = "Progressing",
  message = "",
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
tests:
2+
- healthStatus:
3+
status: Progressing
4+
message: "Waiting for Keycloak status conditions to exist"
5+
inputPath: testdata/provisioning.yaml
6+
- healthStatus:
7+
status: Progressing
8+
message: ""
9+
inputPath: testdata/progressing.yaml
10+
- healthStatus:
11+
status: Healthy
12+
message: ""
13+
inputPath: testdata/healthy.yaml
14+
- healthStatus:
15+
status: Degraded
16+
message: "Has Errors: Waiting for foo/keycloak-1 due to CrashLoopBackOff: back-off 10s"
17+
inputPath: testdata/degraded.yaml
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
apiVersion: k8s.keycloak.org/v2alpha1
2+
kind: Keycloak
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/sync-wave: '2'
6+
argocd.argoproj.io/tracking-id: foobar-keycloak:k8s.keycloak.org/Keycloak:foo/keycloak
7+
creationTimestamp: '2024-10-07T09:06:33Z'
8+
generation: 4
9+
name: keycloak
10+
namespace: foo
11+
resourceVersion: '343382'
12+
uid: 4e08e59c-1b6b-4b13-8a1a-bbce3f91bd68
13+
spec:
14+
db:
15+
host: keycloak-db
16+
passwordSecret:
17+
key: password
18+
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
19+
usernameSecret:
20+
key: username
21+
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
22+
vendor: postgres
23+
hostname:
24+
admin: https://keycloak.apps-crc.testing
25+
hostname: keycloak.apps-crc.testing
26+
http:
27+
httpEnabled: false
28+
tlsSecret: keycloak-tls
29+
ingress:
30+
enabled: false
31+
instances: 2
32+
unsupported:
33+
podTemplate:
34+
spec:
35+
containers:
36+
- env:
37+
- name: KC_HTTPS_TRUST_STORE_FILE
38+
value: /truststore/openshiftca.jks
39+
- name: KC_HTTPS_TRUST_STORE_PASSWORD
40+
value: OpenshiftCA
41+
- name: KC_HTTPS_TRUST_STORE_TYPE
42+
value: JKS
43+
- name: KC_LOG_LEVEL
44+
value: INFO
45+
volumeMounts:
46+
- mountPath: /truststore
47+
name: truststore-volume
48+
volumes:
49+
- name: truststore-volume
50+
secret:
51+
secretName: keycloak-truststore
52+
status:
53+
conditions:
54+
- lastTransitionTime: '2024-10-09T10:13:00.097073410Z'
55+
message: Waiting for more replicas
56+
observedGeneration: 5
57+
status: 'False'
58+
type: Ready
59+
- lastTransitionTime: '2024-10-09T10:14:12.070548569Z'
60+
message: >-
61+
Waiting for foo/keycloak-1 due to CrashLoopBackOff: back-off 10s
62+
observedGeneration: 5
63+
status: 'True'
64+
type: HasErrors
65+
- lastTransitionTime: '2024-10-09T10:12:59.087234931Z'
66+
message: Rolling out deployment update
67+
observedGeneration: 5
68+
status: 'True'
69+
type: RollingUpdate
70+
instances: 1
71+
observedGeneration: 5
72+
selector: >-
73+
app=keycloak,app.kubernetes.io/managed-by=keycloak-operator,app.kubernetes.io/instance=keycloak
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,77 @@
1+
apiVersion: k8s.keycloak.org/v2alpha1
2+
kind: Keycloak
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/sync-wave: '2'
6+
argocd.argoproj.io/tracking-id: foobar-keycloak:k8s.keycloak.org/Keycloak:foo/keycloak
7+
creationTimestamp: '2024-10-07T09:06:33Z'
8+
generation: 4
9+
name: keycloak
10+
namespace: foo
11+
resourceVersion: '343382'
12+
uid: 4e08e59c-1b6b-4b13-8a1a-bbce3f91bd68
13+
spec:
14+
additionalOptions:
15+
- name: proxy-headers
16+
value: xforwarded
17+
db:
18+
host: keycloak-db
19+
passwordSecret:
20+
key: password
21+
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
22+
usernameSecret:
23+
key: username
24+
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
25+
vendor: postgres
26+
hostname:
27+
admin: https://keycloak.apps-crc.testing
28+
hostname: keycloak.apps-crc.testing
29+
http:
30+
httpEnabled: false
31+
tlsSecret: keycloak-tls
32+
ingress:
33+
enabled: false
34+
instances: 2
35+
unsupported:
36+
podTemplate:
37+
spec:
38+
containers:
39+
- env:
40+
- name: KC_HTTPS_TRUST_STORE_FILE
41+
value: /truststore/openshiftca.jks
42+
- name: KC_HTTPS_TRUST_STORE_PASSWORD
43+
value: OpenshiftCA
44+
- name: KC_HTTPS_TRUST_STORE_TYPE
45+
value: JKS
46+
- name: KC_LOG_LEVEL
47+
value: INFO
48+
volumeMounts:
49+
- mountPath: /truststore
50+
name: truststore-volume
51+
volumes:
52+
- name: truststore-volume
53+
secret:
54+
secretName: keycloak-truststore
55+
status:
56+
conditions:
57+
- lastTransitionTime: '2024-10-09T09:55:28.695748046Z'
58+
message: ''
59+
observedGeneration: 4
60+
status: 'True'
61+
type: Ready
62+
- lastTransitionTime: '2024-10-08T11:11:08.814752530Z'
63+
message: >-
64+
warning: You need to specify these fields as the first-class citizen of
65+
the CR: proxy-headers
66+
observedGeneration: 4
67+
status: 'False'
68+
type: HasErrors
69+
- lastTransitionTime: '2024-10-09T09:47:33.600863636Z'
70+
message: ''
71+
observedGeneration: 4
72+
status: 'False'
73+
type: RollingUpdate
74+
instances: 2
75+
observedGeneration: 4
76+
selector: >-
77+
app=keycloak,app.kubernetes.io/managed-by=keycloak-operator,app.kubernetes.io/instance=keycloak
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,77 @@
1+
apiVersion: k8s.keycloak.org/v2alpha1
2+
kind: Keycloak
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/sync-wave: '2'
6+
argocd.argoproj.io/tracking-id: foobar-keycloak:k8s.keycloak.org/Keycloak:foo/keycloak
7+
creationTimestamp: '2024-10-07T09:06:33Z'
8+
generation: 4
9+
name: keycloak
10+
namespace: foo
11+
resourceVersion: '343382'
12+
uid: 4e08e59c-1b6b-4b13-8a1a-bbce3f91bd68
13+
spec:
14+
additionalOptions:
15+
- name: proxy-headers
16+
value: xforwarded
17+
db:
18+
host: keycloak-db
19+
passwordSecret:
20+
key: password
21+
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
22+
usernameSecret:
23+
key: username
24+
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
25+
vendor: postgres
26+
hostname:
27+
admin: https://keycloak.apps-crc.testing
28+
hostname: keycloak.apps-crc.testing
29+
http:
30+
httpEnabled: false
31+
tlsSecret: keycloak-tls
32+
ingress:
33+
enabled: false
34+
instances: 2
35+
unsupported:
36+
podTemplate:
37+
spec:
38+
containers:
39+
- env:
40+
- name: KC_HTTPS_TRUST_STORE_FILE
41+
value: /truststore/openshiftca.jks
42+
- name: KC_HTTPS_TRUST_STORE_PASSWORD
43+
value: OpenshiftCA
44+
- name: KC_HTTPS_TRUST_STORE_TYPE
45+
value: JKS
46+
- name: KC_LOG_LEVEL
47+
value: INFO
48+
volumeMounts:
49+
- mountPath: /truststore
50+
name: truststore-volume
51+
volumes:
52+
- name: truststore-volume
53+
secret:
54+
secretName: keycloak-truststore
55+
status:
56+
conditions:
57+
- lastTransitionTime: '2024-10-09T10:13:00.097073410Z'
58+
message: Waiting for more replicas
59+
observedGeneration: 5
60+
status: 'False'
61+
type: Ready
62+
- lastTransitionTime: '2024-10-08T11:11:08.814752530Z'
63+
message: >-
64+
warning: You need to specify these fields as the first-class citizen of
65+
the CR: proxy-headers
66+
observedGeneration: 5
67+
status: 'False'
68+
type: HasErrors
69+
- lastTransitionTime: '2024-10-09T10:12:59.087234931Z'
70+
message: Rolling out deployment update
71+
observedGeneration: 5
72+
status: 'True'
73+
type: RollingUpdate
74+
instances: 1
75+
observedGeneration: 5
76+
selector: >-
77+
app=keycloak,app.kubernetes.io/managed-by=keycloak-operator,app.kubernetes.io/instance=keycloak
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
apiVersion: k8s.keycloak.org/v2alpha1
2+
kind: Keycloak
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/sync-wave: '2'
6+
argocd.argoproj.io/tracking-id: foobar-keycloak:k8s.keycloak.org/Keycloak:foo/keycloak
7+
creationTimestamp: '2024-10-07T09:06:33Z'
8+
generation: 4
9+
name: keycloak
10+
namespace: foo
11+
resourceVersion: '343382'
12+
uid: 4e08e59c-1b6b-4b13-8a1a-bbce3f91bd68
13+
spec:
14+
db:
15+
host: keycloak-db
16+
passwordSecret:
17+
key: password
18+
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
19+
usernameSecret:
20+
key: username
21+
name: keycloak.keycloak-db.credentials.postgresql.acid.zalan.do
22+
vendor: postgres
23+
hostname:
24+
admin: https://keycloak.apps-crc.testing
25+
hostname: keycloak.apps-crc.testing
26+
http:
27+
httpEnabled: false
28+
tlsSecret: keycloak-tls
29+
ingress:
30+
enabled: false
31+
instances: 2
32+
unsupported:
33+
podTemplate:
34+
spec:
35+
containers:
36+
- env:
37+
- name: KC_HTTPS_TRUST_STORE_FILE
38+
value: /truststore/openshiftca.jks
39+
- name: KC_HTTPS_TRUST_STORE_PASSWORD
40+
value: OpenshiftCA
41+
- name: KC_HTTPS_TRUST_STORE_TYPE
42+
value: JKS
43+
- name: KC_LOG_LEVEL
44+
value: INFO
45+
volumeMounts:
46+
- mountPath: /truststore
47+
name: truststore-volume
48+
volumes:
49+
- name: truststore-volume
50+
secret:
51+
secretName: keycloak-truststore
52+
status:
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
-- Health assessment for solr.apache.org/SolrCloud resources.
-- There is no value in the manifest that can lead to conclude that
-- this resource is in a "Degraded" state. Update this, if in the future
-- this possibility arises.

if obj.status == nil or obj.status.solrNodes == nil then
  return {
    status = "Progressing",
    message = "Waiting for solr to exist",
  }
end

-- Healthy only once every reported node is ready.
local allReady = true
for _, node in ipairs(obj.status.solrNodes) do
  if not node.ready then
    allReady = false
    break
  end
end

if not allReady then
  return {
    status = "Progressing",
    message = "Not all replicas are ready",
  }
end

return {
  status = "Healthy",
  message = "Solr is ready",
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
tests:
2+
- healthStatus:
3+
status: Progressing
4+
message: "Waiting for solr to exist"
5+
inputPath: testdata/provisioning.yaml
6+
- healthStatus:
7+
status: Progressing
8+
message: "Not all replicas are ready"
9+
inputPath: testdata/progressing.yaml
10+
- healthStatus:
11+
status: Healthy
12+
message: "Solr is ready"
13+
inputPath: testdata/healthy.yaml
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,118 @@
1+
apiVersion: solr.apache.org/v1beta1
2+
kind: SolrCloud
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/tracking-id: foobar-solr:solr.apache.org/SolrCloud:foo/solr
6+
creationTimestamp: '2024-10-07T09:30:03Z'
7+
finalizers:
8+
- storage.finalizers.solr.apache.org
9+
generation: 2
10+
labels:
11+
app.kubernetes.io/instance: foobar-solr
12+
app.kubernetes.io/name: solr
13+
app.kubernetes.io/version: 8.11.1
14+
helm.sh/chart: solr-0.8.1
15+
name: solr
16+
namespace: foo
17+
resourceVersion: '339148'
18+
uid: 42f073e1-bf7c-4d2f-923a-66886898e6a2
19+
spec:
20+
availability:
21+
podDisruptionBudget:
22+
enabled: true
23+
method: ClusterWide
24+
busyBoxImage:
25+
repository: library/busybox
26+
tag: 1.28.0-glibc
27+
customSolrKubeOptions:
28+
podOptions:
29+
defaultInitContainerResources: {}
30+
nodeSelector:
31+
node-role.kubernetes.io/worker: ''
32+
podSecurityContext:
33+
runAsGroup: 8983
34+
runAsNonRoot: true
35+
runAsUser: 8983
36+
seccompProfile:
37+
type: RuntimeDefault
38+
resources: {}
39+
serviceAccountName: solr-sa
40+
startupProbe:
41+
periodSeconds: 10
42+
timeoutSeconds: 30
43+
dataStorage:
44+
persistent:
45+
pvcTemplate:
46+
metadata:
47+
annotations:
48+
foobar: solr-data
49+
labels:
50+
foobar: solr-data
51+
name: solr-data
52+
spec:
53+
resources:
54+
requests:
55+
storage: 20Gi
56+
reclaimPolicy: Delete
57+
replicas: 1
58+
scaling:
59+
populatePodsOnScaleUp: true
60+
vacatePodsOnScaleDown: true
61+
solrAddressability:
62+
commonServicePort: 80
63+
podPort: 8983
64+
solrImage:
65+
repository: solr
66+
tag: '8.11'
67+
solrJavaMem: '-Xms1g -Xmx2g'
68+
solrLogLevel: DEBUG
69+
solrOpts: '-Dsolr.disable.shardsWhitelist=true'
70+
updateStrategy:
71+
managed: {}
72+
method: Managed
73+
zookeeperRef:
74+
provided:
75+
adminServerService: {}
76+
chroot: /
77+
clientService: {}
78+
config: {}
79+
headlessService: {}
80+
image:
81+
pullPolicy: IfNotPresent
82+
repository: pravega/zookeeper
83+
maxUnavailableReplicas: 1
84+
persistence:
85+
reclaimPolicy: Delete
86+
spec:
87+
accessModes:
88+
- ReadWriteOnce
89+
resources:
90+
requests:
91+
storage: 5Gi
92+
replicas: 1
93+
zookeeperPodPolicy:
94+
resources: {}
95+
securityContext:
96+
runAsNonRoot: true
97+
seccompProfile:
98+
type: RuntimeDefault
99+
status:
100+
internalCommonAddress: http://solr-solrcloud-common.foo
101+
podSelector: solr-cloud=solr,technology=solr-cloud
102+
readyReplicas: 1
103+
replicas: 1
104+
solrNodes:
105+
- internalAddress: http://solr-solrcloud-0.solr-solrcloud-headless.foo:8983
106+
name: solr-solrcloud-0
107+
nodeName: crc-j5m2n-master-0
108+
ready: true
109+
scheduledForDeletion: false
110+
specUpToDate: true
111+
version: '8.11'
112+
upToDateNodes: 1
113+
version: '8.11'
114+
zookeeperConnectionInfo:
115+
chroot: /
116+
externalConnectionString: N/A
117+
internalConnectionString: >-
118+
solr-solrcloud-zookeeper-0.solr-solrcloud-zookeeper-headless.foo.svc.cluster.local:2181
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,125 @@
1+
apiVersion: solr.apache.org/v1beta1
2+
kind: SolrCloud
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/tracking-id: foobar-solr:solr.apache.org/SolrCloud:foo/solr
6+
creationTimestamp: '2024-10-07T09:30:03Z'
7+
finalizers:
8+
- storage.finalizers.solr.apache.org
9+
generation: 2
10+
labels:
11+
app.kubernetes.io/instance: foobar-solr
12+
app.kubernetes.io/name: solr
13+
app.kubernetes.io/version: 8.11.1
14+
helm.sh/chart: solr-0.8.1
15+
name: solr
16+
namespace: foo
17+
resourceVersion: '339148'
18+
uid: 42f073e1-bf7c-4d2f-923a-66886898e6a2
19+
spec:
20+
availability:
21+
podDisruptionBudget:
22+
enabled: true
23+
method: ClusterWide
24+
busyBoxImage:
25+
repository: library/busybox
26+
tag: 1.28.0-glibc
27+
customSolrKubeOptions:
28+
podOptions:
29+
defaultInitContainerResources: {}
30+
nodeSelector:
31+
node-role.kubernetes.io/worker: ''
32+
podSecurityContext:
33+
runAsGroup: 8983
34+
runAsNonRoot: true
35+
runAsUser: 8983
36+
seccompProfile:
37+
type: RuntimeDefault
38+
resources: {}
39+
serviceAccountName: solr-sa
40+
startupProbe:
41+
periodSeconds: 10
42+
timeoutSeconds: 30
43+
dataStorage:
44+
persistent:
45+
pvcTemplate:
46+
metadata:
47+
annotations:
48+
foobar: solr-data
49+
labels:
50+
foobar: solr-data
51+
name: solr-data
52+
spec:
53+
resources:
54+
requests:
55+
storage: 20Gi
56+
reclaimPolicy: Delete
57+
replicas: 2
58+
scaling:
59+
populatePodsOnScaleUp: true
60+
vacatePodsOnScaleDown: true
61+
solrAddressability:
62+
commonServicePort: 80
63+
podPort: 8983
64+
solrImage:
65+
repository: solr
66+
tag: '8.11'
67+
solrJavaMem: '-Xms1g -Xmx2g'
68+
solrLogLevel: DEBUG
69+
solrOpts: '-Dsolr.disable.shardsWhitelist=true'
70+
updateStrategy:
71+
managed: {}
72+
method: Managed
73+
zookeeperRef:
74+
provided:
75+
adminServerService: {}
76+
chroot: /
77+
clientService: {}
78+
config: {}
79+
headlessService: {}
80+
image:
81+
pullPolicy: IfNotPresent
82+
repository: pravega/zookeeper
83+
maxUnavailableReplicas: 1
84+
persistence:
85+
reclaimPolicy: Delete
86+
spec:
87+
accessModes:
88+
- ReadWriteOnce
89+
resources:
90+
requests:
91+
storage: 5Gi
92+
replicas: 1
93+
zookeeperPodPolicy:
94+
resources: {}
95+
securityContext:
96+
runAsNonRoot: true
97+
seccompProfile:
98+
type: RuntimeDefault
99+
status:
100+
internalCommonAddress: http://solr-solrcloud-common.foo
101+
podSelector: solr-cloud=solr,technology=solr-cloud
102+
readyReplicas: 1
103+
replicas: 2
104+
solrNodes:
105+
- internalAddress: http://solr-solrcloud-0.solr-solrcloud-headless.foo:8983
106+
name: solr-solrcloud-0
107+
nodeName: crc-j5m2n-master-0
108+
ready: true
109+
scheduledForDeletion: false
110+
specUpToDate: true
111+
version: '8.11'
112+
- internalAddress: http://solr-solrcloud-1.solr-solrcloud-headless.foo:8983
113+
name: solr-solrcloud-1
114+
nodeName: ''
115+
ready: false
116+
scheduledForDeletion: false
117+
specUpToDate: true
118+
version: ''
119+
upToDateNodes: 2
120+
version: '8.11'
121+
zookeeperConnectionInfo:
122+
chroot: /
123+
externalConnectionString: N/A
124+
internalConnectionString: >-
125+
solr-solrcloud-zookeeper-0.solr-solrcloud-zookeeper-headless.foo.svc.cluster.local:2181
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,95 @@
1+
apiVersion: solr.apache.org/v1beta1
2+
kind: SolrCloud
3+
metadata:
4+
annotations:
5+
argocd.argoproj.io/tracking-id: foobar-solr:solr.apache.org/SolrCloud:foo/solr
6+
finalizers:
7+
- storage.finalizers.solr.apache.org
8+
labels:
9+
app.kubernetes.io/instance: foobar-solr
10+
app.kubernetes.io/name: solr
11+
app.kubernetes.io/version: 8.11.1
12+
helm.sh/chart: solr-0.8.1
13+
name: solr
14+
namespace: foo
15+
spec:
16+
availability:
17+
podDisruptionBudget:
18+
enabled: true
19+
method: ClusterWide
20+
busyBoxImage:
21+
repository: library/busybox
22+
tag: 1.28.0-glibc
23+
customSolrKubeOptions:
24+
podOptions:
25+
defaultInitContainerResources: {}
26+
nodeSelector:
27+
node-role.kubernetes.io/worker: ''
28+
podSecurityContext:
29+
runAsGroup: 8983
30+
runAsNonRoot: true
31+
runAsUser: 8983
32+
seccompProfile:
33+
type: RuntimeDefault
34+
resources: {}
35+
serviceAccountName: solr-sa
36+
startupProbe:
37+
periodSeconds: 10
38+
timeoutSeconds: 30
39+
dataStorage:
40+
persistent:
41+
pvcTemplate:
42+
metadata:
43+
annotations:
44+
foobar: solr-data
45+
labels:
46+
foobar: solr-data
47+
name: solr-data
48+
spec:
49+
resources:
50+
requests:
51+
storage: 20Gi
52+
reclaimPolicy: Delete
53+
replicas: 1
54+
scaling:
55+
populatePodsOnScaleUp: true
56+
vacatePodsOnScaleDown: true
57+
solrAddressability:
58+
commonServicePort: 80
59+
podPort: 8983
60+
solrImage:
61+
repository: solr
62+
tag: '8.11'
63+
solrJavaMem: '-Xms1g -Xmx2g'
64+
solrLogLevel: DEBUG
65+
solrOpts: '-Dsolr.disable.shardsWhitelist=true'
66+
updateStrategy:
67+
managed: {}
68+
method: Managed
69+
zookeeperRef:
70+
provided:
71+
adminServerService: {}
72+
chroot: /
73+
clientService: {}
74+
config: {}
75+
headlessService: {}
76+
image:
77+
pullPolicy: IfNotPresent
78+
repository: pravega/zookeeper
79+
maxUnavailableReplicas: 1
80+
persistence:
81+
reclaimPolicy: Delete
82+
spec:
83+
accessModes:
84+
- ReadWriteOnce
85+
resources:
86+
requests:
87+
storage: 5Gi
88+
replicas: 1
89+
zookeeperPodPolicy:
90+
resources: {}
91+
securityContext:
92+
runAsNonRoot: true
93+
seccompProfile:
94+
type: RuntimeDefault
95+
status:

0 commit comments

Comments
 (0)
Please sign in to comment.