diff --git a/pkg/db/seeds/appuio_cloud_loadbalancer.promql b/pkg/db/seeds/appuio_cloud_loadbalancer.promql index 426162f..69faaf1 100644 --- a/pkg/db/seeds/appuio_cloud_loadbalancer.promql +++ b/pkg/db/seeds/appuio_cloud_loadbalancer.promql @@ -12,11 +12,17 @@ sum_over_time( # Join the namespace label to get the tenant on(cluster_id, namespace) group_left(tenant_id) - label_replace( - kube_namespace_labels{label_appuio_io_organization=~".+"}, - "tenant_id", - "$1", - "label_appuio_io_organization", "(.*)" + ( + bottomk(1, + min by (cluster_id, namespace, tenant_id) ( + label_replace( + kube_namespace_labels{label_appuio_io_organization=~".+"}, + "tenant_id", + "$1", + "label_appuio_io_organization", "(.*)" + ) + ) + ) by(cluster_id, namespace) ), "product", "appuio_cloud_loadbalancer", diff --git a/pkg/db/seeds/appuio_cloud_memory.promql b/pkg/db/seeds/appuio_cloud_memory.promql index a3ee705..de4f598 100644 --- a/pkg/db/seeds/appuio_cloud_memory.promql +++ b/pkg/db/seeds/appuio_cloud_memory.promql @@ -60,11 +60,17 @@ sum_over_time( # Join namespace label `label_appuio_io_organization` as `tenant_id`. 
on(cluster_id, namespace) group_left(tenant_id) - label_replace( - kube_namespace_labels{label_appuio_io_organization=~".+"}, - "tenant_id", - "$1", - "label_appuio_io_organization", "(.*)" + ( + bottomk(1, + min by (cluster_id, namespace, tenant_id) ( + label_replace( + kube_namespace_labels{label_appuio_io_organization=~".+"}, + "tenant_id", + "$1", + "label_appuio_io_organization", "(.*)" + ) + ) + ) by(cluster_id, namespace) ), # At least return 128MiB 128 * 1024 * 1024 diff --git a/pkg/db/seeds/appuio_cloud_persistent_storage.promql b/pkg/db/seeds/appuio_cloud_persistent_storage.promql index f3d6c09..b9006b8 100644 --- a/pkg/db/seeds/appuio_cloud_persistent_storage.promql +++ b/pkg/db/seeds/appuio_cloud_persistent_storage.promql @@ -39,11 +39,17 @@ sum_over_time( # Join the namespace label to get the tenant on(cluster_id,namespace) group_left(tenant_id) - label_replace( - kube_namespace_labels{label_appuio_io_organization=~".+"}, - "tenant_id", - "$1", - "label_appuio_io_organization", "(.*)" + ( + bottomk(1, + min by (cluster_id, namespace, tenant_id) ( + label_replace( + kube_namespace_labels{label_appuio_io_organization=~".+"}, + "tenant_id", + "$1", + "label_appuio_io_organization", "(.*)" + ) + ) + ) by(cluster_id, namespace) ) ), 1024 * 1024 * 1024 diff --git a/pkg/db/seeds/promtest/appuio_cloud_loadbalancer.jsonnet b/pkg/db/seeds/promtest/appuio_cloud_loadbalancer.jsonnet new file mode 100644 index 0000000..8ae1a29 --- /dev/null +++ b/pkg/db/seeds/promtest/appuio_cloud_loadbalancer.jsonnet @@ -0,0 +1,88 @@ +local c = import 'common.libsonnet'; + +local query = importstr '../appuio_cloud_loadbalancer.promql'; + +local commonLabels = { + cluster_id: 'c-appuio-cloudscale-lpg-2', + tenant_id: 'c-appuio-cloudscale-lpg-2', +}; + +// One pvc, minimal (=1 byte) request +// 10 samples +local baseSeries = { + testprojectNamespaceOrgLabel: c.series('kube_namespace_labels', commonLabels { + namespace: 'testproject', + label_appuio_io_organization: 
'cherry-pickers-inc', + }, '1x120'), + + pvCapacity: c.series('kube_service_spec_type', commonLabels { + type: 'LoadBalancer', + namespace: 'testproject', + }, '1x120'), +}; + +local baseCalculatedLabels = { + category: 'c-appuio-cloudscale-lpg-2:testproject', + cluster_id: 'c-appuio-cloudscale-lpg-2', + namespace: 'testproject', + product: 'appuio_cloud_loadbalancer:c-appuio-cloudscale-lpg-2:cherry-pickers-inc:testproject', + tenant_id: 'cherry-pickers-inc', +}; + +{ + tests: [ + c.test('minimal PVC', + baseSeries, + query, + { + labels: c.formatLabels(baseCalculatedLabels), + value: 60, + }), + + c.test('unrelated kube_namespace_labels changes do not throw errors - there is an overlap since series go stale only after a few missed scrapes', + baseSeries { + testprojectNamespaceOrgLabelUpdated: self.testprojectNamespaceOrgLabel { + _labels+:: { + custom_appuio_io_myid: '672004be-a86b-44e0-b446-1255a1f8b340', + }, + values: '_x30 1x30 _x60', + }, + }, + query, + { + labels: c.formatLabels(baseCalculatedLabels), + value: 60, + }), + + c.test('organization changes do not throw many-to-many errors - there is an overlap since series go stale only after a few missed scrapes', + baseSeries { + testprojectNamespaceOrgLabel+: { + // We cheat here and use an impossible value. + // Since we use min() and bottomk() in the query this prioritizes this series less than the other. + // It's ugly but it prevents flaky tests since otherwise one of the series gets picked randomly. + values: '2x120', + }, + testprojectNamespaceOrgLabelUpdated: self.testprojectNamespaceOrgLabel { + _labels+:: { + label_appuio_io_organization: 'carrot-pickers-inc', + }, + values: '_x60 1x60', + }, + }, + query, + [ + { + labels: c.formatLabels(baseCalculatedLabels), + // 1 service * 29 * 2 because of the cheat above. 
+ value: 29 * 2, + }, + { + labels: c.formatLabels(baseCalculatedLabels { + tenant_id: 'carrot-pickers-inc', + product: 'appuio_cloud_loadbalancer:c-appuio-cloudscale-lpg-2:carrot-pickers-inc:testproject', + }), + value: 31, + }, + ]), + ], +} diff --git a/pkg/db/seeds/promtest/appuio_cloud_memory.jsonnet b/pkg/db/seeds/promtest/appuio_cloud_memory.jsonnet index 8003590..19178ce 100644 --- a/pkg/db/seeds/promtest/appuio_cloud_memory.jsonnet +++ b/pkg/db/seeds/promtest/appuio_cloud_memory.jsonnet @@ -16,11 +16,11 @@ local baseSeries = { label_appuio_io_node_class: 'flex', label_kubernetes_io_hostname: 'flex-x666', node: 'flex-x666', - }, '1x10'), + }, '1x120'), testprojectNamespaceOrgLabel: c.series('kube_namespace_labels', commonLabels { namespace: 'testproject', label_appuio_io_organization: 'cherry-pickers-inc', - }, '1x10'), + }, '1x120'), local podLbls = commonLabels { namespace: 'testproject', @@ -30,21 +30,21 @@ local baseSeries = { // Phases runningPodPhase: c.series('kube_pod_status_phase', podLbls { phase: 'Running', - }, '1x10'), + }, '1x120'), // Requests runningPodMemoryRequests: c.series('kube_pod_container_resource_requests', podLbls { resource: 'memory', node: 'flex-x666', - }, '1x10'), + }, '1x120'), runningPodCPURequests: c.series('kube_pod_container_resource_requests', podLbls { resource: 'cpu', node: 'flex-x666', - }, '0x10'), + }, '0x120'), // Real usage runningPodMemoryUsage: c.series('container_memory_working_set_bytes', podLbls { image: 'busybox', node: 'flex-x666', - }, '1x10'), + }, '1x120'), }; local baseCalculatedLabels = { @@ -60,41 +60,58 @@ local baseCalculatedLabels = { local minMemoryRequestMib = 128; local cloudscaleFairUseRatio = 4294967296; +local subQueryTests = [ + c.test('sub CPU requests query sanity check', + baseSeries, + subCPUQuery, + { + labels: c.formatLabels(baseCalculatedLabels), + value: 0, + }), + c.test('sub memory requests query sanity check', + baseSeries, + subMemoryQuery, + { + labels: 
c.formatLabels(baseCalculatedLabels), + value: (minMemoryRequestMib - (1 / 1024 / 1024)) * 60, + }), +]; + { - tests: [ + tests: subQueryTests + [ c.test('minimal pod', baseSeries, query, { labels: c.formatLabels(baseCalculatedLabels), - value: minMemoryRequestMib * 10, + value: minMemoryRequestMib * 60, }), c.test('pod with higher memory usage', baseSeries { runningPodMemoryUsage+: { - values: '%sx10' % (500 * 1024 * 1024), + values: '%sx120' % (500 * 1024 * 1024), }, }, query, { labels: c.formatLabels(baseCalculatedLabels), - value: 500 * 10, + value: 500 * 60, }), c.test('pod with higher memory requests', baseSeries { runningPodMemoryRequests+: { - values: '%sx10' % (500 * 1024 * 1024), + values: '%sx120' % (500 * 1024 * 1024), }, }, query, { labels: c.formatLabels(baseCalculatedLabels), - value: 500 * 10, + value: 500 * 60, }), c.test('pod with CPU requests violating fair use', baseSeries { runningPodCPURequests+: { - values: '1x10', + values: '1x120', }, }, query, @@ -102,7 +119,7 @@ local cloudscaleFairUseRatio = 4294967296; labels: c.formatLabels(baseCalculatedLabels), // See per cluster fair use ratio in query // value: 2.048E+04, - value: (cloudscaleFairUseRatio / 1024 / 1024) * 10, + value: (cloudscaleFairUseRatio / 1024 / 1024) * 60, }), c.test('non-running pods are not counted', baseSeries { @@ -113,99 +130,111 @@ local cloudscaleFairUseRatio = 4294967296; }, succeededPodPhase: c.series('kube_pod_status_phase', lbls { phase: 'Succeeded', - }, '1x10'), + }, '1x120'), succeededPodMemoryRequests: c.series('kube_pod_container_resource_requests', lbls { resource: 'memory', node: 'flex-x666', - }, '1x10'), + }, '1x120'), succeededPodCPURequests: c.series('kube_pod_container_resource_requests', lbls { node: 'flex-x666', resource: 'cpu', - }, '1x10'), + }, '1x120'), }, query, { labels: c.formatLabels(baseCalculatedLabels), - value: minMemoryRequestMib * 10, + value: minMemoryRequestMib * 60, }), - c.test('unrelated kube node label changes do not throw errors - 
there is an overlap since series go stale only after a few missed scrapes', + c.test('unrelated kube_node_labels changes do not throw errors - there is an overlap since series go stale only after a few missed scrapes', baseSeries { - flexNodeLabel+: { - _labels+:: { - label_csi_driver_id: 'A09B8DDE-5435-4D74-923C-4866513E8F02', - }, - values: '1x10 _x10 stale', - }, flexNodeLabelUpdated: self.flexNodeLabel { _labels+:: { label_csi_driver_id: '18539CC3-0B6C-4E72-82BD-90A9BEF7D807', }, - values: '_x5 1x15', + values: '_x30 1x30 _x60', }, }, query, { labels: c.formatLabels(baseCalculatedLabels), - value: minMemoryRequestMib * 10, + value: minMemoryRequestMib * 60, }), - c.test('unrelated kube node label adds do not throw errors - there is an overlap since series go stale only after a few missed scrapes', + c.test('node class adds do not throw errors - there is an overlap since series go stale only after a few missed scrapes', baseSeries { flexNodeLabel+: { - values: '1x10 _x10 stale', + _labels+:: { + label_appuio_io_node_class:: null, + }, + values: '1x60', }, - flexNodeLabelUpdated: self.flexNodeLabel { + flexNodeLabelUpdated: super.flexNodeLabel { + values: '_x30 1x90', + }, + }, + query, + [ + // I'm not sure why this is 61min * minMemoryRequestMib. 
Other queries always result in 60min + // TODO investigate where the extra min comes from + { + labels: c.formatLabels(baseCalculatedLabels), + value: minMemoryRequestMib * 46, + }, + { + labels: c.formatLabels(baseCalculatedLabels { + label_appuio_io_node_class:: null, + product: 'appuio_cloud_memory:c-appuio-cloudscale-lpg-2:cherry-pickers-inc:testproject:', + }), + value: minMemoryRequestMib * 15, + }, + ]), + + c.test('unrelated kube_namespace_labels changes do not throw errors - there is an overlap since series go stale only after a few missed scrapes', + baseSeries { + testprojectNamespaceOrgLabelUpdated: self.testprojectNamespaceOrgLabel { _labels+:: { - label_csi_driver_id: '18539CC3-0B6C-4E72-82BD-90A9BEF7D807', + custom_appuio_io_myid: '672004be-a86b-44e0-b446-1255a1f8b340', }, - values: '_x5 1x15', + values: '_x30 1x30 _x60', }, }, query, { labels: c.formatLabels(baseCalculatedLabels), - value: minMemoryRequestMib * 10, + value: minMemoryRequestMib * 60, }), - c.test('node class adds do not throw errors - there is an overlap since series go stale only after a few missed scrapes', + + c.test('organization changes do not throw many-to-many errors - there is an overlap since series go stale only after a few missed scrapes', baseSeries { - flexNodeLabel+: { + testprojectNamespaceOrgLabel+: { + // We cheat here and use an impossible value. + // Since we use min() and bottomk() in the query this priotizes this series less than the other. + // It's ugly but it prevents flaky tests since otherwise one of the series gets picked randomly. + // Does not influence the result. The result is floored to a minimum of 128MiB. 
+ values: '2x120', + }, + testprojectNamespaceOrgLabelUpdated: self.testprojectNamespaceOrgLabel { _labels+:: { - label_appuio_io_node_class:: null, + label_appuio_io_organization: 'carrot-pickers-inc', }, - values: '1x10 _x10 stale', - }, - flexNodeLabelUpdated: super.flexNodeLabel { - values: '_x5 1x15', + values: '_x60 1x60', }, }, query, [ - // I'm not sure why this is 11 * minMemoryRequestMib, might have something to do with the intervals or intra minute switching + // I'm not sure why this is 61min * minMemoryRequestMib. Other queries always result in 60min + // TODO investigate where the extra min comes from { labels: c.formatLabels(baseCalculatedLabels), - value: minMemoryRequestMib * 8, + value: minMemoryRequestMib * 30, }, { labels: c.formatLabels(baseCalculatedLabels { - label_appuio_io_node_class:: null, - product: 'appuio_cloud_memory:c-appuio-cloudscale-lpg-2:cherry-pickers-inc:testproject:', + tenant_id: 'carrot-pickers-inc', + product: 'appuio_cloud_memory:c-appuio-cloudscale-lpg-2:carrot-pickers-inc:testproject:flex', }), - value: minMemoryRequestMib * 3, + value: minMemoryRequestMib * 31, }, ]), - c.test('sub CPU requests query sanity check', - baseSeries, - subCPUQuery, - { - labels: c.formatLabels(baseCalculatedLabels), - value: 0, - }), - c.test('sub memory requests query sanity check', - baseSeries, - subMemoryQuery, - { - labels: c.formatLabels(baseCalculatedLabels), - value: (minMemoryRequestMib - (1 / 1024 / 1024)) * 10, - }), ], } diff --git a/pkg/db/seeds/promtest/appuio_cloud_persistent_storage.jsonnet b/pkg/db/seeds/promtest/appuio_cloud_persistent_storage.jsonnet index b1a7a79..7728915 100644 --- a/pkg/db/seeds/promtest/appuio_cloud_persistent_storage.jsonnet +++ b/pkg/db/seeds/promtest/appuio_cloud_persistent_storage.jsonnet @@ -13,21 +13,21 @@ local baseSeries = { testprojectNamespaceOrgLabel: c.series('kube_namespace_labels', commonLabels { namespace: 'testproject', label_appuio_io_organization: 'cherry-pickers-inc', - }, '1x10'), + 
}, '1x120'), local pvcID = 'pvc-da01b12d-2e31-44da-8312-f91169256221', pvCapacity: c.series('kube_persistentvolume_capacity_bytes', commonLabels { persistentvolume: pvcID, - }, '1x10'), + }, '1x120'), pvInfo: c.series('kube_persistentvolume_info', commonLabels { persistentvolume: pvcID, storageclass: 'ssd', - }, '1x10'), + }, '1x120'), pvcRef: c.series('kube_persistentvolume_claim_ref', commonLabels { claim_namespace: 'testproject', name: 'important-database', persistentvolume: pvcID, - }, '1x10'), + }, '1x120'), }; local baseCalculatedLabels = { @@ -46,33 +46,65 @@ query, { labels: c.formatLabels(baseCalculatedLabels), - value: 10, + value: 60, }), c.test('higher than 1GiB request', baseSeries { pvCapacity+: { - values: '%sx10' % (5 * 1024 * 1024 * 1024), + values: '%sx120' % (5 * 1024 * 1024 * 1024), }, }, query, { labels: c.formatLabels(baseCalculatedLabels), - value: 5 * 10, + value: 5 * 60, }), - c.test('unrelated kube_persistentvolume_info changes do not throw errors - there is an overlap since series go stale only after a few missed scrapes', + c.test('unrelated kube_namespace_labels changes do not throw errors - there is an overlap since series go stale only after a few missed scrapes', baseSeries { - pvInfoUpdated: self.pvInfo { + testprojectNamespaceOrgLabelUpdated: self.testprojectNamespaceOrgLabel { _labels+:: { - csi_volume_handle: '672004be-a86b-44e0-b446-1255a1f8b340', + custom_appuio_io_myid: '672004be-a86b-44e0-b446-1255a1f8b340', }, - values: '_x5 1x5', + values: '_x30 1x30 _x60', }, }, query, { labels: c.formatLabels(baseCalculatedLabels), - value: 10, + value: 60, }), + + c.test('organization changes do not throw many-to-many errors - there is an overlap since series go stale only after a few missed scrapes', + baseSeries { + testprojectNamespaceOrgLabel+: { + // We cheat here and use an impossible value. + // Since we use min() and bottomk() in the query this prioritizes this series less than the other. 
+ // It's ugly but it prevents flaky tests since otherwise one of the series gets picked randomly. + // Does not influence the result. The result is floored to a minimum of 1GiB. + values: '2x120', + }, + testprojectNamespaceOrgLabelUpdated: self.testprojectNamespaceOrgLabel { + _labels+:: { + label_appuio_io_organization: 'carrot-pickers-inc', + }, + values: '_x60 1x60', + }, + }, + query, + [ + { + labels: c.formatLabels(baseCalculatedLabels), + value: 29, + }, + { + labels: c.formatLabels(baseCalculatedLabels { + tenant_id: 'carrot-pickers-inc', + product: 'appuio_cloud_persistent_storage:c-appuio-cloudscale-lpg-2:carrot-pickers-inc:testproject:ssd', + }), + value: 31, + }, + ]), + ], } diff --git a/pkg/db/seeds/promtest/common.libsonnet b/pkg/db/seeds/promtest/common.libsonnet index b3dd5e7..a6a2664 100644 --- a/pkg/db/seeds/promtest/common.libsonnet +++ b/pkg/db/seeds/promtest/common.libsonnet @@ -13,14 +13,14 @@ local series = function(name, labels, values) { // returns a test object with the given series and samples. Sample interval is 30s // the evaluation time is set one hour in the future since all our queries operate on a 1h window -local test = function(name, series, query, samples) { +local test = function(name, series, query, samples, interval='30s', eval_time='1h') { name: name, - interval: '30s', + interval: interval, input_series: if std.isArray(series) then series else std.objectValues(series), promql_expr_test: [ { expr: query, - eval_time: '1h', + eval_time: eval_time, exp_samples: if std.isArray(samples) then samples else [samples], }, ],