From 92028ef7d8f261d080fec075b0df7f13ebba075a Mon Sep 17 00:00:00 2001
From: Ryan Phillips
Date: Wed, 5 Jun 2019 09:32:46 -0500
Subject: [PATCH] BZ1713098: backport prometheus client_golang race fix to 4.1

This is fixed in 4.2.

ref: https://github.com/prometheus/client_golang/pull/513
ref: https://bugzilla.redhat.com/show_bug.cgi?id=1713098
---
 glide.lock                                        |   3 +-
 glide.yaml                                        |   3 +
 .../prometheus/client_golang/OWNERS               |   6 +
 .../client_golang/prometheus/registry.go          |  12 +-
 .../client_golang/prometheus/registry_test.go     | 105 ++++++++++++++++++
 5 files changed, 125 insertions(+), 4 deletions(-)
 create mode 100644 vendor/github.com/prometheus/client_golang/OWNERS

diff --git a/glide.lock b/glide.lock
index b60346549085..1884a2bb0e6a 100644
--- a/glide.lock
+++ b/glide.lock
@@ -1132,7 +1132,8 @@ imports:
   - fflib/v1
   - fflib/v1/internal
 - name: github.com/prometheus/client_golang
-  version: e7e903064f5e9eb5da98208bae10b475d4db0f8c
+  version: origin-4.1-e7e903064f5e9eb5da98208bae10b475d4db0f8c
+  repo: https://github.com/openshift/prometheus-client_golang
   subpackages:
   - prometheus
   - prometheus/promhttp
diff --git a/glide.yaml b/glide.yaml
index 978385ab54a7..3a3292781700 100644
--- a/glide.yaml
+++ b/glide.yaml
@@ -97,6 +97,9 @@ import:
 - package: github.com/containers/image
   repo: https://github.com/openshift/containers-image.git
   version: openshift-3.8
+- package: github.com/prometheus/client_golang
+  repo: https://github.com/openshift/prometheus-client_golang
+  version: origin-4.1-e7e903064f5e9eb5da98208bae10b475d4db0f8c
 # pod - sjenning
 # origin-3.11-runc-871ba2e
 - package: github.com/opencontainers/runc
diff --git a/vendor/github.com/prometheus/client_golang/OWNERS b/vendor/github.com/prometheus/client_golang/OWNERS
new file mode 100644
index 000000000000..d55931d3f0de
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/OWNERS
@@ -0,0 +1,6 @@
+reviewers:
+  - rphillips
+  - sjenning
+approvers:
+  - rphillips
+  - sjenning
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 8c6b5bd8ee11..e225aa47bd4e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -686,7 +686,13 @@ func checkMetricConsistency(
 	dh := hashNew()
 	// Make sure label pairs are sorted. We depend on it for the consistency
 	// check.
-	sort.Sort(LabelPairSorter(dtoMetric.Label))
+	if !sort.IsSorted(LabelPairSorter(dtoMetric.Label)) {
+		// We cannot sort dtoMetric.Label in place as it is immutable by contract.
+		copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
+		copy(copiedLabels, dtoMetric.Label)
+		sort.Sort(LabelPairSorter(copiedLabels))
+		dtoMetric.Label = copiedLabels
+	}
 	for _, lp := range dtoMetric.Label {
 		h = hashAdd(h, lp.GetValue())
 		h = hashAddByte(h, separatorByte)
@@ -727,8 +733,8 @@ func checkDescConsistency(
 	}
 
 	// Is the desc consistent with the content of the metric?
-	lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
-	lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+	lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
+	copy(lpsFromDesc, desc.constLabelPairs)
 	for _, l := range desc.variableLabels {
 		lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
 			Name: proto.String(l),
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go b/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go
index d016a15603fc..7e0a903c516b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go
@@ -21,9 +21,13 @@ package prometheus_test
 
 import (
 	"bytes"
+	"fmt"
+	"math/rand"
 	"net/http"
 	"net/http/httptest"
+	"sync"
 	"testing"
+	"time"
 
 	dto "github.com/prometheus/client_model/go"
 
@@ -544,3 +548,104 @@ func TestRegisterWithOrGet(t *testing.T) {
 		t.Error("unexpected error:", err)
 	}
 }
+
+// TestHistogramVecRegisterGatherConcurrency is an end-to-end test that
+// concurrently calls Observe on random elements of a HistogramVec while the
+// same HistogramVec is registered concurrently and the Gather method of the
+// registry is called concurrently.
+func TestHistogramVecRegisterGatherConcurrency(t *testing.T) {
+	labelNames := make([]string, 16) // Need at least 13 to expose #512.
+	for i := range labelNames {
+		labelNames[i] = fmt.Sprint("label_", i)
+	}
+
+	var (
+		reg = prometheus.NewPedanticRegistry()
+		hv  = prometheus.NewHistogramVec(
+			prometheus.HistogramOpts{
+				Name:        "test_histogram",
+				Help:        "This helps testing.",
+				ConstLabels: prometheus.Labels{"foo": "bar"},
+			},
+			labelNames,
+		)
+		labelValues = []string{"a", "b", "c", "alpha", "beta", "gamma", "aleph", "beth", "gimel"}
+		quit        = make(chan struct{})
+		wg          sync.WaitGroup
+	)
+
+	observe := func() {
+		defer wg.Done()
+		for {
+			select {
+			case <-quit:
+				return
+			default:
+				obs := rand.NormFloat64()*.1 + .2
+				values := make([]string, 0, len(labelNames))
+				for range labelNames {
+					values = append(values, labelValues[rand.Intn(len(labelValues))])
+				}
+				hv.WithLabelValues(values...).Observe(obs)
+			}
+		}
+	}
+
+	register := func() {
+		defer wg.Done()
+		for {
+			select {
+			case <-quit:
+				return
+			default:
+				if err := reg.Register(hv); err != nil {
+					if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
+						t.Error("Registering failed:", err)
+					}
+				}
+				time.Sleep(7 * time.Millisecond)
+			}
+		}
+	}
+
+	gather := func() {
+		defer wg.Done()
+		for {
+			select {
+			case <-quit:
+				return
+			default:
+				if g, err := reg.Gather(); err != nil {
+					t.Error("Gathering failed:", err)
+				} else {
+					if len(g) == 0 {
+						continue
+					}
+					if len(g) != 1 {
+						t.Error("Gathered unexpected number of metric families:", len(g))
+					}
+					if len(g[0].Metric[0].Label) != len(labelNames)+1 {
+						t.Error("Gathered unexpected number of label pairs:", len(g[0].Metric[0].Label))
+					}
+				}
+				time.Sleep(4 * time.Millisecond)
+			}
+		}
+	}
+
+	wg.Add(10)
+	go observe()
+	go observe()
+	go register()
+	go observe()
+	go gather()
+	go observe()
+	go register()
+	go observe()
+	go gather()
+	go observe()
+
+	time.Sleep(time.Second)
+	close(quit)
+	wg.Wait()
+}
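
Not part of the patch itself, but a quick way to see the race this backport addresses: the sketch below is a hypothetical, stripped-down variant of the vendored TestHistogramVecRegisterGatherConcurrency added above (the package, file, and test names here are invented for illustration). Without the registry.go change, every Gather sorts the metric's shared label-pair slice in place inside checkMetricConsistency, so concurrent Gather calls are expected to trip "go test -race"; with the backport the sort works on a copy and the sketch should pass.

// gather_race_sketch_test.go -- hypothetical reproducer, NOT part of this patch.
package prometheus_sketch_test

import (
	"fmt"
	"sync"
	"testing"

	"github.com/prometheus/client_golang/prometheus"
)

// TestGatherLabelSortRaceSketch exercises concurrent Gather calls over a
// single HistogramVec child. Before the fix, each Gather sorted the child's
// shared label-pair slice in place, so concurrent Gathers raced on it.
func TestGatherLabelSortRaceSketch(t *testing.T) {
	// Mirror the vendored test above: its comment notes at least 13 label
	// pairs are needed to expose prometheus/client_golang#512, so use 16
	// variable labels plus one const label.
	labelNames := make([]string, 16)
	labelValues := make([]string, 16)
	for i := range labelNames {
		labelNames[i] = fmt.Sprint("label_", i)
		labelValues[i] = fmt.Sprint("value_", i)
	}

	reg := prometheus.NewRegistry()
	hv := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:        "sketch_histogram",
			Help:        "Illustrates the in-place label sort race.",
			ConstLabels: prometheus.Labels{"const": "label"},
		},
		labelNames,
	)
	if err := reg.Register(hv); err != nil {
		t.Fatal("Registering failed:", err)
	}
	// Create a single child metric; its label-pair slice is what the old
	// checkMetricConsistency sorted in place on every Gather.
	hv.WithLabelValues(labelValues...).Observe(0.5)

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				if _, err := reg.Gather(); err != nil {
					t.Error("Gathering failed:", err)
					return
				}
			}
		}()
	}
	wg.Wait()
}

Running the sketch once against the pre-fix vendored code and once with this patch applied should show the difference; the vendored TestHistogramVecRegisterGatherConcurrency above remains the authoritative regression test.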