Skip to content

Commit

Permalink
Failover Connector PR2 - core failover functionality (#29557)
Browse files Browse the repository at this point in the history
This is the 2nd PR for the failover connector that implements the core
failover functionality. It is currently in place for Traces and once
solidified will be repeated for metrics and logs

Link to tracking Issue: #20766

Note: Will add traces tests today but pushing up to begin review

cc: @djaglowski @fatsheep9146
  • Loading branch information
akats7 committed Dec 12, 2023
1 parent 0d7ec06 commit 88b3b93
Show file tree
Hide file tree
Showing 7 changed files with 595 additions and 13 deletions.
27 changes: 27 additions & 0 deletions .chloggen/failover-PR2.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: new_component

# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: failoverconnector

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: PR provides core logic for failover connector and implements failover for trace signals

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [20766]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: []
13 changes: 8 additions & 5 deletions connector/failoverconnector/factory_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,20 +15,23 @@ import (
)

func TestNewFactory(t *testing.T) {
traces0 := component.NewIDWithName(component.DataTypeTraces, "0")
traces1 := component.NewIDWithName(component.DataTypeTraces, "1")
traces2 := component.NewIDWithName(component.DataTypeTraces, "2")
cfg := &Config{
PipelinePriority: [][]component.ID{{component.NewIDWithName(component.DataTypeTraces, "0"), component.NewIDWithName(component.DataTypeTraces, "1")}, {component.NewIDWithName(component.DataTypeTraces, "2")}},
PipelinePriority: [][]component.ID{{traces0, traces1}, {traces2}},
RetryInterval: 5 * time.Minute,
RetryGap: 10 * time.Second,
MaxRetries: 5,
}

router := connectortest.NewTracesRouter(
connectortest.WithNopTraces(component.NewIDWithName(component.DataTypeTraces, "0")),
connectortest.WithNopTraces(component.NewIDWithName(component.DataTypeTraces, "1")),
connectortest.WithNopTraces(traces0),
connectortest.WithNopTraces(traces1),
connectortest.WithNopTraces(traces2),
)

factory := NewFactory()
conn, err := factory.CreateTracesToTraces(context.Background(),
conn, err := NewFactory().CreateTracesToTraces(context.Background(),
connectortest.NewNopCreateSettings(), cfg, router.(consumer.Traces))

assert.NoError(t, err)
Expand Down
121 changes: 121 additions & 0 deletions connector/failoverconnector/failover.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,19 +4,140 @@
package failoverconnector // import "github.com/open-telemetry/opentelemetry-collector-contrib/connector/failoverconnector"

import (
	"context"
	"errors"
	"fmt"
	"time"

	"go.opentelemetry.io/collector/component"

	"github.com/open-telemetry/opentelemetry-collector-contrib/connector/failoverconnector/internal/state"
)

// consumerProvider returns a fan-out consumer for the given set of pipeline IDs.
type consumerProvider[C any] func(...component.ID) (C, error)

// failoverRouter routes telemetry to the current priority level's consumer,
// falling back to lower priority pipelines on error and retrying higher
// priority ones in the background.
type failoverRouter[C any] struct {
	consumerProvider consumerProvider[C] // builds one consumer per priority level
	cfg              *Config
	pS               *state.PipelineSelector // source of truth for the target priority level
	consumers        []C                     // one consumer per level, same order as cfg.PipelinePriority
	rS               *state.RetryState // cancel handle for the active retry goroutine
}

// Sentinel errors returned by the failover router. Messages are lowercase and
// unpunctuated per Go convention (staticcheck ST1005); match them with
// errors.Is rather than string comparison.
var (
	errNoValidPipeline = errors.New("all provided pipelines return errors")
	errConsumer        = errors.New("error registering consumer")
)

// newFailoverRouter builds a failoverRouter for the given consumer provider
// and configuration, initializing the pipeline selector and retry state.
func newFailoverRouter[C any](provider consumerProvider[C], cfg *Config) *failoverRouter[C] {
	router := &failoverRouter[C]{
		cfg:              cfg,
		consumerProvider: provider,
		rS:               &state.RetryState{},
		pS:               state.NewPipelineSelector(len(cfg.PipelinePriority), cfg.MaxRetries),
	}
	return router
}

// getCurrentConsumer returns the consumer for the currently selected priority
// level along with its index. The boolean is false when the current index has
// moved past the end of the pipeline list, i.e. no valid pipeline remains.
func (f *failoverRouter[C]) getCurrentConsumer() (C, int, bool) {
	idx := f.pS.CurrentIndex()
	if idx < len(f.cfg.PipelinePriority) {
		return f.consumers[idx], idx, true
	}
	var none C
	return none, -1, false
}

// registerConsumers builds one fan-out consumer per priority level from the
// configured pipelines and stores them on the router.
//
// The original implementation discarded the provider's error and returned the
// bare sentinel; here the underlying error is wrapped so the cause is visible
// while errors.Is(err, errConsumer) still matches for callers.
func (f *failoverRouter[C]) registerConsumers() error {
	consumers := make([]C, 0, len(f.cfg.PipelinePriority))
	for _, pipelines := range f.cfg.PipelinePriority {
		newConsumer, err := f.consumerProvider(pipelines...)
		if err != nil {
			return fmt.Errorf("%w: %w", errConsumer, err)
		}
		consumers = append(consumers, newConsumer)
	}
	f.consumers = consumers
	return nil
}

// handlePipelineError reacts to an error reported against the pipeline at idx:
// it moves the selector either to the next priority level or back to the
// stable level, and when the failing pipeline was the stable one it restarts
// the background retry goroutine with a fresh cancellable context.
func (f *failoverRouter[C]) handlePipelineError(idx int) {
	// avoids race condition in case of consumeSIGNAL invocations
	// where index was updated during execution
	if idx != f.pS.CurrentIndex() {
		return
	}
	// capture stability BEFORE the update below changes the selector state
	doRetry := f.pS.IndexIsStable(idx)
	// UpdatePipelineIndex either increments the pipeline to the next priority
	// or returns it to the stable
	f.pS.UpdatePipelineIndex(idx)
	// if the currentIndex is not the stableIndex, that means the currentIndex is a higher
	// priority index that was set during a retry, in which case we don't want to start a
	// new retry goroutine
	if !doRetry {
		return
	}
	// kill existing retry goroutine if error is from a stable pipeline that failed for the first time
	ctx, cancel := context.WithCancel(context.Background())
	f.rS.InvokeCancel()
	f.rS.UpdateCancelFunc(cancel)
	f.enableRetry(ctx)
}

// enableRetry launches the retry loop goroutine: every RetryInterval it starts
// one iteration over the higher priority pipelines (cancelling the previous
// iteration if still in progress). The loop ends when all higher priority
// levels have exhausted their retries or ctx is cancelled; the goroutine's
// lifetime is bounded by ctx, whose cancel func is held in f.rS.
func (f *failoverRouter[C]) enableRetry(ctx context.Context) {
	go func() {
		ticker := time.NewTicker(f.cfg.RetryInterval)
		defer ticker.Stop()

		stableIndex := f.pS.StableIndex()
		var cancelFunc context.CancelFunc
		// checkContinueRetry checks that any higher priority levels have retries remaining
		// (have not exceeded their maxRetries)
		for f.checkContinueRetry(stableIndex) {
			select {
			case <-ticker.C:
				// When the nextRetry interval starts we kill the existing iteration through
				// the higher priority pipelines if still in progress
				if cancelFunc != nil {
					cancelFunc()
				}
				cancelFunc = f.handleRetry(ctx, stableIndex)
			case <-ctx.Done():
				return
			}
		}
		// all retries exhausted: cancel our own context via the shared handle
		f.rS.InvokeCancel()
	}()
}

// handleRetry starts one background walk over the pipelines with priority
// higher than stableIndex, returning a cancel func so the caller can abort the
// walk if a new retry interval begins before it completes.
func (f *failoverRouter[C]) handleRetry(parentCtx context.Context, stableIndex int) context.CancelFunc {
	walkCtx, cancel := context.WithCancel(parentCtx)
	go f.pS.RetryHighPriorityPipelines(walkCtx, stableIndex, f.cfg.RetryGap)
	return cancel
}

// checkContinueRetry reports whether any pipeline with priority higher than
// index still has retry attempts remaining (has not reached MaxRetries).
// When it returns false, retrying should be suspended.
func (f *failoverRouter[C]) checkContinueRetry(index int) bool {
	retriesLeft := false
	for i := 0; i < index && !retriesLeft; i++ {
		retriesLeft = f.pS.IndexRetryCount(i) < f.cfg.MaxRetries
	}
	return retriesLeft
}

// reportStable notifies the pipeline selector that the priority level at idx
// successfully served a Consume call, allowing the selector to promote a
// retried higher priority level back to stable.
func (f *failoverRouter[C]) reportStable(idx int) {
	f.pS.ReportStable(idx)
}

// For Testing

// GetConsumerAtIndex returns the consumer registered for priority level idx.
func (f *failoverRouter[C]) GetConsumerAtIndex(idx int) C {
	return f.consumers[idx]
}

// ModifyConsumerAtIndex replaces the consumer at priority level idx with c.
func (f *failoverRouter[C]) ModifyConsumerAtIndex(idx int, c C) {
	f.consumers[idx] = c
}
149 changes: 149 additions & 0 deletions connector/failoverconnector/internal/state/pipeline_selector.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,149 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package state // import "github.com/open-telemetry/opentelemetry-collector-contrib/connector/failoverconnector/internal/state"

import (
"context"
"sync"
"time"
)

// PipelineSelector is meant to serve as the source of truth for the target priority level
type PipelineSelector struct {
	currentIndex    int          // priority level telemetry is currently routed to
	stableIndex     int          // last priority level known to be healthy
	lock            sync.RWMutex // guards currentIndex, stableIndex, and pipelineRetries
	pipelineRetries []int        // per-level count of retry attempts consumed
	maxRetry        int          // retry budget per priority level; set once at construction
}

// UpdatePipelineIndex reacts to an error reported for the pipeline at idx.
// When idx is the stable index, failover advances to the next priority level;
// otherwise idx was a higher priority index set during a retry, and the
// current index is returned to the stable one.
func (p *PipelineSelector) UpdatePipelineIndex(idx int) {
	if !p.IndexIsStable(idx) {
		p.setToStableIndex(idx)
		return
	}
	p.setToNextPriorityPipeline(idx)
}

// setToNextPriorityPipeline advances the stable index past idx, additionally
// skipping any lower priority (higher index) levels that have already exceeded
// maxRetry. The resulting index can equal len(pipelineRetries), i.e. one past
// the end (exceededMaxRetries returns false out of range, ending the loop).
// NOTE(review): only stableIndex is updated here; currentIndex is left
// unchanged — confirm callers re-read the selector before the next consume.
func (p *PipelineSelector) setToNextPriorityPipeline(idx int) {
	p.lock.Lock()
	defer p.lock.Unlock()
	// do-while: always advance at least one level, then keep skipping
	// levels whose retry budget is exhausted
	for ok := true; ok; ok = p.exceededMaxRetries(idx) {
		idx++
	}
	p.stableIndex = idx
}

// RetryHighPriorityPipelines performs a single iteration through all pipelines
// with priority higher than stableIndex, pointing currentIndex at each
// non-exhausted level in turn, one level per retryGap tick. Intended to run in
// its own goroutine; it exits on ctx cancellation or when a higher priority
// level has become stable in the meantime.
func (p *PipelineSelector) RetryHighPriorityPipelines(ctx context.Context, stableIndex int, retryGap time.Duration) {
	ticker := time.NewTicker(retryGap)

	defer ticker.Stop()

	for i := 0; i < stableIndex; i++ {
		// if stableIndex was updated to a higher priority level during the execution of the goroutine
		// will return to avoid overwriting higher priority level with lower one
		if stableIndex > p.StableIndex() {
			return
		}
		// checks that max retries were not used for this index
		if p.MaxRetriesUsed(i) {
			continue
		}
		select {
		// return when context is cancelled by parent goroutine
		case <-ctx.Done():
			return
		case <-ticker.C:
			// when ticker triggers currentIndex is updated
			p.setToCurrentIndex(i)
		}
	}
}

// exceededMaxRetries reports whether the level at idx has used up its retry
// budget; out-of-range indexes report false, which lets the caller's loop walk
// one past the end of the pipeline list. NOTE(review): reads shared state
// without taking p.lock — its only visible caller (setToNextPriorityPipeline)
// holds the write lock; keep it that way.
func (p *PipelineSelector) exceededMaxRetries(idx int) bool {
	return idx < len(p.pipelineRetries) && (p.pipelineRetries[idx] >= p.maxRetry)
}

// setToStableIndex returns the currentIndex to the known stable index,
// charging one retry attempt against the failed retry level idx.
func (p *PipelineSelector) setToStableIndex(idx int) {
	p.lock.Lock()
	defer p.lock.Unlock()
	p.pipelineRetries[idx]++
	p.currentIndex = p.stableIndex
}

// setToCurrentIndex points currentIndex at the given retry level.
func (p *PipelineSelector) setToCurrentIndex(idx int) {
	p.lock.Lock()
	p.currentIndex = idx
	p.lock.Unlock()
}

// MaxRetriesUsed reports whether the level at idx has exhausted its retry budget.
func (p *PipelineSelector) MaxRetriesUsed(idx int) bool {
	p.lock.RLock()
	used := p.pipelineRetries[idx] >= p.maxRetry
	p.lock.RUnlock()
	return used
}

// setNewStableIndex promotes idx to be the new stable index, resetting its
// retry count so future failovers give it a fresh budget.
func (p *PipelineSelector) setNewStableIndex(idx int) {
	p.lock.Lock()
	defer p.lock.Unlock()
	p.pipelineRetries[idx] = 0
	p.stableIndex = idx
}

// IndexIsStable reports whether idx is the current stable index.
func (p *PipelineSelector) IndexIsStable(idx int) bool {
	p.lock.RLock()
	stable := p.stableIndex == idx
	p.lock.RUnlock()
	return stable
}

// StableIndex returns the last priority level known to be healthy.
func (p *PipelineSelector) StableIndex() int {
	p.lock.RLock()
	defer p.lock.RUnlock()
	return p.stableIndex
}

// CurrentIndex returns the priority level telemetry is currently routed to.
func (p *PipelineSelector) CurrentIndex() int {
	p.lock.RLock()
	defer p.lock.RUnlock()
	return p.currentIndex
}

// IndexRetryCount returns how many retry attempts the level at idx has consumed.
func (p *PipelineSelector) IndexRetryCount(idx int) int {
	p.lock.RLock()
	defer p.lock.RUnlock()
	return p.pipelineRetries[idx]
}

// ReportStable records that the priority level at idx successfully served a
// Consume call.
func (p *PipelineSelector) ReportStable(idx int) {
	// if idx is already the known stable index there is nothing to update
	if p.IndexIsStable(idx) {
		return
	}
	// idx is a retried (higher priority) index: promote it to the stable index
	// NOTE retry will not stop due to potential higher priority index still available
	p.setNewStableIndex(idx)
}

// NewPipelineSelector constructs a PipelineSelector for lenPriority priority
// levels, each with a budget of maxRetries retry attempts. The zero values of
// currentIndex and stableIndex select the highest priority pipeline.
func NewPipelineSelector(lenPriority int, maxRetries int) *PipelineSelector {
	return &PipelineSelector{
		pipelineRetries: make([]int, lenPriority),
		maxRetry:        maxRetries,
	}
}
44 changes: 44 additions & 0 deletions connector/failoverconnector/internal/state/utils.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package state // import "github.com/open-telemetry/opentelemetry-collector-contrib/connector/failoverconnector/internal/state"

import (
"context"
"sync"
)

// TryLock wraps a mutex so that work runs only when the lock is immediately
// available; contended calls are dropped rather than blocked.
type TryLock struct {
	lock sync.Mutex
}

// TryExecute runs fn(arg) if the lock can be acquired without blocking; when
// another goroutine holds the lock, the call is silently skipped.
func (t *TryLock) TryExecute(fn func(int), arg int) {
	if !t.lock.TryLock() {
		return
	}
	defer t.lock.Unlock()
	fn(arg)
}

// NewTryLock returns a ready-to-use TryLock.
func NewTryLock() *TryLock {
	return &TryLock{}
}

// RetryState owns the cancel function for the active retry goroutine; holding
// a guarded cancel func ends up cleaner than signaling over channels.
type RetryState struct {
	lock        sync.Mutex
	cancelRetry context.CancelFunc
}

// UpdateCancelFunc stores newCancelFunc as the handle for the current retry
// goroutine, replacing any previously stored handle.
func (m *RetryState) UpdateCancelFunc(newCancelFunc context.CancelFunc) {
	m.lock.Lock()
	m.cancelRetry = newCancelFunc
	m.lock.Unlock()
}

// InvokeCancel cancels the active retry goroutine; it is a no-op when no
// cancel func has been registered.
func (m *RetryState) InvokeCancel() {
	m.lock.Lock()
	defer m.lock.Unlock()
	if cancel := m.cancelRetry; cancel != nil {
		cancel()
	}
}

0 comments on commit 88b3b93

Please sign in to comment.