[IMPROVED] Raft layer improvements #4020

Merged · 4 commits · Apr 3, 2023
24 changes: 12 additions & 12 deletions server/jetstream_cluster.go
@@ -436,11 +436,9 @@ func (cc *jetStreamCluster) isStreamCurrent(account, stream string) bool {

// isStreamHealthy will determine if the stream is up to date or very close.
// For R1 it will make sure the stream is present on this server.
// Read lock should be held.
func (js *jetStream) isStreamHealthy(account, stream string) bool {
js.mu.RLock()
defer js.mu.RUnlock()
cc := js.cluster

if cc == nil {
// Non-clustered mode
return true
@@ -480,11 +478,9 @@ func (js *jetStream) isStreamHealthy(account, stream string) bool {

// isConsumerCurrent will determine if the consumer is up to date.
// For R1 it will make sure the consumer is present on this server.
// Read lock should be held.
func (js *jetStream) isConsumerCurrent(account, stream, consumer string) bool {
js.mu.RLock()
defer js.mu.RUnlock()
cc := js.cluster

if cc == nil {
// Non-clustered mode
return true
@@ -1943,9 +1939,6 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps
return
}

// Make sure to stop the raft group on exit to prevent accidental memory bloat.
defer n.Stop()

// Make sure only one is running.
if mset != nil {
if mset.checkInMonitor() {
@@ -1954,6 +1947,11 @@
defer mset.clearMonitorRunning()
}

// Make sure to stop the raft group on exit to prevent accidental memory bloat.
// This must come after the checkInMonitor call so we do not stop the raft node
// out from under the monitor that is already running, since it is the same raft node.
defer n.Stop()

qch, lch, aq, uch, ourPeerId := n.QuitC(), n.LeadChangeC(), n.ApplyQ(), mset.updateC(), meta.ID()

s.Debugf("Starting stream monitor for '%s > %s' [%s]", sa.Client.serviceAccount(), sa.Config.Name, n.Group())
@@ -4187,15 +4185,17 @@ func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) {
return
}

// Make sure to stop the raft group on exit to prevent accidental memory bloat.
defer n.Stop()

// Make sure only one is running.
if o.checkInMonitor() {
return
}
defer o.clearMonitorRunning()

// Make sure to stop the raft group on exit to prevent accidental memory bloat.
// This must come after the checkInMonitor call so we do not stop the raft node
// out from under the monitor that is already running, since it is the same raft node.
defer n.Stop()

qch, lch, aq, uch, ourPeerId := n.QuitC(), n.LeadChangeC(), n.ApplyQ(), o.updateC(), cc.meta.ID()

s.Debugf("Starting consumer monitor for '%s > %s > %s' [%s]", o.acc.Name, ca.Stream, ca.Name, n.Group())
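The comment blocks moved in the two monitor hunks above hinge on ordering: `defer n.Stop()` has to come after the `checkInMonitor()` guard, otherwise a second monitor goroutine that bails out early would still stop the raft node that the first, still-running monitor is using. Below is a minimal sketch of that guard-then-defer pattern; the `monitor` and `raftNode` types and method bodies are simplified stand-ins for illustration, not the actual server implementation.

```go
package main

import (
	"fmt"
	"sync"
)

type raftNode struct{ stopped bool }

func (n *raftNode) Stop() { n.stopped = true }

type monitor struct {
	mu      sync.Mutex
	running bool
}

// checkInMonitor reports whether a monitor is already running for this asset
// and marks this one as running if not (mirroring mset.checkInMonitor()).
func (m *monitor) checkInMonitor() bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.running {
		return true
	}
	m.running = true
	return false
}

func (m *monitor) clearMonitorRunning() {
	m.mu.Lock()
	m.running = false
	m.mu.Unlock()
}

func runMonitor(m *monitor, n *raftNode) {
	// Guard first: if another monitor already owns this raft node, return
	// WITHOUT having deferred n.Stop(), so we never stop the shared node
	// out from under the monitor that is still running.
	if m.checkInMonitor() {
		return
	}
	defer m.clearMonitorRunning()

	// Only the monitor that actually owns the node stops it on exit.
	defer n.Stop()

	fmt.Println("monitoring...")
}

func main() {
	m, n := &monitor{}, &raftNode{}
	runMonitor(m, n) // owns the node, stops it on exit; concurrent callers hit the guard
}
```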
5 changes: 3 additions & 2 deletions server/jetstream_cluster_3_test.go
@@ -2242,14 +2242,15 @@ func TestJetStreamClusterMemLeaderRestart(t *testing.T) {

// Make sure that we have a META leader (there can always be a re-election)
c.waitOnLeader()
c.waitOnStreamLeader(globalAccountName, "foo")

// Should still have quorum and a new leader
checkFor(t, time.Second, 200*time.Millisecond, func() error {
checkFor(t, 5*time.Second, 200*time.Millisecond, func() error {
osi, err = jsc.StreamInfo("foo")
if err != nil {
return fmt.Errorf("expected healthy stream asset, got %s", err.Error())
}
if osi.Cluster.Leader == "" {
if osi.Cluster.Leader == _EMPTY_ {
return fmt.Errorf("expected healthy stream asset with new leader")
}
if osi.State.Msgs != uint64(toSend) {
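The test change above widens the retry window from one second to five and additionally waits for a stream leader before polling. For reference, here is a rough sketch of a `checkFor`-style helper with the semantics the test relies on: retry the check every interval until it passes or the timeout expires, then fail with the last error. Only the call shape is taken from the diff; the body is an assumed reconstruction, not the server's actual test helper.

```go
package server

import (
	"testing"
	"time"
)

// checkFor retries check every interval until it returns nil or timeout
// elapses, then fails the test with the last error seen.
func checkFor(t testing.TB, timeout, interval time.Duration, check func() error) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	var err error
	for time.Now().Before(deadline) {
		if err = check(); err == nil {
			return
		}
		time.Sleep(interval)
	}
	t.Fatalf("check did not pass within %v: %v", timeout, err)
}
```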
5 changes: 5 additions & 0 deletions server/monitor.go
@@ -3125,6 +3125,11 @@ func (s *Server) healthz(opts *HealthzOptions) *HealthStatus {
// Range across all accounts, the streams assigned to them, and the consumers.
// If they are assigned to this server check their status.
ourID := meta.ID()

// TODO(dlc) - Might be better here to not hold the lock the whole time.
js.mu.RLock()
defer js.mu.RUnlock()

for acc, asa := range cc.streams {
for stream, sa := range asa {
if sa.Group.isMember(ourID) {
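This hunk is the other half of the locking change in jetstream_cluster.go: since isStreamHealthy and isConsumerCurrent now document "Read lock should be held." and no longer take js.mu themselves, healthz acquires the read lock once around the whole scan. A hedged sketch of that caller-holds-the-lock pattern follows; the `jetStream` fields here are simplified placeholders rather than the real cluster assignment structures.

```go
package main

import (
	"fmt"
	"sync"
)

// Simplified stand-in for the jetStream cluster state; the real structure
// holds stream and consumer assignments rather than a plain bool.
type jetStream struct {
	mu      sync.RWMutex
	streams map[string]map[string]bool // account -> stream -> healthy
}

// isStreamHealthy assumes js.mu is held for reading by the caller,
// matching the "Read lock should be held." comments added above.
func (js *jetStream) isStreamHealthy(account, stream string) bool {
	return js.streams[account][stream]
}

// healthz-style scan: take the read lock once around the whole range
// instead of re-locking inside every individual health check.
func (js *jetStream) unhealthyStreams() []string {
	js.mu.RLock()
	defer js.mu.RUnlock()
	var bad []string
	for acc, streams := range js.streams {
		for stream := range streams {
			if !js.isStreamHealthy(acc, stream) {
				bad = append(bad, acc+" > "+stream)
			}
		}
	}
	return bad
}

func main() {
	js := &jetStream{streams: map[string]map[string]bool{
		"$G": {"foo": true, "bar": false},
	}}
	fmt.Println(js.unhealthyStreams()) // [$G > bar]
}
```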
10 changes: 2 additions & 8 deletions server/raft.go
@@ -2511,13 +2511,6 @@ func (n *raft) applyCommit(index uint64) error {

// We pass these up as well.
committed = append(committed, e)

case EntryLeaderTransfer:
if n.state == Leader {
n.debug("Stepping down")
n.stepdown.push(noLeader)
}
// No-op
}
}
// Pass to the upper layers if we have normal entries.
@@ -3056,6 +3049,7 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) {
for _, e := range ae.entries {
switch e.Type {
case EntryLeaderTransfer:
// Only process these if they are new, so no replays or catchups.
if isNew {
maybeLeader := string(e.Data)
if maybeLeader == n.id && !n.observer && !n.paused {
@@ -3156,7 +3150,7 @@ func (n *raft) buildAppendEntry(entries []*Entry) *appendEntry {

// Determine if we should store an entry.
func (ae *appendEntry) shouldStore() bool {
return ae != nil && len(ae.entries) > 0 && ae.entries[0].Type != EntryLeaderTransfer
return ae != nil && len(ae.entries) > 0
}

// Store our append entry to our WAL.
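Taken together, the raft.go hunks change how EntryLeaderTransfer is handled: the special case in applyCommit is removed, shouldStore no longer skips transfer entries (so they are written to the WAL like any other entry), and processAppendEntry only acts on a transfer when the append entry is new, never on replay or catchup. The sketch below illustrates that shape under simplified, assumed types; it is not the server's actual raft code.

```go
package main

import "fmt"

// Simplified, assumed stand-ins for the raft entry types.
type entryType int

const (
	entryNormal entryType = iota
	entryLeaderTransfer
)

type entry struct {
	Type entryType
	Data []byte
}

type appendEntry struct{ entries []*entry }

// shouldStore: leader-transfer entries are no longer skipped, so they land
// in the WAL like any other entry; only empty batches are not stored.
func (ae *appendEntry) shouldStore() bool {
	return ae != nil && len(ae.entries) > 0
}

// processAppendEntry sketch: a transfer only triggers a campaign when the
// entry is new, never when replayed from the log or received in a catchup.
func processAppendEntry(ae *appendEntry, isNew bool, ourID string) {
	for _, e := range ae.entries {
		if e.Type == entryLeaderTransfer && isNew && string(e.Data) == ourID {
			fmt.Println("campaigning for leadership")
		}
	}
}

func main() {
	ae := &appendEntry{entries: []*entry{{Type: entryLeaderTransfer, Data: []byte("n1")}}}
	fmt.Println(ae.shouldStore())       // true: transfer entries are stored now
	processAppendEntry(ae, true, "n1")  // new entry -> campaign
	processAppendEntry(ae, false, "n1") // replay/catchup -> no-op
}
```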