Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

*: Limit calls to pretty.ToJSON to more verbose debugging, warnings, and errors to improve performance #7169

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
4 changes: 3 additions & 1 deletion balancer/rls/config.go
Expand Up @@ -143,7 +143,9 @@ type lbConfigJSON struct {
// - childPolicyConfigTargetFieldName:
// - must be set and non-empty
func (rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
logger.Infof("Received JSON service config: %v", pretty.ToJSON(c))
if logger.V(2) {
logger.Infof("Received JSON service config: %v", pretty.ToJSON(c))
}
cfgJSON := &lbConfigJSON{}
if err := json.Unmarshal(c, cfgJSON); err != nil {
return nil, fmt.Errorf("rls: json unmarshal failed for service config %+v: %v", string(c), err)
Expand Down
4 changes: 3 additions & 1 deletion balancer/rls/control_channel.go
Expand Up @@ -209,7 +209,9 @@ func (cc *controlChannel) lookup(reqKeys map[string]string, reason rlspb.RouteLo
Reason: reason,
StaleHeaderData: staleHeaders,
}
cc.logger.Infof("Sending RLS request %+v", pretty.ToJSON(req))
if cc.logger.V(2) {
cc.logger.Infof("Sending RLS request %+v", pretty.ToJSON(req))
}

ctx, cancel := context.WithTimeout(context.Background(), cc.rpcTimeout)
defer cancel()
Expand Down
4 changes: 3 additions & 1 deletion balancer/weightedtarget/weightedtarget.go
Expand Up @@ -88,7 +88,9 @@ type weightedTargetBalancer struct {
// creates/deletes sub-balancers and sends them update. addresses are split into
// groups based on hierarchy path.
func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig))
if b.logger.V(2) {
b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig))
}
newConfig, ok := s.BalancerConfig.(*LBConfig)
if !ok {
return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig)
Expand Down
4 changes: 3 additions & 1 deletion resolver_wrapper.go
Expand Up @@ -194,5 +194,7 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
updates = append(updates, "resolver returned new addresses")
}
channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
if channelz.IsOn() || logger.V(2) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What is the reason for the additional channelz.IsOn() bool conditional?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It's similar to the reason for the logger.V(2) check. We only want to call pretty.ToJSON when we know we're going to use the result. channelz.Infof can add a trace event, log, or both, so we want to call it when channelz data collection is on, when the logger's verbosity level is at least 2, or both.

channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
}
}
8 changes: 6 additions & 2 deletions xds/internal/balancer/cdsbalancer/cdsbalancer.go
Expand Up @@ -287,7 +287,9 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro
}
b.xdsClient = c
}
b.logger.Infof("Received balancer config update: %s", pretty.ToJSON(state.BalancerConfig))
if b.logger.V(2) {
b.logger.Infof("Received balancer config update: %s", pretty.ToJSON(state.BalancerConfig))
}

// The errors checked here should ideally never happen because the
// ServiceConfig in this case is prepared by the xdsResolver and is not
Expand Down Expand Up @@ -411,7 +413,9 @@ func (b *cdsBalancer) onClusterUpdate(name string, update xdsresource.ClusterUpd
return
}

b.logger.Infof("Received Cluster resource: %s", pretty.ToJSON(update))
if b.logger.V(2) {
b.logger.Infof("Received Cluster resource: %s", pretty.ToJSON(update))
}

// Update the watchers map with the update for the cluster.
state.lastUpdate = &update
Expand Down
4 changes: 3 additions & 1 deletion xds/internal/balancer/clusterimpl/clusterimpl.go
Expand Up @@ -211,7 +211,9 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState)
return nil
}

b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig))
if b.logger.V(2) {
b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig))
}
newConfig, ok := s.BalancerConfig.(*LBConfig)
if !ok {
return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig)
Expand Down
4 changes: 3 additions & 1 deletion xds/internal/balancer/clustermanager/clustermanager.go
Expand Up @@ -128,7 +128,9 @@ func (b *bal) UpdateClientConnState(s balancer.ClientConnState) error {
if !ok {
return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig)
}
b.logger.Infof("update with config %+v, resolver state %+v", pretty.ToJSON(s.BalancerConfig), s.ResolverState)
if b.logger.V(2) {
b.logger.Infof("update with config %+v, resolver state %+v", pretty.ToJSON(s.BalancerConfig), s.ResolverState)
}

b.stateAggregator.pauseStateUpdates()
defer b.stateAggregator.resumeStateUpdates()
Expand Down
8 changes: 6 additions & 2 deletions xds/internal/balancer/clusterresolver/clusterresolver.go
Expand Up @@ -184,7 +184,9 @@ func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) {
return
}

b.logger.Infof("Received new balancer config: %v", pretty.ToJSON(update.state.BalancerConfig))
if b.logger.V(2) {
b.logger.Infof("Received new balancer config: %v", pretty.ToJSON(update.state.BalancerConfig))
}
cfg, _ := update.state.BalancerConfig.(*LBConfig)
if cfg == nil {
b.logger.Warningf("Ignoring unsupported balancer configuration of type: %T", update.state.BalancerConfig)
Expand Down Expand Up @@ -242,7 +244,9 @@ func (b *clusterResolverBalancer) updateChildConfig() {
b.logger.Warningf("Failed to parse child policy config. This should never happen because the config was generated: %v", err)
return
}
b.logger.Infof("Built child policy config: %v", pretty.ToJSON(childCfg))
if b.logger.V(2) {
b.logger.Infof("Built child policy config: %v", pretty.ToJSON(childCfg))
}

endpoints := make([]resolver.Endpoint, len(addrs))
for i, a := range addrs {
Expand Down
4 changes: 3 additions & 1 deletion xds/internal/balancer/priority/balancer.go
Expand Up @@ -115,7 +115,9 @@ type priorityBalancer struct {
}

func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
b.logger.Debugf("Received an update with balancer config: %+v", pretty.ToJSON(s.BalancerConfig))
if b.logger.V(2) {
b.logger.Infof("Received an update with balancer config: %+v", pretty.ToJSON(s.BalancerConfig))
}
newConfig, ok := s.BalancerConfig.(*LBConfig)
if !ok {
return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig)
Expand Down
4 changes: 3 additions & 1 deletion xds/internal/balancer/ringhash/ringhash.go
Expand Up @@ -268,7 +268,9 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool {
}

func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig))
if b.logger.V(2) {
b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig))
}
newConfig, ok := s.BalancerConfig.(*LBConfig)
if !ok {
return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig)
Expand Down
4 changes: 3 additions & 1 deletion xds/internal/xdsclient/authority.go
Expand Up @@ -482,7 +482,9 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w

// If we have a cached copy of the resource, notify the new watcher.
if state.cache != nil {
a.logger.Debugf("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON())
if a.logger.V(2) {
a.logger.Debugf("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON())
}
resource := state.cache
a.serializer.Schedule(func(context.Context) { watcher.OnUpdate(resource) })
}
Expand Down
4 changes: 3 additions & 1 deletion xds/internal/xdsclient/bootstrap/bootstrap.go
Expand Up @@ -568,6 +568,8 @@ func newConfigFromContents(data []byte) (*Config, error) {
node.ClientFeatures = append(node.ClientFeatures, clientFeatureNoOverprovisioning, clientFeatureResourceWrapper)
config.NodeProto = node

logger.Debugf("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config))
if logger.V(2) {
logger.Debugf("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config))
}
return config, nil
}