From 3b63c2b110b802a8c65a2a6bdf2bfa170475e7de Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 16 Jun 2020 10:03:59 -0700 Subject: [PATCH 01/11] retry: re-enable retrying on non-IO transport errors (#3691) --- internal/transport/http2_client.go | 15 ++++++++++++++- stream.go | 30 ++++++++++++++++++++++++++---- 2 files changed, 40 insertions(+), 5 deletions(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 28b0c1662415..b43e21ffaf73 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -563,13 +563,26 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } +// PerformedIOError wraps an error to indicate IO may have been performed +// before the error occurred. +type PerformedIOError struct { + Err error +} + +// Error implements error. +func (p PerformedIOError) Error() string { + return p.Err.Error() +} + // NewStream creates a stream and registers it into the transport as "active" // streams. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, err + // We may have performed I/O in the per-RPC creds callback, so do not + // allow transparent retry. + return nil, PerformedIOError{err} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { diff --git a/stream.go b/stream.go index 68d5c14506d8..629af76bdfab 100644 --- a/stream.go +++ b/stream.go @@ -364,6 +364,11 @@ func (a *csAttempt) newStream() error { cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(cs.ctx, cs.callHdr) if err != nil { + if _, ok := err.(transport.PerformedIOError); ok { + // Return without converting to an RPC error so retry code can + // inspect. + return err + } return toRPCErr(err) } cs.attempt.s = s @@ -459,6 +464,22 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. func (cs *clientStream) shouldRetry(err error) error { + unprocessed := false + if cs.attempt.s == nil { + pioErr, ok := err.(transport.PerformedIOError) + if ok { + // Unwrap error. + err = toRPCErr(pioErr.Err) + } else { + unprocessed = true + } + if !ok && !cs.callInfo.failFast { + // In the event of a non-IO operation error from NewStream, we + // never attempted to write anything to the wire, so we can retry + // indefinitely for non-fail-fast RPCs. + return nil + } + } if cs.finished || cs.committed { // RPC is finished or committed; cannot retry. return err @@ -466,10 +487,11 @@ func (cs *clientStream) shouldRetry(err error) error { // Wait for the trailers. if cs.attempt.s != nil { <-cs.attempt.s.Done() - if cs.firstAttempt && cs.attempt.s.Unprocessed() { - // First attempt, stream unprocessed: transparently retry. - return nil - } + unprocessed = cs.attempt.s.Unprocessed() + } + if cs.firstAttempt && unprocessed { + // First attempt, stream unprocessed: transparently retry. + return nil } if cs.cc.dopts.disableRetry { return err From dfc058c6d9dd9b7395ec9b77b675fd54d3ff1512 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 16 Jun 2020 13:57:33 -0700 Subject: [PATCH 02/11] credentials: Add certificate provider plugin APIs. (#3654) Also add an implementation for the `Distributor` type which makes it easier to implement new `Provider` types. 
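As a rough illustration of the intended usage (not part of this change), a minimal Provider built on top of the new Distributor could look like the sketch below. The fileProvider type, its readKeyMaterial helper, the file paths, and the refresh interval are all hypothetical; only the certprovider calls (NewDistributor, Set, KeyMaterial, Stop) come from the API added in this patch.

    package fileprovider

    import (
        "context"
        "crypto/tls"
        "crypto/x509"
        "io/ioutil"
        "time"

        "google.golang.org/grpc/credentials/tls/certprovider"
    )

    // fileProvider is a hypothetical Provider that periodically reloads key
    // material from disk and hands it to a Distributor, which handles the
    // synchronization between this producer and callers of KeyMaterial().
    type fileProvider struct {
        dist   *certprovider.Distributor
        cancel context.CancelFunc
    }

    // Compile-time check that fileProvider implements the Provider interface.
    var _ certprovider.Provider = (*fileProvider)(nil)

    func newFileProvider(interval time.Duration) *fileProvider {
        ctx, cancel := context.WithCancel(context.Background())
        p := &fileProvider{dist: certprovider.NewDistributor(), cancel: cancel}
        go p.run(ctx, interval)
        return p
    }

    func (p *fileProvider) run(ctx context.Context, interval time.Duration) {
        ticker := time.NewTicker(interval)
        defer ticker.Stop()
        for {
            select {
            case <-ctx.Done():
                return
            case <-ticker.C:
                // Push fresh key material (or the read error) into the
                // distributor; a non-nil error makes KeyMaterial() report it.
                km, err := readKeyMaterial()
                p.dist.Set(km, err)
            }
        }
    }

    // readKeyMaterial loads certs and roots from hypothetical file paths.
    func readKeyMaterial() (*certprovider.KeyMaterial, error) {
        cert, err := tls.LoadX509KeyPair("cert.pem", "key.pem")
        if err != nil {
            return nil, err
        }
        pem, err := ioutil.ReadFile("ca.pem")
        if err != nil {
            return nil, err
        }
        roots := x509.NewCertPool()
        roots.AppendCertsFromPEM(pem)
        return &certprovider.KeyMaterial{Certs: []tls.Certificate{cert}, Roots: roots}, nil
    }

    // KeyMaterial delegates to the distributor, blocking until key material
    // is available or ctx expires.
    func (p *fileProvider) KeyMaterial(ctx context.Context) (*certprovider.KeyMaterial, error) {
        return p.dist.KeyMaterial(ctx)
    }

    // Close stops the refresh goroutine and fails pending KeyMaterial() calls.
    func (p *fileProvider) Close() {
        p.cancel()
        p.dist.Stop()
    }

A real implementation would also register a Builder via certprovider.Register whose Build method constructs such a provider, much like the fakeProviderBuilder used in the tests below.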
--- credentials/tls/certprovider/distributor.go | 110 ++++++ .../tls/certprovider/distributor_test.go | 159 ++++++++ credentials/tls/certprovider/provider.go | 104 ++++++ credentials/tls/certprovider/store.go | 120 ++++++ credentials/tls/certprovider/store_test.go | 351 ++++++++++++++++++ 5 files changed, 844 insertions(+) create mode 100644 credentials/tls/certprovider/distributor.go create mode 100644 credentials/tls/certprovider/distributor_test.go create mode 100644 credentials/tls/certprovider/provider.go create mode 100644 credentials/tls/certprovider/store.go create mode 100644 credentials/tls/certprovider/store_test.go diff --git a/credentials/tls/certprovider/distributor.go b/credentials/tls/certprovider/distributor.go new file mode 100644 index 000000000000..d6feb3afeba1 --- /dev/null +++ b/credentials/tls/certprovider/distributor.go @@ -0,0 +1,110 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package certprovider + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/grpcsync" +) + +// Distributor makes it easy for provider implementations to furnish new key +// materials by handling synchronization between the producer and consumers of +// the key material. +// +// Provider implementations which choose to use a Distributor should do the +// following: +// - create a new Distributor using the NewDistributor() function. +// - invoke the Set() method whenever they have new key material or errors to +// report. +// - delegate to the distributor when handing calls to KeyMaterial(). +// - invoke the Stop() method when they are done using the distributor. +type Distributor struct { + // mu protects the underlying key material. + mu sync.Mutex + km *KeyMaterial + pErr error + + ready *grpcsync.Event + closed *grpcsync.Event +} + +// NewDistributor returns a new Distributor. +func NewDistributor() *Distributor { + return &Distributor{ + ready: grpcsync.NewEvent(), + closed: grpcsync.NewEvent(), + } +} + +// Set updates the key material in the distributor with km. +// +// Provider implementations which use the distributor must not modify the +// contents of the KeyMaterial struct pointed to by km. +// +// A non-nil err value indicates the error that the provider implementation ran +// into when trying to fetch key material, and makes it possible to surface the +// error to the user. A non-nil error value passed here causes distributor's +// KeyMaterial() method to return nil key material. +func (d *Distributor) Set(km *KeyMaterial, err error) { + d.mu.Lock() + d.km = km + d.pErr = err + if err != nil { + // If a non-nil err is passed, we ignore the key material being passed. + d.km = nil + } + d.ready.Fire() + d.mu.Unlock() +} + +// KeyMaterial returns the most recent key material provided to the distributor. +// If no key material was provided at the time of this call, it will block until +// the deadline on the context expires or fresh key material arrives. 
+func (d *Distributor) KeyMaterial(ctx context.Context) (*KeyMaterial, error) { + if d.closed.HasFired() { + return nil, errProviderClosed + } + + if d.ready.HasFired() { + return d.keyMaterial() + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-d.closed.Done(): + return nil, errProviderClosed + case <-d.ready.Done(): + return d.keyMaterial() + } +} + +func (d *Distributor) keyMaterial() (*KeyMaterial, error) { + d.mu.Lock() + defer d.mu.Unlock() + return d.km, d.pErr +} + +// Stop turns down the distributor, releases allocated resources and fails any +// active KeyMaterial() call waiting for new key material. +func (d *Distributor) Stop() { + d.closed.Fire() +} diff --git a/credentials/tls/certprovider/distributor_test.go b/credentials/tls/certprovider/distributor_test.go new file mode 100644 index 000000000000..e6c41564c293 --- /dev/null +++ b/credentials/tls/certprovider/distributor_test.go @@ -0,0 +1,159 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package certprovider + +import ( + "context" + "errors" + "fmt" + "reflect" + "sync" + "testing" + "time" +) + +var errProviderTestInternal = errors.New("provider internal error") + +// TestDistributor invokes the different methods on the Distributor type and +// verifies the results. +func (s) TestDistributor(t *testing.T) { + dist := NewDistributor() + + // Read cert/key files from testdata. + km, err := loadKeyMaterials() + if err != nil { + t.Fatal(err) + } + // wantKM1 has both local and root certs. + wantKM1 := *km + // wantKM2 has only local certs. Roots are nil-ed out. + wantKM2 := *km + wantKM2.Roots = nil + + // Create a goroutines which work in lockstep with the rest of the test. + // This goroutine reads the key material from the distributor while the rest + // of the test sets it. + var wg sync.WaitGroup + wg.Add(1) + errCh := make(chan error) + proceedCh := make(chan struct{}) + go func() { + defer wg.Done() + + // The first call to KeyMaterial() should timeout because no key + // material has been set on the distributor as yet. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout/2) + defer cancel() + if _, err := dist.KeyMaterial(ctx); err != context.DeadlineExceeded { + errCh <- err + return + } + proceedCh <- struct{}{} + + // This call to KeyMaterial() should return the key material with both + // the local certs and the root certs. + ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + gotKM, err := dist.KeyMaterial(ctx) + if err != nil { + errCh <- err + return + } + if !reflect.DeepEqual(gotKM, &wantKM1) { + errCh <- fmt.Errorf("provider.KeyMaterial() = %+v, want %+v", gotKM, wantKM1) + } + proceedCh <- struct{}{} + + // This call to KeyMaterial() should eventually return key material with + // only the local certs. 
+ ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for { + gotKM, err := dist.KeyMaterial(ctx) + if err != nil { + errCh <- err + return + } + if reflect.DeepEqual(gotKM, &wantKM2) { + break + } + } + proceedCh <- struct{}{} + + // This call to KeyMaterial() should return nil key material and a + // non-nil error. + ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for { + gotKM, err := dist.KeyMaterial(ctx) + if gotKM == nil && err == errProviderTestInternal { + break + } + if err != nil { + // If we have gotten any error other than + // errProviderTestInternal, we should bail out. + errCh <- err + return + } + } + proceedCh <- struct{}{} + + // This call to KeyMaterial() should eventually return errProviderClosed + // error. + ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for { + if _, err := dist.KeyMaterial(ctx); err == errProviderClosed { + break + } + time.Sleep(100 * time.Millisecond) + } + }() + + waitAndDo(t, proceedCh, errCh, func() { + dist.Set(&wantKM1, nil) + }) + + waitAndDo(t, proceedCh, errCh, func() { + dist.Set(&wantKM2, nil) + }) + + waitAndDo(t, proceedCh, errCh, func() { + dist.Set(&wantKM2, errProviderTestInternal) + }) + + waitAndDo(t, proceedCh, errCh, func() { + dist.Stop() + }) + +} + +func waitAndDo(t *testing.T, proceedCh chan struct{}, errCh chan error, do func()) { + t.Helper() + + timer := time.NewTimer(defaultTestTimeout) + select { + case <-timer.C: + t.Fatalf("test timed out when waiting for event from distributor") + case <-proceedCh: + do() + case err := <-errCh: + t.Fatal(err) + } +} diff --git a/credentials/tls/certprovider/provider.go b/credentials/tls/certprovider/provider.go new file mode 100644 index 000000000000..58c93c1e731f --- /dev/null +++ b/credentials/tls/certprovider/provider.go @@ -0,0 +1,104 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package certprovider defines APIs for certificate providers in gRPC. +// +// Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package certprovider + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" +) + +var ( + // errProviderClosed is returned by Distributor.KeyMaterial when it is + // closed. + errProviderClosed = errors.New("provider instance is closed") + + // m is a map from name to provider builder. + m = make(map[string]Builder) +) + +// Register registers the provider builder, whose name as returned by its Name() +// method will be used as the name registered with this builder. Registered +// Builders are used by the Store to create Providers. +func Register(b Builder) { + m[b.Name()] = b +} + +// getBuilder returns the provider builder registered with the given name. +// If no builder is registered with the provided name, nil will be returned. 
+func getBuilder(name string) Builder { + if b, ok := m[name]; ok { + return b + } + return nil +} + +// Builder creates a Provider. +type Builder interface { + // Build creates a new provider with the provided config. + Build(StableConfig) Provider + + // ParseConfig converts config input in a format specific to individual + // implementations and returns an implementation of the StableConfig + // interface. + // Equivalent configurations must return StableConfig types whose + // Canonical() method returns the same output. + ParseConfig(interface{}) (StableConfig, error) + + // Name returns the name of providers built by this builder. + Name() string +} + +// StableConfig wraps the method to return a stable provider configuration. +type StableConfig interface { + // Canonical returns provider config as an arbitrary byte slice. + // Equivalent configurations must return the same output. + Canonical() []byte +} + +// Provider makes it possible to keep channel credential implementations up to +// date with secrets that they rely on to secure communications on the +// underlying channel. +// +// Provider implementations are free to rely on local or remote sources to fetch +// the latest secrets, and free to share any state between different +// instantiations as they deem fit. +type Provider interface { + // KeyMaterial returns the key material sourced by the provider. + // Callers are expected to use the returned value as read-only. + KeyMaterial(ctx context.Context) (*KeyMaterial, error) + + // Close cleans up resources allocated by the provider. + Close() +} + +// KeyMaterial wraps the certificates and keys returned by a provider instance. +type KeyMaterial struct { + // Certs contains a slice of cert/key pairs used to prove local identity. + Certs []tls.Certificate + // Roots contains the set of trusted roots to validate the peer's identity. + Roots *x509.CertPool +} diff --git a/credentials/tls/certprovider/store.go b/credentials/tls/certprovider/store.go new file mode 100644 index 000000000000..0d2b29b675b1 --- /dev/null +++ b/credentials/tls/certprovider/store.go @@ -0,0 +1,120 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package certprovider + +import ( + "fmt" + "sync" +) + +// provStore is the global singleton certificate provider store. +var provStore = &store{ + providers: make(map[storeKey]*wrappedProvider), +} + +// storeKey acts as the key to the map of providers maintained by the store. A +// combination of provider name and configuration is used to uniquely identify +// every provider instance in the store. Go maps need to be indexed by +// comparable types, so the provider configuration is converted from +// `interface{}` to string using the ParseConfig method while creating this key. +type storeKey struct { + // name of the certificate provider. + name string + // configuration of the certificate provider in string form. 
+ config string +} + +// wrappedProvider wraps a provider instance with a reference count. +type wrappedProvider struct { + Provider + refCount int + + // A reference to the key and store are also kept here to override the + // Close method on the provider. + storeKey storeKey + store *store +} + +// store is a collection of provider instances, safe for concurrent access. +type store struct { + mu sync.Mutex + providers map[storeKey]*wrappedProvider +} + +// GetProvider returns a provider instance corresponding to name and config. +// name is the registered name of the provider and config is the +// provider-specific configuration. Implementations of the Builder interface +// should clearly document the type of configuration accepted by them. +// +// If a provider exists for the (name+config) combination, its reference count +// is incremented before returning. If no provider exists for the (name+config) +// combination, a new one is created using the registered builder. If no +// registered builder is found, or the provider configuration is rejected by it, +// a non-nil error is returned. +func GetProvider(name string, config interface{}) (Provider, error) { + provStore.mu.Lock() + defer provStore.mu.Unlock() + + builder := getBuilder(name) + if builder == nil { + return nil, fmt.Errorf("no registered builder for provider name: %s", name) + } + stableConfig, err := builder.ParseConfig(config) + if err != nil { + return nil, err + } + + sk := storeKey{ + name: name, + config: string(stableConfig.Canonical()), + } + if wp, ok := provStore.providers[sk]; ok { + wp.refCount++ + return wp, nil + } + + provider := builder.Build(stableConfig) + if provider == nil { + return nil, fmt.Errorf("certprovider.Build(%v) failed", sk) + } + wp := &wrappedProvider{ + Provider: provider, + refCount: 1, + storeKey: sk, + store: provStore, + } + provStore.providers[sk] = wp + return wp, nil +} + +// Close overrides the Close method of the embedded provider. It releases the +// reference held by the caller on the underlying provider and if the +// provider's reference count reaches zero, it is removed from the store, and +// its Close method is also invoked. +func (wp *wrappedProvider) Close() { + ps := wp.store + ps.mu.Lock() + defer ps.mu.Unlock() + + wp.refCount-- + if wp.refCount == 0 { + wp.Provider.Close() + delete(ps.providers, wp.storeKey) + } +} diff --git a/credentials/tls/certprovider/store_test.go b/credentials/tls/certprovider/store_test.go new file mode 100644 index 000000000000..6cd02e012dfb --- /dev/null +++ b/credentials/tls/certprovider/store_test.go @@ -0,0 +1,351 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package certprovider + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "reflect" + "testing" + "time" + + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/testdata" +) + +const ( + fakeProvider1Name = "fake-certificate-provider-1" + fakeProvider2Name = "fake-certificate-provider-2" + fakeConfig = "my fake config" + defaultTestTimeout = 1 * time.Second +) + +func init() { + Register(&fakeProviderBuilder{name: fakeProvider1Name}) + Register(&fakeProviderBuilder{name: fakeProvider2Name}) +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// fakeProviderBuilder builds new instances of fakeProvider and interprets the +// config provided to it as a string. +type fakeProviderBuilder struct { + name string +} + +func (b *fakeProviderBuilder) Build(StableConfig) Provider { + ctx, cancel := context.WithCancel(context.Background()) + p := &fakeProvider{ + Distributor: NewDistributor(), + cancel: cancel, + done: make(chan struct{}), + kmCh: make(chan *KeyMaterial, 2), + } + go p.run(ctx) + return p +} + +func (b *fakeProviderBuilder) ParseConfig(config interface{}) (StableConfig, error) { + s, ok := config.(string) + if !ok { + return nil, fmt.Errorf("provider %s received bad config %v", b.name, config) + } + return &fakeStableConfig{config: s}, nil +} + +func (b *fakeProviderBuilder) Name() string { + return b.name +} + +type fakeStableConfig struct { + config string +} + +func (c *fakeStableConfig) Canonical() []byte { + return []byte(c.config) +} + +// fakeProvider is an implementation of the Provider interface which embeds a +// Distributor and exposes two channels for the user: +// 1. to be notified when the provider is closed +// 2. to push new key material into the provider +type fakeProvider struct { + *Distributor + + // Used to cancel the run goroutine when the provider is closed. + cancel context.CancelFunc + // This channel is closed when the provider is closed. Tests should block on + // this to make sure the provider is closed. + done chan struct{} + // Tests can push new key material on this channel, and the provider will + // return this on subsequent calls to KeyMaterial(). + kmCh chan *KeyMaterial +} + +func (p *fakeProvider) run(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case km := <-p.kmCh: + p.Distributor.Set(km, nil) + } + } +} + +func (p *fakeProvider) Close() { + p.cancel() + p.Distributor.Stop() +} + +// loadKeyMaterials is a helper to read cert/key files from testdata and convert +// them into a KeyMaterial struct. +func loadKeyMaterials() (*KeyMaterial, error) { + certs, err := tls.LoadX509KeyPair(testdata.Path("server1.pem"), testdata.Path("server1.key")) + if err != nil { + return nil, err + } + + pemData, err := ioutil.ReadFile(testdata.Path("ca.pem")) + if err != nil { + return nil, err + } + roots := x509.NewCertPool() + roots.AppendCertsFromPEM(pemData) + return &KeyMaterial{Certs: []tls.Certificate{certs}, Roots: roots}, nil +} + +func makeProvider(t *testing.T, name, config string) (Provider, *fakeProvider) { + t.Helper() + + prov, err := GetProvider(name, config) + if err != nil { + t.Fatal(err) + } + + // The store returns a wrappedProvider, which holds a reference to the + // actual provider, which in our case in the fakeProvider. 
+ wp := prov.(*wrappedProvider) + fp := wp.Provider.(*fakeProvider) + return prov, fp +} + +// TestStoreWithSingleProvider creates a single provider through the store and +// calls methods on it. +func (s) TestStoreWithSingleProvider(t *testing.T) { + prov, fp := makeProvider(t, fakeProvider1Name, fakeConfig) + + // Push key materials into the provider. + wantKM, err := loadKeyMaterials() + if err != nil { + t.Fatal(err) + } + fp.kmCh <- wantKM + + // Get key materials from the provider and compare it to the ones we pushed + // above. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + gotKM, err := prov.KeyMaterial(ctx) + if err != nil { + t.Fatalf("provider.KeyMaterial() = %v", err) + } + // TODO(easwars): Remove all references to reflect.DeepEqual and use + // cmp.Equal instead. Currently, the later panics because x509.Certificate + // type defines an Equal method, but does not check for nil. This has been + // fixed in + // https://github.com/golang/go/commit/89865f8ba64ccb27f439cce6daaa37c9aa38f351, + // but this is only available starting go1.14. So, once we remove support + // for go1.13, we can make the switch. + if !reflect.DeepEqual(gotKM, wantKM) { + t.Fatalf("provider.KeyMaterial() = %+v, want %+v", gotKM, wantKM) + } + + // Close the provider and retry the KeyMaterial() call, and expect it to + // fail with a known error. + prov.Close() + if _, err := prov.KeyMaterial(ctx); err != errProviderClosed { + t.Fatalf("provider.KeyMaterial() = %v, wantErr: %v", err, errProviderClosed) + } +} + +// TestStoreWithSingleProviderWithSharing creates multiple instances of the same +// type of provider through the store (and expects the store's sharing mechanism +// to kick in) and calls methods on it. +func (s) TestStoreWithSingleProviderWithSharing(t *testing.T) { + prov1, fp1 := makeProvider(t, fakeProvider1Name, fakeConfig) + prov2, _ := makeProvider(t, fakeProvider1Name, fakeConfig) + + // Push key materials into the fake provider1. + wantKM, err := loadKeyMaterials() + if err != nil { + t.Fatal(err) + } + fp1.kmCh <- wantKM + + // Get key materials from the fake provider2 and compare it to the ones we + // pushed above. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + gotKM, err := prov2.KeyMaterial(ctx) + if err != nil { + t.Fatalf("provider.KeyMaterial() = %v", err) + } + if !reflect.DeepEqual(gotKM, wantKM) { + t.Fatalf("provider.KeyMaterial() = %+v, want %+v", gotKM, wantKM) + } + + // Close the provider1 and retry the KeyMaterial() call on prov2, and expect + // it to succeed. + prov1.Close() + if _, err := prov2.KeyMaterial(ctx); err != nil { + t.Fatalf("provider.KeyMaterial() = %v", err) + } + + prov2.Close() + if _, err := prov2.KeyMaterial(ctx); err != errProviderClosed { + t.Fatalf("provider.KeyMaterial() = %v, wantErr: %v", err, errProviderClosed) + } +} + +// TestStoreWithSingleProviderWithoutSharing creates multiple instances of the +// same type of provider through the store with different configs. The store +// would end up creating different provider instances for these and no sharing +// would take place. +func (s) TestStoreWithSingleProviderWithoutSharing(t *testing.T) { + prov1, fp1 := makeProvider(t, fakeProvider1Name, fakeConfig+"1111") + prov2, fp2 := makeProvider(t, fakeProvider1Name, fakeConfig+"2222") + + // Push the same key materials into the two providers. 
+ wantKM, err := loadKeyMaterials() + if err != nil { + t.Fatal(err) + } + fp1.kmCh <- wantKM + fp2.kmCh <- wantKM + + // Get key materials from the fake provider1 and compare it to the ones we + // pushed above. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + gotKM, err := prov1.KeyMaterial(ctx) + if err != nil { + t.Fatalf("provider.KeyMaterial() = %v", err) + } + if !reflect.DeepEqual(gotKM, wantKM) { + t.Fatalf("provider.KeyMaterial() = %+v, want %+v", gotKM, wantKM) + } + + // Get key materials from the fake provider2 and compare it to the ones we + // pushed above. + gotKM, err = prov2.KeyMaterial(ctx) + if err != nil { + t.Fatalf("provider.KeyMaterial() = %v", err) + } + if !reflect.DeepEqual(gotKM, wantKM) { + t.Fatalf("provider.KeyMaterial() = %+v, want %+v", gotKM, wantKM) + } + + // Update the key materials used by provider1, and make sure provider2 is + // not affected. + newKM, err := loadKeyMaterials() + if err != nil { + t.Fatal(err) + } + newKM.Roots = nil + fp1.kmCh <- newKM + + gotKM, err = prov2.KeyMaterial(ctx) + if err != nil { + t.Fatalf("provider.KeyMaterial() = %v", err) + } + if !reflect.DeepEqual(gotKM, wantKM) { + t.Fatalf("provider.KeyMaterial() = %+v, want %+v", gotKM, wantKM) + } + + // Close the provider1 and retry the KeyMaterial() call on prov2, and expect + // it to succeed. + prov1.Close() + if _, err := prov2.KeyMaterial(ctx); err != nil { + t.Fatalf("provider.KeyMaterial() = %v", err) + } + + prov2.Close() + if _, err := prov2.KeyMaterial(ctx); err != errProviderClosed { + t.Fatalf("provider.KeyMaterial() = %v, wantErr: %v", err, errProviderClosed) + } +} + +// TestStoreWithMultipleProviders creates multiple providers of different types +// and make sure closing of one does not affect the other. +func (s) TestStoreWithMultipleProviders(t *testing.T) { + prov1, fp1 := makeProvider(t, fakeProvider1Name, fakeConfig) + prov2, fp2 := makeProvider(t, fakeProvider2Name, fakeConfig) + + // Push key materials into the fake providers. + wantKM, err := loadKeyMaterials() + if err != nil { + t.Fatal(err) + } + fp1.kmCh <- wantKM + fp2.kmCh <- wantKM + + // Get key materials from the fake provider1 and compare it to the ones we + // pushed above. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + gotKM, err := prov1.KeyMaterial(ctx) + if err != nil { + t.Fatalf("provider.KeyMaterial() = %v", err) + } + if !reflect.DeepEqual(gotKM, wantKM) { + t.Fatalf("provider.KeyMaterial() = %+v, want %+v", gotKM, wantKM) + } + + // Get key materials from the fake provider2 and compare it to the ones we + // pushed above. + gotKM, err = prov2.KeyMaterial(ctx) + if err != nil { + t.Fatalf("provider.KeyMaterial() = %v", err) + } + if !reflect.DeepEqual(gotKM, wantKM) { + t.Fatalf("provider.KeyMaterial() = %+v, want %+v", gotKM, wantKM) + } + + // Close the provider1 and retry the KeyMaterial() call on prov2, and expect + // it to succeed. 
+ prov1.Close() + if _, err := prov2.KeyMaterial(ctx); err != nil { + t.Fatalf("provider.KeyMaterial() = %v", err) + } + + prov2.Close() + if _, err := prov2.KeyMaterial(ctx); err != errProviderClosed { + t.Fatalf("provider.KeyMaterial() = %v, wantErr: %v", err, errProviderClosed) + } +} From 9a465503579e4f97b81d4e2ddafdd1daef80aa93 Mon Sep 17 00:00:00 2001 From: d-reidenbach <66145057+d-reidenbach@users.noreply.github.com> Date: Wed, 17 Jun 2020 04:11:41 +0000 Subject: [PATCH 03/11] alts: Added peer attributes accessor for alts context and updated test method (#3675) --- .../alts/internal/authinfo/authinfo.go | 6 +++++ .../alts/internal/authinfo/authinfo_test.go | 25 ++++++++++++++----- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/credentials/alts/internal/authinfo/authinfo.go b/credentials/alts/internal/authinfo/authinfo.go index 9c53d6b53fb8..ebea57da1ded 100644 --- a/credentials/alts/internal/authinfo/authinfo.go +++ b/credentials/alts/internal/authinfo/authinfo.go @@ -48,6 +48,7 @@ func newAuthInfo(result *altspb.HandshakerResult) *altsAuthInfo { PeerServiceAccount: result.GetPeerIdentity().GetServiceAccount(), LocalServiceAccount: result.GetLocalIdentity().GetServiceAccount(), PeerRpcVersions: result.GetPeerRpcVersions(), + PeerAttributes: result.GetPeerIdentity().GetAttributes(), }, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, } @@ -87,3 +88,8 @@ func (s *altsAuthInfo) LocalServiceAccount() string { func (s *altsAuthInfo) PeerRPCVersions() *altspb.RpcProtocolVersions { return s.p.GetPeerRpcVersions() } + +// PeerAttributes returns the context's peer attributes. +func (s *altsAuthInfo) PeerAttributes() map[string]string { + return s.p.GetPeerAttributes() +} diff --git a/credentials/alts/internal/authinfo/authinfo_test.go b/credentials/alts/internal/authinfo/authinfo_test.go index 10ac1b4bccac..722eeddc4a8d 100644 --- a/credentials/alts/internal/authinfo/authinfo_test.go +++ b/credentials/alts/internal/authinfo/authinfo_test.go @@ -35,15 +35,19 @@ func Test(t *testing.T) { } const ( - testAppProtocol = "my_app" - testRecordProtocol = "very_secure_protocol" - testPeerAccount = "peer_service_account" - testLocalAccount = "local_service_account" - testPeerHostname = "peer_hostname" - testLocalHostname = "local_hostname" + testAppProtocol = "my_app" + testRecordProtocol = "very_secure_protocol" + testPeerAccount = "peer_service_account" + testLocalAccount = "local_service_account" + testPeerHostname = "peer_hostname" + testLocalHostname = "local_hostname" + testLocalPeerAttributeKey = "peer" + testLocalPeerAttributeValue = "attributes" ) func (s) TestALTSAuthInfo(t *testing.T) { + testPeerAttributes := make(map[string]string) + testPeerAttributes[testLocalPeerAttributeKey] = testLocalPeerAttributeValue for _, tc := range []struct { result *altspb.HandshakerResult outAppProtocol string @@ -52,6 +56,7 @@ func (s) TestALTSAuthInfo(t *testing.T) { outPeerAccount string outLocalAccount string outPeerRPCVersions *altspb.RpcProtocolVersions + outPeerAttributes map[string]string }{ { &altspb.HandshakerResult{ @@ -61,6 +66,7 @@ func (s) TestALTSAuthInfo(t *testing.T) { IdentityOneof: &altspb.Identity_ServiceAccount{ ServiceAccount: testPeerAccount, }, + Attributes: testPeerAttributes, }, LocalIdentity: &altspb.Identity{ IdentityOneof: &altspb.Identity_ServiceAccount{ @@ -74,6 +80,7 @@ func (s) TestALTSAuthInfo(t *testing.T) { testPeerAccount, testLocalAccount, nil, + testPeerAttributes, }, { &altspb.HandshakerResult{ @@ -83,6 +90,7 
@@ func (s) TestALTSAuthInfo(t *testing.T) { IdentityOneof: &altspb.Identity_Hostname{ Hostname: testPeerHostname, }, + Attributes: testPeerAttributes, }, LocalIdentity: &altspb.Identity{ IdentityOneof: &altspb.Identity_Hostname{ @@ -115,6 +123,7 @@ func (s) TestALTSAuthInfo(t *testing.T) { Minor: 11, }, }, + testPeerAttributes, }, } { authInfo := newAuthInfo(tc.result) @@ -139,5 +148,9 @@ func (s) TestALTSAuthInfo(t *testing.T) { if got, want := authInfo.PeerRPCVersions(), tc.outPeerRPCVersions; !reflect.DeepEqual(got, want) { t.Errorf("authinfo.PeerRpcVersions()=%v, want %v", got, want) } + if got, want := authInfo.PeerAttributes(), tc.outPeerAttributes; !reflect.DeepEqual(got, want) { + t.Errorf("authinfo.PeerAttributes()=%v, want %v", got, want) + } + } } From 4e63bcab52b7ad5c264159e447231710de919b99 Mon Sep 17 00:00:00 2001 From: Zou Nengren Date: Wed, 24 Jun 2020 00:49:44 +0800 Subject: [PATCH 04/11] test: replace manual.GenerateAndRegisterManualResolver with grpc.WithResolvers (#3700) --- balancer/grpclb/grpclb_test.go | 45 +++++++--------- balancer/roundrobin/roundrobin_test.go | 35 +++++-------- balancer_switching_test.go | 35 +++++-------- clientconn_test.go | 39 ++++++-------- examples/features/debugging/client/main.go | 5 +- examples/features/health/client/main.go | 4 +- pickfirst_test.go | 38 ++++++-------- resolver_conn_wrapper_test.go | 14 +++-- test/balancer_test.go | 30 +++++------ test/channelz_test.go | 60 +++++++++------------- test/creds_test.go | 10 ++-- test/end2end_test.go | 44 +++++++--------- test/healthcheck_test.go | 8 +-- 13 files changed, 152 insertions(+), 215 deletions(-) diff --git a/balancer/grpclb/grpclb_test.go b/balancer/grpclb/grpclb_test.go index d55fe3eb8be5..48082e2069fa 100644 --- a/balancer/grpclb/grpclb_test.go +++ b/balancer/grpclb/grpclb_test.go @@ -396,8 +396,7 @@ func newLoadBalancer(numberOfBackends int, statsChan chan *lbpb.ClientStats) (ts var grpclbConfig = `{"loadBalancingConfig": [{"grpclb": {}}]}` func (s) TestGRPCLB(t *testing.T) { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") tss, cleanup, err := newLoadBalancer(1, nil) if err != nil { @@ -419,7 +418,7 @@ func (s) TestGRPCLB(t *testing.T) { creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, + cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) @@ -444,8 +443,7 @@ func (s) TestGRPCLB(t *testing.T) { // The remote balancer sends response with duplicates to grpclb client. 
func (s) TestGRPCLBWeighted(t *testing.T) { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") tss, cleanup, err := newLoadBalancer(2, nil) if err != nil { @@ -470,7 +468,7 @@ func (s) TestGRPCLBWeighted(t *testing.T) { creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, + cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) @@ -510,8 +508,7 @@ func (s) TestGRPCLBWeighted(t *testing.T) { } func (s) TestDropRequest(t *testing.T) { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") tss, cleanup, err := newLoadBalancer(2, nil) if err != nil { @@ -536,7 +533,7 @@ func (s) TestDropRequest(t *testing.T) { creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, + cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) @@ -661,8 +658,7 @@ func (s) TestDropRequest(t *testing.T) { // When the balancer in use disconnects, grpclb should connect to the next address from resolved balancer address list. func (s) TestBalancerDisconnects(t *testing.T) { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") var ( tests []*testServers @@ -694,7 +690,7 @@ func (s) TestBalancerDisconnects(t *testing.T) { creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, + cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) @@ -739,8 +735,7 @@ func (s) TestFallback(t *testing.T) { balancer.Register(newLBBuilderWithFallbackTimeout(100 * time.Millisecond)) defer balancer.Register(newLBBuilder()) - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") tss, cleanup, err := newLoadBalancer(1, nil) if err != nil { @@ -771,7 +766,7 @@ func (s) TestFallback(t *testing.T) { creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, + cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) @@ -867,8 +862,7 @@ func (s) TestFallback(t *testing.T) { } func (s) TestExplicitFallback(t *testing.T) { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") tss, cleanup, err := newLoadBalancer(1, nil) if err != nil { @@ -899,7 +893,7 @@ func (s) TestExplicitFallback(t *testing.T) 
{ creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, + cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) @@ -971,7 +965,7 @@ func (s) TestExplicitFallback(t *testing.T) { func (s) TestFallBackWithNoServerAddress(t *testing.T) { resolveNowCh := make(chan struct{}, 1) - r, cleanup := manual.GenerateAndRegisterManualResolver() + r := manual.NewBuilderWithScheme("whatever") r.ResolveNowCallback = func(resolver.ResolveNowOptions) { select { case <-resolveNowCh: @@ -979,7 +973,6 @@ func (s) TestFallBackWithNoServerAddress(t *testing.T) { } resolveNowCh <- struct{}{} } - defer cleanup() tss, cleanup, err := newLoadBalancer(1, nil) if err != nil { @@ -1009,7 +1002,7 @@ func (s) TestFallBackWithNoServerAddress(t *testing.T) { creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, + cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) @@ -1090,8 +1083,7 @@ func (s) TestFallBackWithNoServerAddress(t *testing.T) { } func (s) TestGRPCLBPickFirst(t *testing.T) { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") tss, cleanup, err := newLoadBalancer(3, nil) if err != nil { @@ -1120,7 +1112,7 @@ func (s) TestGRPCLBPickFirst(t *testing.T) { creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, + cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) @@ -1245,8 +1237,7 @@ func checkStats(stats, expected *rpcStats) error { } func runAndCheckStats(t *testing.T, drop bool, statsChan chan *lbpb.ClientStats, runRPCs func(*grpc.ClientConn), statsWant *rpcStats) error { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") tss, cleanup, err := newLoadBalancer(1, statsChan) if err != nil { @@ -1270,7 +1261,7 @@ func runAndCheckStats(t *testing.T, drop bool, statsChan chan *lbpb.ClientStats, ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, + cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), grpc.WithTransportCredentials(&creds), grpc.WithPerRPCCredentials(failPreRPCCred{}), grpc.WithContextDialer(fakeNameDialer)) diff --git a/balancer/roundrobin/roundrobin_test.go b/balancer/roundrobin/roundrobin_test.go index c5d066ed94c8..5a8ba481c9f0 100644 --- a/balancer/roundrobin/roundrobin_test.go +++ b/balancer/roundrobin/roundrobin_test.go @@ -98,8 +98,7 @@ func startTestServers(count int) (_ *test, err error) { } func (s) TestOneBackend(t *testing.T) { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer 
cleanup() + r := manual.NewBuilderWithScheme("whatever") test, err := startTestServers(1) if err != nil { @@ -107,7 +106,7 @@ func (s) TestOneBackend(t *testing.T) { } defer test.cleanup() - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName(roundrobin.Name)) + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -128,8 +127,7 @@ func (s) TestOneBackend(t *testing.T) { } func (s) TestBackendsRoundRobin(t *testing.T) { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") backendCount := 5 test, err := startTestServers(backendCount) @@ -138,7 +136,7 @@ func (s) TestBackendsRoundRobin(t *testing.T) { } defer test.cleanup() - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName(roundrobin.Name)) + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -187,8 +185,7 @@ func (s) TestBackendsRoundRobin(t *testing.T) { } func (s) TestAddressesRemoved(t *testing.T) { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") test, err := startTestServers(1) if err != nil { @@ -196,7 +193,7 @@ func (s) TestAddressesRemoved(t *testing.T) { } defer test.cleanup() - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName(roundrobin.Name)) + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -233,8 +230,7 @@ func (s) TestAddressesRemoved(t *testing.T) { } func (s) TestCloseWithPendingRPC(t *testing.T) { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") test, err := startTestServers(1) if err != nil { @@ -242,7 +238,7 @@ func (s) TestCloseWithPendingRPC(t *testing.T) { } defer test.cleanup() - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName(roundrobin.Name)) + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -266,8 +262,7 @@ func (s) TestCloseWithPendingRPC(t *testing.T) { } func (s) TestNewAddressWhileBlocking(t *testing.T) { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") test, err := startTestServers(1) if err != nil { @@ -275,7 +270,7 @@ func (s) TestNewAddressWhileBlocking(t *testing.T) { } defer test.cleanup() - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName(roundrobin.Name)) + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -313,8 +308,7 @@ func (s) TestNewAddressWhileBlocking(t *testing.T) { } func (s) TestOneServerDown(t *testing.T) { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") backendCount := 3 test, err := 
startTestServers(backendCount) @@ -323,7 +317,7 @@ func (s) TestOneServerDown(t *testing.T) { } defer test.cleanup() - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName(roundrobin.Name)) + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -410,8 +404,7 @@ func (s) TestOneServerDown(t *testing.T) { } func (s) TestAllServersDown(t *testing.T) { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") backendCount := 3 test, err := startTestServers(backendCount) @@ -420,7 +413,7 @@ func (s) TestAllServersDown(t *testing.T) { } defer test.cleanup() - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName(roundrobin.Name)) + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } diff --git a/balancer_switching_test.go b/balancer_switching_test.go index f47754bfdfeb..ed132121280c 100644 --- a/balancer_switching_test.go +++ b/balancer_switching_test.go @@ -147,14 +147,13 @@ func checkRoundRobin(cc *ClientConn, servers []*server) error { } func (s) TestSwitchBalancer(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") const numServers = 2 servers, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -179,14 +178,13 @@ func (s) TestSwitchBalancer(t *testing.T) { // Test that balancer specified by dial option will not be overridden. func (s) TestBalancerDialOption(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") const numServers = 2 servers, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{}), WithBalancerName(roundrobin.Name)) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{}), WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -207,10 +205,9 @@ func (s) TestBalancerDialOption(t *testing.T) { // First addr update contains grpclb. func (s) TestSwitchBalancerGRPCLBFirst(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -268,10 +265,9 @@ func (s) TestSwitchBalancerGRPCLBFirst(t *testing.T) { // First addr update does not contain grpclb. 
func (s) TestSwitchBalancerGRPCLBSecond(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -345,10 +341,9 @@ func (s) TestSwitchBalancerGRPCLBSecond(t *testing.T) { // when the resolved address doesn't contain grpclb addresses, balancer will be // switched back to roundrobin. func (s) TestSwitchBalancerGRPCLBRoundRobin(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -408,10 +403,9 @@ func (s) TestSwitchBalancerGRPCLBRoundRobin(t *testing.T) { // service config won't take effect. But when there's no grpclb address in a new // resolved address list, balancer will be switched to the new one. func (s) TestSwitchBalancerGRPCLBServiceConfig(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -495,14 +489,13 @@ func (s) TestSwitchBalancerGRPCLBWithGRPCLBNotRegistered(t *testing.T) { internal.BalancerUnregister("grpclb") defer balancer.Register(&magicalLB{}) - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") const numServers = 3 servers, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } diff --git a/clientconn_test.go b/clientconn_test.go index 524b9736c1a0..9b95f6cec087 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -66,10 +66,9 @@ func (s) TestDialWithTimeout(t *testing.T) { <-dialDone // Close conn only after dial returns. }() - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{lisAddr}}) - client, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithTimeout(5*time.Second)) + client, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithTimeout(5*time.Second)) close(dialDone) if err != nil { t.Fatalf("Dial failed. 
Err: %v", err) @@ -119,10 +118,9 @@ func (s) TestDialWithMultipleBackendsNotSendingServerPreface(t *testing.T) { } }() - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{lis1Addr, lis2Addr}}) - client, err := Dial(r.Scheme()+":///test.server", WithInsecure()) + client, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) if err != nil { t.Fatalf("Dial failed. Err: %v", err) } @@ -642,10 +640,9 @@ func (s) TestConnectParamsWithMinConnectTimeout(t *testing.T) { } func (s) TestResolverServiceConfigBeforeAddressNotPanic(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure()) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -660,10 +657,9 @@ func (s) TestResolverServiceConfigBeforeAddressNotPanic(t *testing.T) { func (s) TestResolverServiceConfigWhileClosingNotPanic(t *testing.T) { for i := 0; i < 10; i++ { // Run this multiple times to make sure it doesn't panic. - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme(fmt.Sprintf("whatever-%d", i)) - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure()) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -674,10 +670,9 @@ func (s) TestResolverServiceConfigWhileClosingNotPanic(t *testing.T) { } func (s) TestResolverEmptyUpdateNotPanic(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure()) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -754,10 +749,9 @@ func (s) TestClientUpdatesParamsAfterGoAway(t *testing.T) { } func (s) TestDisableServiceConfigOption(t *testing.T) { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") addr := r.Scheme() + ":///non.existent" - cc, err := Dial(addr, WithInsecure(), WithDisableServiceConfig()) + cc, err := Dial(addr, WithInsecure(), WithResolvers(r), WithDisableServiceConfig()) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } @@ -1013,8 +1007,7 @@ func (s) TestUpdateAddresses_RetryFromFirstAddr(t *testing.T) { } func (s) TestDefaultServiceConfig(t *testing.T) { - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") addr := r.Scheme() + ":///non.existent" js := `{ "methodConfig": [ @@ -1055,7 +1048,7 @@ func testInvalidDefaultServiceConfig(t *testing.T) { } func testDefaultServiceConfigWhenResolverServiceConfigDisabled(t *testing.T, r *manual.Resolver, addr string, js string) { - cc, err := Dial(addr, WithInsecure(), WithDisableServiceConfig(), WithDefaultServiceConfig(js)) + cc, err := Dial(addr, WithInsecure(), WithDisableServiceConfig(), WithResolvers(r), WithDefaultServiceConfig(js)) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } @@ -1071,7 +1064,7 @@ func testDefaultServiceConfigWhenResolverServiceConfigDisabled(t *testing.T, r * } func 
testDefaultServiceConfigWhenResolverDoesNotReturnServiceConfig(t *testing.T, r *manual.Resolver, addr string, js string) { - cc, err := Dial(addr, WithInsecure(), WithDefaultServiceConfig(js)) + cc, err := Dial(addr, WithInsecure(), WithResolvers(r), WithDefaultServiceConfig(js)) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } @@ -1085,7 +1078,7 @@ func testDefaultServiceConfigWhenResolverDoesNotReturnServiceConfig(t *testing.T } func testDefaultServiceConfigWhenResolverReturnInvalidServiceConfig(t *testing.T, r *manual.Resolver, addr string, js string) { - cc, err := Dial(addr, WithInsecure(), WithDefaultServiceConfig(js)) + cc, err := Dial(addr, WithInsecure(), WithResolvers(r), WithDefaultServiceConfig(js)) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } diff --git a/examples/features/debugging/client/main.go b/examples/features/debugging/client/main.go index 33b7a0a1475f..29ac0fe89920 100644 --- a/examples/features/debugging/client/main.go +++ b/examples/features/debugging/client/main.go @@ -51,10 +51,9 @@ func main() { defer s.Stop() /***** Initialize manual resolver and Dial *****/ - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") // Set up a connection to the server. - conn, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName("round_robin")) + conn, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName("round_robin")) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/health/client/main.go b/examples/features/health/client/main.go index 1a11782acf84..9cbc03f90a47 100644 --- a/examples/features/health/client/main.go +++ b/examples/features/health/client/main.go @@ -54,8 +54,7 @@ func callUnaryEcho(c pb.EchoClient) { func main() { flag.Parse() - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{ Addresses: []resolver.Address{ {Addr: "localhost:50051"}, @@ -68,6 +67,7 @@ func main() { options := []grpc.DialOption{ grpc.WithInsecure(), grpc.WithBlock(), + grpc.WithResolvers(r), grpc.WithDefaultServiceConfig(serviceConfig), } diff --git a/pickfirst_test.go b/pickfirst_test.go index a69cec1c51de..9ece7844a355 100644 --- a/pickfirst_test.go +++ b/pickfirst_test.go @@ -39,14 +39,16 @@ func errorDesc(err error) string { } func (s) TestOneBackendPickfirst(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") numServers := 1 servers, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", + WithInsecure(), + WithResolvers(r), + WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -72,14 +74,13 @@ func (s) TestOneBackendPickfirst(t *testing.T) { } func (s) TestBackendsPickfirst(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") numServers := 2 servers, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), 
WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -105,14 +106,13 @@ func (s) TestBackendsPickfirst(t *testing.T) { } func (s) TestNewAddressWhileBlockingPickfirst(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") numServers := 1 servers, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -141,14 +141,13 @@ func (s) TestNewAddressWhileBlockingPickfirst(t *testing.T) { } func (s) TestCloseWithPendingRPCPickfirst(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") numServers := 1 _, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -177,14 +176,13 @@ func (s) TestCloseWithPendingRPCPickfirst(t *testing.T) { } func (s) TestOneServerDownPickfirst(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") numServers := 2 servers, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -218,14 +216,13 @@ func (s) TestOneServerDownPickfirst(t *testing.T) { } func (s) TestAllServersDownPickfirst(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") numServers := 2 servers, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -261,14 +258,13 @@ func (s) TestAllServersDownPickfirst(t *testing.T) { } func (s) TestAddressesRemovedPickfirst(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") numServers := 3 servers, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } diff --git a/resolver_conn_wrapper_test.go b/resolver_conn_wrapper_test.go index 9f22c8b90f6c..e125976a5359 100644 --- a/resolver_conn_wrapper_test.go +++ b/resolver_conn_wrapper_test.go @@ -77,14 +77,14 @@ func testResolverErrorPolling(t *testing.T, badUpdate func(*manual.Resolver), go return 0 } - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") rn := make(chan struct{}) 
defer func() { close(rn) }() r.ResolveNowCallback = func(resolver.ResolveNowOptions) { rn <- struct{}{} } defaultDialOptions := []DialOption{ WithInsecure(), + WithResolvers(r), withResolveNowBackoff(resolverBackoff), } cc, err := Dial(r.Scheme()+":///test.server", append(defaultDialOptions, dopts...)...) @@ -173,11 +173,10 @@ func (s) TestServiceConfigErrorPolling(t *testing.T) { // sure there is no data race in this code path, and also that there is no // deadlock. func (s) TestResolverErrorInBuild(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{ServiceConfig: &serviceconfig.ParseResult{Err: errors.New("resolver build err")}}) - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure()) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) if err != nil { t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) } @@ -194,10 +193,9 @@ func (s) TestResolverErrorInBuild(t *testing.T) { } func (s) TestServiceConfigErrorRPC(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure()) + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) if err != nil { t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) } diff --git a/test/balancer_test.go b/test/balancer_test.go index 3cd4e0e91fb7..7fa96d8680ee 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -321,13 +321,12 @@ func (testBalancerKeepAddresses) Close() { // Make sure that non-grpclb balancers don't get grpclb addresses even if name // resolver sends them func (s) TestNonGRPCLBBalancerGetsNoGRPCLBAddress(t *testing.T) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") b := newTestBalancerKeepAddresses() balancer.Register(b) - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(b.Name())) if err != nil { t.Fatalf("failed to dial: %v", err) @@ -433,8 +432,7 @@ func (s) TestAddressAttributesInNewSubConn(t *testing.T) { stub.Register(attrBalancerName, bf) t.Logf("Registered balancer %s...", attrBalancerName) - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") t.Logf("Registered manual resolver with scheme %s...", r.Scheme()) lis, err := net.Listen("tcp", "localhost:0") @@ -451,6 +449,7 @@ func (s) TestAddressAttributesInNewSubConn(t *testing.T) { creds := &attrTransportCreds{} dopts := []grpc.DialOption{ grpc.WithTransportCredentials(creds), + grpc.WithResolvers(r), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, attrBalancerName)), } cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) 
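The test changes above and below all follow the same pattern: the globally registered manual resolver, together with its cleanup function, is replaced by an unregistered builder that is scoped to a single ClientConn via `WithResolvers`. A minimal sketch of the resulting usage, assuming a backend is already serving at `backendAddr` (a hypothetical parameter added for illustration; the "whatever" scheme and the test.server target mirror the diffs):

    package example

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/resolver"
        "google.golang.org/grpc/resolver/manual"
    )

    // dialWithManualResolver builds an unregistered manual resolver, seeds it
    // with one address, and hands it to the ClientConn through WithResolvers.
    func dialWithManualResolver(backendAddr string) (*grpc.ClientConn, error) {
        r := manual.NewBuilderWithScheme("whatever")
        r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: backendAddr}}})
        // No cleanup defer is needed: nothing was added to the global registry.
        return grpc.Dial(r.Scheme()+":///test.server",
            grpc.WithInsecure(),
            grpc.WithResolvers(r))
    }

Because the builder never touches the global resolver registry, these tests no longer need a deferred cleanup call and cannot leak a scheme into unrelated tests.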
@@ -517,10 +516,9 @@ func (s) TestServersSwap(t *testing.T) { defer cleanup() // Initialize client - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: addr1}}}) - cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithInsecure()) + cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithInsecure(), grpc.WithResolvers(r)) if err != nil { t.Fatalf("Error creating client: %v", err) } @@ -570,15 +568,14 @@ func (s) TestEmptyAddrs(t *testing.T) { go s.Serve(lis) // Initialize pickfirst client - pfr, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + pfr := manual.NewBuilderWithScheme("whatever") pfrnCalled := grpcsync.NewEvent() pfr.ResolveNowCallback = func(resolver.ResolveNowOptions) { pfrnCalled.Fire() } pfr.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) - pfcc, err := grpc.DialContext(ctx, pfr.Scheme()+":///", grpc.WithInsecure()) + pfcc, err := grpc.DialContext(ctx, pfr.Scheme()+":///", grpc.WithInsecure(), grpc.WithResolvers(pfr)) if err != nil { t.Fatalf("Error creating client: %v", err) } @@ -596,15 +593,15 @@ func (s) TestEmptyAddrs(t *testing.T) { <-pfrnCalled.Done() // Initialize roundrobin client - rrr, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + rrr := manual.NewBuilderWithScheme("whatever") + rrrnCalled := grpcsync.NewEvent() rrr.ResolveNowCallback = func(resolver.ResolveNowOptions) { rrrnCalled.Fire() } rrr.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) - rrcc, err := grpc.DialContext(ctx, rrr.Scheme()+":///", grpc.WithInsecure(), + rrcc, err := grpc.DialContext(ctx, rrr.Scheme()+":///", grpc.WithInsecure(), grpc.WithResolvers(rrr), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, roundrobin.Name))) if err != nil { t.Fatalf("Error creating client: %v", err) @@ -660,10 +657,9 @@ func (s) TestWaitForReady(t *testing.T) { go s.Serve(lis) // Initialize client - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") - cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithInsecure()) + cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithInsecure(), grpc.WithResolvers(r)) if err != nil { t.Fatalf("Error creating client: %v", err) } diff --git a/test/channelz_test.go b/test/channelz_test.go index c69e0cec2e6e..37140bb2ce89 100644 --- a/test/channelz_test.go +++ b/test/channelz_test.go @@ -209,12 +209,11 @@ func (s) TestCZNestedChannelRegistrationAndDeletion(t *testing.T) { // avoid calling API to set balancer type, which will void service config's change of balancer. 
e.balancer = "" te := newTest(t, e) - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") resolvedAddrs := []resolver.Address{{Addr: "127.0.0.1:0", Type: resolver.GRPCLB, ServerName: "grpclb.server"}} r.InitialState(resolver.State{Addresses: resolvedAddrs}) te.resolverScheme = r.Scheme() - te.clientConn() + te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() if err := verifyResultWithDelay(func() (bool, error) { @@ -255,14 +254,13 @@ func (s) TestCZClientSubChannelSocketRegistrationAndDeletion(t *testing.T) { te := newTest(t, e) var svrAddrs []resolver.Address te.startServers(&testServer{security: e.security}, num) - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") for _, a := range te.srvAddrs { svrAddrs = append(svrAddrs, resolver.Address{Addr: a}) } r.InitialState(resolver.State{Addresses: svrAddrs}) te.resolverScheme = r.Scheme() - te.clientConn() + te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() // Here, we just wait for all sockets to be up. In the future, if we implement // IDLE, we may need to make several rpc calls to create the sockets. @@ -507,14 +505,13 @@ func (s) TestCZChannelMetrics(t *testing.T) { te.maxClientSendMsgSize = newInt(8) var svrAddrs []resolver.Address te.startServers(&testServer{security: e.security}, num) - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") for _, a := range te.srvAddrs { svrAddrs = append(svrAddrs, resolver.Address{Addr: a}) } r.InitialState(resolver.State{Addresses: svrAddrs}) te.resolverScheme = r.Scheme() - cc := te.clientConn() + cc := te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() tc := testpb.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { @@ -1397,12 +1394,11 @@ func (s) TestCZChannelTraceCreationDeletion(t *testing.T) { // avoid calling API to set balancer type, which will void service config's change of balancer. e.balancer = "" te := newTest(t, e) - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") resolvedAddrs := []resolver.Address{{Addr: "127.0.0.1:0", Type: resolver.GRPCLB, ServerName: "grpclb.server"}} r.InitialState(resolver.State{Addresses: resolvedAddrs}) te.resolverScheme = r.Scheme() - te.clientConn() + te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() var nestedConn int64 if err := verifyResultWithDelay(func() (bool, error) { @@ -1472,11 +1468,10 @@ func (s) TestCZSubChannelTraceCreationDeletion(t *testing.T) { e := tcpClearRREnv te := newTest(t, e) te.startServer(&testServer{security: e.security}) - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: te.srvAddr}}}) te.resolverScheme = r.Scheme() - te.clientConn() + te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() var subConn int64 // Here, we just wait for all sockets to be up. 
In the future, if we implement @@ -1566,12 +1561,11 @@ func (s) TestCZChannelAddressResolutionChange(t *testing.T) { e.balancer = "" te := newTest(t, e) te.startServer(&testServer{security: e.security}) - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") addrs := []resolver.Address{{Addr: te.srvAddr}} r.InitialState(resolver.State{Addresses: addrs}) te.resolverScheme = r.Scheme() - te.clientConn() + te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() var cid int64 // Here, we just wait for all sockets to be up. In the future, if we implement @@ -1670,15 +1664,14 @@ func (s) TestCZSubChannelPickedNewAddress(t *testing.T) { e.balancer = "" te := newTest(t, e) te.startServers(&testServer{security: e.security}, 3) - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") var svrAddrs []resolver.Address for _, a := range te.srvAddrs { svrAddrs = append(svrAddrs, resolver.Address{Addr: a}) } r.InitialState(resolver.State{Addresses: svrAddrs}) te.resolverScheme = r.Scheme() - cc := te.clientConn() + cc := te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() tc := testpb.NewTestServiceClient(cc) // make sure the connection is up @@ -1730,11 +1723,10 @@ func (s) TestCZSubChannelConnectivityState(t *testing.T) { e := tcpClearRREnv te := newTest(t, e) te.startServer(&testServer{security: e.security}) - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: te.srvAddr}}}) te.resolverScheme = r.Scheme() - cc := te.clientConn() + cc := te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() tc := testpb.NewTestServiceClient(cc) // make sure the connection is up @@ -1825,11 +1817,10 @@ func (s) TestCZChannelConnectivityState(t *testing.T) { e := tcpClearRREnv te := newTest(t, e) te.startServer(&testServer{security: e.security}) - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: te.srvAddr}}}) te.resolverScheme = r.Scheme() - cc := te.clientConn() + cc := te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() tc := testpb.NewTestServiceClient(cc) // make sure the connection is up @@ -1886,12 +1877,11 @@ func (s) TestCZTraceOverwriteChannelDeletion(t *testing.T) { te := newTest(t, e) channelz.SetMaxTraceEntry(1) defer channelz.ResetMaxTraceEntryToDefault() - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") resolvedAddrs := []resolver.Address{{Addr: "127.0.0.1:0", Type: resolver.GRPCLB, ServerName: "grpclb.server"}} r.InitialState(resolver.State{Addresses: resolvedAddrs}) te.resolverScheme = r.Scheme() - te.clientConn() + te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() var nestedConn int64 if err := verifyResultWithDelay(func() (bool, error) { @@ -1950,11 +1940,10 @@ func (s) TestCZTraceOverwriteSubChannelDeletion(t *testing.T) { channelz.SetMaxTraceEntry(1) defer channelz.ResetMaxTraceEntryToDefault() te.startServer(&testServer{security: e.security}) - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: te.srvAddr}}}) te.resolverScheme = r.Scheme() - te.clientConn() + 
te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() var subConn int64 // Here, we just wait for all sockets to be up. In the future, if we implement @@ -2009,11 +1998,10 @@ func (s) TestCZTraceTopChannelDeletionTraceClear(t *testing.T) { e := tcpClearRREnv te := newTest(t, e) te.startServer(&testServer{security: e.security}) - r, cleanup := manual.GenerateAndRegisterManualResolver() - defer cleanup() + r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: te.srvAddr}}}) te.resolverScheme = r.Scheme() - te.clientConn() + te.clientConn(grpc.WithResolvers(r)) var subConn int64 // Here, we just wait for all sockets to be up. In the future, if we implement // IDLE, we may need to make several rpc calls to create the sockets. diff --git a/test/creds_test.go b/test/creds_test.go index 8f87af125ec3..b25336908adb 100644 --- a/test/creds_test.go +++ b/test/creds_test.go @@ -411,10 +411,9 @@ func (s) TestCredsHandshakeAuthority(t *testing.T) { go s.Serve(lis) defer s.Stop() - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") - cc, err := grpc.Dial(r.Scheme()+":///"+testAuthority, grpc.WithTransportCredentials(cred)) + cc, err := grpc.Dial(r.Scheme()+":///"+testAuthority, grpc.WithTransportCredentials(cred), grpc.WithResolvers(r)) if err != nil { t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err) } @@ -452,10 +451,9 @@ func (s) TestCredsHandshakeServerNameAuthority(t *testing.T) { go s.Serve(lis) defer s.Stop() - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") - cc, err := grpc.Dial(r.Scheme()+":///"+testAuthority, grpc.WithTransportCredentials(cred)) + cc, err := grpc.Dial(r.Scheme()+":///"+testAuthority, grpc.WithTransportCredentials(cred), grpc.WithResolvers(r)) if err != nil { t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err) } diff --git a/test/end2end_test.go b/test/end2end_test.go index 82150fe11be9..3e129c3fc159 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -1435,11 +1435,10 @@ func newDuration(b time.Duration) (a *time.Duration) { func (s) TestGetMethodConfig(t *testing.T) { te := testServiceConfigSetup(t, tcpClearRREnv) defer te.tearDown() - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") te.resolverScheme = r.Scheme() - cc := te.clientConn() + cc := te.clientConn(grpc.WithResolvers(r)) addrs := []resolver.Address{{Addr: te.srvAddr}} r.UpdateState(resolver.State{ Addresses: addrs, @@ -1521,12 +1520,11 @@ func (s) TestGetMethodConfig(t *testing.T) { func (s) TestServiceConfigWaitForReady(t *testing.T) { te := testServiceConfigSetup(t, tcpClearRREnv) defer te.tearDown() - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") // Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds. 
te.resolverScheme = r.Scheme() - cc := te.clientConn() + cc := te.clientConn(grpc.WithResolvers(r)) addrs := []resolver.Address{{Addr: te.srvAddr}} r.UpdateState(resolver.State{ Addresses: addrs, @@ -1610,12 +1608,11 @@ func (s) TestServiceConfigWaitForReady(t *testing.T) { func (s) TestServiceConfigTimeout(t *testing.T) { te := testServiceConfigSetup(t, tcpClearRREnv) defer te.tearDown() - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") // Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. te.resolverScheme = r.Scheme() - cc := te.clientConn() + cc := te.clientConn(grpc.WithResolvers(r)) addrs := []resolver.Address{{Addr: te.srvAddr}} r.UpdateState(resolver.State{ Addresses: addrs, @@ -1708,8 +1705,7 @@ func (s) TestServiceConfigTimeout(t *testing.T) { func (s) TestServiceConfigMaxMsgSize(t *testing.T) { e := tcpClearRREnv - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") // Setting up values and objects shared across all test cases. const smallSize = 1 @@ -1736,7 +1732,7 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { te1.resolverScheme = r.Scheme() te1.nonBlockingDial = true te1.startServer(&testServer{security: e.security}) - cc1 := te1.clientConn() + cc1 := te1.clientConn(grpc.WithResolvers(r)) addrs := []resolver.Address{{Addr: te1.srvAddr}} sc := parseCfg(r, `{ @@ -1827,7 +1823,7 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { te2.startServer(&testServer{security: e.security}) defer te2.tearDown() - cc2 := te2.clientConn() + cc2 := te2.clientConn(grpc.WithResolvers(r)) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te2.srvAddr}}, ServiceConfig: sc}) tc = testpb.NewTestServiceClient(cc2) @@ -1888,7 +1884,7 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { te3.startServer(&testServer{security: e.security}) defer te3.tearDown() - cc3 := te3.clientConn() + cc3 := te3.clientConn(grpc.WithResolvers(r)) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te3.srvAddr}}, ServiceConfig: sc}) tc = testpb.NewTestServiceClient(cc3) @@ -1971,12 +1967,11 @@ func (s) TestStreamingRPCWithTimeoutInServiceConfigRecv(t *testing.T) { te := testServiceConfigSetup(t, tcpClearRREnv) te.startServer(&testServer{security: tcpClearRREnv.security}) defer te.tearDown() - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") te.resolverScheme = r.Scheme() te.nonBlockingDial = true - cc := te.clientConn() + cc := te.clientConn(grpc.WithResolvers(r)) tc := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{ @@ -5017,9 +5012,8 @@ func (ss *stubServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallSer // Start starts the server and creates a client connected to it. func (ss *stubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) error { - r, cleanup := manual.GenerateAndRegisterManualResolver() + r := manual.NewBuilderWithScheme("whatever") ss.r = r - ss.cleanups = append(ss.cleanups, cleanup) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -5036,7 +5030,7 @@ func (ss *stubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) target := ss.r.Scheme() + ":///" + ss.addr - opts := append([]grpc.DialOption{grpc.WithInsecure()}, dopts...) 
+ opts := append([]grpc.DialOption{grpc.WithInsecure(), grpc.WithResolvers(r)}, dopts...) cc, err := grpc.Dial(target, opts...) if err != nil { return fmt.Errorf("grpc.Dial(%q) = %v", target, err) @@ -6693,12 +6687,11 @@ func (s) TestGoAwayThenClose(t *testing.T) { testpb.RegisterTestServiceServer(s2, ts) go s2.Serve(lis2) - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{ {Addr: lis1.Addr().String()}, }}) - cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithInsecure()) + cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithInsecure()) if err != nil { t.Fatalf("Error creating client: %v", err) } @@ -6763,12 +6756,11 @@ func (s) TestRPCWaitsForResolver(t *testing.T) { te := testServiceConfigSetup(t, tcpClearRREnv) te.startServer(&testServer{security: tcpClearRREnv.security}) defer te.tearDown() - r, rcleanup := manual.GenerateAndRegisterManualResolver() - defer rcleanup() + r := manual.NewBuilderWithScheme("whatever") te.resolverScheme = r.Scheme() te.nonBlockingDial = true - cc := te.clientConn() + cc := te.clientConn(grpc.WithResolvers(r)) tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) diff --git a/test/healthcheck_test.go b/test/healthcheck_test.go index ec0c13e02743..0a60f8c927ac 100644 --- a/test/healthcheck_test.go +++ b/test/healthcheck_test.go @@ -152,19 +152,19 @@ type clientConfig struct { } func setupClient(c *clientConfig) (cc *grpc.ClientConn, r *manual.Resolver, deferFunc func(), err error) { - r, rcleanup := manual.GenerateAndRegisterManualResolver() + r = manual.NewBuilderWithScheme("whatever") var opts []grpc.DialOption - opts = append(opts, grpc.WithInsecure(), grpc.WithBalancerName(c.balancerName)) + opts = append(opts, grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(c.balancerName)) if c.testHealthCheckFuncWrapper != nil { opts = append(opts, internal.WithHealthCheckFunc.(func(internal.HealthChecker) grpc.DialOption)(c.testHealthCheckFuncWrapper)) } opts = append(opts, c.extraDialOption...) cc, err = grpc.Dial(r.Scheme()+":///test.server", opts...) 
if err != nil { - rcleanup() + return nil, nil, nil, fmt.Errorf("dial failed due to err: %v", err) } - return cc, r, func() { cc.Close(); rcleanup() }, nil + return cc, r, func() { cc.Close() }, nil } func (s) TestHealthCheckWatchStateChange(t *testing.T) { From 38aafd89f814f347db56a52efd055961651078ad Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 24 Jun 2020 12:44:51 -0700 Subject: [PATCH 05/11] vet.sh: require ALL modules are tidy; tidy some that are not (#3709) --- cmd/protoc-gen-go-grpc/go.sum | 2 ++ security/advancedtls/go.mod | 10 ++++++++- security/advancedtls/go.sum | 42 +++++++++++++++++++++++++++++++++-- vet.sh | 6 ++--- 4 files changed, 54 insertions(+), 6 deletions(-) diff --git a/cmd/protoc-gen-go-grpc/go.sum b/cmd/protoc-gen-go-grpc/go.sum index 3e741dc38700..92baf2631b73 100644 --- a/cmd/protoc-gen-go-grpc/go.sum +++ b/cmd/protoc-gen-go-grpc/go.sum @@ -5,7 +5,9 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index a9f7f46dbbbe..392985d74469 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -2,4 +2,12 @@ module google.golang.org/grpc/security/advancedtls go 1.13 -require google.golang.org/grpc v1.27.0 +require ( + github.com/golang/protobuf v1.3.5 // indirect + github.com/google/go-cmp v0.4.0 // indirect + golang.org/x/net v0.0.0-20200602114024-627f9648deb9 // indirect + golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 // indirect + golang.org/x/text v0.3.3 // indirect + google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 // indirect + google.golang.org/grpc v1.29.1 +) diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index 0a0042b33388..fd8b6c4732d7 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -1,53 +1,91 @@ cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f 
h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSKU8r1UEzcL5RVZ4gO9Y= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto 
v0.0.0-20200513103714-09dca8ec2884 h1:fiNLklpBwWK1mth30Hlwk+fcdBmIALlgF5iy77O37Ig= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vet.sh b/vet.sh index f0a67298a5b0..bfd5d5411053 100755 --- a/vet.sh +++ b/vet.sh @@ -108,10 +108,10 @@ if [[ -z "${VET_SKIP_PROTO}" ]]; then (git status; git --no-pager diff; exit 1) fi -# - Check that our module is tidy. +# - Check that our modules are tidy. if go help mod >& /dev/null; then - go mod tidy && \ - git status --porcelain 2>&1 | fail_on_output || \ + find . -name 'go.mod' | xargs -IXXX bash -c 'cd $(dirname XXX); go mod tidy' + git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) fi From 7a808837ae926d5e44454a9a34af1d92cafeea10 Mon Sep 17 00:00:00 2001 From: Patrice Chalin Date: Thu, 25 Jun 2020 13:40:16 -0400 Subject: [PATCH 06/11] examples: make test script output easier to read (#3711) --- examples/examples_test.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/examples_test.sh b/examples/examples_test.sh index a382b18c8276..85e2958bff8e 100755 --- a/examples/examples_test.sh +++ b/examples/examples_test.sh @@ -15,7 +15,7 @@ # limitations under the License. 
# -set +e -x +set +e export TMPDIR=$(mktemp -d) trap "rm -rf ${TMPDIR}" EXIT @@ -154,6 +154,6 @@ for example in ${EXAMPLES[@]}; do pass "client log contains expected output: ${EXPECTED_CLIENT_OUTPUT[$example]}" fi fi - clean + echo "" done From 31d22c78fb16fff5d12d823f797b3f9995285b68 Mon Sep 17 00:00:00 2001 From: Patrice Chalin Date: Thu, 25 Jun 2020 17:28:39 -0400 Subject: [PATCH 07/11] examples: use grpc v1.30.0 rather than v1.30.0-dev.1 (#3710) --- examples/go.mod | 6 +++--- examples/go.sum | 17 +++++++++++++++-- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/examples/go.mod b/examples/go.mod index 8be62bc7b4a3..cbd35e6337c1 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -4,8 +4,8 @@ go 1.11 require ( github.com/golang/mock v1.1.1 - github.com/golang/protobuf v1.4.0 + github.com/golang/protobuf v1.4.2 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be - google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 - google.golang.org/grpc v1.30.0-dev.1 + google.golang.org/genproto v0.0.0-20200624020401-64a14ca9d1ad + google.golang.org/grpc v1.30.0 ) diff --git a/examples/go.sum b/examples/go.sum index 7aa49f559848..7e80c324a9af 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -6,6 +6,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= @@ -22,6 +23,9 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -60,16 +64,25 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= 
+google.golang.org/genproto v0.0.0-20200624020401-64a14ca9d1ad h1:uAwc13+y0Y8QZLTYhLCu6lHhnG99ecQU5FYTj8zxAng= +google.golang.org/genproto v0.0.0-20200624020401-64a14ca9d1ad/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.30.0-dev.1 h1:UPWdABFs9zu2kdq7GrCUcfnVgCT65hSpvHmy0RiKn0M= -google.golang.org/grpc v1.30.0-dev.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 42419544077668f30ecb673e1a16bd826c95fe88 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 25 Jun 2020 20:03:47 -0700 Subject: [PATCH 08/11] xds: Move test only utility method to testutils. 
(#3715) --- xds/internal/balancer/lrs/lrs_test.go | 5 +++-- xds/internal/internal.go | 11 --------- xds/internal/testutils/locality.go | 32 +++++++++++++++++++++++++++ 3 files changed, 35 insertions(+), 13 deletions(-) create mode 100644 xds/internal/testutils/locality.go diff --git a/xds/internal/balancer/lrs/lrs_test.go b/xds/internal/balancer/lrs/lrs_test.go index 09b22ac9fbe6..b18c3d7e218d 100644 --- a/xds/internal/balancer/lrs/lrs_test.go +++ b/xds/internal/balancer/lrs/lrs_test.go @@ -39,6 +39,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/testutils" ) const ( @@ -286,7 +287,7 @@ func Test_lrsStore_buildStats_rpcCounts(t *testing.T) { ) } upstreamLocalityStats = append(upstreamLocalityStats, &endpointpb.UpstreamLocalityStats{ - Locality: l.ToProto(), + Locality: testutils.LocalityIDToProto(l), TotalSuccessfulRequests: count.success, TotalRequestsInProgress: tempInProgress, TotalErrorRequests: count.failure, @@ -298,7 +299,7 @@ func Test_lrsStore_buildStats_rpcCounts(t *testing.T) { for l, c := range inProgressCounts { if _, ok := counts[l]; !ok { upstreamLocalityStats = append(upstreamLocalityStats, &endpointpb.UpstreamLocalityStats{ - Locality: l.ToProto(), + Locality: testutils.LocalityIDToProto(l), TotalRequestsInProgress: c, }) } diff --git a/xds/internal/internal.go b/xds/internal/internal.go index b2c980003dd8..8b17cf930242 100644 --- a/xds/internal/internal.go +++ b/xds/internal/internal.go @@ -21,8 +21,6 @@ package internal import ( "fmt" - - corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" ) type clientID string @@ -49,12 +47,3 @@ type LocalityID struct { func (l LocalityID) String() string { return fmt.Sprintf("%s-%s-%s", l.Region, l.Zone, l.SubZone) } - -// ToProto convert Locality to the proto representation. -func (l LocalityID) ToProto() *corepb.Locality { - return &corepb.Locality{ - Region: l.Region, - Zone: l.Zone, - SubZone: l.SubZone, - } -} diff --git a/xds/internal/testutils/locality.go b/xds/internal/testutils/locality.go new file mode 100644 index 000000000000..a4e4cc59b865 --- /dev/null +++ b/xds/internal/testutils/locality.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package testutils + +import ( + corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" + "google.golang.org/grpc/xds/internal" +) + +// LocalityIDToProto converts a LocalityID to its proto representation. 
+func LocalityIDToProto(l internal.LocalityID) *corepb.Locality { + return &corepb.Locality{ + Region: l.Region, + Zone: l.Zone, + SubZone: l.SubZone, + } +} From 506b7730668b5a13465224b0d8133f974a3f843d Mon Sep 17 00:00:00 2001 From: Garrett Gutierrez Date: Fri, 26 Jun 2020 12:04:47 -0700 Subject: [PATCH 09/11] Implemented component logging (#3617) --- balancer/base/balancer.go | 16 +-- balancer/grpclb/grpclb.go | 21 ++-- balancer/grpclb/grpclb_remote_balancer.go | 23 ++-- balancer/rls/internal/balancer.go | 17 +-- balancer/rls/internal/cache/cache.go | 6 +- balancer/rls/internal/config.go | 5 +- balancer/roundrobin/roundrobin.go | 4 +- balancer_conn_wrappers.go | 2 +- channelz/service/service.go | 3 + channelz/service/service_test.go | 20 +-- clientconn.go | 44 +++---- credentials/alts/alts.go | 3 +- dialoptions.go | 3 +- grpclog/component.go | 117 ++++++++++++++++++ grpclog/glogger/glogger.go | 34 ++--- grpclog/loggerv2.go | 4 + internal/binarylog/binarylog.go | 4 +- internal/binarylog/env_config.go | 4 +- internal/binarylog/method_logger.go | 13 +- internal/binarylog/sink.go | 3 +- internal/channelz/funcs.go | 18 +-- internal/channelz/logging.go | 52 ++++---- internal/channelz/types.go | 25 ++-- internal/channelz/types_nonlinux.go | 4 +- internal/grpclog/grpclog.go | 13 +- internal/resolver/dns/dns_resolver.go | 10 +- internal/syscall/syscall_linux.go | 4 +- internal/syscall/syscall_nonlinux.go | 3 +- internal/transport/controlbuf.go | 12 +- internal/transport/http2_client.go | 16 ++- internal/transport/http2_server.go | 43 +++++-- internal/transport/http_util.go | 6 +- internal/transport/log.go | 44 ------- internal/transport/transport.go | 2 + picker_wrapper.go | 5 +- pickfirst.go | 17 ++- resolver_conn_wrapper.go | 14 +-- server.go | 27 ++-- service_config.go | 9 +- stream.go | 4 +- .../balancer/edsbalancer/eds_impl_priority.go | 5 +- xds/internal/balancer/lrs/lrs.go | 24 ++-- xds/internal/balancer/orca/orca.go | 6 +- xds/internal/client/client_loadreport.go | 4 +- 44 files changed, 426 insertions(+), 287 deletions(-) create mode 100644 grpclog/component.go delete mode 100644 internal/transport/log.go diff --git a/balancer/base/balancer.go b/balancer/base/balancer.go index d62b4b6069a8..32d782f1cf5c 100644 --- a/balancer/base/balancer.go +++ b/balancer/base/balancer.go @@ -28,6 +28,8 @@ import ( "google.golang.org/grpc/resolver" ) +var logger = grpclog.Component("balancer") + type baseBuilder struct { name string pickerBuilder PickerBuilder @@ -91,8 +93,8 @@ func (b *baseBalancer) ResolverError(err error) { func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // TODO: handle s.ResolverState.ServiceConfig? - if grpclog.V(2) { - grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) + if logger.V(2) { + logger.Info("base.baseBalancer: got new ClientConn state: ", s) } // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil @@ -104,7 +106,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // a is a new address (not existing in b.subConns). 
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) if err != nil { - grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } b.subConns[a] = sc @@ -168,13 +170,13 @@ func (b *baseBalancer) regeneratePicker() { func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState - if grpclog.V(2) { - grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) } oldS, ok := b.scStates[sc] if !ok { - if grpclog.V(2) { - grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) } return } diff --git a/balancer/grpclb/grpclb.go b/balancer/grpclb/grpclb.go index 59f38f9047d5..a7424cf8d2d7 100644 --- a/balancer/grpclb/grpclb.go +++ b/balancer/grpclb/grpclb.go @@ -50,6 +50,7 @@ const ( ) var errServerTerminatedConnection = errors.New("grpclb: failed to recv server list: server terminated connection") +var logger = grpclog.Component("grpclb") func convertDuration(d *durationpb.Duration) time.Duration { if d == nil { @@ -150,11 +151,11 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal if opt.CredsBundle != nil { lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer) if err != nil { - grpclog.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err) + logger.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err) } lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer) if err != nil { - grpclog.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err) + logger.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err) } } @@ -310,16 +311,16 @@ func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { s := scs.ConnectivityState - if grpclog.V(2) { - grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) } lb.mu.Lock() defer lb.mu.Unlock() oldS, ok := lb.scStates[sc] if !ok { - if grpclog.V(2) { - grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) } return } @@ -393,8 +394,8 @@ func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { if lb.usePickFirst == newUsePickFirst { return } - if grpclog.V(2) { - grpclog.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) + if logger.V(2) { + logger.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) } lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst) } @@ -405,8 +406,8 @@ func (lb *lbBalancer) ResolverError(error) { } func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { - if grpclog.V(2) { - grpclog.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) + if logger.V(2) { + logger.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) } gc, _ := 
ccs.BalancerConfig.(*grpclbServiceConfig) lb.handleServiceConfig(gc) diff --git a/balancer/grpclb/grpclb_remote_balancer.go b/balancer/grpclb/grpclb_remote_balancer.go index 302d71316d59..8eb45be28e32 100644 --- a/balancer/grpclb/grpclb_remote_balancer.go +++ b/balancer/grpclb/grpclb_remote_balancer.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" @@ -44,8 +43,8 @@ import ( // processServerList updates balancer's internal state, create/remove SubConns // and regenerates picker using the received serverList. func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { - if grpclog.V(2) { - grpclog.Infof("lbBalancer: processing server list: %+v", l) + if logger.V(2) { + logger.Infof("lbBalancer: processing server list: %+v", l) } lb.mu.Lock() defer lb.mu.Unlock() @@ -56,8 +55,8 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { // If the new server list == old server list, do nothing. if cmp.Equal(lb.fullServerList, l.Servers, cmp.Comparer(proto.Equal)) { - if grpclog.V(2) { - grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring") + if logger.V(2) { + logger.Infof("lbBalancer: new serverlist same as the previous one, ignoring") } return } @@ -81,8 +80,8 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { Addr: fmt.Sprintf("%s:%d", ipStr, s.Port), Metadata: &md, } - if grpclog.V(2) { - grpclog.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|", + if logger.V(2) { + logger.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|", i, ipStr, s.Port, s.LoadBalanceToken) } backendAddrs = append(backendAddrs, addr) @@ -150,7 +149,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback // This bypasses the cc wrapper with SubConn cache. sc, err := lb.cc.cc.NewSubConn(backendAddrs, opts) if err != nil { - grpclog.Warningf("grpclb: failed to create new SubConn: %v", err) + logger.Warningf("grpclb: failed to create new SubConn: %v", err) return } sc.Connect() @@ -173,7 +172,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback // Use addrWithMD to create the SubConn. sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) if err != nil { - grpclog.Warningf("grpclb: failed to create new SubConn: %v", err) + logger.Warningf("grpclb: failed to create new SubConn: %v", err) continue } lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map. @@ -245,7 +244,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { // receive ServerName as authority. cc, err := grpc.DialContext(context.Background(), lb.manualResolver.Scheme()+":///grpclb.subClientConn", dopts...) 
if err != nil { - grpclog.Fatalf("failed to dial: %v", err) + logger.Fatalf("failed to dial: %v", err) } ccw := &remoteBalancerCCWrapper{ cc: cc, @@ -373,9 +372,9 @@ func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { default: if err != nil { if err == errServerTerminatedConnection { - grpclog.Info(err) + logger.Info(err) } else { - grpclog.Warning(err) + logger.Warning(err) } } } diff --git a/balancer/rls/internal/balancer.go b/balancer/rls/internal/balancer.go index 2b8162485047..968e8e9310da 100644 --- a/balancer/rls/internal/balancer.go +++ b/balancer/rls/internal/balancer.go @@ -34,6 +34,7 @@ var ( // For overriding in tests. newRLSClientFunc = newRLSClient + logger = grpclog.Component("rls") ) // rlsBalancer implements the RLS LB policy. @@ -75,18 +76,18 @@ func (lb *rlsBalancer) run() { // channel accordingly. // TODO(easwars): Handle updates to other fields in the service config. func (lb *rlsBalancer) handleClientConnUpdate(ccs *balancer.ClientConnState) { - grpclog.Infof("rls: service config: %+v", ccs.BalancerConfig) + logger.Infof("rls: service config: %+v", ccs.BalancerConfig) lb.mu.Lock() defer lb.mu.Unlock() if lb.done.HasFired() { - grpclog.Warning("rls: received service config after balancer close") + logger.Warning("rls: received service config after balancer close") return } newCfg := ccs.BalancerConfig.(*lbConfig) if lb.lbCfg.Equal(newCfg) { - grpclog.Info("rls: new service config matches existing config") + logger.Info("rls: new service config matches existing config") return } @@ -109,12 +110,12 @@ func (lb *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error func (lb *rlsBalancer) ResolverError(error) { // ResolverError is called by gRPC when the name resolver reports an error. // TODO(easwars): How do we handle this? - grpclog.Fatal("rls: ResolverError is not yet unimplemented") + logger.Fatal("rls: ResolverError is not yet unimplemented") } // UpdateSubConnState implements balancer.V2Balancer interface. func (lb *rlsBalancer) UpdateSubConnState(_ balancer.SubConn, _ balancer.SubConnState) { - grpclog.Fatal("rls: UpdateSubConnState is not yet implemented") + logger.Fatal("rls: UpdateSubConnState is not yet implemented") } // Cleans up the resources allocated by the LB policy including the clientConn @@ -162,7 +163,7 @@ func (lb *rlsBalancer) updateControlChannel(newCfg *lbConfig) { cc, err := grpc.Dial(newCfg.lookupService, dopts...) if err != nil { - grpclog.Errorf("rls: dialRLS(%s, %v): %v", newCfg.lookupService, lb.opts, err) + logger.Errorf("rls: dialRLS(%s, %v): %v", newCfg.lookupService, lb.opts, err) // An error from a non-blocking dial indicates something serious. We // should continue to use the old control channel if one exists, and // return so that the rest of the config updates can be processes. 
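// Illustrative sketch (not part of this patch): the per-package logging pattern
// this change adopts. Each package constructs one component-scoped logger via
// grpclog.Component and gates verbose output on logger.V, as in the
// balancer/base, grpclb, and rls hunks above. The package and function names
// here are hypothetical, for illustration only.
package examplepolicy

import "google.golang.org/grpc/grpclog"

// One logger per package; entries are prefixed with the component name,
// e.g. "[examplepolicy]".
var logger = grpclog.Component("examplepolicy")

func handleUpdate(state interface{}) {
    if logger.V(2) {
        // Verbose-only detail, matching the V(2) checks in the diff.
        logger.Infof("examplepolicy: got new state: %+v", state)
    }
    logger.Warningf("examplepolicy: something noteworthy about: %+v", state)
}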
@@ -185,14 +186,14 @@ func dialCreds(opts balancer.BuildOptions) grpc.DialOption { switch { case opts.DialCreds != nil: if err := opts.DialCreds.OverrideServerName(server); err != nil { - grpclog.Warningf("rls: OverrideServerName(%s) = (%v), using Insecure", server, err) + logger.Warningf("rls: OverrideServerName(%s) = (%v), using Insecure", server, err) return grpc.WithInsecure() } return grpc.WithTransportCredentials(opts.DialCreds) case opts.CredsBundle != nil: return grpc.WithTransportCredentials(opts.CredsBundle.TransportCredentials()) default: - grpclog.Warning("rls: no credentials available, using Insecure") + logger.Warning("rls: no credentials available, using Insecure") return grpc.WithInsecure() } } diff --git a/balancer/rls/internal/cache/cache.go b/balancer/rls/internal/cache/cache.go index dd03695e0e9d..b975c3078fdb 100644 --- a/balancer/rls/internal/cache/cache.go +++ b/balancer/rls/internal/cache/cache.go @@ -30,6 +30,8 @@ import ( "google.golang.org/grpc/internal/backoff" ) +var logger = grpclog.Component("rls") + // Key represents the cache key used to uniquely identify a cache entry. type Key struct { // Path is the full path of the incoming RPC request. @@ -175,7 +177,7 @@ func (lru *LRU) removeToFit(newSize int64) { if elem == nil { // This is a corner case where the cache is empty, but the new entry // to be added is bigger than maxSize. - grpclog.Info("rls: newly added cache entry exceeds cache maxSize") + logger.Info("rls: newly added cache entry exceeds cache maxSize") return } @@ -184,7 +186,7 @@ func (lru *LRU) removeToFit(newSize int64) { // When the oldest entry is too new (it hasn't even spent a default // minimum amount of time in the cache), we abort and allow the // cache to grow bigger than the configured maxSize. - grpclog.Info("rls: LRU eviction finds oldest entry to be too new. Allowing cache to exceed maxSize momentarily") + logger.Info("rls: LRU eviction finds oldest entry to be too new. Allowing cache to exceed maxSize momentarily") return } lru.removeElement(elem) diff --git a/balancer/rls/internal/config.go b/balancer/rls/internal/config.go index 816ab093a650..0a8d2d91fa82 100644 --- a/balancer/rls/internal/config.go +++ b/balancer/rls/internal/config.go @@ -32,7 +32,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/rls/internal/keys" rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -238,11 +237,11 @@ func (*rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, return nil, fmt.Errorf("rls: stale_age is set, but max_age is not in service config {%+v}", string(c)) } if staleAge >= maxAge { - grpclog.Info("rls: stale_age {%v} is greater than max_age {%v}, ignoring it", staleAge, maxAge) + logger.Info("rls: stale_age {%v} is greater than max_age {%v}, ignoring it", staleAge, maxAge) staleAge = 0 } if maxAge == 0 || maxAge > maxMaxAge { - grpclog.Infof("rls: max_age in service config is %v, using %v", maxAge, maxMaxAge) + logger.Infof("rls: max_age in service config is %v, using %v", maxAge, maxMaxAge) maxAge = maxMaxAge } diff --git a/balancer/roundrobin/roundrobin.go b/balancer/roundrobin/roundrobin.go index a02b372cf204..43c2a15373a1 100644 --- a/balancer/roundrobin/roundrobin.go +++ b/balancer/roundrobin/roundrobin.go @@ -33,6 +33,8 @@ import ( // Name is the name of round_robin balancer. 
const Name = "round_robin" +var logger = grpclog.Component("roundrobin") + // newBuilder creates a new roundrobin balancer builder. func newBuilder() balancer.Builder { return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) @@ -45,7 +47,7 @@ func init() { type rrPickerBuilder struct{} func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { - grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info) + logger.Infof("roundrobinPicker: newPicker called with info: %v", info) if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index 807d1919777e..11e592aabb01 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -220,7 +220,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { ac, err := cc.newAddrConn(addrs, opts) if err != nil { - channelz.Warningf(acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) return } acbw.ac = ac diff --git a/channelz/service/service.go b/channelz/service/service.go index 702b74c03e60..4d175fef823d 100644 --- a/channelz/service/service.go +++ b/channelz/service/service.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/status" ) @@ -39,6 +40,8 @@ func init() { channelz.TurnOn() } +var logger = grpclog.Component("channelz") + // RegisterChannelzServiceToServer registers the channelz service to the given server. 
func RegisterChannelzServiceToServer(s *grpc.Server) { channelzgrpc.RegisterChannelzServer(s, newCZServer()) diff --git a/channelz/service/service_test.go b/channelz/service/service_test.go index 37616a101ce8..e6a7d8eba3be 100644 --- a/channelz/service/service_test.go +++ b/channelz/service/service_test.go @@ -468,12 +468,12 @@ func (s) TestGetChannel(t *testing.T) { refNames := []string{"top channel 1", "nested channel 1", "sub channel 2", "nested channel 3"} ids := make([]int64, 4) ids[0] = channelz.RegisterChannel(&dummyChannel{}, 0, refNames[0]) - channelz.AddTraceEvent(ids[0], 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ids[0], 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtINFO, }) ids[1] = channelz.RegisterChannel(&dummyChannel{}, ids[0], refNames[1]) - channelz.AddTraceEvent(ids[1], 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ids[1], 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -483,7 +483,7 @@ func (s) TestGetChannel(t *testing.T) { }) ids[2] = channelz.RegisterSubChannel(&dummyChannel{}, ids[0], refNames[2]) - channelz.AddTraceEvent(ids[2], 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ids[2], 0, &channelz.TraceEventDesc{ Desc: "SubChannel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -492,7 +492,7 @@ func (s) TestGetChannel(t *testing.T) { }, }) ids[3] = channelz.RegisterChannel(&dummyChannel{}, ids[1], refNames[3]) - channelz.AddTraceEvent(ids[3], 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ids[3], 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -500,11 +500,11 @@ func (s) TestGetChannel(t *testing.T) { Severity: channelz.CtINFO, }, }) - channelz.AddTraceEvent(ids[0], 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ids[0], 0, &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Channel Connectivity change to %v", connectivity.Ready), Severity: channelz.CtINFO, }) - channelz.AddTraceEvent(ids[0], 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ids[0], 0, &channelz.TraceEventDesc{ Desc: "Resolver returns an empty address list", Severity: channelz.CtWarning, }) @@ -571,12 +571,12 @@ func (s) TestGetSubChannel(t *testing.T) { refNames := []string{"top channel 1", "sub channel 1", "socket 1", "socket 2"} ids := make([]int64, 4) ids[0] = channelz.RegisterChannel(&dummyChannel{}, 0, refNames[0]) - channelz.AddTraceEvent(ids[0], 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ids[0], 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtINFO, }) ids[1] = channelz.RegisterSubChannel(&dummyChannel{}, ids[0], refNames[1]) - channelz.AddTraceEvent(ids[1], 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ids[1], 0, &channelz.TraceEventDesc{ Desc: subchanCreated, Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -586,11 +586,11 @@ func (s) TestGetSubChannel(t *testing.T) { }) ids[2] = channelz.RegisterNormalSocket(&dummySocket{}, ids[1], refNames[2]) ids[3] = channelz.RegisterNormalSocket(&dummySocket{}, ids[1], refNames[3]) - channelz.AddTraceEvent(ids[1], 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ids[1], 0, &channelz.TraceEventDesc{ Desc: subchanConnectivityChange, Severity: channelz.CtINFO, }) - channelz.AddTraceEvent(ids[1], 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ids[1], 0, &channelz.TraceEventDesc{ 
Desc: subChanPickNewAddress, Severity: channelz.CtINFO, }) diff --git a/clientconn.go b/clientconn.go index ef327e8af4f7..3ed6034f1ad1 100644 --- a/clientconn.go +++ b/clientconn.go @@ -149,7 +149,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if channelz.IsOn() { if cc.dopts.channelzParentID != 0 { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(cc.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -159,7 +159,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * }) } else { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(cc.channelzID, "Channel Created") + channelz.Info(logger, cc.channelzID, "Channel Created") } cc.csMgr.channelzID = cc.channelzID } @@ -245,13 +245,13 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // Determine the resolver to use. cc.parsedTarget = grpcutil.ParseTarget(cc.target) - channelz.Infof(cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) + channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) if resolverBuilder == nil { // If resolver builder is still nil, the parsed target's scheme is // not registered. Fallback to default resolver and set Endpoint to // the original target. - channelz.Infof(cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) cc.parsedTarget = resolver.Target{ Scheme: resolver.GetDefaultScheme(), Endpoint: target, @@ -422,7 +422,7 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state - channelz.Infof(csm.channelzID, "Channel Connectivity change to %v", state) + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. 
close(csm.notifyChan) @@ -675,9 +675,9 @@ func (cc *ClientConn) switchBalancer(name string) { return } - channelz.Infof(cc.channelzID, "ClientConn switching balancer to %q", name) + channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) if cc.dopts.balancerBuilder != nil { - channelz.Info(cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") + channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") return } if cc.balancerWrapper != nil { @@ -686,11 +686,11 @@ func (cc *ClientConn) switchBalancer(name string) { builder := balancer.Get(name) if builder == nil { - channelz.Warningf(cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) + channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) + channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) builder = newPickfirstBuilder() } else { - channelz.Infof(cc.channelzID, "Channel switches to new LB policy %q", name) + channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) } cc.curBalancerName = builder.Name() @@ -731,7 +731,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub } if channelz.IsOn() { ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -829,7 +829,7 @@ func (ac *addrConn) connect() error { func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() - channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { @@ -849,7 +849,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { break } } - channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) if curAddrFound { ac.addrs = addrs } @@ -1020,7 +1020,7 @@ func (cc *ClientConn) Close() error { Severity: channelz.CtINFO, } } - channelz.AddTraceEvent(cc.channelzID, 0, ted) + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to // the entity being deleted, and thus prevent it from being deleted right away. 
channelz.RemoveEntry(cc.channelzID) @@ -1064,7 +1064,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) return } ac.state = s - channelz.Infof(ac.channelzID, "Subchannel Connectivity change to %v", s) + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } @@ -1201,7 +1201,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T } ac.mu.Unlock() - channelz.Infof(ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) + channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) if err == nil { @@ -1276,7 +1276,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) return nil, nil, err } @@ -1284,7 +1284,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne case <-time.After(time.Until(connectDeadline)): // We didn't get the preface in time. newTr.Close() - channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) return nil, nil, errors.New("timed out waiting for server handshake") case <-prefaceReceived: // We got the preface - huzzah! things are good. @@ -1331,7 +1331,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // The health package is not imported to set health check function. // // TODO: add a link to the health check doc in the error message. 
- channelz.Error(ac.channelzID, "Health check is requested but health check function is not set.") + channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") return } @@ -1361,9 +1361,9 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { - channelz.Error(ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") + channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - channelz.Errorf(ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) } } }() @@ -1428,7 +1428,7 @@ func (ac *addrConn) tearDown(err error) { ac.mu.Lock() } if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Deleted", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ diff --git a/credentials/alts/alts.go b/credentials/alts/alts.go index 5c9e8b1c471b..729c4b43b5fc 100644 --- a/credentials/alts/alts.go +++ b/credentials/alts/alts.go @@ -67,6 +67,7 @@ var ( // ServerHandshake is running on a platform where the trustworthiness of // the handshaker service is not guaranteed. ErrUntrustedPlatform = errors.New("ALTS: untrusted platform. ALTS is only supported on GCP") + logger = grpclog.Component("alts") ) // AuthInfo exposes security information from the ALTS handshake to the @@ -307,7 +308,7 @@ func compareRPCVersions(v1, v2 *altspb.RpcProtocolVersions_Version) int { // agreed on. func checkRPCVersions(local, peer *altspb.RpcProtocolVersions) (bool, *altspb.RpcProtocolVersions_Version) { if local == nil || peer == nil { - grpclog.Error("invalid checkRPCVersions argument, either local or peer is nil.") + logger.Error("invalid checkRPCVersions argument, either local or peer is nil.") return false, nil } diff --git a/dialoptions.go b/dialoptions.go index d5030c076178..decb4c5ee891 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -27,7 +27,6 @@ import ( "google.golang.org/grpc/backoff" "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" @@ -423,7 +422,7 @@ func WithUserAgent(s string) DialOption { // for the client transport. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { - grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) kp.Time = internal.KeepaliveMinPingTime } return newFuncDialOption(func(o *dialOptions) { diff --git a/grpclog/component.go b/grpclog/component.go new file mode 100644 index 000000000000..b513281a34ce --- /dev/null +++ b/grpclog/component.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" + + "google.golang.org/grpc/internal/grpclog" +) + +// componentData records the settings for a component. +type componentData struct { + name string +} + +var cache = map[string]*componentData{} + +func (c *componentData) InfoDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.InfoDepth(depth+1, args...) +} + +func (c *componentData) WarningDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.WarningDepth(depth+1, args...) +} + +func (c *componentData) ErrorDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.ErrorDepth(depth+1, args...) +} + +func (c *componentData) FatalDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.FatalDepth(depth+1, args...) +} + +func (c *componentData) Info(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warning(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Error(args ...interface{}) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatal(args ...interface{}) { + c.FatalDepth(1, args...) +} + +func (c *componentData) Infof(format string, args ...interface{}) { + c.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Warningf(format string, args ...interface{}) { + c.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Errorf(format string, args ...interface{}) { + c.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Fatalf(format string, args ...interface{}) { + c.FatalDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Infoln(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warningln(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Errorln(args ...interface{}) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatalln(args ...interface{}) { + c.FatalDepth(1, args...) +} + +func (c *componentData) V(l int) bool { + return grpclog.Logger.V(l) +} + +// Component creates a new component and returns it for logging. If a component +// with the name already exists, nothing will be created and it will be +// returned. SetLoggerV2 will panic if it is called with a logger created by +// Component. +func Component(componentName string) DepthLoggerV2 { + if cData, ok := cache[componentName]; ok { + return cData + } + c := &componentData{componentName} + cache[componentName] = c + return c +} diff --git a/grpclog/glogger/glogger.go b/grpclog/glogger/glogger.go index 3d995c1c076f..4427dc078bb4 100644 --- a/grpclog/glogger/glogger.go +++ b/grpclog/glogger/glogger.go @@ -27,6 +27,8 @@ import ( "google.golang.org/grpc/grpclog" ) +const d = 2 + func init() { grpclog.SetLoggerV2(&glogger{}) } @@ -34,67 +36,67 @@ func init() { type glogger struct{} func (g *glogger) Info(args ...interface{}) { - glog.InfoDepth(2, args...) 
+ glog.InfoDepth(d, args...) } func (g *glogger) Infoln(args ...interface{}) { - glog.InfoDepth(2, fmt.Sprintln(args...)) + glog.InfoDepth(d, fmt.Sprintln(args...)) } func (g *glogger) Infof(format string, args ...interface{}) { - glog.InfoDepth(2, fmt.Sprintf(format, args...)) + glog.InfoDepth(d, fmt.Sprintf(format, args...)) } func (g *glogger) InfoDepth(depth int, args ...interface{}) { - glog.InfoDepth(depth+2, args...) + glog.InfoDepth(depth+d, args...) } func (g *glogger) Warning(args ...interface{}) { - glog.WarningDepth(2, args...) + glog.WarningDepth(d, args...) } func (g *glogger) Warningln(args ...interface{}) { - glog.WarningDepth(2, fmt.Sprintln(args...)) + glog.WarningDepth(d, fmt.Sprintln(args...)) } func (g *glogger) Warningf(format string, args ...interface{}) { - glog.WarningDepth(2, fmt.Sprintf(format, args...)) + glog.WarningDepth(d, fmt.Sprintf(format, args...)) } func (g *glogger) WarningDepth(depth int, args ...interface{}) { - glog.WarningDepth(depth+2, args...) + glog.WarningDepth(depth+d, args...) } func (g *glogger) Error(args ...interface{}) { - glog.ErrorDepth(2, args...) + glog.ErrorDepth(d, args...) } func (g *glogger) Errorln(args ...interface{}) { - glog.ErrorDepth(2, fmt.Sprintln(args...)) + glog.ErrorDepth(d, fmt.Sprintln(args...)) } func (g *glogger) Errorf(format string, args ...interface{}) { - glog.ErrorDepth(2, fmt.Sprintf(format, args...)) + glog.ErrorDepth(d, fmt.Sprintf(format, args...)) } func (g *glogger) ErrorDepth(depth int, args ...interface{}) { - glog.ErrorDepth(depth+2, args...) + glog.ErrorDepth(depth+d, args...) } func (g *glogger) Fatal(args ...interface{}) { - glog.FatalDepth(2, args...) + glog.FatalDepth(d, args...) } func (g *glogger) Fatalln(args ...interface{}) { - glog.FatalDepth(2, fmt.Sprintln(args...)) + glog.FatalDepth(d, fmt.Sprintln(args...)) } func (g *glogger) Fatalf(format string, args ...interface{}) { - glog.FatalDepth(2, fmt.Sprintf(format, args...)) + glog.FatalDepth(d, fmt.Sprintf(format, args...)) } func (g *glogger) FatalDepth(depth int, args ...interface{}) { - glog.FatalDepth(depth+2, args...) + glog.FatalDepth(depth+d, args...) } func (g *glogger) V(l int) bool { diff --git a/grpclog/loggerv2.go b/grpclog/loggerv2.go index 23612b7c41b5..8eba2d0e0eff 100644 --- a/grpclog/loggerv2.go +++ b/grpclog/loggerv2.go @@ -67,6 +67,9 @@ type LoggerV2 interface { // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. func SetLoggerV2(l LoggerV2) { + if _, ok := l.(*componentData); ok { + panic("cannot use component logger as grpclog logger") + } grpclog.Logger = l grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) } @@ -203,6 +206,7 @@ func (g *loggerT) V(l int) bool { // // This API is EXPERIMENTAL. type DepthLoggerV2 interface { + LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. InfoDepth(depth int, args ...interface{}) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. diff --git a/internal/binarylog/binarylog.go b/internal/binarylog/binarylog.go index b7a3dd8f918d..5cc3aeddb213 100644 --- a/internal/binarylog/binarylog.go +++ b/internal/binarylog/binarylog.go @@ -40,6 +40,8 @@ type Logger interface { // It is used to get a methodLogger for each individual method. var binLogger Logger +var grpclogLogger = grpclog.Component("binarylog") + // SetLogger sets the binarg logger. // // Only call this at init time. 
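// Illustrative sketch (not part of the patch): SetLoggerV2 now refuses loggers
// created by grpclog.Component, presumably because a component logger forwards
// to the global logger and installing one globally would be self-referential.
// A regular LoggerV2 built with NewLoggerV2 is still accepted, and if it also
// implements DepthLoggerV2 (which now embeds LoggerV2), it is used for
// depth-aware logging as well.
package main

import (
    "io/ioutil"
    "os"

    "google.golang.org/grpc/grpclog"
)

func main() {
    // Fine: a plain LoggerV2 backed by writers (discard INFO, keep WARNING/ERROR).
    grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))

    // Panics after this patch; component loggers are for per-package use only:
    // grpclog.SetLoggerV2(grpclog.Component("transport"))

    // Creating and logging through a component logger directly is fine.
    clog := grpclog.Component("myComponent") // hypothetical component name
    clog.Info("hello from a component logger")
}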
@@ -149,7 +151,7 @@ func (l *logger) setBlacklist(method string) error { func (l *logger) getMethodLogger(methodName string) *MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { - grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err) + grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } if ml, ok := l.methods[s+"/"+m]; ok { diff --git a/internal/binarylog/env_config.go b/internal/binarylog/env_config.go index be30d0e65e70..d8f4e7602fde 100644 --- a/internal/binarylog/env_config.go +++ b/internal/binarylog/env_config.go @@ -24,8 +24,6 @@ import ( "regexp" "strconv" "strings" - - "google.golang.org/grpc/grpclog" ) // NewLoggerFromConfigString reads the string and build a logger. It can be used @@ -52,7 +50,7 @@ func NewLoggerFromConfigString(s string) Logger { methods := strings.Split(s, ",") for _, method := range methods { if err := l.fillMethodLoggerWithConfigString(method); err != nil { - grpclog.Warningf("failed to parse binary log config: %v", err) + grpclogLogger.Warningf("failed to parse binary log config: %v", err) return nil } } diff --git a/internal/binarylog/method_logger.go b/internal/binarylog/method_logger.go index 160f6e8616f5..5e1083539b49 100644 --- a/internal/binarylog/method_logger.go +++ b/internal/binarylog/method_logger.go @@ -27,7 +27,6 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -219,12 +218,12 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry { if m, ok := c.Message.(proto.Message); ok { data, err = proto.Marshal(m) if err != nil { - grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) } } else if b, ok := c.Message.([]byte); ok { data = b } else { - grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } ret := &pb.GrpcLogEntry{ Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, @@ -259,12 +258,12 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry { if m, ok := c.Message.(proto.Message); ok { data, err = proto.Marshal(m) if err != nil { - grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) } } else if b, ok := c.Message.([]byte); ok { data = b } else { - grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } ret := &pb.GrpcLogEntry{ Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, @@ -315,7 +314,7 @@ type ServerTrailer struct { func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { - grpclog.Info("binarylogging: error in trailer is not a status error") + grpclogLogger.Info("binarylogging: error in trailer is not a status error") } var ( detailsBytes []byte @@ -325,7 +324,7 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { if stProto != nil && len(stProto.Details) != 0 { detailsBytes, err = proto.Marshal(stProto) if err != nil { - grpclog.Infof("binarylogging: failed to marshal status proto: %v", err) + grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } ret := 
&pb.GrpcLogEntry{ diff --git a/internal/binarylog/sink.go b/internal/binarylog/sink.go index a2e7c346dd03..835f51040cb0 100644 --- a/internal/binarylog/sink.go +++ b/internal/binarylog/sink.go @@ -29,7 +29,6 @@ import ( "github.com/golang/protobuf/proto" pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/grpc/grpclog" ) var ( @@ -78,7 +77,7 @@ type writerSink struct { func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { - grpclog.Infof("binary logging: failed to marshal proto message: %v", err) + grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err) } hdr := make([]byte, 4) binary.BigEndian.PutUint32(hdr, uint32(len(b))) diff --git a/internal/channelz/funcs.go b/internal/channelz/funcs.go index e4252e5be9f2..81d3dd33e62c 100644 --- a/internal/channelz/funcs.go +++ b/internal/channelz/funcs.go @@ -30,7 +30,7 @@ import ( "sync/atomic" "time" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog" ) const ( @@ -216,7 +216,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 { // by pid). It returns the unique channelz tracking id assigned to this subchannel. func RegisterSubChannel(c Channel, pid int64, ref string) int64 { if pid == 0 { - grpclog.ErrorDepth(0, "a SubChannel's parent id cannot be 0") + logger.Error("a SubChannel's parent id cannot be 0") return 0 } id := idGen.genID() @@ -253,7 +253,7 @@ func RegisterServer(s Server, ref string) int64 { // this listen socket. func RegisterListenSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { - grpclog.ErrorDepth(0, "a ListenSocket's parent id cannot be 0") + logger.Error("a ListenSocket's parent id cannot be 0") return 0 } id := idGen.genID() @@ -268,7 +268,7 @@ func RegisterListenSocket(s Socket, pid int64, ref string) int64 { // this normal socket. func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { - grpclog.ErrorDepth(0, "a NormalSocket's parent id cannot be 0") + logger.Error("a NormalSocket's parent id cannot be 0") return 0 } id := idGen.genID() @@ -294,17 +294,17 @@ type TraceEventDesc struct { } // AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(id int64, depth int, desc *TraceEventDesc) { +func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { for d := desc; d != nil; d = d.Parent { switch d.Severity { case CtUNKNOWN: - grpclog.InfoDepth(depth+1, d.Desc) + l.InfoDepth(depth+1, d.Desc) case CtINFO: - grpclog.InfoDepth(depth+1, d.Desc) + l.InfoDepth(depth+1, d.Desc) case CtWarning: - grpclog.WarningDepth(depth+1, d.Desc) + l.WarningDepth(depth+1, d.Desc) case CtError: - grpclog.ErrorDepth(depth+1, d.Desc) + l.ErrorDepth(depth+1, d.Desc) } } if getMaxTraceEntry() == 0 { diff --git a/internal/channelz/logging.go b/internal/channelz/logging.go index 59c7bedecd9f..e94039ee20b5 100644 --- a/internal/channelz/logging.go +++ b/internal/channelz/logging.go @@ -21,80 +21,82 @@ package channelz import ( "fmt" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog" ) -// Info logs through grpclog.Info and adds a trace event if channelz is on. -func Info(id int64, args ...interface{}) { +var logger = grpclog.Component("channelz") + +// Info logs and adds a trace event if channelz is on. 
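// Illustrative sketch (not part of the patch): the channelz logging helpers now
// take the caller's component logger as their first argument, so output is
// attributed to the right component; when channelz is on, the message is also
// recorded as a trace event, as before. This only compiles inside the grpc
// module, since internal/channelz is an internal package, and the id and
// function names here are hypothetical.
package grpcinternalexample

import (
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/internal/channelz"
)

var logger = grpclog.Component("core")

func reportStateChange(channelzID int64, state string) {
    // Logged through `logger` when channelz is off; added as a trace event
    // (and logged at the same severity) when channelz is on.
    channelz.Infof(logger, channelzID, "Channel Connectivity change to %v", state)
    channelz.Warningf(logger, channelzID, "example warning for channel %d", channelzID)
}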
+func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { if IsOn() { - AddTraceEvent(id, 1, &TraceEventDesc{ + AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtINFO, }) } else { - grpclog.InfoDepth(1, args...) + l.InfoDepth(1, args...) } } -// Infof logs through grpclog.Infof and adds a trace event if channelz is on. -func Infof(id int64, format string, args ...interface{}) { +// Infof logs and adds a trace event if channelz is on. +func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) if IsOn() { - AddTraceEvent(id, 1, &TraceEventDesc{ + AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: msg, Severity: CtINFO, }) } else { - grpclog.InfoDepth(1, msg) + l.InfoDepth(1, msg) } } -// Warning logs through grpclog.Warning and adds a trace event if channelz is on. -func Warning(id int64, args ...interface{}) { +// Warning logs and adds a trace event if channelz is on. +func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { if IsOn() { - AddTraceEvent(id, 1, &TraceEventDesc{ + AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtWarning, }) } else { - grpclog.WarningDepth(1, args...) + l.WarningDepth(1, args...) } } -// Warningf logs through grpclog.Warningf and adds a trace event if channelz is on. -func Warningf(id int64, format string, args ...interface{}) { +// Warningf logs and adds a trace event if channelz is on. +func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) if IsOn() { - AddTraceEvent(id, 1, &TraceEventDesc{ + AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: msg, Severity: CtWarning, }) } else { - grpclog.WarningDepth(1, msg) + l.WarningDepth(1, msg) } } -// Error logs through grpclog.Error and adds a trace event if channelz is on. -func Error(id int64, args ...interface{}) { +// Error logs and adds a trace event if channelz is on. +func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { if IsOn() { - AddTraceEvent(id, 1, &TraceEventDesc{ + AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtError, }) } else { - grpclog.ErrorDepth(1, args...) + l.ErrorDepth(1, args...) } } -// Errorf logs through grpclog.Errorf and adds a trace event if channelz is on. -func Errorf(id int64, format string, args ...interface{}) { +// Errorf logs and adds a trace event if channelz is on. +func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) if IsOn() { - AddTraceEvent(id, 1, &TraceEventDesc{ + AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: msg, Severity: CtError, }) } else { - grpclog.ErrorDepth(1, msg) + l.ErrorDepth(1, msg) } } diff --git a/internal/channelz/types.go b/internal/channelz/types.go index 17c2274cb3de..075dc7d16714 100644 --- a/internal/channelz/types.go +++ b/internal/channelz/types.go @@ -26,7 +26,6 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" ) // entry represents a node in the channelz database. @@ -60,17 +59,17 @@ func (d *dummyEntry) addChild(id int64, e entry) { // the addrConn will create a new transport. And when registering the new transport in // channelz, its parent addrConn could have already been torn down and deleted // from channelz tracking, and thus reach the code here. 
- grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) + logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) } func (d *dummyEntry) deleteChild(id int64) { // It is possible for a normal program to reach here under race condition. // Refer to the example described in addChild(). - grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) + logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) } func (d *dummyEntry) triggerDelete() { - grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) + logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) } func (*dummyEntry) deleteSelfIfReady() { @@ -215,7 +214,7 @@ func (c *channel) addChild(id int64, e entry) { case *channel: c.nestedChans[id] = v.refName default: - grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) } } @@ -326,7 +325,7 @@ func (sc *subChannel) addChild(id int64, e entry) { if v, ok := e.(*normalSocket); ok { sc.sockets[id] = v.refName } else { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) } } @@ -493,11 +492,11 @@ type listenSocket struct { } func (ls *listenSocket) addChild(id int64, e entry) { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) } func (ls *listenSocket) deleteChild(id int64) { - grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id) + logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) } func (ls *listenSocket) triggerDelete() { @@ -506,7 +505,7 @@ func (ls *listenSocket) triggerDelete() { } func (ls *listenSocket) deleteSelfIfReady() { - grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") + logger.Errorf("cannot call deleteSelfIfReady on a listen socket") } func (ls *listenSocket) getParentID() int64 { @@ -522,11 +521,11 @@ type normalSocket struct { } func (ns *normalSocket) addChild(id int64, e entry) { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) } func (ns *normalSocket) deleteChild(id int64) { - grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id) + logger.Errorf("cannot delete a child (id = %d) from a normal socket", id) } func (ns *normalSocket) triggerDelete() { @@ -535,7 +534,7 @@ func (ns *normalSocket) triggerDelete() { } func (ns *normalSocket) deleteSelfIfReady() { - grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") + logger.Errorf("cannot call deleteSelfIfReady on a normal socket") } func (ns *normalSocket) getParentID() int64 { @@ -594,7 +593,7 @@ func (s *server) addChild(id int64, e entry) { case *listenSocket: s.listenSockets[id] = v.refName default: - grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) } } diff --git a/internal/channelz/types_nonlinux.go 
b/internal/channelz/types_nonlinux.go index 79edbefc4331..19c2fc521dcf 100644 --- a/internal/channelz/types_nonlinux.go +++ b/internal/channelz/types_nonlinux.go @@ -22,8 +22,6 @@ package channelz import ( "sync" - - "google.golang.org/grpc/grpclog" ) var once sync.Once @@ -39,6 +37,6 @@ type SocketOptionData struct { // Windows OS doesn't support Socket Option func (s *SocketOptionData) Getsockopt(fd uintptr) { once.Do(func() { - grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.") + logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.") }) } diff --git a/internal/grpclog/grpclog.go b/internal/grpclog/grpclog.go index 8c8e19fce1d5..745a166f02cf 100644 --- a/internal/grpclog/grpclog.go +++ b/internal/grpclog/grpclog.go @@ -19,6 +19,10 @@ // Package grpclog (internal) defines depth logging for grpc. package grpclog +import ( + "os" +) + // Logger is the logger used for the non-depth log functions. var Logger LoggerV2 @@ -30,7 +34,7 @@ func InfoDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.InfoDepth(depth, args...) } else { - Logger.Info(args...) + Logger.Infoln(args...) } } @@ -39,7 +43,7 @@ func WarningDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.WarningDepth(depth, args...) } else { - Logger.Warning(args...) + Logger.Warningln(args...) } } @@ -48,7 +52,7 @@ func ErrorDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.ErrorDepth(depth, args...) } else { - Logger.Error(args...) + Logger.Errorln(args...) } } @@ -57,8 +61,9 @@ func FatalDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.FatalDepth(depth, args...) } else { - Logger.Fatal(args...) + Logger.Fatalln(args...) } + os.Exit(1) } // LoggerV2 does underlying logging work for grpclog. diff --git a/internal/resolver/dns/dns_resolver.go b/internal/resolver/dns/dns_resolver.go index 9d08dd8ab092..304235566589 100644 --- a/internal/resolver/dns/dns_resolver.go +++ b/internal/resolver/dns/dns_resolver.go @@ -44,6 +44,8 @@ import ( // addresses from SRV records. Must not be changed after init time. var EnableSRVLookups = false +var logger = grpclog.Component("dns") + func init() { resolver.Register(NewBuilder()) } @@ -272,7 +274,7 @@ func handleDNSError(err error, lookupType string) error { err = filterError(err) if err != nil { err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) - grpclog.Infoln(err) + logger.Info(err) } return err } @@ -295,7 +297,7 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { // TXT record must have "grpc_config=" attribute in order to be used as service config. if !strings.HasPrefix(res, txtAttribute) { - grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) // This is not an error; it is the equivalent of not having a service config. 
return nil } @@ -421,12 +423,12 @@ func canaryingSC(js string) string { var rcs []rawChoice err := json.Unmarshal([]byte(js), &rcs) if err != nil { - grpclog.Warningf("dns: error parsing service config json: %v", err) + logger.Warningf("dns: error parsing service config json: %v", err) return "" } cliHostname, err := os.Hostname() if err != nil { - grpclog.Warningf("dns: error getting client hostname: %v", err) + logger.Warningf("dns: error getting client hostname: %v", err) return "" } var sc string diff --git a/internal/syscall/syscall_linux.go b/internal/syscall/syscall_linux.go index 43281a3e078d..c50468a0fc89 100644 --- a/internal/syscall/syscall_linux.go +++ b/internal/syscall/syscall_linux.go @@ -32,11 +32,13 @@ import ( "google.golang.org/grpc/grpclog" ) +var logger = grpclog.Component("core") + // GetCPUTime returns the how much CPU time has passed since the start of this process. func GetCPUTime() int64 { var ts unix.Timespec if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { - grpclog.Fatal(err) + logger.Fatal(err) } return ts.Nano() } diff --git a/internal/syscall/syscall_nonlinux.go b/internal/syscall/syscall_nonlinux.go index ae0a9117e7b9..adae60d65188 100644 --- a/internal/syscall/syscall_nonlinux.go +++ b/internal/syscall/syscall_nonlinux.go @@ -31,10 +31,11 @@ import ( ) var once sync.Once +var logger = grpclog.Component("core") func log() { once.Do(func() { - grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.") + logger.Info("CPU time info is unavailable on non-linux or appengine environment.") }) } diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index d4bb19c3bb53..40ef23923fda 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -505,7 +505,9 @@ func (l *loopyWriter) run() (err error) { // 1. When the connection is closed by some other known issue. // 2. User closed the connection. // 3. A graceful close of connection. - infof("transport: loopyWriter.run returning. %v", err) + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter.run returning. %v", err) + } err = nil } }() @@ -605,7 +607,9 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { - warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + if logger.V(logLevel) { + logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + } return nil } // Case 1.A: Server is responding back with headers. @@ -658,7 +662,9 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { - warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err) + if logger.V(logLevel) { + logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + } } } var ( diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index b43e21ffaf73..47af2ebeade8 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -354,7 +354,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() if err != nil { - errorf("transport: loopyWriter.run returning. 
Err: %v", err) + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } } // If it's a connection error, let reader goroutine handle it // since there might be data in the buffers. @@ -1013,7 +1015,9 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { - warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + if logger.V(logLevel) { + logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + } statusCode = codes.Unknown } if statusCode == codes.Canceled { @@ -1095,7 +1099,9 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { return } if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + if logger.V(logLevel) { + logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } } id := f.LastStreamID if id > 0 && id%2 != 1 { @@ -1325,7 +1331,9 @@ func (t *http2Client) reader() { case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: - errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + if logger.V(logLevel) { + logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } } } } diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index e8c757321287..788cf1e4df01 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -37,7 +37,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/keepalive" @@ -289,7 +288,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler if err := t.loopy.run(); err != nil { - errorf("transport: loopyWriter.run returning. Err: %v", err) + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } } t.conn.Close() close(t.writerDone) @@ -360,7 +361,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx, err = t.inTapHandle(s.ctx, info) if err != nil { - warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + } t.controlBuf.put(&cleanupStream{ streamID: s.id, rst: true, @@ -391,7 +394,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if streamID%2 != 1 || streamID <= t.maxStreamID { t.mu.Unlock() // illegal gRPC stream id. - errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + } s.cancel() return true } @@ -454,7 +459,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. 
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { - warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + } t.mu.Lock() s := t.activeStreams[se.StreamID] t.mu.Unlock() @@ -474,7 +481,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. t.Close() return } - warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + } t.Close() return } @@ -497,7 +506,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. default: - errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } } } } @@ -719,7 +730,9 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - errorf("transport: Got too many pings from the client, closing the connection.") + if logger.V(logLevel) { + logger.Errorf("transport: Got too many pings from the client, closing the connection.") + } t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) } } @@ -752,7 +765,9 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + if logger.V(logLevel) { + logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + } return false } } @@ -849,7 +864,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. - grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) } else { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } @@ -980,7 +995,9 @@ func (t *http2Server) keepalive() { select { case <-ageTimer.C: // Close the connection after grace period. 
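// Illustrative sketch (not part of the patch): the transport package previously
// funneled logs through small infof/warningf/errorf wrappers that checked
// grpclog.V(logLevel) internally (internal/transport/log.go, deleted below).
// After this patch each call site uses the package's component logger and makes
// the verbosity check explicit, with const logLevel = 2 kept in transport.go.
// The package and function names below are illustrative, not the real
// transport code.
package exampletransport

import "google.golang.org/grpc/grpclog"

const logLevel = 2

var logger = grpclog.Component("transport")

// Old style (roughly): a helper hid the verbosity check.
//
//  func errorf(format string, args ...interface{}) {
//      if grpclog.V(logLevel) {
//          grpclog.Errorf(format, args...)
//      }
//  }
//
// New style: the check happens inline at the call site.
func onLoopyExit(err error) {
    if logger.V(logLevel) {
        logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
    }
}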
- infof("transport: closing server transport due to maximum connection age.") + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to maximum connection age.") + } t.Close() case <-t.done: } @@ -997,7 +1014,9 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - infof("transport: closing server transport due to idleness.") + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to idleness.") + } t.Close() return } diff --git a/internal/transport/http_util.go b/internal/transport/http_util.go index 8f5f3349d906..e68cdcb6cc2d 100644 --- a/internal/transport/http_util.go +++ b/internal/transport/http_util.go @@ -37,6 +37,7 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) @@ -97,6 +98,7 @@ var ( // 504 Gateway timeout - UNAVAILABLE. http.StatusGatewayTimeout: codes.Unavailable, } + logger = grpclog.Component("transport") ) type parsedHeaderData struct { @@ -412,7 +414,9 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) { } v, err := decodeMetadataHeader(f.Name, f.Value) if err != nil { - errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + if logger.V(logLevel) { + logger.Errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + } return } d.addMetadata(f.Name, v) diff --git a/internal/transport/log.go b/internal/transport/log.go deleted file mode 100644 index 879df80c4de7..000000000000 --- a/internal/transport/log.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// This file contains wrappers for grpclog functions. -// The transport package only logs to verbose level 2 by default. - -package transport - -import "google.golang.org/grpc/grpclog" - -const logLevel = 2 - -func infof(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Infof(format, args...) - } -} - -func warningf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Warningf(format, args...) - } -} - -func errorf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Errorf(format, args...) 
- } -} diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 1ffd96ff43d1..b74030a96878 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -41,6 +41,8 @@ import ( "google.golang.org/grpc/tap" ) +const logLevel = 2 + type bufferPool struct { pool sync.Pool } diff --git a/picker_wrapper.go b/picker_wrapper.go index 7f3edaaedc60..a58174b6f436 100644 --- a/picker_wrapper.go +++ b/picker_wrapper.go @@ -25,7 +25,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" @@ -145,7 +144,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. acw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { - grpclog.Error("subconn returned from pick is not *acBalancerWrapper") + logger.Error("subconn returned from pick is not *acBalancerWrapper") continue } if t, ok := acw.getAddrConn().getReadyTransport(); ok { @@ -159,7 +158,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. // DoneInfo with default value works. pickResult.Done(balancer.DoneInfo{}) } - grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") // If ok == false, ac.state is not READY. // A valid picker always returns READY subConn. This means the state of ac // just changed, and picker will be updated shortly. diff --git a/pickfirst.go b/pickfirst.go index 4b7340ad3ecc..56e33f6c76b7 100644 --- a/pickfirst.go +++ b/pickfirst.go @@ -24,7 +24,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" ) // PickFirstBalancerName is the name of the pick_first balancer. 
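The pattern these hunks converge on is small enough to show on its own. A minimal sketch of a per-package component logger, using only grpclog.Component and the V-gating seen above (logUnhandledFrame is an illustrative helper, not a function in the tree):

package transport

import "google.golang.org/grpc/grpclog"

// The transport package logs at verbosity level 2; the constant now lives in
// transport.go instead of the deleted log.go.
const logLevel = 2

// A single package-level component logger replaces the old infof/warningf/
// errorf wrappers.
var logger = grpclog.Component("transport")

// logUnhandledFrame shows the call-site shape used throughout these hunks.
func logUnhandledFrame(frame interface{}) {
	// Check verbosity before formatting so disabled logs cost almost nothing.
	if logger.V(logLevel) {
		logger.Errorf("transport: got unhandled frame type %v.", frame)
	}
}

Because every call site now repeats the logger.V(logLevel) check that the old wrappers performed internally, deleting log.go does not change which messages are emitted.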
@@ -58,8 +57,8 @@ func (b *pickfirstBalancer) ResolverError(err error) { Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, }) } - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err) + if logger.V(2) { + logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } } @@ -72,8 +71,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) e var err error b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) if err != nil { - if grpclog.V(2) { - grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, @@ -92,12 +91,12 @@ func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) e } func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) } if b.sc != sc { - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + if logger.V(2) { + logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") } return } diff --git a/resolver_conn_wrapper.go b/resolver_conn_wrapper.go index edfda866c001..265002a75e00 100644 --- a/resolver_conn_wrapper.go +++ b/resolver_conn_wrapper.go @@ -140,7 +140,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { if ccr.done.HasFired() { return } - channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) if channelz.IsOn() { ccr.addChannelzTraceEvent(s) } @@ -152,7 +152,7 @@ func (ccr *ccResolverWrapper) ReportError(err error) { if ccr.done.HasFired() { return } - channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) } @@ -161,7 +161,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } - channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) if channelz.IsOn() { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) } @@ -175,14 +175,14 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) if ccr.cc.dopts.disableServiceConfig { - channelz.Info(ccr.cc.channelzID, "Service config lookups disabled; ignoring config") + channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return } scpr := parseServiceConfig(sc) if scpr.Err != nil { - channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + channelz.Warningf(logger, ccr.cc.channelzID, 
"ccResolverWrapper: error parsing service config: %v", scpr.Err) ccr.poll(balancer.ErrBadResolverState) return } @@ -215,7 +215,7 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), Severity: channelz.CtINFO, }) diff --git a/server.go b/server.go index c2c7cae6c5dc..0cac3cf5f2f7 100644 --- a/server.go +++ b/server.go @@ -59,6 +59,7 @@ const ( ) var statusOK = status.New(codes.OK, "") +var logger = grpclog.Component("core") type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) @@ -223,7 +224,7 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { if kp.Time > 0 && kp.Time < time.Second { - grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s") + logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") kp.Time = time.Second } @@ -537,7 +538,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) if !st.Implements(ht) { - grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) } s.register(sd, ss) } @@ -547,10 +548,10 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) { defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) if s.serve { - grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) } if _, ok := s.m[sd.ServiceName]; ok { - grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) } srv := &service{ server: ss, @@ -756,7 +757,7 @@ func (s *Server) handleRawConn(rawConn net.Conn) { s.mu.Lock() s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) s.mu.Unlock() - channelz.Warningf(s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) rawConn.Close() } rawConn.SetDeadline(time.Time{}) @@ -803,7 +804,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() c.Close() - channelz.Warning(s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) return nil } @@ -957,12 +958,12 @@ func (s *Server) incrCallsFailed() { func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp 
Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { - channelz.Error(s.channelzID, "grpc: server failed to encode response: ", err) + channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) return err } compData, err := compress(data, cp, comp) if err != nil { - channelz.Error(s.channelzID, "grpc: server failed to compress response: ", err) + channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err) return err } hdr, payload := msgHeader(data, compData) @@ -1136,7 +1137,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil { if st, ok := status.FromError(err); ok { if e := t.WriteStatus(stream, st); e != nil { - channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) } } return err @@ -1181,7 +1182,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.SetError() } if e := t.WriteStatus(stream, appStatus); e != nil { - channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if binlog != nil { if h, _ := stream.Header(); h.Len() > 0 { @@ -1210,7 +1211,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } if sts, ok := status.FromError(err); ok { if e := t.WriteStatus(stream, sts); e != nil { - channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { switch st := err.(type) { @@ -1478,7 +1479,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1519,7 +1520,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() diff --git a/service_config.go b/service_config.go index 3132a66cd68c..c9f164c9ae43 100644 --- a/service_config.go +++ b/service_config.go @@ -27,7 +27,6 @@ import ( "time" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" @@ -269,7 +268,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ @@ -295,7 
+294,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { } d, err := parseDuration(m.Timeout) if err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } @@ -304,7 +303,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { Timeout: d, } if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -357,7 +356,7 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { *mb <= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { - grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) return nil, nil } diff --git a/stream.go b/stream.go index 629af76bdfab..34e4010d0dba 100644 --- a/stream.go +++ b/stream.go @@ -510,13 +510,13 @@ func (cs *clientStream) shouldRetry(err error) error { if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { - channelz.Infof(cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) + channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. return err } hasPushback = true } else if len(sps) > 1 { - channelz.Warningf(cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) + channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. return err } diff --git a/xds/internal/balancer/edsbalancer/eds_impl_priority.go b/xds/internal/balancer/edsbalancer/eds_impl_priority.go index c2ce31238542..66854e17ea42 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl_priority.go +++ b/xds/internal/balancer/edsbalancer/eds_impl_priority.go @@ -28,6 +28,7 @@ import ( "google.golang.org/grpc/grpclog" ) +var logger = grpclog.Component("xds") var errAllPrioritiesRemoved = errors.New("eds: no locality is provided, all priorities are removed") // handlePriorityChange handles priority after EDS adds/removes a @@ -134,13 +135,13 @@ func (edsImpl *edsBalancerImpl) handlePriorityWithNewState(priority priorityType defer edsImpl.priorityMu.Unlock() if !edsImpl.priorityInUse.isSet() { - grpclog.Infof("eds: received picker update when no priority is in use (EDS returned an empty list)") + logger.Infof("eds: received picker update when no priority is in use (EDS returned an empty list)") return false } if edsImpl.priorityInUse.higherThan(priority) { // Lower priorities should all be closed, this is an unexpected update. 
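+		logger.Infof("eds: received picker update from priority lower then priorityInUse")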
- grpclog.Infof("eds: received picker update from priority lower then priorityInUse") + logger.Infof("eds: received picker update from priority lower then priorityInUse") return false } diff --git a/xds/internal/balancer/lrs/lrs.go b/xds/internal/balancer/lrs/lrs.go index eaf5997a9dc8..4bc20ef6e124 100644 --- a/xds/internal/balancer/lrs/lrs.go +++ b/xds/internal/balancer/lrs/lrs.go @@ -36,6 +36,8 @@ import ( const negativeOneUInt64 = ^uint64(0) +var logger = grpclog.Component("xds") + // Store defines the interface for a load store. It keeps loads and can report // them to a server when requested. type Store interface { @@ -310,25 +312,25 @@ func (ls *lrsStore) ReportTo(ctx context.Context, cc *grpc.ClientConn, clusterNa doBackoff = true stream, err := c.StreamLoadStats(ctx) if err != nil { - grpclog.Warningf("lrs: failed to create stream: %v", err) + logger.Warningf("lrs: failed to create stream: %v", err) continue } - grpclog.Infof("lrs: created LRS stream") + logger.Infof("lrs: created LRS stream") req := &lrspb.LoadStatsRequest{Node: node} - grpclog.Infof("lrs: sending init LoadStatsRequest: %v", req) + logger.Infof("lrs: sending init LoadStatsRequest: %v", req) if err := stream.Send(req); err != nil { - grpclog.Warningf("lrs: failed to send first request: %v", err) + logger.Warningf("lrs: failed to send first request: %v", err) continue } first, err := stream.Recv() if err != nil { - grpclog.Warningf("lrs: failed to receive first response: %v", err) + logger.Warningf("lrs: failed to receive first response: %v", err) continue } - grpclog.Infof("lrs: received first LoadStatsResponse: %+v", first) + logger.Infof("lrs: received first LoadStatsResponse: %+v", first) interval, err := ptypes.Duration(first.LoadReportingInterval) if err != nil { - grpclog.Warningf("lrs: failed to convert report interval: %v", err) + logger.Warningf("lrs: failed to convert report interval: %v", err) continue } // The LRS client should join the clusters it knows with the cluster @@ -343,12 +345,12 @@ func (ls *lrsStore) ReportTo(ctx context.Context, cc *grpc.ClientConn, clusterNa } } if !clusterFoundInResponse { - grpclog.Warningf("lrs: received clusters %v does not contain expected {%v}", first.Clusters, clusterName) + logger.Warningf("lrs: received clusters %v does not contain expected {%v}", first.Clusters, clusterName) continue } if first.ReportEndpointGranularity { // TODO: fixme to support per endpoint loads. - grpclog.Warningf("lrs: endpoint loads requested, but not supported by current implementation") + logger.Warningf("lrs: endpoint loads requested, but not supported by current implementation") continue } @@ -369,9 +371,9 @@ func (ls *lrsStore) sendLoads(ctx context.Context, stream lrsgrpc.LoadReportingS return } req := &lrspb.LoadStatsRequest{ClusterStats: ls.buildStats(clusterName)} - grpclog.Infof("lrs: sending LRS loads: %+v", req) + logger.Infof("lrs: sending LRS loads: %+v", req) if err := stream.Send(req); err != nil { - grpclog.Warningf("lrs: failed to send report: %v", err) + logger.Warningf("lrs: failed to send report: %v", err) return } } diff --git a/xds/internal/balancer/orca/orca.go b/xds/internal/balancer/orca/orca.go index 2bcd5cfd0e13..28016806eec4 100644 --- a/xds/internal/balancer/orca/orca.go +++ b/xds/internal/balancer/orca/orca.go @@ -27,6 +27,8 @@ import ( const mdKey = "X-Endpoint-Load-Metrics-Bin" +var logger = grpclog.Component("xds") + // toBytes converts a orca load report into bytes. 
func toBytes(r *orcapb.OrcaLoadReport) []byte { if r == nil { @@ -35,7 +37,7 @@ func toBytes(r *orcapb.OrcaLoadReport) []byte { b, err := proto.Marshal(r) if err != nil { - grpclog.Warningf("orca: failed to marshal load report: %v", err) + logger.Warningf("orca: failed to marshal load report: %v", err) return nil } return b @@ -54,7 +56,7 @@ func ToMetadata(r *orcapb.OrcaLoadReport) metadata.MD { func fromBytes(b []byte) *orcapb.OrcaLoadReport { ret := new(orcapb.OrcaLoadReport) if err := proto.Unmarshal(b, ret); err != nil { - grpclog.Warningf("orca: failed to unmarshal load report: %v", err) + logger.Warningf("orca: failed to unmarshal load report: %v", err) return nil } return ret diff --git a/xds/internal/client/client_loadreport.go b/xds/internal/client/client_loadreport.go index 42766f997163..314a47a8defb 100644 --- a/xds/internal/client/client_loadreport.go +++ b/xds/internal/client/client_loadreport.go @@ -30,6 +30,8 @@ import ( const nodeMetadataHostnameKey = "PROXYLESS_CLIENT_HOSTNAME" +var logger = grpclog.Component("xds") + // ReportLoad sends the load of the given clusterName from loadStore to the // given server. If the server is not an empty string, and is different from the // xds server, a new ClientConn will be created. @@ -53,7 +55,7 @@ func (c *Client) ReportLoad(server string, clusterName string, loadStore lrs.Sto ccNew, err := grpc.Dial(server, dopts...) if err != nil { // An error from a non-blocking dial indicates something serious. - grpclog.Infof("xds: failed to dial load report server {%s}: %v", server, err) + logger.Infof("xds: failed to dial load report server {%s}: %v", server, err) return func() {} } cc = ccNew From c95dc4da23cb6f3d4c8bedd53d4af72b46f55616 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 26 Jun 2020 12:56:03 -0700 Subject: [PATCH 10/11] doc: mark CustomCodec as deprecated (#3698) --- server.go | 14 +++++++++++--- vet.sh | 1 + 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/server.go b/server.go index 0cac3cf5f2f7..1b56cc2d11fd 100644 --- a/server.go +++ b/server.go @@ -243,6 +243,12 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. // // This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +// +// Deprecated: register codecs using encoding.RegisterCodec. The server will +// automatically use registered codecs based on the incoming requests' headers. +// See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.codec = codec @@ -255,7 +261,8 @@ func CustomCodec(codec Codec) ServerOption { // default, server messages will be sent using the same compressor with which // request messages were sent. // -// Deprecated: use encoding.RegisterCompressor instead. +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. func RPCCompressor(cp Compressor) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.cp = cp @@ -266,7 +273,8 @@ func RPCCompressor(cp Compressor) ServerOption { // messages. It has higher priority than decompressors registered via // encoding.RegisterCompressor. // -// Deprecated: use encoding.RegisterCompressor instead. +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. 
func RPCDecompressor(dc Decompressor) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.dc = dc @@ -276,7 +284,7 @@ func RPCDecompressor(dc Decompressor) ServerOption { // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. // If this is not set, gRPC uses the default limit. // -// Deprecated: use MaxRecvMsgSize instead. +// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x. func MaxMsgSize(m int) ServerOption { return MaxRecvMsgSize(m) } diff --git a/vet.sh b/vet.sh index bfd5d5411053..8b7dff19adb3 100755 --- a/vet.sh +++ b/vet.sh @@ -135,6 +135,7 @@ balancer.Picker grpc.CallCustomCodec grpc.Code grpc.Compressor +grpc.CustomCodec grpc.Decompressor grpc.MaxMsgSize grpc.MethodConfig From 68098483a7afa91b353453641408e3968ad92738 Mon Sep 17 00:00:00 2001 From: cindyxue <32377977+cindyxue@users.noreply.github.com> Date: Sat, 27 Jun 2020 16:05:33 -0700 Subject: [PATCH 11/11] advancedtls: Add system default CAs to config function (#3663) * Add system default CAs to config function --- security/advancedtls/advancedtls.go | 28 ++++++++++-- security/advancedtls/advancedtls_test.go | 56 ++++++++++++++++++++++++ 2 files changed, 81 insertions(+), 3 deletions(-) diff --git a/security/advancedtls/advancedtls.go b/security/advancedtls/advancedtls.go index db78a566262e..745938ac5295 100644 --- a/security/advancedtls/advancedtls.go +++ b/security/advancedtls/advancedtls.go @@ -81,6 +81,8 @@ type GetRootCAsResults struct { // RootCertificateOptions contains a field and a function for obtaining root // trust certificates. // It is used by both ClientOptions and ServerOptions. +// If users want to use default verification, but did not provide a valid +// RootCertificateOptions, we use the system default trust certificates. type RootCertificateOptions struct { // If field RootCACerts is set, field GetRootCAs will be ignored. RootCACerts // will be used every time when verifying the peer certificates, without @@ -184,15 +186,26 @@ func (o *ClientOptions) config() (*tls.Config, error) { return nil, fmt.Errorf( "client needs to provide custom verification mechanism if choose to skip default verification") } + rootCAs := o.RootCACerts + if o.VType != SkipVerification && o.RootCACerts == nil && o.GetRootCAs == nil { + // Set rootCAs to system default. + systemRootCAs, err := x509.SystemCertPool() + if err != nil { + return nil, err + } + rootCAs = systemRootCAs + } // We have to set InsecureSkipVerify to true to skip the default checks and // use the verification function we built from buildVerifyFunc. config := &tls.Config{ ServerName: o.ServerNameOverride, Certificates: o.Certificates, GetClientCertificate: o.GetClientCertificate, - RootCAs: o.RootCACerts, InsecureSkipVerify: true, } + if rootCAs != nil { + config.RootCAs = rootCAs + } return config, nil } @@ -204,6 +217,15 @@ func (o *ServerOptions) config() (*tls.Config, error) { return nil, fmt.Errorf( "server needs to provide custom verification mechanism if choose to skip default verification, but require client certificate(s)") } + clientCAs := o.RootCACerts + if o.VType != SkipVerification && o.RootCACerts == nil && o.GetRootCAs == nil && o.RequireClientCert { + // Set clientCAs to system default. 
+ systemRootCAs, err := x509.SystemCertPool() + if err != nil { + return nil, err + } + clientCAs = systemRootCAs + } clientAuth := tls.NoClientCert if o.RequireClientCert { // We have to set clientAuth to RequireAnyClientCert to force underlying @@ -216,8 +238,8 @@ func (o *ServerOptions) config() (*tls.Config, error) { Certificates: o.Certificates, GetCertificate: o.GetCertificate, } - if o.RootCACerts != nil { - config.ClientCAs = o.RootCACerts + if clientCAs != nil { + config.ClientCAs = clientCAs } return config, nil } diff --git a/security/advancedtls/advancedtls_test.go b/security/advancedtls/advancedtls_test.go index ab6ba59068ac..263bf7df418c 100644 --- a/security/advancedtls/advancedtls_test.go +++ b/security/advancedtls/advancedtls_test.go @@ -623,3 +623,59 @@ func TestWrapSyscallConn(t *testing.T) { wrapConn) } } + +func TestOptionsConfig(t *testing.T) { + serverPeerCert, err := tls.LoadX509KeyPair(testdata.Path("server_cert_1.pem"), + testdata.Path("server_key_1.pem")) + if err != nil { + t.Fatalf("Server is unable to parse peer certificates. Error: %v", err) + } + tests := []struct { + desc string + clientVType VerificationType + serverMutualTLS bool + serverCert []tls.Certificate + serverVType VerificationType + }{ + { + desc: "Client uses system-provided RootCAs; server uses system-provided ClientCAs", + clientVType: CertVerification, + serverMutualTLS: true, + serverCert: []tls.Certificate{serverPeerCert}, + serverVType: CertAndHostVerification, + }, + } + for _, test := range tests { + test := test + t.Run(test.desc, func(t *testing.T) { + serverOptions := &ServerOptions{ + Certificates: test.serverCert, + RequireClientCert: test.serverMutualTLS, + VType: test.serverVType, + } + serverConfig, err := serverOptions.config() + if err != nil { + t.Fatalf("Unable to generate serverConfig. Error: %v", err) + } + // Verify that the system-provided certificates would be used + // when no verification method was set in serverOptions. + if serverOptions.RootCACerts == nil && serverOptions.GetRootCAs == nil && + serverOptions.RequireClientCert && serverConfig.ClientCAs == nil { + t.Fatalf("Failed to assign system-provided certificates on the server side.") + } + clientOptions := &ClientOptions{ + VType: test.clientVType, + } + clientConfig, err := clientOptions.config() + if err != nil { + t.Fatalf("Unable to generate clientConfig. Error: %v", err) + } + // Verify that the system-provided certificates would be used + // when no verification method was set in clientOptions. + if clientOptions.RootCACerts == nil && clientOptions.GetRootCAs == nil && + clientConfig.RootCAs == nil { + t.Fatalf("Failed to assign system-provided certificates on the client side.") + } + }) + } +}
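For reference, the client-side fallback that the new TestOptionsConfig case exercises reduces to a few lines. A standalone sketch assuming only the standard library (resolveRootCAs is an illustrative name, not part of the advancedtls API):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
)

// resolveRootCAs mirrors the fallback added to config(): prefer the
// caller-supplied pool, otherwise load the system trust store.
func resolveRootCAs(provided *x509.CertPool) (*x509.CertPool, error) {
	if provided != nil {
		return provided, nil
	}
	return x509.SystemCertPool()
}

func main() {
	roots, err := resolveRootCAs(nil) // no RootCACerts/GetRootCAs supplied
	if err != nil {
		log.Fatalf("loading system roots: %v", err)
	}
	// Only the source of RootCAs changes when nothing was provided; the rest
	// of the client tls.Config is built as before.
	cfg := &tls.Config{RootCAs: roots}
	_ = cfg
}

On the server side the same pool feeds tls.Config.ClientCAs instead, and only when RequireClientCert is set, matching the clientCAs branch above.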