From 79e9c9571a1949d3abae203a127fa5d4f02fb071 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 8 Nov 2021 11:11:24 -0800 Subject: [PATCH] xds/client: move unmarshal functions and types to a separate package (#4904) --- xds/csds/csds.go | 13 +- xds/csds/csds_test.go | 9 +- .../balancer/cdsbalancer/cdsbalancer.go | 7 +- .../cdsbalancer/cdsbalancer_security_test.go | 24 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 31 +- .../balancer/cdsbalancer/cluster_handler.go | 21 +- .../cdsbalancer/cluster_handler_test.go | 120 +- .../clusterresolver/clusterresolver_test.go | 15 +- .../balancer/clusterresolver/configbuilder.go | 20 +- .../clusterresolver/configbuilder_test.go | 184 +-- .../balancer/clusterresolver/eds_impl_test.go | 5 +- .../clusterresolver/resource_resolver.go | 7 +- .../clusterresolver/resource_resolver_test.go | 8 +- .../balancer/clusterresolver/testutil_test.go | 26 +- xds/internal/resolver/serviceconfig.go | 24 +- xds/internal/resolver/serviceconfig_test.go | 16 +- xds/internal/resolver/watch_service.go | 13 +- xds/internal/resolver/watch_service_test.go | 138 +- xds/internal/resolver/xds_resolver_test.go | 161 +- xds/internal/server/conn_wrapper.go | 8 +- xds/internal/server/listener_wrapper.go | 19 +- xds/internal/server/listener_wrapper_test.go | 28 +- xds/internal/server/rds_handler.go | 12 +- xds/internal/server/rds_handler_test.go | 30 +- xds/internal/testutils/fakeclient/client.go | 31 +- xds/internal/xdsclient/attributes.go | 9 +- xds/internal/xdsclient/callback.go | 29 +- xds/internal/xdsclient/client.go | 533 +------ xds/internal/xdsclient/client_test.go | 49 +- xds/internal/xdsclient/dump.go | 17 +- xds/internal/xdsclient/dump_test.go | 121 +- xds/internal/xdsclient/v2/ack_test.go | 3 +- xds/internal/xdsclient/v2/cds_test.go | 39 +- xds/internal/xdsclient/v2/client.go | 19 +- xds/internal/xdsclient/v2/client_test.go | 47 +- xds/internal/xdsclient/v2/eds_test.go | 45 +- xds/internal/xdsclient/v2/lds_test.go | 68 +- xds/internal/xdsclient/v2/rds_test.go | 72 +- xds/internal/xdsclient/v3/client.go | 19 +- xds/internal/xdsclient/watchers.go | 29 +- .../xdsclient/watchers_cluster_test.go | 125 +- .../xdsclient/watchers_endpoints_test.go | 93 +- .../xdsclient/watchers_listener_test.go | 139 +- xds/internal/xdsclient/watchers_route_test.go | 103 +- xds/internal/xdsclient/xds.go | 1345 ----------------- .../{ => xdsresource}/filter_chain.go | 41 +- .../{ => xdsresource}/filter_chain_test.go | 53 +- .../xdsclient/{ => xdsresource}/matcher.go | 3 +- .../{ => xdsresource}/matcher_path.go | 3 +- .../{ => xdsresource}/matcher_path_test.go | 9 +- .../{ => xdsresource}/matcher_test.go | 7 +- .../xdsclient/xdsresource/test_utils_test.go | 52 + xds/internal/xdsclient/xdsresource/type.go | 107 ++ .../xdsclient/xdsresource/type_cds.go | 87 ++ .../xdsclient/xdsresource/type_eds.go | 80 + .../xdsclient/xdsresource/type_lds.go | 87 ++ .../xdsclient/xdsresource/type_rds.go | 245 +++ .../xdsclient/xdsresource/unmarshal.go | 174 +++ .../xdsclient/xdsresource/unmarshal_cds.go | 456 ++++++ .../unmarshal_cds_test.go} | 5 +- .../xdsclient/xdsresource/unmarshal_eds.go | 131 ++ .../unmarshal_eds_test.go} | 5 +- .../xdsclient/xdsresource/unmarshal_lds.go | 297 ++++ .../unmarshal_lds_test.go} | 5 +- .../xdsclient/xdsresource/unmarshal_rds.go | 373 +++++ .../unmarshal_rds_test.go} | 5 +- xds/server.go | 9 +- xds/server_test.go | 23 +- 68 files changed, 3218 insertions(+), 2913 deletions(-) delete mode 100644 xds/internal/xdsclient/xds.go rename xds/internal/xdsclient/{ => 
xdsresource}/filter_chain.go (95%) rename xds/internal/xdsclient/{ => xdsresource}/filter_chain_test.go (98%) rename xds/internal/xdsclient/{ => xdsresource}/matcher.go (99%) rename xds/internal/xdsclient/{ => xdsresource}/matcher_path.go (99%) rename xds/internal/xdsclient/{ => xdsresource}/matcher_path_test.go (94%) rename xds/internal/xdsclient/{ => xdsresource}/matcher_test.go (98%) create mode 100644 xds/internal/xdsclient/xdsresource/test_utils_test.go create mode 100644 xds/internal/xdsclient/xdsresource/type.go create mode 100644 xds/internal/xdsclient/xdsresource/type_cds.go create mode 100644 xds/internal/xdsclient/xdsresource/type_eds.go create mode 100644 xds/internal/xdsclient/xdsresource/type_lds.go create mode 100644 xds/internal/xdsclient/xdsresource/type_rds.go create mode 100644 xds/internal/xdsclient/xdsresource/unmarshal.go create mode 100644 xds/internal/xdsclient/xdsresource/unmarshal_cds.go rename xds/internal/xdsclient/{cds_test.go => xdsresource/unmarshal_cds_test.go} (99%) create mode 100644 xds/internal/xdsclient/xdsresource/unmarshal_eds.go rename xds/internal/xdsclient/{eds_test.go => xdsresource/unmarshal_eds_test.go} (99%) create mode 100644 xds/internal/xdsclient/xdsresource/unmarshal_lds.go rename xds/internal/xdsclient/{lds_test.go => xdsresource/unmarshal_lds_test.go} (99%) create mode 100644 xds/internal/xdsclient/xdsresource/unmarshal_rds.go rename xds/internal/xdsclient/{rds_test.go => xdsresource/unmarshal_rds_test.go} (99%) diff --git a/xds/csds/csds.go b/xds/csds/csds.go index 23f9c760b637..5b9d1c467cb2 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/timestamppb" _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // Register v2 xds_client. 
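A note on the pattern that repeats through the rest of this patch: the resource-update types (ListenerUpdate, RouteConfigUpdate, ClusterUpdate, EndpointsUpdate) and their supporting types (ServiceStatus, SecurityConfig, the ClusterType and EndpointHealthStatus constants) move from the xdsclient package into the new xdsclient/xdsresource subpackage. Callers keep the same type names and only swap the package qualifier, as the csds.go hunk above shows. A minimal sketch of a caller before and after (the callback function is illustrative; the import path and type name are the ones this patch introduces):

    package example

    import (
    	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
    )

    // Before this patch the callback was typed against the parent package:
    //   func onListenerUpdate(update xdsclient.ListenerUpdate, err error)
    // After it, the same callback is typed against xdsresource:
    func onListenerUpdate(update xdsresource.ListenerUpdate, err error) {
    	if err != nil {
    		return // handle the watch error
    	}
    	_ = update // consume the update
    }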
@@ -197,17 +198,17 @@ func dumpToGenericXdsConfig(typeURL string, dumpF func() (string, map[string]xds return ret } -func serviceStatusToProto(serviceStatus xdsclient.ServiceStatus) v3adminpb.ClientResourceStatus { +func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.ClientResourceStatus { switch serviceStatus { - case xdsclient.ServiceStatusUnknown: + case xdsresource.ServiceStatusUnknown: return v3adminpb.ClientResourceStatus_UNKNOWN - case xdsclient.ServiceStatusRequested: + case xdsresource.ServiceStatusRequested: return v3adminpb.ClientResourceStatus_REQUESTED - case xdsclient.ServiceStatusNotExist: + case xdsresource.ServiceStatusNotExist: return v3adminpb.ClientResourceStatus_DOES_NOT_EXIST - case xdsclient.ServiceStatusACKed: + case xdsresource.ServiceStatusACKed: return v3adminpb.ClientResourceStatus_ACKED - case xdsclient.ServiceStatusNACKed: + case xdsresource.ServiceStatusNACKed: return v3adminpb.ClientResourceStatus_NACKED default: return v3adminpb.ClientResourceStatus_UNKNOWN diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 88df77a62242..0bf305899de8 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -35,6 +35,7 @@ import ( _ "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/anypb" @@ -140,16 +141,16 @@ func TestCSDS(t *testing.T) { defer cleanup() for _, target := range ldsTargets { - xdsC.WatchListener(target, func(xdsclient.ListenerUpdate, error) {}) + xdsC.WatchListener(target, func(xdsresource.ListenerUpdate, error) {}) } for _, target := range rdsTargets { - xdsC.WatchRouteConfig(target, func(xdsclient.RouteConfigUpdate, error) {}) + xdsC.WatchRouteConfig(target, func(xdsresource.RouteConfigUpdate, error) {}) } for _, target := range cdsTargets { - xdsC.WatchCluster(target, func(xdsclient.ClusterUpdate, error) {}) + xdsC.WatchCluster(target, func(xdsresource.ClusterUpdate, error) {}) } for _, target := range edsTargets { - xdsC.WatchEndpoints(target, func(xdsclient.EndpointsUpdate, error) {}) + xdsC.WatchEndpoints(target, func(xdsresource.EndpointsUpdate, error) {}) } for i := 0; i < retryCount; i++ { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 82d2a96958e2..9c128dfb4639 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -38,6 +38,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/clusterresolver" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -185,7 +186,7 @@ func (b *cdsBalancer) handleClientConnUpdate(update *ccUpdate) { // management server, creates appropriate certificate provider plugins, and // updates the HandhakeInfo which is added as an address attribute in // NewSubConn() calls. -func (b *cdsBalancer) handleSecurityConfig(config *xdsclient.SecurityConfig) error { +func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) error { // If xdsCredentials are not in use, i.e, the user did not want to get // security configuration from an xDS server, we should not be acting on the // received security config here. Doing so poses a security threat. 
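The handleSecurityConfig change here is again purely a type move; the gating the comment describes (ignore server-pushed security config unless the user opted into xDS-provided credentials) is unchanged. For reference, the shape of the moved SecurityConfig type as the security tests below construct it — the field names are exactly those used in this patch, and the instance names are the tests' own:

    secCfg := &xdsresource.SecurityConfig{
    	RootInstanceName:       "default1",
    	IdentityInstanceName:   "default2",
    	SubjectAltNameMatchers: testSANMatchers,
    }
    update := xdsresource.ClusterUpdate{ClusterName: serviceName, SecurityCfg: secCfg}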
@@ -310,7 +311,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { dms := make([]clusterresolver.DiscoveryMechanism, len(update.updates)) for i, cu := range update.updates { switch cu.ClusterType { - case xdsclient.ClusterTypeEDS: + case xdsresource.ClusterTypeEDS: dms[i] = clusterresolver.DiscoveryMechanism{ Type: clusterresolver.DiscoveryMechanismTypeEDS, Cluster: cu.ClusterName, @@ -324,7 +325,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { dms[i].LoadReportingServerName = new(string) } - case xdsclient.ClusterTypeLogicalDNS: + case xdsresource.ClusterTypeLogicalDNS: dms[i] = clusterresolver.DiscoveryMechanism{ Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS, DNSHostname: cu.DNSHostName, diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 9483818e306e..778d711f2190 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -36,8 +36,8 @@ import ( "google.golang.org/grpc/resolver" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -57,17 +57,17 @@ var ( } fpb1, fpb2 *fakeProviderBuilder bootstrapConfig *bootstrap.Config - cdsUpdateWithGoodSecurityCfg = xdsclient.ClusterUpdate{ + cdsUpdateWithGoodSecurityCfg = xdsresource.ClusterUpdate{ ClusterName: serviceName, - SecurityCfg: &xdsclient.SecurityConfig{ + SecurityCfg: &xdsresource.SecurityConfig{ RootInstanceName: "default1", IdentityInstanceName: "default2", SubjectAltNameMatchers: testSANMatchers, }, } - cdsUpdateWithMissingSecurityCfg = xdsclient.ClusterUpdate{ + cdsUpdateWithMissingSecurityCfg = xdsresource.ClusterUpdate{ ClusterName: serviceName, - SecurityCfg: &xdsclient.SecurityConfig{ + SecurityCfg: &xdsresource.SecurityConfig{ RootInstanceName: "not-default", }, } @@ -250,7 +250,7 @@ func (s) TestSecurityConfigWithoutXDSCreds(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -306,7 +306,7 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. No security config is // passed to the CDS balancer as part of this update. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -566,7 +566,7 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) { // an update which contains bad security config. So, we expect the CDS // balancer to forward this error to the EDS balancer and eventually the // channel needs to be put in a bad state. 
- cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -671,9 +671,9 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ + cdsUpdate := xdsresource.ClusterUpdate{ ClusterName: serviceName, - SecurityCfg: &xdsclient.SecurityConfig{ + SecurityCfg: &xdsresource.SecurityConfig{ RootInstanceName: "default1", SubjectAltNameMatchers: testSANMatchers, }, @@ -696,9 +696,9 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { } // Push another update with a new security configuration. - cdsUpdate = xdsclient.ClusterUpdate{ + cdsUpdate = xdsresource.ClusterUpdate{ ClusterName: serviceName, - SecurityCfg: &xdsclient.SecurityConfig{ + SecurityCfg: &xdsresource.SecurityConfig{ RootInstanceName: "default2", SubjectAltNameMatchers: testSANMatchers, }, diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 30b612fc7d01..7979f82e8f6e 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -39,6 +39,7 @@ import ( xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -58,7 +59,7 @@ func Test(t *testing.T) { // cdsWatchInfo wraps the update and the error sent in a CDS watch callback. type cdsWatchInfo struct { - update xdsclient.ClusterUpdate + update xdsresource.ClusterUpdate err error } @@ -361,25 +362,25 @@ func (s) TestHandleClusterUpdate(t *testing.T) { tests := []struct { name string - cdsUpdate xdsclient.ClusterUpdate + cdsUpdate xdsresource.ClusterUpdate updateErr error wantCCS balancer.ClientConnState }{ { name: "happy-case-with-lrs", - cdsUpdate: xdsclient.ClusterUpdate{ClusterName: serviceName, EnableLRS: true}, + cdsUpdate: xdsresource.ClusterUpdate{ClusterName: serviceName, EnableLRS: true}, wantCCS: edsCCS(serviceName, nil, true, nil), }, { name: "happy-case-without-lrs", - cdsUpdate: xdsclient.ClusterUpdate{ClusterName: serviceName}, + cdsUpdate: xdsresource.ClusterUpdate{ClusterName: serviceName}, wantCCS: edsCCS(serviceName, nil, false, nil), }, { name: "happy-case-with-ring-hash-lb-policy", - cdsUpdate: xdsclient.ClusterUpdate{ + cdsUpdate: xdsresource.ClusterUpdate{ ClusterName: serviceName, - LBPolicy: &xdsclient.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, + LBPolicy: &xdsresource.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, }, wantCCS: edsCCS(serviceName, nil, false, &internalserviceconfig.BalancerConfig{ Name: ringhash.Name, @@ -417,7 +418,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // resolver error at this point should result in the CDS balancer returning // an error picker. 
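Note the split this refactor leaves in place: the error constructor and error-type constants (xdsclient.NewErrorf, xdsclient.ErrorTypeResourceNotFound, xdsclient.ErrorTypeConnection) stay in xdsclient, while only the update payload types move to xdsresource. A sketch of the classification the tests around here exercise — the constructor and constants appear verbatim in this patch, but ErrType as the accessor name is an assumption about xdsclient's errors helpers:

    err := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "cluster not found")
    if xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { // ErrType: assumed accessor
    	// Resource removed on the management server: keep the watch registered
    	// in case the resource comes back.
    } else {
    	// Connection or other transient errors: also keep the watch; only
    	// surface the error to the child policy.
    }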
watcherErr := errors.New("cdsBalancer watcher error") - xdsC.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{}, watcherErr) + xdsC.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{}, watcherErr) // Since the error being pushed here is not a resource-not-found-error, the // registered watch should not be cancelled. @@ -451,14 +452,14 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false, nil) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } // Again push a non-resource-not-found-error through the watcher callback. - xdsC.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{}, watcherErr) + xdsC.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{}, watcherErr) // Make sure the registered watch is not cancelled. sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() @@ -472,7 +473,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // Push a resource-not-found-error this time around. resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "cdsBalancer resource not found error") - xdsC.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{}, resourceErr) + xdsC.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{}, resourceErr) // Make sure that the watch is not cancelled. This error indicates that the // request cluster resource is not found. We should continue to watch it. sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) @@ -536,7 +537,7 @@ func (s) TestResolverError(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false, nil) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) @@ -585,7 +586,7 @@ func (s) TestUpdateSubConnState(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -620,7 +621,7 @@ func (s) TestCircuitBreaking(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will update // the service's counter with the new max requests. 
var maxRequests uint32 = 1 - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: clusterName, MaxRequests: &maxRequests} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: clusterName, MaxRequests: &maxRequests} wantCCS := edsCCS(clusterName, &maxRequests, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -653,7 +654,7 @@ func (s) TestClose(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -724,7 +725,7 @@ func (s) TestExitIdle(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler.go b/xds/internal/balancer/cdsbalancer/cluster_handler.go index 163a8c0a2e18..a10d8d772f2b 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -21,6 +21,7 @@ import ( "sync" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update") @@ -31,17 +32,17 @@ var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a // (if one doesn't already exist) and pushing the update to it. type clusterHandlerUpdate struct { // securityCfg is the Security Config from the top (root) cluster. - securityCfg *xdsclient.SecurityConfig + securityCfg *xdsresource.SecurityConfig // lbPolicy is the lb policy from the top (root) cluster. // // Currently, we only support roundrobin or ringhash, and since roundrobin // does need configs, this is only set to the ringhash config, if the policy // is ringhash. In the future, if we support more policies, we can make this // an interface, and set it to config of the other policies. - lbPolicy *xdsclient.ClusterLBPolicyRingHash + lbPolicy *xdsresource.ClusterLBPolicyRingHash // updates is a list of ClusterUpdates from all the leaf clusters. - updates []xdsclient.ClusterUpdate + updates []xdsresource.ClusterUpdate err error } @@ -139,7 +140,7 @@ type clusterNode struct { // A ClusterUpdate in order to build a list of cluster updates for CDS to // send down to child XdsClusterResolverLoadBalancingPolicy. - clusterUpdate xdsclient.ClusterUpdate + clusterUpdate xdsresource.ClusterUpdate // This boolean determines whether this Node has received an update or not. // This isn't the best practice, but this will protect a list of Cluster @@ -176,7 +177,7 @@ func (c *clusterNode) delete() { } // Construct cluster update (potentially a list of ClusterUpdates) for a node. 
-func (c *clusterNode) constructClusterUpdate() ([]xdsclient.ClusterUpdate, error) { +func (c *clusterNode) constructClusterUpdate() ([]xdsresource.ClusterUpdate, error) { // If the cluster has not yet received an update, the cluster update is not // yet ready. if !c.receivedUpdate { @@ -185,13 +186,13 @@ func (c *clusterNode) constructClusterUpdate() ([]xdsclient.ClusterUpdate, error // Base case - LogicalDNS or EDS. Both of these cluster types will be tied // to a single ClusterUpdate. - if c.clusterUpdate.ClusterType != xdsclient.ClusterTypeAggregate { - return []xdsclient.ClusterUpdate{c.clusterUpdate}, nil + if c.clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate { + return []xdsresource.ClusterUpdate{c.clusterUpdate}, nil } // If an aggregate construct a list by recursively calling down to all of // it's children. - var childrenUpdates []xdsclient.ClusterUpdate + var childrenUpdates []xdsresource.ClusterUpdate for _, child := range c.children { childUpdateList, err := child.constructClusterUpdate() if err != nil { @@ -206,7 +207,7 @@ func (c *clusterNode) constructClusterUpdate() ([]xdsclient.ClusterUpdate, error // also handles any logic with regards to any child state that may have changed. // At the end of the handleResp(), the clusterUpdate will be pinged in certain // situations to try and construct an update to send back to CDS. -func (c *clusterNode) handleResp(clusterUpdate xdsclient.ClusterUpdate, err error) { +func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err error) { c.clusterHandler.clusterMutex.Lock() defer c.clusterHandler.clusterMutex.Unlock() if err != nil { // Write this error for run() to pick up in CDS LB policy. @@ -230,7 +231,7 @@ func (c *clusterNode) handleResp(clusterUpdate xdsclient.ClusterUpdate, err erro // handler to return. Also, if there was any children from previously, // delete the children, as the cluster type is no longer an aggregate // cluster. 
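The recursion above is what turns an aggregate-cluster tree into the flat list that CDS hands down to the cluster_resolver policy. A worked instance, mirroring the aggregate test later in this patch: a root aggregate cluster whose PrioritizedClusterNames is [edsService, logicalDNSService], with both children having received updates, flattens depth-first into the leaf updates in that same priority order:

    want := []xdsresource.ClusterUpdate{
    	{ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService},
    	{ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService},
    }

If any leaf has not yet received an update, constructClusterUpdate instead returns errNotReceivedUpdate (declared at the top of this file) and the handler withholds the update.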
- if clusterUpdate.ClusterType != xdsclient.ClusterTypeAggregate { + if clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate { for _, child := range c.children { child.delete() } diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go index cb9b4e14da3c..4a00fe7d542a 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go @@ -23,7 +23,7 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -50,32 +50,32 @@ func (s) TestSuccessCaseLeafNode(t *testing.T) { tests := []struct { name string clusterName string - clusterUpdate xdsclient.ClusterUpdate - lbPolicy *xdsclient.ClusterLBPolicyRingHash + clusterUpdate xdsresource.ClusterUpdate + lbPolicy *xdsresource.ClusterLBPolicyRingHash }{ { name: "test-update-root-cluster-EDS-success", clusterName: edsService, - clusterUpdate: xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + clusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService, }, }, { name: "test-update-root-cluster-EDS-with-ring-hash", clusterName: logicalDNSService, - clusterUpdate: xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + clusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService, - LBPolicy: &xdsclient.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, + LBPolicy: &xdsresource.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, }, - lbPolicy: &xdsclient.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, + lbPolicy: &xdsresource.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, }, { name: "test-update-root-cluster-Logical-DNS-success", clusterName: logicalDNSService, - clusterUpdate: xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + clusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService, }, }, @@ -107,7 +107,7 @@ func (s) TestSuccessCaseLeafNode(t *testing.T) { fakeClient.InvokeWatchClusterCallback(test.clusterUpdate, nil) select { case chu := <-ch.updateChannel: - if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{test.clusterUpdate}); diff != "" { + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{test.clusterUpdate}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) } if diff := cmp.Diff(chu.lbPolicy, test.lbPolicy); diff != "" { @@ -140,29 +140,29 @@ func (s) TestSuccessCaseLeafNodeThenNewUpdate(t *testing.T) { tests := []struct { name string clusterName string - clusterUpdate xdsclient.ClusterUpdate - newClusterUpdate xdsclient.ClusterUpdate + clusterUpdate xdsresource.ClusterUpdate + newClusterUpdate xdsresource.ClusterUpdate }{ {name: "test-update-root-cluster-then-new-update-EDS-success", clusterName: edsService, - clusterUpdate: xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + clusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService, }, - newClusterUpdate: xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + newClusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: 
xdsresource.ClusterTypeEDS, ClusterName: edsService2, }, }, { name: "test-update-root-cluster-then-new-update-Logical-DNS-success", clusterName: logicalDNSService, - clusterUpdate: xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + clusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService, }, - newClusterUpdate: xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + newClusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService2, }, }, @@ -204,7 +204,7 @@ func (s) TestSuccessCaseLeafNodeThenNewUpdate(t *testing.T) { fakeClient.InvokeWatchClusterCallback(test.newClusterUpdate, nil) select { case chu := <-ch.updateChannel: - if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{test.newClusterUpdate}); diff != "" { + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{test.newClusterUpdate}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -242,8 +242,8 @@ func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { // start watches for the aggregate cluster's children. The ping to the // clusterHandler at the end of handleResp should be a no-op, as neither the // EDS or LogicalDNS child clusters have received an update yet. - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeAggregate, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, ClusterName: aggregateClusterService, PrioritizedClusterNames: []string{edsService, logicalDNSService}, }, nil) @@ -290,8 +290,8 @@ func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { } // Send callback for the EDS child cluster. - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService, }, nil) @@ -306,8 +306,8 @@ func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { // Invoke callback for Logical DNS child cluster. - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService, }, nil) @@ -320,11 +320,11 @@ func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { // ordered as per the cluster update. 
select { case chu := <-ch.updateChannel: - if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{{ - ClusterType: xdsclient.ClusterTypeEDS, + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService, }, { - ClusterType: xdsclient.ClusterTypeLogicalDNS, + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService, }}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) @@ -352,19 +352,19 @@ func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) } - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeAggregate, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, ClusterName: aggregateClusterService, PrioritizedClusterNames: []string{edsService, logicalDNSService}, }, nil) fakeClient.WaitForWatchCluster(ctx) fakeClient.WaitForWatchCluster(ctx) - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService, }, nil) - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService, }, nil) @@ -374,8 +374,8 @@ func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") } - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeAggregate, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, ClusterName: aggregateClusterService, PrioritizedClusterNames: []string{edsService, logicalDNSService2}, }, nil) @@ -414,8 +414,8 @@ func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { // Invoke a callback for the new logicalDNSService2 - this will fill out the // tree with successful updates. 
- fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService2, }, nil) @@ -427,11 +427,11 @@ func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { select { case chu := <-ch.updateChannel: - if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{{ - ClusterType: xdsclient.ClusterTypeEDS, + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService, }, { - ClusterType: xdsclient.ClusterTypeLogicalDNS, + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService2, }}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) @@ -459,19 +459,19 @@ func (s) TestUpdateRootClusterAggregateThenChangeRootToEDS(t *testing.T) { t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) } - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeAggregate, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, ClusterName: aggregateClusterService, PrioritizedClusterNames: []string{edsService, logicalDNSService}, }, nil) fakeClient.WaitForWatchCluster(ctx) fakeClient.WaitForWatchCluster(ctx) - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService, }, nil) - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService, }, nil) @@ -536,7 +536,7 @@ func (s) TestHandleRespInvokedWithError(t *testing.T) { if err != nil { t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) } - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{}, errors.New("some error")) + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{}, errors.New("some error")) select { case chu := <-ch.updateChannel: if chu.err.Error() != "some error" { @@ -563,8 +563,8 @@ func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { if err != nil { t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) } - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService2, }, nil) select { @@ -574,8 +574,8 @@ func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { } // Switch the cluster to an aggregate cluster, this should cause two new // child watches to be created. - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeAggregate, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, ClusterName: edsService2, PrioritizedClusterNames: []string{edsService, logicalDNSService}, }, nil) @@ -632,8 +632,8 @@ func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { // Switch the cluster back to an EDS Cluster. This should cause the two // children to be deleted. 
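The reverse transition exercised here relies on clusterNode.delete(), which the test comments above expect to tear both children down (their cluster watches cancelled) once the node stops being an aggregate. After the EDS update for the now-leaf node lands, the expected flattened update is that single leaf, as the assertion just below checks:

    want := []xdsresource.ClusterUpdate{{
    	ClusterType: xdsresource.ClusterTypeEDS,
    	ClusterName: edsService2,
    }}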
- fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService2, }, nil) @@ -673,8 +673,8 @@ func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { // Then an update should successfully be written to the update buffer. select { case chu := <-ch.updateChannel: - if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{{ - ClusterType: xdsclient.ClusterTypeEDS, + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService2, }}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 6af81f89f1f3..2cd692bbade9 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // V2 client registration. ) @@ -47,10 +48,10 @@ const ( var ( // A non-empty endpoints update which is expected to be accepted by the EDS // LB policy. - defaultEndpointsUpdate = xdsclient.EndpointsUpdate{ - Localities: []xdsclient.Locality{ + defaultEndpointsUpdate = xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{{Address: "endpoint1"}}, + Endpoints: []xdsresource.Endpoint{{Address: "endpoint1"}}, ID: internal.LocalityID{Zone: "zone"}, Priority: 1, Weight: 100, @@ -270,7 +271,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } - xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, nil) + xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, nil) edsLB, err := waitForNewChildLB(ctx, edsLBCh) if err != nil { t.Fatal(err) @@ -280,7 +281,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { } connectionErr := xdsclient.NewErrorf(xdsclient.ErrorTypeConnection, "connection error") - xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, connectionErr) + xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, connectionErr) sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() @@ -298,7 +299,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { } resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "clusterResolverBalancer resource not found error") - xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, resourceErr) + xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, resourceErr) // Even if error is resource not found, watch shouldn't be canceled, because // this is an EDS resource removed (and xds client actually never sends this // error, but we still handles it). 
@@ -359,7 +360,7 @@ func (s) TestErrorFromResolver(t *testing.T) { if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } - xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, nil) + xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, nil) edsLB, err := waitForNewChildLB(ctx, edsLBCh) if err != nil { t.Fatal(err) diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go index 475497d48950..741744ee3fc1 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -33,7 +33,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/balancer/weightedtarget" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const million = 1000000 @@ -48,7 +48,7 @@ const million = 1000000 type priorityConfig struct { mechanism DiscoveryMechanism // edsResp is set only if type is EDS. - edsResp xdsclient.EndpointsUpdate + edsResp xdsresource.EndpointsUpdate // addresses is set only if type is DNS. addresses []string } @@ -169,7 +169,7 @@ func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string) (string // - map{"p0":p0_config, "p1":p1_config} // - [p0_address_0, p0_address_1, p1_address_0, p1_address_1] // - p0 addresses' hierarchy attributes are set to p0 -func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsclient.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { +func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { drops := make([]clusterimpl.DropConfig, 0, len(edsResp.Drops)) for _, d := range edsResp.Drops { drops = append(drops, clusterimpl.DropConfig{ @@ -205,9 +205,9 @@ func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsclient.Endpoint // For example, for L0-p0, L1-p0, L2-p1, results will be // - ["p0", "p1"] // - map{"p0":[L0, L1], "p1":[L2]} -func groupLocalitiesByPriority(localities []xdsclient.Locality) ([]string, map[string][]xdsclient.Locality) { +func groupLocalitiesByPriority(localities []xdsresource.Locality) ([]string, map[string][]xdsresource.Locality) { var priorityIntSlice []int - priorities := make(map[string][]xdsclient.Locality) + priorities := make(map[string][]xdsresource.Locality) for _, locality := range localities { if locality.Weight == 0 { continue @@ -252,7 +252,7 @@ var rrBalancerConfig = &internalserviceconfig.BalancerConfig{Name: roundrobin.Na // priorityLocalitiesToClusterImpl takes a list of localities (with the same // priority), and generates a cluster impl policy config, and a list of // addresses. 
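To make the grouping concrete, here is the documented example (L0-p0, L1-p0, L2-p1) spelled out with this patch's types — the locality IDs are illustrative, and the weights are non-zero because groupLocalitiesByPriority skips zero-weight localities:

    locs := []xdsresource.Locality{
    	{ID: internal.LocalityID{Zone: "L0"}, Priority: 0, Weight: 1},
    	{ID: internal.LocalityID{Zone: "L1"}, Priority: 0, Weight: 1},
    	{ID: internal.LocalityID{Zone: "L2"}, Priority: 1, Weight: 1},
    }
    names, byPriority := groupLocalitiesByPriority(locs)
    // names      == []string{"0", "1"}   (the list order is the priority order)
    // byPriority == map{"0": {L0, L1}, "1": {L2}}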
-func priorityLocalitiesToClusterImpl(localities []xdsclient.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) { +func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) { clusterImplCfg := &clusterimpl.LBConfig{ Cluster: mechanism.Cluster, EDSServiceName: mechanism.EDSServiceName, @@ -293,7 +293,7 @@ func priorityLocalitiesToClusterImpl(localities []xdsclient.Locality, priorityNa // // The addresses have path hierarchy set to [priority-name], so priority knows // which child policy they are for. -func localitiesToRingHash(localities []xdsclient.Locality, priorityName string) []resolver.Address { +func localitiesToRingHash(localities []xdsresource.Locality, priorityName string) []resolver.Address { var addrs []resolver.Address for _, locality := range localities { var lw uint32 = 1 @@ -308,7 +308,7 @@ func localitiesToRingHash(localities []xdsclient.Locality, priorityName string) // Filter out all "unhealthy" endpoints (unknown and healthy are // both considered to be healthy: // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus). - if endpoint.HealthStatus != xdsclient.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsclient.EndpointHealthStatusUnknown { + if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown { continue } @@ -333,7 +333,7 @@ func localitiesToRingHash(localities []xdsclient.Locality, priorityName string) // // The addresses have path hierarchy set to [priority-name, locality-name], so // priority and weighted target know which child policy they are for. -func localitiesToWeightedTarget(localities []xdsclient.Locality, priorityName string, childPolicy *internalserviceconfig.BalancerConfig) (*weightedtarget.LBConfig, []resolver.Address) { +func localitiesToWeightedTarget(localities []xdsresource.Locality, priorityName string, childPolicy *internalserviceconfig.BalancerConfig) (*weightedtarget.LBConfig, []resolver.Address) { weightedTargets := make(map[string]weightedtarget.Target) var addrs []resolver.Address for _, locality := range localities { @@ -346,7 +346,7 @@ func localitiesToWeightedTarget(localities []xdsclient.Locality, priorityName st // Filter out all "unhealthy" endpoints (unknown and healthy are // both considered to be healthy: // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus). 
- if endpoint.HealthStatus != xdsclient.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsclient.EndpointHealthStatusUnknown { + if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown { continue } diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go index 3e2ad8a2e64e..c2b68b946f06 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -38,7 +38,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/balancer/weightedtarget" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -55,9 +55,9 @@ const ( var ( testLocalityIDs []internal.LocalityID testAddressStrs [][]string - testEndpoints [][]xdsclient.Endpoint + testEndpoints [][]xdsresource.Endpoint - testLocalitiesP0, testLocalitiesP1 []xdsclient.Locality + testLocalitiesP0, testLocalitiesP1 []xdsresource.Locality addrCmpOpts = cmp.Options{ cmp.AllowUnexported(attributes.Attributes{}), @@ -75,21 +75,21 @@ func init() { testLocalityIDs = append(testLocalityIDs, internal.LocalityID{Zone: fmt.Sprintf("test-zone-%d", i)}) var ( addrs []string - ends []xdsclient.Endpoint + ends []xdsresource.Endpoint ) for j := 0; j < addressPerLocality; j++ { addr := fmt.Sprintf("addr-%d-%d", i, j) addrs = append(addrs, addr) - ends = append(ends, xdsclient.Endpoint{ + ends = append(ends, xdsresource.Endpoint{ Address: addr, - HealthStatus: xdsclient.EndpointHealthStatusHealthy, + HealthStatus: xdsresource.EndpointHealthStatusHealthy, }) } testAddressStrs = append(testAddressStrs, addrs) testEndpoints = append(testEndpoints, ends) } - testLocalitiesP0 = []xdsclient.Locality{ + testLocalitiesP0 = []xdsresource.Locality{ { Endpoints: testEndpoints[0], ID: testLocalityIDs[0], @@ -103,7 +103,7 @@ func init() { Priority: 0, }, } - testLocalitiesP1 = []xdsclient.Locality{ + testLocalitiesP1 = []xdsresource.Locality{ { Endpoints: testEndpoints[2], ID: testLocalityIDs[2], @@ -131,15 +131,15 @@ func TestBuildPriorityConfigJSON(t *testing.T) { Type: DiscoveryMechanismTypeEDS, EDSServiceName: testEDSServiceName, }, - edsResp: xdsclient.EndpointsUpdate{ - Drops: []xdsclient.OverloadDropConfig{ + edsResp: xdsresource.EndpointsUpdate{ + Drops: []xdsresource.OverloadDropConfig{ { Category: testDropCategory, Numerator: testDropOverMillion, Denominator: million, }, }, - Localities: []xdsclient.Locality{ + Localities: []xdsresource.Locality{ testLocalitiesP0[0], testLocalitiesP0[1], testLocalitiesP1[0], @@ -181,15 +181,15 @@ func TestBuildPriorityConfig(t *testing.T) { Type: DiscoveryMechanismTypeEDS, EDSServiceName: testEDSServiceName, }, - edsResp: xdsclient.EndpointsUpdate{ - Drops: []xdsclient.OverloadDropConfig{ + edsResp: xdsresource.EndpointsUpdate{ + Drops: []xdsresource.OverloadDropConfig{ { Category: testDropCategory, Numerator: testDropOverMillion, Denominator: million, }, }, - Localities: []xdsclient.Locality{ + Localities: []xdsresource.Locality{ testLocalitiesP0[0], testLocalitiesP0[1], testLocalitiesP1[0], @@ -333,15 +333,15 @@ func TestBuildClusterImplConfigForDNS(t *testing.T) { func TestBuildClusterImplConfigForEDS(t *testing.T) { gotNames, gotConfigs, gotAddrs, _ := buildClusterImplConfigForEDS( 2, - xdsclient.EndpointsUpdate{ - 
Drops: []xdsclient.OverloadDropConfig{ + xdsresource.EndpointsUpdate{ + Drops: []xdsresource.OverloadDropConfig{ { Category: testDropCategory, Numerator: testDropOverMillion, Denominator: million, }, }, - Localities: []xdsclient.Locality{ + Localities: []xdsresource.Locality{ { Endpoints: testEndpoints[3], ID: testLocalityIDs[3], @@ -461,42 +461,42 @@ func TestBuildClusterImplConfigForEDS(t *testing.T) { func TestGroupLocalitiesByPriority(t *testing.T) { tests := []struct { name string - localities []xdsclient.Locality + localities []xdsresource.Locality wantPriorities []string - wantLocalities map[string][]xdsclient.Locality + wantLocalities map[string][]xdsresource.Locality }{ { name: "1 locality 1 priority", - localities: []xdsclient.Locality{testLocalitiesP0[0]}, + localities: []xdsresource.Locality{testLocalitiesP0[0]}, wantPriorities: []string{"0"}, - wantLocalities: map[string][]xdsclient.Locality{ + wantLocalities: map[string][]xdsresource.Locality{ "0": {testLocalitiesP0[0]}, }, }, { name: "2 locality 1 priority", - localities: []xdsclient.Locality{testLocalitiesP0[0], testLocalitiesP0[1]}, + localities: []xdsresource.Locality{testLocalitiesP0[0], testLocalitiesP0[1]}, wantPriorities: []string{"0"}, - wantLocalities: map[string][]xdsclient.Locality{ + wantLocalities: map[string][]xdsresource.Locality{ "0": {testLocalitiesP0[0], testLocalitiesP0[1]}, }, }, { name: "1 locality in each", - localities: []xdsclient.Locality{testLocalitiesP0[0], testLocalitiesP1[0]}, + localities: []xdsresource.Locality{testLocalitiesP0[0], testLocalitiesP1[0]}, wantPriorities: []string{"0", "1"}, - wantLocalities: map[string][]xdsclient.Locality{ + wantLocalities: map[string][]xdsresource.Locality{ "0": {testLocalitiesP0[0]}, "1": {testLocalitiesP1[0]}, }, }, { name: "2 localities in each sorted", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ testLocalitiesP0[0], testLocalitiesP0[1], testLocalitiesP1[0], testLocalitiesP1[1]}, wantPriorities: []string{"0", "1"}, - wantLocalities: map[string][]xdsclient.Locality{ + wantLocalities: map[string][]xdsresource.Locality{ "0": {testLocalitiesP0[0], testLocalitiesP0[1]}, "1": {testLocalitiesP1[0], testLocalitiesP1[1]}, }, @@ -506,11 +506,11 @@ func TestGroupLocalitiesByPriority(t *testing.T) { // returned priority list must be sorted [p0, p1], because the list // order is the priority order. 
name: "2 localities in each needs to sort", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ testLocalitiesP1[1], testLocalitiesP0[1], testLocalitiesP1[0], testLocalitiesP0[0]}, wantPriorities: []string{"0", "1"}, - wantLocalities: map[string][]xdsclient.Locality{ + wantLocalities: map[string][]xdsresource.Locality{ "0": {testLocalitiesP0[1], testLocalitiesP0[0]}, "1": {testLocalitiesP1[1], testLocalitiesP1[0]}, }, @@ -563,7 +563,7 @@ func TestDedupSortedIntSlice(t *testing.T) { func TestPriorityLocalitiesToClusterImpl(t *testing.T) { tests := []struct { name string - localities []xdsclient.Locality + localities []xdsresource.Locality priorityName string mechanism DiscoveryMechanism childPolicy *internalserviceconfig.BalancerConfig @@ -572,19 +572,19 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { wantErr bool }{{ name: "round robin as child, no LRS", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, }, { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, Weight: 80, @@ -630,19 +630,19 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { }, { name: "ring_hash as child", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, }, { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, Weight: 80, @@ -666,10 +666,10 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { }, { name: "unsupported child", - localities: []xdsclient.Locality{{ - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + localities: 
[]xdsresource.Locality{{ + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, @@ -698,7 +698,7 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { func TestLocalitiesToWeightedTarget(t *testing.T) { tests := []struct { name string - localities []xdsclient.Locality + localities []xdsresource.Locality priorityName string childPolicy *internalserviceconfig.BalancerConfig lrsServer *string @@ -707,19 +707,19 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { }{ { name: "roundrobin as child, with LRS", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, }, { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, Weight: 80, @@ -749,19 +749,19 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { }, { name: "roundrobin as child, no LRS", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, }, { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, Weight: 80, @@ -795,19 +795,19 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { }, { name: "weighted round robin as child, no LRS", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, }, { - Endpoints: 
[]xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, Weight: 80, @@ -856,26 +856,26 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { func TestLocalitiesToRingHash(t *testing.T) { tests := []struct { name string - localities []xdsclient.Locality + localities []xdsresource.Locality priorityName string wantAddrs []resolver.Address }{ { // Check that address weights are locality_weight * endpoint_weight. name: "with locality and endpoint weight", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, }, { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, Weight: 80, @@ -892,19 +892,19 @@ func TestLocalitiesToRingHash(t *testing.T) { { // Check that endpoint_weight is 0, weight is the locality weight. name: "locality weight only", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, }, { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, Weight: 80, @@ -921,18 +921,18 @@ func TestLocalitiesToRingHash(t *testing.T) { { // Check that locality_weight is 0, weight is the endpoint weight. 
name: "endpoint weight only", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, }, { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, }, diff --git a/xds/internal/balancer/clusterresolver/eds_impl_test.go b/xds/internal/balancer/clusterresolver/eds_impl_test.go index 00814a6212b2..feb96cfa56b5 100644 --- a/xds/internal/balancer/clusterresolver/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/eds_impl_test.go @@ -36,6 +36,7 @@ import ( "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) var ( @@ -405,7 +406,7 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { defer func() { balancergroup.DefaultSubBalancerCloseTimeout = oldCacheTimeout }() // The first update is an empty update. - xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, nil) + xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, nil) // Pick should fail with transient failure, and all priority removed error. if err := testErrPickerFromCh(cc.NewPickerCh, priority.ErrAllPrioritiesRemoved); err != nil { t.Fatal(err) @@ -425,7 +426,7 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { t.Fatal(err) } - xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, nil) + xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, nil) // Pick should fail with transient failure, and all priority removed error. 
if err := testErrPickerFromCh(cc.NewPickerCh, priority.ErrAllPrioritiesRemoved); err != nil { t.Fatal(err) diff --git a/xds/internal/balancer/clusterresolver/resource_resolver.go b/xds/internal/balancer/clusterresolver/resource_resolver.go index 2125bd2326f2..9d7db26ad14a 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -22,6 +22,7 @@ import ( "sync" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // resourceUpdate is a combined update from all the resources, in the order of @@ -186,7 +187,7 @@ func (rr *resourceResolver) generate() { return } switch uu := u.(type) { - case xdsclient.EndpointsUpdate: + case xdsresource.EndpointsUpdate: ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu}) case []string: ret = append(ret, priorityConfig{mechanism: rDM.dm, addresses: uu}) @@ -202,7 +203,7 @@ func (rr *resourceResolver) generate() { type edsDiscoveryMechanism struct { cancel func() - update xdsclient.EndpointsUpdate + update xdsresource.EndpointsUpdate updateReceived bool } @@ -224,7 +225,7 @@ func (er *edsDiscoveryMechanism) stop() { func newEDSResolver(nameToWatch string, xdsc xdsclient.XDSClient, topLevelResolver *resourceResolver) *edsDiscoveryMechanism { ret := &edsDiscoveryMechanism{} topLevelResolver.parent.logger.Infof("EDS watch started on %v", nameToWatch) - cancel := xdsc.WatchEndpoints(nameToWatch, func(update xdsclient.EndpointsUpdate, err error) { + cancel := xdsc.WatchEndpoints(nameToWatch, func(update xdsresource.EndpointsUpdate, err error) { topLevelResolver.mu.Lock() defer topLevelResolver.mu.Unlock() if err != nil { diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_test.go b/xds/internal/balancer/clusterresolver/resource_resolver_test.go index 2a365850cd78..432fdd9ceb65 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_test.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_test.go @@ -28,7 +28,7 @@ import ( "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" - xdsclient "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -36,7 +36,7 @@ const ( ) var ( - testEDSUpdates []xdsclient.EndpointsUpdate + testEDSUpdates []xdsresource.EndpointsUpdate ) func init() { @@ -54,7 +54,7 @@ func (s) TestResourceResolverOneEDSResource(t *testing.T) { name string clusterName, edsName string wantName string - edsUpdate xdsclient.EndpointsUpdate + edsUpdate xdsresource.EndpointsUpdate want []priorityConfig }{ {name: "watch EDS", @@ -779,7 +779,7 @@ func (s) TestResourceResolverError(t *testing.T) { // Invoke callback with an error, should get an update. 
edsErr := fmt.Errorf("EDS error") - fakeClient.InvokeWatchEDSCallback(gotEDSName1, xdsclient.EndpointsUpdate{}, edsErr) + fakeClient.InvokeWatchEDSCallback(gotEDSName1, xdsresource.EndpointsUpdate{}, edsErr) select { case u := <-rr.updateChannel: if u.err != edsErr { diff --git a/xds/internal/balancer/clusterresolver/testutil_test.go b/xds/internal/balancer/clusterresolver/testutil_test.go index 48759603827a..999621a7b3e4 100644 --- a/xds/internal/balancer/clusterresolver/testutil_test.go +++ b/xds/internal/balancer/clusterresolver/testutil_test.go @@ -30,14 +30,14 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // parseEDSRespProtoForTesting parses an EDS response, and panics if parsing fails. // // TODO: delete this. The EDS balancer tests should build an EndpointsUpdate // directly, instead of building and parsing a proto message. -func parseEDSRespProtoForTesting(m *xdspb.ClusterLoadAssignment) xdsclient.EndpointsUpdate { +func parseEDSRespProtoForTesting(m *xdspb.ClusterLoadAssignment) xdsresource.EndpointsUpdate { u, err := parseEDSRespProto(m) if err != nil { panic(err.Error()) @@ -46,8 +46,8 @@ func parseEDSRespProtoForTesting(m *xdspb.ClusterLoadAssignment) xdsclient.Endpo } // parseEDSRespProto turns an EDS response proto message into an EndpointsUpdate. -func parseEDSRespProto(m *xdspb.ClusterLoadAssignment) (xdsclient.EndpointsUpdate, error) { - ret := xdsclient.EndpointsUpdate{} +func parseEDSRespProto(m *xdspb.ClusterLoadAssignment) (xdsresource.EndpointsUpdate, error) { + ret := xdsresource.EndpointsUpdate{} for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) } @@ -55,7 +55,7 @@ func parseEDSRespProto(m *xdspb.ClusterLoadAssignment) (xdsclient.EndpointsUpdat for _, locality := range m.Endpoints { l := locality.GetLocality() if l == nil { - return xdsclient.EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) + return xdsresource.EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) } lid := internal.LocalityID{ Region: l.Region, @@ -64,7 +64,7 @@ func parseEDSRespProto(m *xdspb.ClusterLoadAssignment) (xdsclient.EndpointsUpdat } priority := locality.GetPriority() priorities[priority] = struct{}{} - ret.Localities = append(ret.Localities, xdsclient.Locality{ + ret.Localities = append(ret.Localities, xdsresource.Locality{ ID: lid, Endpoints: parseEndpoints(locality.GetLbEndpoints()), Weight: locality.GetLoadBalancingWeight().GetValue(), @@ -73,7 +73,7 @@ func parseEDSRespProto(m *xdspb.ClusterLoadAssignment) (xdsclient.EndpointsUpdat } for i := 0; i < len(priorities); i++ { if _, ok := priorities[uint32(i)]; !ok { - return xdsclient.EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) + return xdsresource.EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) } } return ret, nil @@ -83,7 +83,7 @@ func parseAddress(socketAddress *corepb.SocketAddress) string { return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue()))) } -func parseDropPolicy(dropPolicy
*xdspb.ClusterLoadAssignment_Policy_DropOverload) xdsresource.OverloadDropConfig { percentage := dropPolicy.GetDropPercentage() var ( numerator = percentage.GetNumerator() @@ -97,18 +97,18 @@ func parseDropPolicy(dropPolicy *xdspb.ClusterLoadAssignment_Policy_DropOverload case typepb.FractionalPercent_MILLION: denominator = 1000000 } - return xdsclient.OverloadDropConfig{ + return xdsresource.OverloadDropConfig{ Category: dropPolicy.GetCategory(), Numerator: numerator, Denominator: denominator, } } -func parseEndpoints(lbEndpoints []*endpointpb.LbEndpoint) []xdsclient.Endpoint { - endpoints := make([]xdsclient.Endpoint, 0, len(lbEndpoints)) +func parseEndpoints(lbEndpoints []*endpointpb.LbEndpoint) []xdsresource.Endpoint { + endpoints := make([]xdsresource.Endpoint, 0, len(lbEndpoints)) for _, lbEndpoint := range lbEndpoints { - endpoints = append(endpoints, xdsclient.Endpoint{ - HealthStatus: xdsclient.EndpointHealthStatus(lbEndpoint.GetHealthStatus()), + endpoints = append(endpoints, xdsresource.Endpoint{ + HealthStatus: xdsresource.EndpointHealthStatus(lbEndpoint.GetHealthStatus()), Address: parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()), Weight: lbEndpoint.GetLoadBalancingWeight().GetValue(), }) diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index ddf699f938b3..c418bc5d758c 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -40,7 +40,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -109,7 +109,7 @@ type virtualHost struct { // map from filter name to its config httpFilterConfigOverride map[string]httpfilter.FilterConfig // retry policy present in virtual host - retryConfig *xdsclient.RetryConfig + retryConfig *xdsresource.RetryConfig } // routeCluster holds information about a cluster as referenced by a route. 
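[Note on the drop-overload parsing in testutil_test.go above: parseDropPolicy reduces an Envoy FractionalPercent to a plain numerator/denominator pair, defaulting the denominator to 100. A minimal standalone Go sketch of that mapping follows; the numeric enum values are written out here as assumptions rather than imported from the generated typepb package.]

// denominatorValue mirrors the switch in parseDropPolicy. The constants
// below are assumptions standing in for typepb.FractionalPercent_*.
const (
	fractionHundred     = 0 // HUNDRED, the proto default
	fractionTenThousand = 1 // TEN_THOUSAND
	fractionMillion     = 2 // MILLION
)

func denominatorValue(d int) uint32 {
	switch d {
	case fractionTenThousand:
		return 10000
	case fractionMillion:
		return 1000000
	default: // HUNDRED, and anything unrecognized, falls back to 100.
		return 100
	}
}

[For example, a numerator of 5 with a TEN_THOUSAND denominator yields a 5/10000 = 0.05% drop rate.]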
@@ -120,13 +120,13 @@ type routeCluster struct { } type route struct { - m *xdsclient.CompositeMatcher // converted from route matchers - clusters wrr.WRR // holds *routeCluster entries + m *xdsresource.CompositeMatcher // converted from route matchers + clusters wrr.WRR // holds *routeCluster entries maxStreamDuration time.Duration // map from filter name to its config httpFilterConfigOverride map[string]httpfilter.FilterConfig - retryConfig *xdsclient.RetryConfig - hashPolicies []*xdsclient.HashPolicy + retryConfig *xdsresource.RetryConfig + hashPolicies []*xdsresource.HashPolicy } func (r route) String() string { @@ -138,7 +138,7 @@ type configSelector struct { virtualHost virtualHost routes []route clusters map[string]*clusterInfo - httpFilterConfig []xdsclient.HTTPFilter + httpFilterConfig []xdsresource.HTTPFilter } var errNoMatchedRouteFound = status.Errorf(codes.Unavailable, "no matched route was found") @@ -208,7 +208,7 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP return config, nil } -func retryConfigToPolicy(config *xdsclient.RetryConfig) *serviceconfig.RetryPolicy { +func retryConfigToPolicy(config *xdsresource.RetryConfig) *serviceconfig.RetryPolicy { return &serviceconfig.RetryPolicy{ MaxAttempts: int(config.NumRetries) + 1, InitialBackoff: config.RetryBackoff.BaseInterval, @@ -218,14 +218,14 @@ func retryConfigToPolicy(config *xdsclient.RetryConfig) *serviceconfig.RetryPoli } } -func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsclient.HashPolicy) uint64 { +func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsresource.HashPolicy) uint64 { var hash uint64 var generatedHash bool for _, policy := range hashPolicies { var policyHash uint64 var generatedPolicyHash bool switch policy.HashPolicyType { - case xdsclient.HashPolicyTypeHeader: + case xdsresource.HashPolicyTypeHeader: md, ok := metadata.FromOutgoingContext(rpcInfo.Context) if !ok { continue @@ -242,7 +242,7 @@ func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies [ policyHash = xxhash.Sum64String(joinedValues) generatedHash = true generatedPolicyHash = true - case xdsclient.HashPolicyTypeChannelID: + case xdsresource.HashPolicyTypeChannelID: // Hash the ClientConn pointer which logically uniquely // identifies the client. 
policyHash = xxhash.Sum64String(fmt.Sprintf("%p", &cs.r.cc)) @@ -372,7 +372,7 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro cs.routes[i].clusters = clusters var err error - cs.routes[i].m, err = xdsclient.RouteToMatcher(rt) + cs.routes[i].m, err = xdsresource.RouteToMatcher(rt) if err != nil { return nil, err } diff --git a/xds/internal/resolver/serviceconfig_test.go b/xds/internal/resolver/serviceconfig_test.go index a1a48944dc46..98d633a9e190 100644 --- a/xds/internal/resolver/serviceconfig_test.go +++ b/xds/internal/resolver/serviceconfig_test.go @@ -29,7 +29,7 @@ import ( iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/metadata" _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) func (s) TestPruneActiveClusters(t *testing.T) { @@ -57,7 +57,7 @@ func (s) TestGenerateRequestHash(t *testing.T) { } tests := []struct { name string - hashPolicies []*xdsclient.HashPolicy + hashPolicies []*xdsresource.HashPolicy requestHashWant uint64 rpcInfo iresolver.RPCInfo }{ @@ -65,8 +65,8 @@ func (s) TestGenerateRequestHash(t *testing.T) { // hash policies that specify to hash headers. { name: "test-generate-request-hash-headers", - hashPolicies: []*xdsclient.HashPolicy{{ - HashPolicyType: xdsclient.HashPolicyTypeHeader, + hashPolicies: []*xdsresource.HashPolicy{{ + HashPolicyType: xdsresource.HashPolicyTypeHeader, HeaderName: ":path", Regex: func() *regexp.Regexp { return regexp.MustCompile("/products") }(), // Will replace /products with /new-products, to test find and replace functionality. RegexSubstitution: "/new-products", @@ -82,8 +82,8 @@ func (s) TestGenerateRequestHash(t *testing.T) { // ClientConn (the pointer). { name: "test-generate-request-hash-channel-id", - hashPolicies: []*xdsclient.HashPolicy{{ - HashPolicyType: xdsclient.HashPolicyTypeChannelID, + hashPolicies: []*xdsresource.HashPolicy{{ + HashPolicyType: xdsresource.HashPolicyTypeChannelID, }}, requestHashWant: xxhash.Sum64String(fmt.Sprintf("%p", &cs.r.cc)), rpcInfo: iresolver.RPCInfo{}, @@ -93,8 +93,8 @@ func (s) TestGenerateRequestHash(t *testing.T) { // strings in the headers. { name: "test-generate-request-hash-empty-string", - hashPolicies: []*xdsclient.HashPolicy{{ - HashPolicyType: xdsclient.HashPolicyTypeHeader, + hashPolicies: []*xdsresource.HashPolicy{{ + HashPolicyType: xdsresource.HashPolicyTypeHeader, HeaderName: ":path", Regex: func() *regexp.Regexp { return regexp.MustCompile("") }(), RegexSubstitution: "e", diff --git a/xds/internal/resolver/watch_service.go b/xds/internal/resolver/watch_service.go index da0bf95f3b9f..4801fc40e43d 100644 --- a/xds/internal/resolver/watch_service.go +++ b/xds/internal/resolver/watch_service.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // serviceUpdate contains information received from the LDS/RDS responses which @@ -33,7 +34,7 @@ import ( // making a LDS to get the RouteConfig name. type serviceUpdate struct { // virtualHost contains routes and other configuration to route RPCs. - virtualHost *xdsclient.VirtualHost + virtualHost *xdsresource.VirtualHost // ldsConfig contains configuration that applies to all routes. 
ldsConfig ldsConfig } @@ -44,7 +45,7 @@ type ldsConfig struct { // maxStreamDuration is from the HTTP connection manager's // common_http_protocol_options field. maxStreamDuration time.Duration - httpFilterConfig []xdsclient.HTTPFilter + httpFilterConfig []xdsresource.HTTPFilter } // watchService uses LDS and RDS to discover information about the provided @@ -81,7 +82,7 @@ type serviceUpdateWatcher struct { rdsCancel func() } -func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, err error) { +func (w *serviceUpdateWatcher) handleLDSResp(update xdsresource.ListenerUpdate, err error) { w.logger.Infof("received LDS update: %+v, err: %v", pretty.ToJSON(update), err) w.mu.Lock() defer w.mu.Unlock() @@ -150,8 +151,8 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, er w.rdsCancel = w.c.WatchRouteConfig(update.RouteConfigName, w.handleRDSResp) } -func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsclient.RouteConfigUpdate) { - matchVh := xdsclient.FindBestMatchingVirtualHost(w.serviceName, update.VirtualHosts) +func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsresource.RouteConfigUpdate) { + matchVh := xdsresource.FindBestMatchingVirtualHost(w.serviceName, update.VirtualHosts) if matchVh == nil { // No matching virtual host found. w.serviceCb(serviceUpdate{}, fmt.Errorf("no matching virtual host found for %q", w.serviceName)) @@ -162,7 +163,7 @@ func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsclient.RouteC w.serviceCb(w.lastUpdate, nil) } -func (w *serviceUpdateWatcher) handleRDSResp(update xdsclient.RouteConfigUpdate, err error) { +func (w *serviceUpdateWatcher) handleRDSResp(update xdsresource.RouteConfigUpdate, err error) { w.logger.Infof("received RDS update: %+v, err: %v", pretty.ToJSON(update), err) w.mu.Lock() defer w.mu.Unlock() diff --git a/xds/internal/resolver/watch_service_test.go b/xds/internal/resolver/watch_service_test.go index 1bf65c4d4506..1a4b45bc8ad2 100644 --- a/xds/internal/resolver/watch_service_test.go +++ b/xds/internal/resolver/watch_service_test.go @@ -28,38 +28,38 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/proto" ) func (s) TestFindBestMatchingVirtualHost(t *testing.T) { var ( - oneExactMatch = &xdsclient.VirtualHost{ + oneExactMatch = &xdsresource.VirtualHost{ Domains: []string{"foo.bar.com"}, } - oneSuffixMatch = &xdsclient.VirtualHost{ + oneSuffixMatch = &xdsresource.VirtualHost{ Domains: []string{"*.bar.com"}, } - onePrefixMatch = &xdsclient.VirtualHost{ + onePrefixMatch = &xdsresource.VirtualHost{ Domains: []string{"foo.bar.*"}, } - oneUniversalMatch = &xdsclient.VirtualHost{ + oneUniversalMatch = &xdsresource.VirtualHost{ Domains: []string{"*"}, } - longExactMatch = &xdsclient.VirtualHost{ + longExactMatch = &xdsresource.VirtualHost{ Domains: []string{"v2.foo.bar.com"}, } - multipleMatch = &xdsclient.VirtualHost{ + multipleMatch = &xdsresource.VirtualHost{ Domains: []string{"pi.foo.bar.com", "314.*", "*.159"}, } - vhs = []*xdsclient.VirtualHost{oneExactMatch, oneSuffixMatch, onePrefixMatch, oneUniversalMatch, longExactMatch, multipleMatch} + vhs = []*xdsresource.VirtualHost{oneExactMatch, oneSuffixMatch, onePrefixMatch, oneUniversalMatch, longExactMatch, multipleMatch} ) tests := []struct { name string host 
string - vHosts []*xdsclient.VirtualHost - want *xdsclient.VirtualHost + vHosts []*xdsresource.VirtualHost + want *xdsresource.VirtualHost }{ {name: "exact-match", host: "foo.bar.com", vHosts: vhs, want: oneExactMatch}, {name: "suffix-match", host: "123.bar.com", vHosts: vhs, want: oneSuffixMatch}, @@ -75,7 +75,7 @@ func (s) TestFindBestMatchingVirtualHost(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := xdsclient.FindBestMatchingVirtualHost(tt.host, tt.vHosts); !cmp.Equal(got, tt.want, cmp.Comparer(proto.Equal)) { + if got := xdsresource.FindBestMatchingVirtualHost(tt.host, tt.vHosts); !cmp.Equal(got, tt.want, cmp.Comparer(proto.Equal)) { t.Errorf("findBestMatchingVirtualHost() = %v, want %v", got, tt.want) } }) @@ -117,15 +117,15 @@ func (s) TestServiceWatch(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}} - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + wantUpdate := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}}} + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -133,22 +133,22 @@ func (s) TestServiceWatch(t *testing.T) { t.Fatal(err) } - wantUpdate2 := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, - Routes: []*xdsclient.Route{{ + wantUpdate2 := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, + Routes: []*xdsresource.Route{{ Path: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}, }}, }} - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Path: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Path: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, { // Another virtual host, with different domains.
Domains: []string{"random"}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -171,15 +171,15 @@ func (s) TestServiceWatchLDSUpdate(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}} - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + wantUpdate := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}}} + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -188,19 +188,19 @@ func (s) TestServiceWatchLDSUpdate(t *testing.T) { } // Another LDS update with a different RDS_name. - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr + "2"}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr + "2"}, nil) if _, err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { t.Fatalf("wait for cancel route watch failed: %v, want nil", err) } waitForWatchRouteConfig(ctx, t, xdsC, routeStr+"2") // RDS update for the new name. 
- wantUpdate2 := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster + "2": {Weight: 1}}}}}} - xdsC.InvokeWatchRouteConfigCallback(routeStr+"2", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + wantUpdate2 := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster + "2": {Weight: 1}}}}}} + xdsC.InvokeWatchRouteConfigCallback(routeStr+"2", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster + "2": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster + "2": {Weight: 1}}}}, }, }, }, nil) @@ -223,19 +223,19 @@ func (s) TestServiceWatchLDSUpdateMaxStreamDuration(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{ + wantUpdate := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{ Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}}, ldsConfig: ldsConfig{maxStreamDuration: time.Second}, } - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -244,22 +244,22 @@ func (s) TestServiceWatchLDSUpdateMaxStreamDuration(t *testing.T) { } // Another LDS update with the same RDS_name but different MaxStreamDuration (zero in this case). 
- wantUpdate2 := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}} - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + wantUpdate2 := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}}} + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) if err := verifyServiceUpdate(ctx, serviceUpdateCh, wantUpdate2); err != nil { t.Fatal(err) } // RDS update. - wantUpdate3 := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{ + wantUpdate3 := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{ Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{cluster + "2": {Weight: 1}}}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cluster + "2": {Weight: 1}}}}, }} - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -282,18 +282,18 @@ func (s) TestServiceNotCancelRDSOnSameLDSUpdate(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{ + wantUpdate := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{ Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }} - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -303,7 +303,7 @@ func (s) TestServiceNotCancelRDSOnSameLDSUpdate(t *testing.T) { } // Another LDS update with the same RDS_name.
- xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if _, err := xdsC.WaitForCancelRouteConfigWatch(sCtx); err != context.DeadlineExceeded { @@ -327,14 +327,14 @@ func (s) TestServiceWatchInlineRDS(t *testing.T) { // First LDS update is LDS with RDS name to watch. waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}} - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + wantUpdate := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}}} + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -343,15 +343,15 @@ func (s) TestServiceWatchInlineRDS(t *testing.T) { } // Switch LDS resp to a LDS with inline RDS resource - wantVirtualHosts2 := &xdsclient.VirtualHost{Domains: []string{"target"}, - Routes: []*xdsclient.Route{{ + wantVirtualHosts2 := &xdsresource.VirtualHost{Domains: []string{"target"}, + Routes: []*xdsresource.Route{{ Path: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}, }}, } wantUpdate2 := serviceUpdate{virtualHost: wantVirtualHosts2} - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{InlineRouteConfig: &xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{wantVirtualHosts2}, + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{InlineRouteConfig: &xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{wantVirtualHosts2}, }}, nil) // This inline RDS resource should cause the RDS watch to be canceled. if _, err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { @@ -362,13 +362,13 @@ func (s) TestServiceWatchInlineRDS(t *testing.T) { } // Switch LDS update back to LDS with RDS name to watch. 
- xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -377,8 +377,8 @@ func (s) TestServiceWatchInlineRDS(t *testing.T) { } // Switch LDS resp to a LDS with inline RDS resource again. - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{InlineRouteConfig: &xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{wantVirtualHosts2}, + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{InlineRouteConfig: &xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{wantVirtualHosts2}, }}, nil) // This inline RDS resource should cause the RDS watch to be canceled. if _, err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 90e6c1d4db05..c05a7422904a 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -51,6 +51,7 @@ import ( "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -63,8 +64,8 @@ const ( var target = resolver.Target{Endpoint: targetStr} -var routerFilter = xdsclient.HTTPFilter{Name: "rtr", Filter: httpfilter.Get(router.TypeURL)} -var routerFilterList = []xdsclient.HTTPFilter{routerFilter} +var routerFilter = xdsresource.HTTPFilter{Name: "rtr", Filter: httpfilter.Get(router.TypeURL)} +var routerFilterList = []xdsresource.HTTPFilter{routerFilter} type s struct { grpctest.Tester @@ -262,17 +263,17 @@ func (s) TestXDSResolverWatchCallbackAfterClose(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Call the watchAPI callback after closing the resolver, and make sure no // update is triggered on the ClientConn.
xdsR.Close() - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -309,13 +310,13 @@ func (s) TestXDSResolverBadServiceUpdate(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. suErr := errors.New("bad serviceupdate") - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr) + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != nil || gotErrVal != suErr { t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr) @@ -335,17 +336,17 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) defer replaceRandNumGenerator(0)() for _, tt := range []struct { - routes []*xdsclient.Route + routes []*xdsresource.Route wantJSON string wantClusters map[string]bool }{ { - routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, + routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, wantJSON: `{"loadBalancingConfig":[{ "xds_cluster_manager_experimental":{ "children":{ @@ -357,7 +358,7 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { wantClusters: map[string]bool{"test-cluster-1": true}, }, { - routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{ + routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{ "cluster_1": {Weight: 75}, "cluster_2": {Weight: 25}, }}}, @@ -381,7 +382,7 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { wantClusters: map[string]bool{"cluster_1": true, "cluster_2": true}, }, { - routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{ + routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{ "cluster_1": {Weight: 75}, "cluster_2": {Weight: 25}, }}}, @@ -404,8 +405,8 @@ func (s) 
TestXDSResolverGoodServiceUpdate(t *testing.T) { } { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, Routes: tt.routes, @@ -474,22 +475,22 @@ func (s) TestXDSResolverRequestHash(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Invoke watchAPI callback with a good service update (with hash policies // specified) and wait for UpdateState method to be called on ClientConn. - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{ + Routes: []*xdsresource.Route{{ Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{ + WeightedClusters: map[string]xdsresource.WeightedCluster{ "cluster_1": {Weight: 75}, "cluster_2": {Weight: 25}, }, - HashPolicies: []*xdsclient.HashPolicy{{ - HashPolicyType: xdsclient.HashPolicyTypeHeader, + HashPolicies: []*xdsresource.HashPolicy{{ + HashPolicyType: xdsresource.HashPolicyTypeHeader, HeaderName: ":path", }}, }}, @@ -534,16 +535,16 @@ func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, }, }, }, nil) @@ -570,7 +571,7 @@ func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { // Delete the resource suErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "resource removed error") - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr) + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) if _, err = tcc.stateCh.Receive(ctx); err != nil { t.Fatalf("Error waiting for UpdateState to be called: %v", err) @@ -594,16 +595,16 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, }, }, }, nil) @@ -649,7 +650,7 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { // Delete the resource. The channel should receive a service config with the // original cluster but with an erroring config selector. 
suErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "resource removed error") - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr) + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) if gotState, err = tcc.stateCh.Receive(ctx); err != nil { t.Fatalf("Error waiting for UpdateState to be called: %v", err) @@ -702,7 +703,7 @@ func (s) TestXDSResolverWRR(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) defer func(oldNewWRR func() wrr.WRR) { newWRR = oldNewWRR }(newWRR) @@ -710,11 +711,11 @@ func (s) TestXDSResolverWRR(t *testing.T) { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{ + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{ "A": {Weight: 5}, "B": {Weight: 10}, }}}, @@ -762,7 +763,7 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) defer func(oldNewWRR func() wrr.WRR) { newWRR = oldNewWRR }(newWRR) @@ -770,21 +771,21 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{ + Routes: []*xdsresource.Route{{ Prefix: newStringP("/foo"), - WeightedClusters: map[string]xdsclient.WeightedCluster{"A": {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{"A": {Weight: 1}}, MaxStreamDuration: newDurationP(5 * time.Second), }, { Prefix: newStringP("/bar"), - WeightedClusters: map[string]xdsclient.WeightedCluster{"B": {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{"B": {Weight: 1}}, MaxStreamDuration: newDurationP(0), }, { Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{"C": {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{"C": {Weight: 1}}, }}, }, }, @@ -855,16 +856,16 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, }, }, }, nil) @@ -910,21 +911,21 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { // Perform TWO updates to ensure the old config selector does not hold a // reference to test-cluster-1. - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"NEW": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"NEW": {Weight: 1}}}}, }, }, }, nil) tcc.stateCh.Receive(ctx) // Ignore the first update. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"NEW": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"NEW": {Weight: 1}}}}, }, }, }, nil) @@ -959,11 +960,11 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { // test-cluster-1. res.OnCommitted() - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"NEW": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"NEW": {Weight: 1}}}}, }, }, }, nil) @@ -1004,13 +1005,13 @@ func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. suErr := errors.New("bad serviceupdate") - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr) + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != nil || gotErrVal != suErr { t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr) @@ -1018,11 +1019,11 @@ func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -1038,7 +1039,7 @@ func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. 
suErr2 := errors.New("bad serviceupdate 2") - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr2) + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr2) if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != nil || gotErrVal != suErr2 { t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr2) } @@ -1058,13 +1059,13 @@ func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. suErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "resource removed error") - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr) + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != context.DeadlineExceeded { t.Fatalf("ClientConn.ReportError() received %v, %v, want channel recv timeout", gotErrVal, gotErr) @@ -1104,12 +1105,12 @@ func (s) TestXDSResolverMultipleLDSUpdates(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) defer replaceRandNumGenerator(0)() // Send a new LDS update, with the same fields. - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer cancel() // Should NOT trigger a state update. @@ -1119,7 +1120,7 @@ func (s) TestXDSResolverMultipleLDSUpdates(t *testing.T) { } // Send a new LDS update, with the same RDS name, but different fields. 
- xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList}, nil) ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer cancel() gotState, err = tcc.stateCh.Receive(ctx) @@ -1187,7 +1188,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { var path []string testCases := []struct { name string - ldsFilters []xdsclient.HTTPFilter + ldsFilters []xdsresource.HTTPFilter vhOverrides map[string]httpfilter.FilterConfig rtOverrides map[string]httpfilter.FilterConfig clOverrides map[string]httpfilter.FilterConfig @@ -1197,7 +1198,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { }{ { name: "no router filter", - ldsFilters: []xdsclient.HTTPFilter{ + ldsFilters: []xdsresource.HTTPFilter{ {Name: "foo", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "foo1"}}, }, rpcRes: map[string][][]string{ @@ -1209,7 +1210,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { }, { name: "ignored after router filter", - ldsFilters: []xdsclient.HTTPFilter{ + ldsFilters: []xdsresource.HTTPFilter{ {Name: "foo", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "foo1"}}, routerFilter, {Name: "foo2", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "foo2"}}, @@ -1227,7 +1228,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { }, { name: "NewStream error; ensure earlier interceptor Done is still called", - ldsFilters: []xdsclient.HTTPFilter{ + ldsFilters: []xdsresource.HTTPFilter{ {Name: "foo", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "foo1"}}, {Name: "bar", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "bar1", newStreamErr: errors.New("bar newstream err")}}, routerFilter, @@ -1244,7 +1245,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { }, { name: "all overrides", - ldsFilters: []xdsclient.HTTPFilter{ + ldsFilters: []xdsresource.HTTPFilter{ {Name: "foo", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "foo1", newStreamErr: errors.New("this is overridden to nil")}}, {Name: "bar", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "bar1"}}, routerFilter, @@ -1280,7 +1281,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ RouteConfigName: routeStr, HTTPFilters: tc.ldsFilters, }, nil) @@ -1293,17 +1294,17 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{ - Prefix: newStringP("1"), WeightedClusters: map[string]xdsclient.WeightedCluster{ + Routes: []*xdsresource.Route{{ + Prefix: newStringP("1"), WeightedClusters: map[string]xdsresource.WeightedCluster{ "A": {Weight: 1}, "B": {Weight: 1}, }, }, { - Prefix: newStringP("2"), WeightedClusters: map[string]xdsclient.WeightedCluster{ + Prefix: newStringP("2"), WeightedClusters: map[string]xdsresource.WeightedCluster{ "A": {Weight: 1}, "B": {Weight: 1, HTTPFilterConfigOverride: tc.clOverrides}, }, @@ -1391,13 +1392,13 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { func replaceRandNumGenerator(start int64) func() { nextInt := start - xdsclient.RandInt63n = func(int64) (ret int64) { + xdsresource.RandInt63n = func(int64) (ret int64) { ret = nextInt nextInt++ return } return func() { - xdsclient.RandInt63n = grpcrand.Int63n + xdsresource.RandInt63n = grpcrand.Int63n } } diff --git a/xds/internal/server/conn_wrapper.go b/xds/internal/server/conn_wrapper.go index dd0374dc88e4..f1ee06e7b553 100644 --- a/xds/internal/server/conn_wrapper.go +++ b/xds/internal/server/conn_wrapper.go @@ -27,7 +27,7 @@ import ( "google.golang.org/grpc/credentials/tls/certprovider" xdsinternal "google.golang.org/grpc/internal/credentials/xds" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // connWrapper is a thin wrapper around a net.Conn returned by Accept(). It @@ -43,7 +43,7 @@ type connWrapper struct { net.Conn // The specific filter chain picked for handling this connection. - filterChain *xdsclient.FilterChain + filterChain *xdsresource.FilterChain // A reference fo the listenerWrapper on which this connection was accepted. parent *listenerWrapper @@ -61,11 +61,11 @@ type connWrapper struct { // The virtual hosts with matchable routes and instantiated HTTP Filters per // route. - virtualHosts []xdsclient.VirtualHostWithInterceptors + virtualHosts []xdsresource.VirtualHostWithInterceptors } // VirtualHosts returns the virtual hosts to be used for server side routing. -func (c *connWrapper) VirtualHosts() []xdsclient.VirtualHostWithInterceptors { +func (c *connWrapper) VirtualHosts() []xdsresource.VirtualHostWithInterceptors { return c.virtualHosts } diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index 99c9a7532307..045baf00f8c4 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -38,6 +38,7 @@ import ( "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) var ( @@ -73,8 +74,8 @@ func prefixLogger(p *listenerWrapper) *internalgrpclog.PrefixLogger { // XDSClient wraps the methods on the XDSClient which are required by // the listenerWrapper. 
type XDSClient interface { - WatchListener(string, func(xdsclient.ListenerUpdate, error)) func() - WatchRouteConfig(string, func(xdsclient.RouteConfigUpdate, error)) func() + WatchListener(string, func(xdsresource.ListenerUpdate, error)) func() + WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func() BootstrapConfig() *bootstrap.Config } @@ -136,7 +137,7 @@ func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan stru } type ldsUpdateWithError struct { - update xdsclient.ListenerUpdate + update xdsresource.ListenerUpdate err error } @@ -182,7 +183,7 @@ type listenerWrapper struct { // Current serving mode. mode connectivity.ServingMode // Filter chains received as part of the last good update. - filterChains *xdsclient.FilterChainManager + filterChains *xdsresource.FilterChainManager // rdsHandler is used for any dynamic RDS resources specified in a LDS // update. @@ -250,7 +251,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { conn.Close() continue } - fc, err := l.filterChains.Lookup(xdsclient.FilterChainLookupParams{ + fc, err := l.filterChains.Lookup(xdsresource.FilterChainLookupParams{ IsUnspecifiedListener: l.isUnspecifiedAddr, DestAddr: destAddr.IP, SourceAddr: srcAddr.IP, @@ -276,12 +277,12 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { if !env.RBACSupport { return &connWrapper{Conn: conn, filterChain: fc, parent: l}, nil } - var rc xdsclient.RouteConfigUpdate + var rc xdsresource.RouteConfigUpdate if fc.InlineRouteConfig != nil { rc = *fc.InlineRouteConfig } else { rcPtr := atomic.LoadPointer(&l.rdsUpdates) - rcuPtr := (*map[string]xdsclient.RouteConfigUpdate)(rcPtr) + rcuPtr := (*map[string]xdsresource.RouteConfigUpdate)(rcPtr) // This shouldn't happen, but this error protects against a panic. if rcuPtr == nil { return nil, errors.New("route configuration pointer is nil") @@ -340,7 +341,7 @@ func (l *listenerWrapper) run() { // handleLDSUpdate is the callback which handles LDS Updates. It writes the // received update to the update channel, which is picked up by the run // goroutine. -func (l *listenerWrapper) handleListenerUpdate(update xdsclient.ListenerUpdate, err error) { +func (l *listenerWrapper) handleListenerUpdate(update xdsresource.ListenerUpdate, err error) { if l.closed.HasFired() { l.logger.Warningf("Resource %q received update: %v with error: %v, after listener was closed", l.name, update, err) return @@ -429,7 +430,7 @@ func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) { } } -func (l *listenerWrapper) switchMode(fcs *xdsclient.FilterChainManager, newMode connectivity.ServingMode, err error) { +func (l *listenerWrapper) switchMode(fcs *xdsresource.FilterChainManager, newMode connectivity.ServingMode, err error) { l.mu.Lock() defer l.mu.Unlock() diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index 383729363665..1dba999008a5 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -38,7 +38,7 @@ import ( _ "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -272,7 +272,7 @@ func (s) TestNewListenerWrapper(t *testing.T) { } // Push an error to the listener update handler. 
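For reference, Accept() above reads the RDS snapshot through an atomic pointer instead of holding a mutex across connection handling. A minimal standalone sketch of that publish/load pattern follows; only the xdsresource.RouteConfigUpdate type comes from this patch, everything else is illustrative:

import (
	"errors"
	"sync/atomic"
	"unsafe"

	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)

// Writers publish a freshly built map; readers load it atomically and never
// mutate the snapshot they observe.
var rdsUpdates unsafe.Pointer // *map[string]xdsresource.RouteConfigUpdate

func publish(updates map[string]xdsresource.RouteConfigUpdate) {
	atomic.StorePointer(&rdsUpdates, unsafe.Pointer(&updates))
}

func snapshot() (map[string]xdsresource.RouteConfigUpdate, error) {
	p := (*map[string]xdsresource.RouteConfigUpdate)(atomic.LoadPointer(&rdsUpdates))
	if p == nil {
		// Mirrors the nil guard in Accept(): protects against a panic
		// before the first RDS update has been published.
		return nil, errors.New("route configuration pointer is nil")
	}
	return *p, nil
}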
- xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{}, errors.New("bad listener update")) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{}, errors.New("bad listener update")) timer := time.NewTimer(defaultTestShortTimeout) select { case <-timer.C: @@ -281,15 +281,15 @@ func (s) TestNewListenerWrapper(t *testing.T) { t.Fatalf("ready channel written to after receipt of a bad Listener update") } - fcm, err := xdsclient.NewFilterChainManager(listenerWithFilterChains) + fcm, err := xdsresource.NewFilterChainManager(listenerWithFilterChains, nil) if err != nil { t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) } // Push an update whose address does not match the address to which our // listener is bound, and verify that the ready channel is not written to. - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ - InboundListenerCfg: &xdsclient.InboundListenerConfig{ + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ Address: "10.0.0.1", Port: "50051", FilterChains: fcm, @@ -306,8 +306,8 @@ func (s) TestNewListenerWrapper(t *testing.T) { // Since there are no dynamic RDS updates needed to be received, the // ListenerWrapper does not have to wait for anything else before telling // that it is ready. - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ - InboundListenerCfg: &xdsclient.InboundListenerConfig{ + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ Address: fakeListenerHost, Port: strconv.Itoa(fakeListenerPort), FilterChains: fcm, @@ -345,7 +345,7 @@ func (s) TestNewListenerWrapperWithRouteUpdate(t *testing.T) { if name != testListenerResourceName { t.Fatalf("listenerWrapper registered a lds watch on %s, want %s", name, testListenerResourceName) } - fcm, err := xdsclient.NewFilterChainManager(listenerWithRouteConfiguration) + fcm, err := xdsresource.NewFilterChainManager(listenerWithRouteConfiguration, nil) if err != nil { t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) } @@ -354,8 +354,8 @@ func (s) TestNewListenerWrapperWithRouteUpdate(t *testing.T) { // RDS Resources that need to be received. This should ping rds handler // about which rds names to start, which will eventually start a watch on // xds client for rds name "route-1". - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ - InboundListenerCfg: &xdsclient.InboundListenerConfig{ + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ Address: fakeListenerHost, Port: strconv.Itoa(fakeListenerPort), FilterChains: fcm, @@ -383,7 +383,7 @@ func (s) TestNewListenerWrapperWithRouteUpdate(t *testing.T) { // should trigger the listener wrapper to fire GoodUpdate, as it has // received both it's LDS Configuration and also RDS Configuration, // specified in LDS Configuration. - xdsC.InvokeWatchRouteConfigCallback("route-1", xdsclient.RouteConfigUpdate{}, nil) + xdsC.InvokeWatchRouteConfigCallback("route-1", xdsresource.RouteConfigUpdate{}, nil) // All of the xDS updates have completed, so can expect to send a ping on // good update channel. @@ -408,12 +408,12 @@ func (s) TestListenerWrapper_Accept(t *testing.T) { // Push a good update with a filter chain which accepts local connections on // 192.168.0.0/16 subnet and port 80. 
- fcm, err := xdsclient.NewFilterChainManager(listenerWithFilterChains) + fcm, err := xdsresource.NewFilterChainManager(listenerWithFilterChains, nil) if err != nil { t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) } - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ - InboundListenerCfg: &xdsclient.InboundListenerConfig{ + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ Address: fakeListenerHost, Port: strconv.Itoa(fakeListenerPort), FilterChains: fcm, diff --git a/xds/internal/server/rds_handler.go b/xds/internal/server/rds_handler.go index cc676c4ca05f..722748cbd526 100644 --- a/xds/internal/server/rds_handler.go +++ b/xds/internal/server/rds_handler.go @@ -21,13 +21,13 @@ package server import ( "sync" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // rdsHandlerUpdate wraps the full RouteConfigUpdate that are dynamically // queried for a given server side listener. type rdsHandlerUpdate struct { - updates map[string]xdsclient.RouteConfigUpdate + updates map[string]xdsresource.RouteConfigUpdate err error } @@ -37,7 +37,7 @@ type rdsHandler struct { xdsC XDSClient mu sync.Mutex - updates map[string]xdsclient.RouteConfigUpdate + updates map[string]xdsresource.RouteConfigUpdate cancels map[string]func() // For a rdsHandler update, the only update wrapped listener cares about is @@ -53,7 +53,7 @@ func newRDSHandler(xdsC XDSClient, ch chan rdsHandlerUpdate) *rdsHandler { return &rdsHandler{ xdsC: xdsC, updateChannel: ch, - updates: make(map[string]xdsclient.RouteConfigUpdate), + updates: make(map[string]xdsresource.RouteConfigUpdate), cancels: make(map[string]func()), } } @@ -70,7 +70,7 @@ func (rh *rdsHandler) updateRouteNamesToWatch(routeNamesToWatch map[string]bool) for routeName := range routeNamesToWatch { if _, ok := rh.cancels[routeName]; !ok { func(routeName string) { - rh.cancels[routeName] = rh.xdsC.WatchRouteConfig(routeName, func(update xdsclient.RouteConfigUpdate, err error) { + rh.cancels[routeName] = rh.xdsC.WatchRouteConfig(routeName, func(update xdsresource.RouteConfigUpdate, err error) { rh.handleRouteUpdate(routeName, update, err) }) }(routeName) @@ -97,7 +97,7 @@ func (rh *rdsHandler) updateRouteNamesToWatch(routeNamesToWatch map[string]bool) // handleRouteUpdate persists the route config for a given route name, and also // sends an update to the Listener Wrapper on an error received or if the rds // handler has a full collection of updates. 
-func (rh *rdsHandler) handleRouteUpdate(routeName string, update xdsclient.RouteConfigUpdate, err error) { +func (rh *rdsHandler) handleRouteUpdate(routeName string, update xdsresource.RouteConfigUpdate, err error) { if err != nil { drainAndPush(rh.updateChannel, rdsHandlerUpdate{err: err}) return diff --git a/xds/internal/server/rds_handler_test.go b/xds/internal/server/rds_handler_test.go index d1daffd940c0..fc622851cfa2 100644 --- a/xds/internal/server/rds_handler_test.go +++ b/xds/internal/server/rds_handler_test.go @@ -26,7 +26,7 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -86,12 +86,12 @@ func (s) TestSuccessCaseOneRDSWatch(t *testing.T) { if gotRoute != route1 { t.Fatalf("xdsClient.WatchRDS called for route: %v, want %v", gotRoute, route1) } - rdsUpdate := xdsclient.RouteConfigUpdate{} + rdsUpdate := xdsresource.RouteConfigUpdate{} // Invoke callback with the xds client with a certain route update. Due to // this route update updating every route name that rds handler handles, // this should write to the update channel to send to the listener. fakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate, nil) - rhuWant := map[string]xdsclient.RouteConfigUpdate{route1: rdsUpdate} + rhuWant := map[string]xdsresource.RouteConfigUpdate{route1: rdsUpdate} select { case rhu := <-ch: if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { @@ -147,7 +147,7 @@ func (s) TestSuccessCaseTwoUpdates(t *testing.T) { // Invoke the callback with an update for route 1. This shouldn't cause the // handler to write an update, as it has not received RouteConfigurations // for every RouteName. - rdsUpdate1 := xdsclient.RouteConfigUpdate{} + rdsUpdate1 := xdsresource.RouteConfigUpdate{} fakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate1, nil) // The RDS Handler should not send an update. @@ -162,12 +162,12 @@ func (s) TestSuccessCaseTwoUpdates(t *testing.T) { // Invoke the callback with an update for route 2. This should cause the // handler to write an update, as it has received RouteConfigurations for // every RouteName. - rdsUpdate2 := xdsclient.RouteConfigUpdate{} + rdsUpdate2 := xdsresource.RouteConfigUpdate{} fakeClient.InvokeWatchRouteConfigCallback(route2, rdsUpdate2, nil) // The RDS Handler should then update the listener wrapper with an update // with two route configurations, as both route names the RDS Handler handles // have received an update. - rhuWant := map[string]xdsclient.RouteConfigUpdate{route1: rdsUpdate1, route2: rdsUpdate2} + rhuWant := map[string]xdsresource.RouteConfigUpdate{route1: rdsUpdate1, route2: rdsUpdate2} select { case rhu := <-ch: if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { @@ -213,12 +213,12 @@ func (s) TestSuccessCaseDeletedRoute(t *testing.T) { t.Fatalf("xdsClient.CancelRDS called for route %v, want %v", routeNameDeleted, route2) } - rdsUpdate := xdsclient.RouteConfigUpdate{} + rdsUpdate := xdsresource.RouteConfigUpdate{} // Invoke callback with the xds client with a certain route update. Due to // this route update updating every route name that rds handler handles, // this should write to the update channel to send to the listener. 
fakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate, nil) - rhuWant := map[string]xdsclient.RouteConfigUpdate{route1: rdsUpdate} + rhuWant := map[string]xdsresource.RouteConfigUpdate{route1: rdsUpdate} select { case rhu := <-ch: if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { @@ -281,7 +281,7 @@ func (s) TestSuccessCaseTwoUpdatesAddAndDeleteRoute(t *testing.T) { // Invoke the callback with an update for route 2. This shouldn't cause the // handler to write an update, as it has not received RouteConfigurations // for every RouteName. - rdsUpdate2 := xdsclient.RouteConfigUpdate{} + rdsUpdate2 := xdsresource.RouteConfigUpdate{} fakeClient.InvokeWatchRouteConfigCallback(route2, rdsUpdate2, nil) // The RDS Handler should not send an update. @@ -296,12 +296,12 @@ func (s) TestSuccessCaseTwoUpdatesAddAndDeleteRoute(t *testing.T) { // Invoke the callback with an update for route 3. This should cause the // handler to write an update, as it has received RouteConfigurations for // every RouteName. - rdsUpdate3 := xdsclient.RouteConfigUpdate{} + rdsUpdate3 := xdsresource.RouteConfigUpdate{} fakeClient.InvokeWatchRouteConfigCallback(route3, rdsUpdate3, nil) // The RDS Handler should then update the listener wrapper with an update // with two route configurations, as both route names the RDS Handler handles // have received an update. - rhuWant := map[string]xdsclient.RouteConfigUpdate{route2: rdsUpdate2, route3: rdsUpdate3} + rhuWant := map[string]xdsresource.RouteConfigUpdate{route2: rdsUpdate2, route3: rdsUpdate3} select { case rhu := <-rh.updateChannel: if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { @@ -335,8 +335,8 @@ func (s) TestSuccessCaseSecondUpdateMakesRouteFull(t *testing.T) { // Invoke the callbacks for two of the three watches. Since RDS is not full, // this shouldn't trigger rds handler to write an update to update buffer. - fakeClient.InvokeWatchRouteConfigCallback(route1, xdsclient.RouteConfigUpdate{}, nil) - fakeClient.InvokeWatchRouteConfigCallback(route2, xdsclient.RouteConfigUpdate{}, nil) + fakeClient.InvokeWatchRouteConfigCallback(route1, xdsresource.RouteConfigUpdate{}, nil) + fakeClient.InvokeWatchRouteConfigCallback(route2, xdsresource.RouteConfigUpdate{}, nil) // The RDS Handler should not send an update. 
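The gate these assertions exercise is that the handler pushes only once every watched route name has reported. A sketch of that check, using the field names from rds_handler.go above (the exact condition is an assumption based on the documented behavior, not a line quoted from this patch):

// Inside handleRouteUpdate, after persisting the new update: push to the
// listener wrapper only when every active watch has produced a config.
rh.updates[routeName] = update
if len(rh.updates) == len(rh.cancels) {
	drainAndPush(rh.updateChannel, rdsHandlerUpdate{updates: rh.updates})
}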
sCtx, sCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) @@ -360,7 +360,7 @@ func (s) TestSuccessCaseSecondUpdateMakesRouteFull(t *testing.T) { if routeNameDeleted != route3 { t.Fatalf("xdsClient.CancelRDS called for route %v, want %v", routeNameDeleted, route1) } - rhuWant := map[string]xdsclient.RouteConfigUpdate{route1: {}, route2: {}} + rhuWant := map[string]xdsresource.RouteConfigUpdate{route1: {}, route2: {}} select { case rhu := <-ch: if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { @@ -389,7 +389,7 @@ func (s) TestErrorReceived(t *testing.T) { } rdsErr := errors.New("some error") - fakeClient.InvokeWatchRouteConfigCallback(route1, xdsclient.RouteConfigUpdate{}, rdsErr) + fakeClient.InvokeWatchRouteConfigCallback(route1, xdsresource.RouteConfigUpdate{}, rdsErr) select { case rhu := <-ch: if rhu.err.Error() != "some error" { diff --git a/xds/internal/testutils/fakeclient/client.go b/xds/internal/testutils/fakeclient/client.go index b582fd9bee91..132fa413a7e3 100644 --- a/xds/internal/testutils/fakeclient/client.go +++ b/xds/internal/testutils/fakeclient/client.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // Client is a fake implementation of an xds client. It exposes a bunch of @@ -51,16 +52,16 @@ type Client struct { loadStore *load.Store bootstrapCfg *bootstrap.Config - ldsCb func(xdsclient.ListenerUpdate, error) - rdsCbs map[string]func(xdsclient.RouteConfigUpdate, error) - cdsCbs map[string]func(xdsclient.ClusterUpdate, error) - edsCbs map[string]func(xdsclient.EndpointsUpdate, error) + ldsCb func(xdsresource.ListenerUpdate, error) + rdsCbs map[string]func(xdsresource.RouteConfigUpdate, error) + cdsCbs map[string]func(xdsresource.ClusterUpdate, error) + edsCbs map[string]func(xdsresource.EndpointsUpdate, error) Closed *grpcsync.Event // fired when Close is called. } // WatchListener registers a LDS watch. -func (xdsC *Client) WatchListener(serviceName string, callback func(xdsclient.ListenerUpdate, error)) func() { +func (xdsC *Client) WatchListener(serviceName string, callback func(xdsresource.ListenerUpdate, error)) func() { xdsC.ldsCb = callback xdsC.ldsWatchCh.Send(serviceName) return func() { @@ -82,7 +83,7 @@ func (xdsC *Client) WaitForWatchListener(ctx context.Context) (string, error) { // // Not thread safe with WatchListener. Only call this after // WaitForWatchListener. -func (xdsC *Client) InvokeWatchListenerCallback(update xdsclient.ListenerUpdate, err error) { +func (xdsC *Client) InvokeWatchListenerCallback(update xdsresource.ListenerUpdate, err error) { xdsC.ldsCb(update, err) } @@ -94,7 +95,7 @@ func (xdsC *Client) WaitForCancelListenerWatch(ctx context.Context) error { } // WatchRouteConfig registers a RDS watch. -func (xdsC *Client) WatchRouteConfig(routeName string, callback func(xdsclient.RouteConfigUpdate, error)) func() { +func (xdsC *Client) WatchRouteConfig(routeName string, callback func(xdsresource.RouteConfigUpdate, error)) func() { xdsC.rdsCbs[routeName] = callback xdsC.rdsWatchCh.Send(routeName) return func() { @@ -116,7 +117,7 @@ func (xdsC *Client) WaitForWatchRouteConfig(ctx context.Context) (string, error) // // Not thread safe with WatchRouteConfig. Only call this after // WaitForWatchRouteConfig. 
-func (xdsC *Client) InvokeWatchRouteConfigCallback(name string, update xdsclient.RouteConfigUpdate, err error) { +func (xdsC *Client) InvokeWatchRouteConfigCallback(name string, update xdsresource.RouteConfigUpdate, err error) { if len(xdsC.rdsCbs) != 1 { xdsC.rdsCbs[name](update, err) return @@ -141,7 +142,7 @@ func (xdsC *Client) WaitForCancelRouteConfigWatch(ctx context.Context) (string, } // WatchCluster registers a CDS watch. -func (xdsC *Client) WatchCluster(clusterName string, callback func(xdsclient.ClusterUpdate, error)) func() { +func (xdsC *Client) WatchCluster(clusterName string, callback func(xdsresource.ClusterUpdate, error)) func() { // Due to the tree like structure of aggregate clusters, there can be multiple callbacks persisted for each cluster // node. However, the client doesn't care about the parent child relationship between the nodes, only that it invokes // the right callback for a particular cluster. @@ -166,7 +167,7 @@ func (xdsC *Client) WaitForWatchCluster(ctx context.Context) (string, error) { // // Not thread safe with WatchCluster. Only call this after // WaitForWatchCluster. -func (xdsC *Client) InvokeWatchClusterCallback(update xdsclient.ClusterUpdate, err error) { +func (xdsC *Client) InvokeWatchClusterCallback(update xdsresource.ClusterUpdate, err error) { // Keeps functionality with previous usage of this, if single callback call that callback. if len(xdsC.cdsCbs) == 1 { var clusterName string @@ -192,7 +193,7 @@ func (xdsC *Client) WaitForCancelClusterWatch(ctx context.Context) (string, erro } // WatchEndpoints registers an EDS watch for provided clusterName. -func (xdsC *Client) WatchEndpoints(clusterName string, callback func(xdsclient.EndpointsUpdate, error)) (cancel func()) { +func (xdsC *Client) WatchEndpoints(clusterName string, callback func(xdsresource.EndpointsUpdate, error)) (cancel func()) { xdsC.edsCbs[clusterName] = callback xdsC.edsWatchCh.Send(clusterName) return func() { @@ -214,7 +215,7 @@ func (xdsC *Client) WaitForWatchEDS(ctx context.Context) (string, error) { // // Not thread safe with WatchEndpoints. Only call this after // WaitForWatchEDS. -func (xdsC *Client) InvokeWatchEDSCallback(name string, update xdsclient.EndpointsUpdate, err error) { +func (xdsC *Client) InvokeWatchEDSCallback(name string, update xdsresource.EndpointsUpdate, err error) { if len(xdsC.edsCbs) != 1 { // This may panic if name isn't found. But it's fine for tests. 
xdsC.edsCbs[name](update, err) @@ -316,9 +317,9 @@ func NewClientWithName(name string) *Client { loadReportCh: testutils.NewChannel(), lrsCancelCh: testutils.NewChannel(), loadStore: load.NewStore(), - rdsCbs: make(map[string]func(xdsclient.RouteConfigUpdate, error)), - cdsCbs: make(map[string]func(xdsclient.ClusterUpdate, error)), - edsCbs: make(map[string]func(xdsclient.EndpointsUpdate, error)), + rdsCbs: make(map[string]func(xdsresource.RouteConfigUpdate, error)), + cdsCbs: make(map[string]func(xdsresource.ClusterUpdate, error)), + edsCbs: make(map[string]func(xdsresource.EndpointsUpdate, error)), Closed: grpcsync.NewEvent(), } } diff --git a/xds/internal/xdsclient/attributes.go b/xds/internal/xdsclient/attributes.go index 467c205a2559..52507bd83699 100644 --- a/xds/internal/xdsclient/attributes.go +++ b/xds/internal/xdsclient/attributes.go @@ -21,6 +21,7 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) type clientKeyType string @@ -31,10 +32,10 @@ const clientKey = clientKeyType("grpc.xds.internal.client.Client") // (collectively termed as xDS) on a remote management server, to discover // various dynamic resources. type XDSClient interface { - WatchListener(string, func(ListenerUpdate, error)) func() - WatchRouteConfig(string, func(RouteConfigUpdate, error)) func() - WatchCluster(string, func(ClusterUpdate, error)) func() - WatchEndpoints(clusterName string, edsCb func(EndpointsUpdate, error)) (cancel func()) + WatchListener(string, func(xdsresource.ListenerUpdate, error)) func() + WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func() + WatchCluster(string, func(xdsresource.ClusterUpdate, error)) func() + WatchEndpoints(clusterName string, edsCb func(xdsresource.EndpointsUpdate, error)) (cancel func()) ReportLoad(server string) (*load.Store, func()) DumpLDS() (string, map[string]UpdateWithMD) diff --git a/xds/internal/xdsclient/callback.go b/xds/internal/xdsclient/callback.go index 0c2665e84c0e..6643d1d4e824 100644 --- a/xds/internal/xdsclient/callback.go +++ b/xds/internal/xdsclient/callback.go @@ -20,6 +20,7 @@ package xdsclient import ( "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/proto" ) @@ -52,19 +53,19 @@ func (c *clientImpl) callCallback(wiu *watcherInfoWithUpdate) { switch wiu.wi.rType { case ListenerResource: if s, ok := c.ldsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.ldsCallback(wiu.update.(ListenerUpdate), wiu.err) } + ccb = func() { wiu.wi.ldsCallback(wiu.update.(xdsresource.ListenerUpdate), wiu.err) } } case RouteConfigResource: if s, ok := c.rdsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.rdsCallback(wiu.update.(RouteConfigUpdate), wiu.err) } + ccb = func() { wiu.wi.rdsCallback(wiu.update.(xdsresource.RouteConfigUpdate), wiu.err) } } case ClusterResource: if s, ok := c.cdsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.cdsCallback(wiu.update.(ClusterUpdate), wiu.err) } + ccb = func() { wiu.wi.cdsCallback(wiu.update.(xdsresource.ClusterUpdate), wiu.err) } } case EndpointsResource: if s, ok := c.edsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.edsCallback(wiu.update.(EndpointsUpdate), wiu.err) } + ccb = func() { wiu.wi.edsCallback(wiu.update.(xdsresource.EndpointsUpdate), wiu.err) } } } c.mu.Unlock() @@ -79,7 
+80,7 @@ func (c *clientImpl) callCallback(wiu *watcherInfoWithUpdate) { // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. -func (c *clientImpl) NewListeners(updates map[string]ListenerUpdateErrTuple, metadata UpdateMetadata) { +func (c *clientImpl) NewListeners(updates map[string]xdsresource.ListenerUpdateErrTuple, metadata xdsresource.UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() @@ -116,7 +117,7 @@ func (c *clientImpl) NewListeners(updates map[string]ListenerUpdateErrTuple, met // NACK metadata because some other resources in the same response // are invalid. mdCopy := metadata - mdCopy.Status = ServiceStatusACKed + mdCopy.Status = xdsresource.ServiceStatusACKed mdCopy.ErrState = nil if metadata.ErrState != nil { mdCopy.Version = metadata.ErrState.Version @@ -132,7 +133,7 @@ func (c *clientImpl) NewListeners(updates map[string]ListenerUpdateErrTuple, met // the resource from cache, and also send an resource not found // error to indicate resource removed. delete(c.ldsCache, name) - c.ldsMD[name] = UpdateMetadata{Status: ServiceStatusNotExist} + c.ldsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for wi := range c.ldsWatchers[name] { wi.resourceNotFound() } @@ -148,7 +149,7 @@ func (c *clientImpl) NewListeners(updates map[string]ListenerUpdateErrTuple, met // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. -func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdateErrTuple, metadata UpdateMetadata) { +func (c *clientImpl) NewRouteConfigs(updates map[string]xdsresource.RouteConfigUpdateErrTuple, metadata xdsresource.UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() @@ -186,7 +187,7 @@ func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdateErrTupl // NACK metadata because some other resources in the same response // are invalid. mdCopy := metadata - mdCopy.Status = ServiceStatusACKed + mdCopy.Status = xdsresource.ServiceStatusACKed mdCopy.ErrState = nil if metadata.ErrState != nil { mdCopy.Version = metadata.ErrState.Version @@ -201,7 +202,7 @@ func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdateErrTupl // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. -func (c *clientImpl) NewClusters(updates map[string]ClusterUpdateErrTuple, metadata UpdateMetadata) { +func (c *clientImpl) NewClusters(updates map[string]xdsresource.ClusterUpdateErrTuple, metadata xdsresource.UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() @@ -240,7 +241,7 @@ func (c *clientImpl) NewClusters(updates map[string]ClusterUpdateErrTuple, metad // NACK metadata because some other resources in the same response // are invalid. mdCopy := metadata - mdCopy.Status = ServiceStatusACKed + mdCopy.Status = xdsresource.ServiceStatusACKed mdCopy.ErrState = nil if metadata.ErrState != nil { mdCopy.Version = metadata.ErrState.Version @@ -256,7 +257,7 @@ func (c *clientImpl) NewClusters(updates map[string]ClusterUpdateErrTuple, metad // from cache, and also send an resource not found error to indicate // resource removed. 
delete(c.cdsCache, name) - c.ldsMD[name] = UpdateMetadata{Status: ServiceStatusNotExist} + c.ldsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for wi := range c.cdsWatchers[name] { wi.resourceNotFound() } @@ -272,7 +273,7 @@ func (c *clientImpl) NewClusters(updates map[string]ClusterUpdateErrTuple, metad // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. -func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdateErrTuple, metadata UpdateMetadata) { +func (c *clientImpl) NewEndpoints(updates map[string]xdsresource.EndpointsUpdateErrTuple, metadata xdsresource.UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() @@ -311,7 +312,7 @@ func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdateErrTuple, me // NACK metadata because some other resources in the same response // are invalid. mdCopy := metadata - mdCopy.Status = ServiceStatusACKed + mdCopy.Status = xdsresource.ServiceStatusACKed mdCopy.ErrState = nil if metadata.ErrState != nil { mdCopy.Version = metadata.ErrState.Version diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 39f1df215d1a..74dab87742a5 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -24,27 +24,20 @@ import ( "context" "errors" "fmt" - "regexp" "sync" "time" "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/xds/matcher" - "google.golang.org/grpc/xds/internal/httpfilter" - "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/grpc" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) var ( @@ -70,12 +63,6 @@ func getAPIClientBuilder(version version.TransportAPI) APIClientBuilder { return nil } -// UpdateValidatorFunc performs validations on update structs using -// context/logic available at the xdsClient layer. Since these validation are -// performed on internal update structs, they can be shared between different -// API clients. -type UpdateValidatorFunc func(interface{}) error - // BuildOptions contains options to be passed to client builders. type BuildOptions struct { // Parent is a top-level xDS client which has the intelligence to take @@ -83,7 +70,7 @@ type BuildOptions struct { // server. Parent UpdateHandler // Validator performs post unmarshal validation checks. - Validator UpdateValidatorFunc + Validator xdsresource.UpdateValidatorFunc // NodeProto contains the Node proto to be used in xDS requests. The actual // type depends on the transport protocol version used. NodeProto proto.Message @@ -140,437 +127,19 @@ type loadReportingOptions struct { // resource updates from an APIClient for a specific version. type UpdateHandler interface { // NewListeners handles updates to xDS listener resources. - NewListeners(map[string]ListenerUpdateErrTuple, UpdateMetadata) + NewListeners(map[string]xdsresource.ListenerUpdateErrTuple, xdsresource.UpdateMetadata) // NewRouteConfigs handles updates to xDS RouteConfiguration resources. 
- NewRouteConfigs(map[string]RouteConfigUpdateErrTuple, UpdateMetadata) + NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple, xdsresource.UpdateMetadata) // NewClusters handles updates to xDS Cluster resources. - NewClusters(map[string]ClusterUpdateErrTuple, UpdateMetadata) + NewClusters(map[string]xdsresource.ClusterUpdateErrTuple, xdsresource.UpdateMetadata) // NewEndpoints handles updates to xDS ClusterLoadAssignment (or tersely // referred to as Endpoints) resources. - NewEndpoints(map[string]EndpointsUpdateErrTuple, UpdateMetadata) + NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple, xdsresource.UpdateMetadata) // NewConnectionError handles connection errors from the xDS stream. The // error will be reported to all the resource watchers. NewConnectionError(err error) } -// ServiceStatus is the status of the update. -type ServiceStatus int - -const ( - // ServiceStatusUnknown is the default state, before a watch is started for - // the resource. - ServiceStatusUnknown ServiceStatus = iota - // ServiceStatusRequested is when the watch is started, but before and - // response is received. - ServiceStatusRequested - // ServiceStatusNotExist is when the resource doesn't exist in - // state-of-the-world responses (e.g. LDS and CDS), which means the resource - // is removed by the management server. - ServiceStatusNotExist // Resource is removed in the server, in LDS/CDS. - // ServiceStatusACKed is when the resource is ACKed. - ServiceStatusACKed - // ServiceStatusNACKed is when the resource is NACKed. - ServiceStatusNACKed -) - -// UpdateErrorMetadata is part of UpdateMetadata. It contains the error state -// when a response is NACKed. -type UpdateErrorMetadata struct { - // Version is the version of the NACKed response. - Version string - // Err contains why the response was NACKed. - Err error - // Timestamp is when the NACKed response was received. - Timestamp time.Time -} - -// UpdateMetadata contains the metadata for each update, including timestamp, -// raw message, and so on. -type UpdateMetadata struct { - // Status is the status of this resource, e.g. ACKed, NACKed, or - // Not_exist(removed). - Status ServiceStatus - // Version is the version of the xds response. Note that this is the version - // of the resource in use (previous ACKed). If a response is NACKed, the - // NACKed version is in ErrState. - Version string - // Timestamp is when the response is received. - Timestamp time.Time - // ErrState is set when the update is NACKed. - ErrState *UpdateErrorMetadata -} - -// ListenerUpdate contains information received in an LDS response, which is of -// interest to the registered LDS watcher. -type ListenerUpdate struct { - // RouteConfigName is the route configuration name corresponding to the - // target which is being watched through LDS. - // - // Exactly one of RouteConfigName and InlineRouteConfig is set. - RouteConfigName string - // InlineRouteConfig is the inline route configuration (RDS response) - // returned inside LDS. - // - // Exactly one of RouteConfigName and InlineRouteConfig is set. - InlineRouteConfig *RouteConfigUpdate - - // MaxStreamDuration contains the HTTP connection manager's - // common_http_protocol_options.max_stream_duration field, or zero if - // unset. - MaxStreamDuration time.Duration - // HTTPFilters is a list of HTTP filters (name, config) from the LDS - // response. - HTTPFilters []HTTPFilter - // InboundListenerCfg contains inbound listener configuration. 
- InboundListenerCfg *InboundListenerConfig - - // Raw is the resource from the xds response. - Raw *anypb.Any -} - -// HTTPFilter represents one HTTP filter from an LDS response's HTTP connection -// manager field. -type HTTPFilter struct { - // Name is an arbitrary name of the filter. Used for applying override - // settings in virtual host / route / weighted cluster configuration (not - // yet supported). - Name string - // Filter is the HTTP filter found in the registry for the config type. - Filter httpfilter.Filter - // Config contains the filter's configuration - Config httpfilter.FilterConfig -} - -// InboundListenerConfig contains information about the inbound listener, i.e -// the server-side listener. -type InboundListenerConfig struct { - // Address is the local address on which the inbound listener is expected to - // accept incoming connections. - Address string - // Port is the local port on which the inbound listener is expected to - // accept incoming connections. - Port string - // FilterChains is the list of filter chains associated with this listener. - FilterChains *FilterChainManager -} - -// RouteConfigUpdate contains information received in an RDS response, which is -// of interest to the registered RDS watcher. -type RouteConfigUpdate struct { - VirtualHosts []*VirtualHost - // Raw is the resource from the xds response. - Raw *anypb.Any -} - -// VirtualHost contains the routes for a list of Domains. -// -// Note that the domains in this slice can be a wildcard, not an exact string. -// The consumer of this struct needs to find the best match for its hostname. -type VirtualHost struct { - Domains []string - // Routes contains a list of routes, each containing matchers and - // corresponding action. - Routes []*Route - // HTTPFilterConfigOverride contains any HTTP filter config overrides for - // the virtual host which may be present. An individual filter's override - // may be unused if the matching Route contains an override for that - // filter. - HTTPFilterConfigOverride map[string]httpfilter.FilterConfig - RetryConfig *RetryConfig -} - -// RetryConfig contains all retry-related configuration in either a VirtualHost -// or Route. -type RetryConfig struct { - // RetryOn is a set of status codes on which to retry. Only Canceled, - // DeadlineExceeded, Internal, ResourceExhausted, and Unavailable are - // supported; any other values will be omitted. - RetryOn map[codes.Code]bool - NumRetries uint32 // maximum number of retry attempts - RetryBackoff RetryBackoff // retry backoff policy -} - -// RetryBackoff describes the backoff policy for retries. -type RetryBackoff struct { - BaseInterval time.Duration // initial backoff duration between attempts - MaxInterval time.Duration // maximum backoff duration -} - -// HashPolicyType specifies the type of HashPolicy from a received RDS Response. -type HashPolicyType int - -const ( - // HashPolicyTypeHeader specifies to hash a Header in the incoming request. - HashPolicyTypeHeader HashPolicyType = iota - // HashPolicyTypeChannelID specifies to hash a unique Identifier of the - // Channel. In grpc-go, this will be done using the ClientConn pointer. - HashPolicyTypeChannelID -) - -// HashPolicy specifies the HashPolicy if the upstream cluster uses a hashing -// load balancer. -type HashPolicy struct { - HashPolicyType HashPolicyType - Terminal bool - // Fields used for type HEADER. - HeaderName string - Regex *regexp.Regexp - RegexSubstitution string -} - -// RouteAction is the action of the route from a received RDS response. 
-type RouteAction int - -const ( - // RouteActionUnsupported are routing types currently unsupported by grpc. - // According to A36, "A Route with an inappropriate action causes RPCs - // matching that route to fail." - RouteActionUnsupported RouteAction = iota - // RouteActionRoute is the expected route type on the client side. Route - // represents routing a request to some upstream cluster. On the client - // side, if an RPC matches to a route that is not RouteActionRoute, the RPC - // will fail according to A36. - RouteActionRoute - // RouteActionNonForwardingAction is the expected route type on the server - // side. NonForwardingAction represents when a route will generate a - // response directly, without forwarding to an upstream host. - RouteActionNonForwardingAction -) - -// Route is both a specification of how to match a request as well as an -// indication of the action to take upon match. -type Route struct { - Path *string - Prefix *string - Regex *regexp.Regexp - // Indicates if prefix/path matching should be case insensitive. The default - // is false (case sensitive). - CaseInsensitive bool - Headers []*HeaderMatcher - Fraction *uint32 - - HashPolicies []*HashPolicy - - // If the matchers above indicate a match, the below configuration is used. - WeightedClusters map[string]WeightedCluster - // If MaxStreamDuration is nil, it indicates neither of the route action's - // max_stream_duration fields (grpc_timeout_header_max nor - // max_stream_duration) were set. In this case, the ListenerUpdate's - // MaxStreamDuration field should be used. If MaxStreamDuration is set to - // an explicit zero duration, the application's deadline should be used. - MaxStreamDuration *time.Duration - // HTTPFilterConfigOverride contains any HTTP filter config overrides for - // the route which may be present. An individual filter's override may be - // unused if the matching WeightedCluster contains an override for that - // filter. - HTTPFilterConfigOverride map[string]httpfilter.FilterConfig - RetryConfig *RetryConfig - - RouteAction RouteAction -} - -// WeightedCluster contains settings for an xds RouteAction.WeightedCluster. -type WeightedCluster struct { - // Weight is the relative weight of the cluster. It will never be zero. - Weight uint32 - // HTTPFilterConfigOverride contains any HTTP filter config overrides for - // the weighted cluster which may be present. - HTTPFilterConfigOverride map[string]httpfilter.FilterConfig -} - -// HeaderMatcher represents header matchers. -type HeaderMatcher struct { - Name string - InvertMatch *bool - ExactMatch *string - RegexMatch *regexp.Regexp - PrefixMatch *string - SuffixMatch *string - RangeMatch *Int64Range - PresentMatch *bool -} - -// Int64Range is a range for header range match. -type Int64Range struct { - Start int64 - End int64 -} - -// SecurityConfig contains the security configuration received as part of the -// Cluster resource on the client-side, and as part of the Listener resource on -// the server-side. -type SecurityConfig struct { - // RootInstanceName identifies the certProvider plugin to be used to fetch - // root certificates. This instance name will be resolved to the plugin name - // and its associated configuration from the certificate_providers field of - // the bootstrap file. - RootInstanceName string - // RootCertName is the certificate name to be passed to the plugin (looked - // up from the bootstrap file) while fetching root certificates. 
- RootCertName string - // IdentityInstanceName identifies the certProvider plugin to be used to - // fetch identity certificates. This instance name will be resolved to the - // plugin name and its associated configuration from the - // certificate_providers field of the bootstrap file. - IdentityInstanceName string - // IdentityCertName is the certificate name to be passed to the plugin - // (looked up from the bootstrap file) while fetching identity certificates. - IdentityCertName string - // SubjectAltNameMatchers is an optional list of match criteria for SANs - // specified on the peer certificate. Used only on the client-side. - // - // Some intricacies: - // - If this field is empty, then any peer certificate is accepted. - // - If the peer certificate contains a wildcard DNS SAN, and an `exact` - // matcher is configured, a wildcard DNS match is performed instead of a - // regular string comparison. - SubjectAltNameMatchers []matcher.StringMatcher - // RequireClientCert indicates if the server handshake process expects the - // client to present a certificate. Set to true when performing mTLS. Used - // only on the server-side. - RequireClientCert bool -} - -// Equal returns true if sc is equal to other. -func (sc *SecurityConfig) Equal(other *SecurityConfig) bool { - switch { - case sc == nil && other == nil: - return true - case (sc != nil) != (other != nil): - return false - } - switch { - case sc.RootInstanceName != other.RootInstanceName: - return false - case sc.RootCertName != other.RootCertName: - return false - case sc.IdentityInstanceName != other.IdentityInstanceName: - return false - case sc.IdentityCertName != other.IdentityCertName: - return false - case sc.RequireClientCert != other.RequireClientCert: - return false - default: - if len(sc.SubjectAltNameMatchers) != len(other.SubjectAltNameMatchers) { - return false - } - for i := 0; i < len(sc.SubjectAltNameMatchers); i++ { - if !sc.SubjectAltNameMatchers[i].Equal(other.SubjectAltNameMatchers[i]) { - return false - } - } - } - return true -} - -// ClusterType is the type of cluster from a received CDS response. -type ClusterType int - -const ( - // ClusterTypeEDS represents the EDS cluster type, which will delegate endpoint - // discovery to the management server. - ClusterTypeEDS ClusterType = iota - // ClusterTypeLogicalDNS represents the Logical DNS cluster type, which essentially - // maps to the gRPC behavior of using the DNS resolver with pick_first LB policy. - ClusterTypeLogicalDNS - // ClusterTypeAggregate represents the Aggregate Cluster type, which provides a - // prioritized list of clusters to use. It is used for failover between clusters - // with a different configuration. - ClusterTypeAggregate -) - -// ClusterLBPolicyRingHash represents ring_hash lb policy, and also contains its -// config. -type ClusterLBPolicyRingHash struct { - MinimumRingSize uint64 - MaximumRingSize uint64 -} - -// ClusterUpdate contains information from a received CDS response, which is of -// interest to the registered CDS watcher. -type ClusterUpdate struct { - ClusterType ClusterType - // ClusterName is the clusterName being watched for through CDS. - ClusterName string - // EDSServiceName is an optional name for EDS. If it's not set, the balancer - // should watch ClusterName for the EDS resources. - EDSServiceName string - // EnableLRS indicates whether or not load should be reported through LRS. - EnableLRS bool - // SecurityCfg contains security configuration sent by the control plane. 
- SecurityCfg *SecurityConfig - // MaxRequests for circuit breaking, if any (otherwise nil). - MaxRequests *uint32 - // DNSHostName is used only for cluster type DNS. It's the DNS name to - // resolve in "host:port" form - DNSHostName string - // PrioritizedClusterNames is used only for cluster type aggregate. It represents - // a prioritized list of cluster names. - PrioritizedClusterNames []string - - // LBPolicy is the lb policy for this cluster. - // - // This only support round_robin and ring_hash. - // - if it's nil, the lb policy is round_robin - // - if it's not nil, the lb policy is ring_hash, the this field has the config. - // - // When we add more support policies, this can be made an interface, and - // will be set to different types based on the policy type. - LBPolicy *ClusterLBPolicyRingHash - - // Raw is the resource from the xds response. - Raw *anypb.Any -} - -// OverloadDropConfig contains the config to drop overloads. -type OverloadDropConfig struct { - Category string - Numerator uint32 - Denominator uint32 -} - -// EndpointHealthStatus represents the health status of an endpoint. -type EndpointHealthStatus int32 - -const ( - // EndpointHealthStatusUnknown represents HealthStatus UNKNOWN. - EndpointHealthStatusUnknown EndpointHealthStatus = iota - // EndpointHealthStatusHealthy represents HealthStatus HEALTHY. - EndpointHealthStatusHealthy - // EndpointHealthStatusUnhealthy represents HealthStatus UNHEALTHY. - EndpointHealthStatusUnhealthy - // EndpointHealthStatusDraining represents HealthStatus DRAINING. - EndpointHealthStatusDraining - // EndpointHealthStatusTimeout represents HealthStatus TIMEOUT. - EndpointHealthStatusTimeout - // EndpointHealthStatusDegraded represents HealthStatus DEGRADED. - EndpointHealthStatusDegraded -) - -// Endpoint contains information of an endpoint. -type Endpoint struct { - Address string - HealthStatus EndpointHealthStatus - Weight uint32 -} - -// Locality contains information of a locality. -type Locality struct { - Endpoints []Endpoint - ID internal.LocalityID - Priority uint32 - Weight uint32 -} - -// EndpointsUpdate contains an EDS update. -type EndpointsUpdate struct { - Drops []OverloadDropConfig - Localities []Locality - - // Raw is the resource from the xds response. - Raw *anypb.Any -} - // Function to be overridden in tests. var newAPIClient = func(apiVersion version.TransportAPI, cc *grpc.ClientConn, opts BuildOptions) (APIClient, error) { cb := getAPIClientBuilder(apiVersion) @@ -603,20 +172,20 @@ type clientImpl struct { mu sync.Mutex ldsWatchers map[string]map[*watchInfo]bool ldsVersion string // Only used in CSDS. - ldsCache map[string]ListenerUpdate - ldsMD map[string]UpdateMetadata + ldsCache map[string]xdsresource.ListenerUpdate + ldsMD map[string]xdsresource.UpdateMetadata rdsWatchers map[string]map[*watchInfo]bool rdsVersion string // Only used in CSDS. - rdsCache map[string]RouteConfigUpdate - rdsMD map[string]UpdateMetadata + rdsCache map[string]xdsresource.RouteConfigUpdate + rdsMD map[string]xdsresource.UpdateMetadata cdsWatchers map[string]map[*watchInfo]bool cdsVersion string // Only used in CSDS. - cdsCache map[string]ClusterUpdate - cdsMD map[string]UpdateMetadata + cdsCache map[string]xdsresource.ClusterUpdate + cdsMD map[string]xdsresource.UpdateMetadata edsWatchers map[string]map[*watchInfo]bool edsVersion string // Only used in CSDS. 
- edsCache map[string]EndpointsUpdate - edsMD map[string]UpdateMetadata + edsCache map[string]xdsresource.EndpointsUpdate + edsMD map[string]xdsresource.UpdateMetadata // Changes to map lrsClients and the lrsClient inside the map need to be // protected by lrsMu. @@ -652,17 +221,17 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) ( updateCh: buffer.NewUnbounded(), ldsWatchers: make(map[string]map[*watchInfo]bool), - ldsCache: make(map[string]ListenerUpdate), - ldsMD: make(map[string]UpdateMetadata), + ldsCache: make(map[string]xdsresource.ListenerUpdate), + ldsMD: make(map[string]xdsresource.UpdateMetadata), rdsWatchers: make(map[string]map[*watchInfo]bool), - rdsCache: make(map[string]RouteConfigUpdate), - rdsMD: make(map[string]UpdateMetadata), + rdsCache: make(map[string]xdsresource.RouteConfigUpdate), + rdsMD: make(map[string]xdsresource.UpdateMetadata), cdsWatchers: make(map[string]map[*watchInfo]bool), - cdsCache: make(map[string]ClusterUpdate), - cdsMD: make(map[string]UpdateMetadata), + cdsCache: make(map[string]xdsresource.ClusterUpdate), + cdsMD: make(map[string]xdsresource.UpdateMetadata), edsWatchers: make(map[string]map[*watchInfo]bool), - edsCache: make(map[string]EndpointsUpdate), - edsMD: make(map[string]UpdateMetadata), + edsCache: make(map[string]xdsresource.EndpointsUpdate), + edsMD: make(map[string]xdsresource.UpdateMetadata), lrsClients: make(map[string]*lrsClient), } @@ -732,14 +301,14 @@ func (c *clientImpl) Close() { c.logger.Infof("Shutdown") } -func (c *clientImpl) filterChainUpdateValidator(fc *FilterChain) error { +func (c *clientImpl) filterChainUpdateValidator(fc *xdsresource.FilterChain) error { if fc == nil { return nil } return c.securityConfigUpdateValidator(fc.SecurityCfg) } -func (c *clientImpl) securityConfigUpdateValidator(sc *SecurityConfig) error { +func (c *clientImpl) securityConfigUpdateValidator(sc *xdsresource.SecurityConfig) error { if sc == nil { return nil } @@ -758,28 +327,12 @@ func (c *clientImpl) securityConfigUpdateValidator(sc *SecurityConfig) error { func (c *clientImpl) updateValidator(u interface{}) error { switch update := u.(type) { - case ListenerUpdate: + case xdsresource.ListenerUpdate: if update.InboundListenerCfg == nil || update.InboundListenerCfg.FilterChains == nil { return nil } - - fcm := update.InboundListenerCfg.FilterChains - for _, dst := range fcm.dstPrefixMap { - for _, srcType := range dst.srcTypeArr { - if srcType == nil { - continue - } - for _, src := range srcType.srcPrefixMap { - for _, fc := range src.srcPortMap { - if err := c.filterChainUpdateValidator(fc); err != nil { - return err - } - } - } - } - } - return c.filterChainUpdateValidator(fcm.def) - case ClusterUpdate: + return update.InboundListenerCfg.FilterChains.Validate(c.filterChainUpdateValidator) + case xdsresource.ClusterUpdate: return c.securityConfigUpdateValidator(update.SecurityCfg) default: // We currently invoke this update validation function only for LDS and @@ -821,33 +374,3 @@ func (r ResourceType) String() string { return "UnknownResource" } } - -// IsListenerResource returns true if the provider URL corresponds to an xDS -// Listener resource. -func IsListenerResource(url string) bool { - return url == version.V2ListenerURL || url == version.V3ListenerURL -} - -// IsHTTPConnManagerResource returns true if the provider URL corresponds to an xDS -// HTTPConnManager resource. 
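// A minimal sketch of the Validate method that replaces the nested walk in
// updateValidator above (now invoked as
// update.InboundListenerCfg.FilterChains.Validate(c.filterChainUpdateValidator)).
// The stub types are assumptions standing in for the real, unexported
// internals of xdsresource/filter_chain.go; only the field names are taken
// from the deleted loop nest, so details may differ.
package xdsresource

// FilterChain is trimmed to an empty struct for this sketch.
type FilterChain struct{}

type sourcePrefixEntry struct {
	srcPortMap map[int]*FilterChain
}

type sourcePrefixes struct {
	srcPrefixMap map[string]*sourcePrefixEntry
}

type destPrefixEntry struct {
	srcTypeArr [3]*sourcePrefixes
}

// FilterChainManager holds the destination-prefix lookup maps plus the
// default filter chain.
type FilterChainManager struct {
	dstPrefixMap map[string]*destPrefixEntry
	def          *FilterChain
}

// Validate applies f to every filter chain, including the default one,
// mirroring the loop nest that clientImpl.updateValidator used to inline.
func (fcm *FilterChainManager) Validate(f func(fc *FilterChain) error) error {
	for _, dst := range fcm.dstPrefixMap {
		for _, srcType := range dst.srcTypeArr {
			if srcType == nil {
				continue
			}
			for _, src := range srcType.srcPrefixMap {
				for _, fc := range src.srcPortMap {
					if err := f(fc); err != nil {
						return err
					}
				}
			}
		}
	}
	return f(fcm.def)
}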
-func IsHTTPConnManagerResource(url string) bool { - return url == version.V2HTTPConnManagerURL || url == version.V3HTTPConnManagerURL -} - -// IsRouteConfigResource returns true if the provider URL corresponds to an xDS -// RouteConfig resource. -func IsRouteConfigResource(url string) bool { - return url == version.V2RouteConfigURL || url == version.V3RouteConfigURL -} - -// IsClusterResource returns true if the provider URL corresponds to an xDS -// Cluster resource. -func IsClusterResource(url string) bool { - return url == version.V2ClusterURL || url == version.V3ClusterURL -} - -// IsEndpointsResource returns true if the provider URL corresponds to an xDS -// Endpoints resource. -func IsEndpointsResource(url string) bool { - return url == version.V2EndpointsURL || url == version.V3EndpointsURL -} diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index 2a6d6ae2a536..a668ff1378f3 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -26,6 +26,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc" @@ -60,21 +61,9 @@ const ( defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. ) -var ( - cmpOpts = cmp.Options{ - cmpopts.EquateEmpty(), - cmp.FilterValues(func(x, y error) bool { return true }, cmpopts.EquateErrors()), - cmp.Comparer(func(a, b time.Time) bool { return true }), - protocmp.Transform(), - } - - cmpOptsIgnoreDetails = cmp.Options{ - cmp.Comparer(func(a, b time.Time) bool { return true }), - cmp.Comparer(func(x, y error) bool { - return (x == nil) == (y == nil) - }), - } -) +func newStringP(s string) *string { + return &s +} func clientOpts(balancerName string, overrideWatchExpiryTimeout bool) (*bootstrap.Config, time.Duration) { watchExpiryTimeout := defaultWatchExpiryTimeout @@ -164,10 +153,10 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { clusterUpdateCh := testutils.NewChannel() firstTime := true - client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) // Calls another watch inline, to ensure there's deadlock. - client.WatchCluster("another-random-name", func(ClusterUpdate, error) {}) + client.WatchCluster("another-random-name", func(xdsresource.ClusterUpdate, error) {}) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); firstTime && err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -178,27 +167,27 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } // The second update needs to be different in the underlying resource proto // for the watch callback to be invoked. 
- wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2", Raw: &anypb.Any{}} - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate2}}, UpdateMetadata{}) + wantUpdate2 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "2", Raw: &anypb.Any{}} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2, nil); err != nil { t.Fatal(err) } } -func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate ListenerUpdate, wantErr error) error { +func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.ListenerUpdate, wantErr error) error { u, err := updateCh.Receive(ctx) if err != nil { return fmt.Errorf("timeout when waiting for listener update: %v", err) } - gotUpdate := u.(ListenerUpdateErrTuple) + gotUpdate := u.(xdsresource.ListenerUpdateErrTuple) if wantErr != nil { if gotUpdate.Err != wantErr { return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) @@ -211,12 +200,12 @@ func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, want return nil } -func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate RouteConfigUpdate, wantErr error) error { +func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.RouteConfigUpdate, wantErr error) error { u, err := updateCh.Receive(ctx) if err != nil { return fmt.Errorf("timeout when waiting for route configuration update: %v", err) } - gotUpdate := u.(RouteConfigUpdateErrTuple) + gotUpdate := u.(xdsresource.RouteConfigUpdateErrTuple) if wantErr != nil { if gotUpdate.Err != wantErr { return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) @@ -229,12 +218,12 @@ func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, w return nil } -func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate ClusterUpdate, wantErr error) error { +func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.ClusterUpdate, wantErr error) error { u, err := updateCh.Receive(ctx) if err != nil { return fmt.Errorf("timeout when waiting for cluster update: %v", err) } - gotUpdate := u.(ClusterUpdateErrTuple) + gotUpdate := u.(xdsresource.ClusterUpdateErrTuple) if wantErr != nil { if gotUpdate.Err != wantErr { return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) @@ -247,12 +236,12 @@ func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantU return nil } -func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate EndpointsUpdate, wantErr error) error { +func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.EndpointsUpdate, wantErr error) error { u, err := updateCh.Receive(ctx) if err != nil { return fmt.Errorf("timeout when waiting for endpoints update: %v", err) } - gotUpdate := u.(EndpointsUpdateErrTuple) + gotUpdate := u.(xdsresource.EndpointsUpdateErrTuple) if wantErr != nil { if gotUpdate.Err != wantErr { return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) diff --git a/xds/internal/xdsclient/dump.go b/xds/internal/xdsclient/dump.go index db9b474f370d..dfe83c5b1755 100644 --- a/xds/internal/xdsclient/dump.go +++ b/xds/internal/xdsclient/dump.go @@ -18,7 +18,10 @@ package 
xdsclient -import anypb "github.com/golang/protobuf/ptypes/any" +import ( + anypb "github.com/golang/protobuf/ptypes/any" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) // UpdateWithMD contains the raw message of the update and the metadata, // including version, raw message, timestamp. @@ -26,31 +29,31 @@ import anypb "github.com/golang/protobuf/ptypes/any" // This is to be used for config dump and CSDS, not directly by users (like // resolvers/balancers). type UpdateWithMD struct { - MD UpdateMetadata + MD xdsresource.UpdateMetadata Raw *anypb.Any } func rawFromCache(s string, cache interface{}) *anypb.Any { switch c := cache.(type) { - case map[string]ListenerUpdate: + case map[string]xdsresource.ListenerUpdate: v, ok := c[s] if !ok { return nil } return v.Raw - case map[string]RouteConfigUpdate: + case map[string]xdsresource.RouteConfigUpdate: v, ok := c[s] if !ok { return nil } return v.Raw - case map[string]ClusterUpdate: + case map[string]xdsresource.ClusterUpdate: v, ok := c[s] if !ok { return nil } return v.Raw - case map[string]EndpointsUpdate: + case map[string]xdsresource.EndpointsUpdate: v, ok := c[s] if !ok { return nil @@ -67,7 +70,7 @@ func (c *clientImpl) dump(t ResourceType) (string, map[string]UpdateWithMD) { var ( version string - md map[string]UpdateMetadata + md map[string]xdsresource.UpdateMetadata cache interface{} ) switch t { diff --git a/xds/internal/xdsclient/dump_test.go b/xds/internal/xdsclient/dump_test.go index c162a9418f23..2d0b6c17e0b2 100644 --- a/xds/internal/xdsclient/dump_test.go +++ b/xds/internal/xdsclient/dump_test.go @@ -30,6 +30,7 @@ import ( v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/durationpb" @@ -94,25 +95,25 @@ func (s) TestLDSConfigDump(t *testing.T) { wantRequested := make(map[string]xdsclient.UpdateWithMD) for _, n := range ldsTargets { - cancel := client.WatchListener(n, func(update xdsclient.ListenerUpdate, err error) {}) + cancel := client.WatchListener(n, func(update xdsresource.ListenerUpdate, err error) {}) defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusRequested}} + wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. 
if err := compareDump(client.DumpLDS, "", wantRequested); err != nil { t.Fatalf(err.Error()) } - update0 := make(map[string]xdsclient.ListenerUpdateErrTuple) + update0 := make(map[string]xdsresource.ListenerUpdateErrTuple) want0 := make(map[string]xdsclient.UpdateWithMD) for n, r := range listenerRaws { - update0[n] = xdsclient.ListenerUpdateErrTuple{Update: xdsclient.ListenerUpdate{Raw: r}} + update0[n] = xdsresource.ListenerUpdateErrTuple{Update: xdsresource.ListenerUpdate{Raw: r}} want0[n] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, Raw: r, } } - updateHandler.NewListeners(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}) + updateHandler.NewListeners(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. if err := compareDump(client.DumpLDS, testVersion, want0); err != nil { @@ -122,13 +123,13 @@ func (s) TestLDSConfigDump(t *testing.T) { const nackVersion = "lds-version-nack" var nackErr = fmt.Errorf("lds nack error") updateHandler.NewListeners( - map[string]xdsclient.ListenerUpdateErrTuple{ + map[string]xdsresource.ListenerUpdateErrTuple{ ldsTargets[0]: {Err: nackErr}, - ldsTargets[1]: {Update: xdsclient.ListenerUpdate{Raw: listenerRaws[ldsTargets[1]]}}, + ldsTargets[1]: {Update: xdsresource.ListenerUpdate{Raw: listenerRaws[ldsTargets[1]]}}, }, - xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -140,10 +141,10 @@ func (s) TestLDSConfigDump(t *testing.T) { // Though resource 0 was NACKed, the dump should show the previous ACKed raw // message, as well as the NACK error. wantDump[ldsTargets[0]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, + MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, Version: testVersion, - ErrState: &xdsclient.UpdateErrorMetadata{ + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -152,7 +153,7 @@ func (s) TestLDSConfigDump(t *testing.T) { } wantDump[ldsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: listenerRaws[ldsTargets[1]], } if err := compareDump(client.DumpLDS, nackVersion, wantDump); err != nil { @@ -210,25 +211,25 @@ func (s) TestRDSConfigDump(t *testing.T) { wantRequested := make(map[string]xdsclient.UpdateWithMD) for _, n := range rdsTargets { - cancel := client.WatchRouteConfig(n, func(update xdsclient.RouteConfigUpdate, err error) {}) + cancel := client.WatchRouteConfig(n, func(update xdsresource.RouteConfigUpdate, err error) {}) defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusRequested}} + wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. 
if err := compareDump(client.DumpRDS, "", wantRequested); err != nil { t.Fatalf(err.Error()) } - update0 := make(map[string]xdsclient.RouteConfigUpdateErrTuple) + update0 := make(map[string]xdsresource.RouteConfigUpdateErrTuple) want0 := make(map[string]xdsclient.UpdateWithMD) for n, r := range routeRaws { - update0[n] = xdsclient.RouteConfigUpdateErrTuple{Update: xdsclient.RouteConfigUpdate{Raw: r}} + update0[n] = xdsresource.RouteConfigUpdateErrTuple{Update: xdsresource.RouteConfigUpdate{Raw: r}} want0[n] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, Raw: r, } } - updateHandler.NewRouteConfigs(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}) + updateHandler.NewRouteConfigs(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. if err := compareDump(client.DumpRDS, testVersion, want0); err != nil { @@ -238,13 +239,13 @@ func (s) TestRDSConfigDump(t *testing.T) { const nackVersion = "rds-version-nack" var nackErr = fmt.Errorf("rds nack error") updateHandler.NewRouteConfigs( - map[string]xdsclient.RouteConfigUpdateErrTuple{ + map[string]xdsresource.RouteConfigUpdateErrTuple{ rdsTargets[0]: {Err: nackErr}, - rdsTargets[1]: {Update: xdsclient.RouteConfigUpdate{Raw: routeRaws[rdsTargets[1]]}}, + rdsTargets[1]: {Update: xdsresource.RouteConfigUpdate{Raw: routeRaws[rdsTargets[1]]}}, }, - xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -256,10 +257,10 @@ func (s) TestRDSConfigDump(t *testing.T) { // Though resource 0 was NACKed, the dump should show the previous ACKed raw // message, as well as the NACK error. wantDump[rdsTargets[0]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, + MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, Version: testVersion, - ErrState: &xdsclient.UpdateErrorMetadata{ + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -267,7 +268,7 @@ func (s) TestRDSConfigDump(t *testing.T) { Raw: routeRaws[rdsTargets[0]], } wantDump[rdsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: routeRaws[rdsTargets[1]], } if err := compareDump(client.DumpRDS, nackVersion, wantDump); err != nil { @@ -326,25 +327,25 @@ func (s) TestCDSConfigDump(t *testing.T) { wantRequested := make(map[string]xdsclient.UpdateWithMD) for _, n := range cdsTargets { - cancel := client.WatchCluster(n, func(update xdsclient.ClusterUpdate, err error) {}) + cancel := client.WatchCluster(n, func(update xdsresource.ClusterUpdate, err error) {}) defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusRequested}} + wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. 
if err := compareDump(client.DumpCDS, "", wantRequested); err != nil { t.Fatalf(err.Error()) } - update0 := make(map[string]xdsclient.ClusterUpdateErrTuple) + update0 := make(map[string]xdsresource.ClusterUpdateErrTuple) want0 := make(map[string]xdsclient.UpdateWithMD) for n, r := range clusterRaws { - update0[n] = xdsclient.ClusterUpdateErrTuple{Update: xdsclient.ClusterUpdate{Raw: r}} + update0[n] = xdsresource.ClusterUpdateErrTuple{Update: xdsresource.ClusterUpdate{Raw: r}} want0[n] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, Raw: r, } } - updateHandler.NewClusters(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}) + updateHandler.NewClusters(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. if err := compareDump(client.DumpCDS, testVersion, want0); err != nil { @@ -354,13 +355,13 @@ func (s) TestCDSConfigDump(t *testing.T) { const nackVersion = "cds-version-nack" var nackErr = fmt.Errorf("cds nack error") updateHandler.NewClusters( - map[string]xdsclient.ClusterUpdateErrTuple{ + map[string]xdsresource.ClusterUpdateErrTuple{ cdsTargets[0]: {Err: nackErr}, - cdsTargets[1]: {Update: xdsclient.ClusterUpdate{Raw: clusterRaws[cdsTargets[1]]}}, + cdsTargets[1]: {Update: xdsresource.ClusterUpdate{Raw: clusterRaws[cdsTargets[1]]}}, }, - xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -372,10 +373,10 @@ func (s) TestCDSConfigDump(t *testing.T) { // Though resource 0 was NACKed, the dump should show the previous ACKed raw // message, as well as the NACK error. wantDump[cdsTargets[0]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, + MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, Version: testVersion, - ErrState: &xdsclient.UpdateErrorMetadata{ + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -383,7 +384,7 @@ func (s) TestCDSConfigDump(t *testing.T) { Raw: clusterRaws[cdsTargets[0]], } wantDump[cdsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: clusterRaws[cdsTargets[1]], } if err := compareDump(client.DumpCDS, nackVersion, wantDump); err != nil { @@ -428,25 +429,25 @@ func (s) TestEDSConfigDump(t *testing.T) { wantRequested := make(map[string]xdsclient.UpdateWithMD) for _, n := range edsTargets { - cancel := client.WatchEndpoints(n, func(update xdsclient.EndpointsUpdate, err error) {}) + cancel := client.WatchEndpoints(n, func(update xdsresource.EndpointsUpdate, err error) {}) defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusRequested}} + wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. 
if err := compareDump(client.DumpEDS, "", wantRequested); err != nil { t.Fatalf(err.Error()) } - update0 := make(map[string]xdsclient.EndpointsUpdateErrTuple) + update0 := make(map[string]xdsresource.EndpointsUpdateErrTuple) want0 := make(map[string]xdsclient.UpdateWithMD) for n, r := range endpointRaws { - update0[n] = xdsclient.EndpointsUpdateErrTuple{Update: xdsclient.EndpointsUpdate{Raw: r}} + update0[n] = xdsresource.EndpointsUpdateErrTuple{Update: xdsresource.EndpointsUpdate{Raw: r}} want0[n] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, Raw: r, } } - updateHandler.NewEndpoints(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}) + updateHandler.NewEndpoints(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. if err := compareDump(client.DumpEDS, testVersion, want0); err != nil { @@ -456,13 +457,13 @@ func (s) TestEDSConfigDump(t *testing.T) { const nackVersion = "eds-version-nack" var nackErr = fmt.Errorf("eds nack error") updateHandler.NewEndpoints( - map[string]xdsclient.EndpointsUpdateErrTuple{ + map[string]xdsresource.EndpointsUpdateErrTuple{ edsTargets[0]: {Err: nackErr}, - edsTargets[1]: {Update: xdsclient.EndpointsUpdate{Raw: endpointRaws[edsTargets[1]]}}, + edsTargets[1]: {Update: xdsresource.EndpointsUpdate{Raw: endpointRaws[edsTargets[1]]}}, }, - xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -474,10 +475,10 @@ func (s) TestEDSConfigDump(t *testing.T) { // Though resource 0 was NACKed, the dump should show the previous ACKed raw // message, as well as the NACK error. 
wantDump[edsTargets[0]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, + MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, Version: testVersion, - ErrState: &xdsclient.UpdateErrorMetadata{ + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -485,7 +486,7 @@ func (s) TestEDSConfigDump(t *testing.T) { Raw: endpointRaws[edsTargets[0]], } wantDump[edsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: endpointRaws[edsTargets[1]], } if err := compareDump(client.DumpEDS, nackVersion, wantDump); err != nil { diff --git a/xds/internal/xdsclient/v2/ack_test.go b/xds/internal/xdsclient/v2/ack_test.go index d2f0605f6d08..21191341306b 100644 --- a/xds/internal/xdsclient/v2/ack_test.go +++ b/xds/internal/xdsclient/v2/ack_test.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -47,7 +48,7 @@ func startXDSV2Client(t *testing.T, cc *grpc.ClientConn) (v2c *client, cbLDS, cb cbCDS = testutils.NewChannel() cbEDS = testutils.NewChannel() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) { + f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { t.Logf("Received %v callback with {%+v}", rType, d) switch rType { case xdsclient.ListenerResource: diff --git a/xds/internal/xdsclient/v2/cds_test.go b/xds/internal/xdsclient/v2/cds_test.go index cef7563017c4..1c368b5a5c3a 100644 --- a/xds/internal/xdsclient/v2/cds_test.go +++ b/xds/internal/xdsclient/v2/cds_test.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -101,8 +102,8 @@ func (s) TestCDSHandleResponse(t *testing.T) { name string cdsResponse *xdspb.DiscoveryResponse wantErr bool - wantUpdate map[string]xdsclient.ClusterUpdateErrTuple - wantUpdateMD xdsclient.UpdateMetadata + wantUpdate map[string]xdsresource.ClusterUpdateErrTuple + wantUpdateMD xdsresource.UpdateMetadata wantUpdateErr bool }{ // Badly marshaled CDS response. 
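// A minimal sketch of what this move means for watcher code: the watch
// methods stay on the xdsclient client, while the update types now come from
// xdsclient/xdsresource. The XDSClient interface name is assumed from the
// surrounding package; the callback shape mirrors TestWatchCallAnotherWatch
// earlier in this patch.
package clusterwatch

import (
	"fmt"

	"google.golang.org/grpc/xds/internal/xdsclient"
	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)

// watchCluster registers a CDS watch; the callback parameter changed from
// xdsclient.ClusterUpdate to xdsresource.ClusterUpdate in this patch.
func watchCluster(c xdsclient.XDSClient, name string) (cancel func()) {
	return c.WatchCluster(name, func(u xdsresource.ClusterUpdate, err error) {
		if err != nil {
			fmt.Printf("cluster watch error: %v\n", err)
			return
		}
		fmt.Printf("cluster %q resolved, EDS service %q\n", u.ClusterName, u.EDSServiceName)
	})
}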
@@ -111,9 +112,9 @@ func (s) TestCDSHandleResponse(t *testing.T) { cdsResponse: badlyMarshaledCDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -125,9 +126,9 @@ func (s) TestCDSHandleResponse(t *testing.T) { cdsResponse: badResourceTypeInLDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -139,8 +140,8 @@ func (s) TestCDSHandleResponse(t *testing.T) { cdsResponse: &xdspb.DiscoveryResponse{}, wantErr: false, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -149,11 +150,11 @@ func (s) TestCDSHandleResponse(t *testing.T) { name: "one-uninteresting-cluster", cdsResponse: goodCDSResponse2, wantErr: false, - wantUpdate: map[string]xdsclient.ClusterUpdateErrTuple{ - goodClusterName2: {Update: xdsclient.ClusterUpdate{ClusterName: goodClusterName2, EDSServiceName: serviceName2, Raw: marshaledCluster2}}, + wantUpdate: map[string]xdsresource.ClusterUpdateErrTuple{ + goodClusterName2: {Update: xdsresource.ClusterUpdate{ClusterName: goodClusterName2, EDSServiceName: serviceName2, Raw: marshaledCluster2}}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -162,11 +163,11 @@ func (s) TestCDSHandleResponse(t *testing.T) { name: "one-good-cluster", cdsResponse: goodCDSResponse1, wantErr: false, - wantUpdate: map[string]xdsclient.ClusterUpdateErrTuple{ - goodClusterName1: {Update: xdsclient.ClusterUpdate{ClusterName: goodClusterName1, EDSServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1}}, + wantUpdate: map[string]xdsresource.ClusterUpdateErrTuple{ + goodClusterName1: {Update: xdsresource.ClusterUpdate{ClusterName: goodClusterName1, EDSServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1}}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -194,7 +195,7 @@ func (s) TestCDSHandleResponseWithoutWatch(t *testing.T) { defer cleanup() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsclient.UpdateMetadata) {}, + f: func(xdsclient.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/v2/client.go b/xds/internal/xdsclient/v2/client.go index dc137f63e5f5..60e87761e852 100644 --- a/xds/internal/xdsclient/v2/client.go +++ b/xds/internal/xdsclient/v2/client.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient" + 
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource" v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" @@ -93,7 +94,7 @@ type client struct { // ClientConn to the xDS gRPC server. Owned by the parent xdsClient. cc *grpc.ClientConn nodeProto *v2corepb.Node - updateValidator xdsclient.UpdateValidatorFunc + updateValidator xdsresource.UpdateValidatorFunc } func (v2c *client) NewStream(ctx context.Context) (grpc.ClientStream, error) { @@ -164,16 +165,16 @@ func (v2c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, stri var err error url := resp.GetTypeUrl() switch { - case xdsclient.IsListenerResource(url): + case xdsresource.IsListenerResource(url): err = v2c.handleLDSResponse(resp) rType = xdsclient.ListenerResource - case xdsclient.IsRouteConfigResource(url): + case xdsresource.IsRouteConfigResource(url): err = v2c.handleRDSResponse(resp) rType = xdsclient.RouteConfigResource - case xdsclient.IsClusterResource(url): + case xdsresource.IsClusterResource(url): err = v2c.handleCDSResponse(resp) rType = xdsclient.ClusterResource - case xdsclient.IsEndpointsResource(url): + case xdsresource.IsEndpointsResource(url): err = v2c.handleEDSResponse(resp) rType = xdsclient.EndpointsResource default: @@ -188,7 +189,7 @@ func (v2c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, stri // server. On receipt of a good response, it also invokes the registered watcher // callback. func (v2c *client) handleLDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalListener(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalListener(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v2c.logger, @@ -202,7 +203,7 @@ func (v2c *client) handleLDSResponse(resp *v2xdspb.DiscoveryResponse) error { // server. On receipt of a good response, it caches validated resources and also // invokes the registered watcher callback. func (v2c *client) handleRDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalRouteConfig(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalRouteConfig(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v2c.logger, @@ -216,7 +217,7 @@ func (v2c *client) handleRDSResponse(resp *v2xdspb.DiscoveryResponse) error { // server. On receipt of a good response, it also invokes the registered watcher // callback. 
func (v2c *client) handleCDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalCluster(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalCluster(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v2c.logger, @@ -227,7 +228,7 @@ func (v2c *client) handleCDSResponse(resp *v2xdspb.DiscoveryResponse) error { } func (v2c *client) handleEDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalEndpoints(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalEndpoints(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v2c.logger, diff --git a/xds/internal/xdsclient/v2/client_test.go b/xds/internal/xdsclient/v2/client_test.go index fc3fa821a157..f74e87fb370d 100644 --- a/xds/internal/xdsclient/v2/client_test.go +++ b/xds/internal/xdsclient/v2/client_test.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/testing/protocmp" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" @@ -293,15 +294,15 @@ type watchHandleTestcase struct { responseToHandle *xdspb.DiscoveryResponse wantHandleErr bool wantUpdate interface{} - wantUpdateMD xdsclient.UpdateMetadata + wantUpdateMD xdsresource.UpdateMetadata wantUpdateErr bool } type testUpdateReceiver struct { - f func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) + f func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) } -func (t *testUpdateReceiver) NewListeners(d map[string]xdsclient.ListenerUpdateErrTuple, metadata xdsclient.UpdateMetadata) { +func (t *testUpdateReceiver) NewListeners(d map[string]xdsresource.ListenerUpdateErrTuple, metadata xdsresource.UpdateMetadata) { dd := make(map[string]interface{}) for k, v := range d { dd[k] = v @@ -309,7 +310,7 @@ func (t *testUpdateReceiver) NewListeners(d map[string]xdsclient.ListenerUpdateE t.newUpdate(xdsclient.ListenerResource, dd, metadata) } -func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsclient.RouteConfigUpdateErrTuple, metadata xdsclient.UpdateMetadata) { +func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsresource.RouteConfigUpdateErrTuple, metadata xdsresource.UpdateMetadata) { dd := make(map[string]interface{}) for k, v := range d { dd[k] = v @@ -317,7 +318,7 @@ func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsclient.RouteConfigU t.newUpdate(xdsclient.RouteConfigResource, dd, metadata) } -func (t *testUpdateReceiver) NewClusters(d map[string]xdsclient.ClusterUpdateErrTuple, metadata xdsclient.UpdateMetadata) { +func (t *testUpdateReceiver) NewClusters(d map[string]xdsresource.ClusterUpdateErrTuple, metadata xdsresource.UpdateMetadata) { dd := make(map[string]interface{}) for k, v := range d { dd[k] = v @@ -325,7 +326,7 @@ func (t *testUpdateReceiver) NewClusters(d map[string]xdsclient.ClusterUpdateErr t.newUpdate(xdsclient.ClusterResource, dd, metadata) } -func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsclient.EndpointsUpdateErrTuple, metadata xdsclient.UpdateMetadata) { +func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsresource.EndpointsUpdateErrTuple, metadata xdsresource.UpdateMetadata) { dd := make(map[string]interface{}) for k, v := 
range d { dd[k] = v @@ -335,7 +336,7 @@ func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsclient.EndpointsUpdate func (t *testUpdateReceiver) NewConnectionError(error) {} -func (t *testUpdateReceiver) newUpdate(rType xdsclient.ResourceType, d map[string]interface{}, metadata xdsclient.UpdateMetadata) { +func (t *testUpdateReceiver) newUpdate(rType xdsclient.ResourceType, d map[string]interface{}, metadata xdsresource.UpdateMetadata) { t.f(rType, d, metadata) } @@ -353,37 +354,37 @@ func testWatchHandle(t *testing.T, test *watchHandleTestcase) { type updateErr struct { u interface{} - md xdsclient.UpdateMetadata + md xdsresource.UpdateMetadata err error } gotUpdateCh := testutils.NewChannel() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) { + f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { if rType == test.rType { switch test.rType { case xdsclient.ListenerResource: - dd := make(map[string]xdsclient.ListenerUpdateErrTuple) + dd := make(map[string]xdsresource.ListenerUpdateErrTuple) for n, u := range d { - dd[n] = u.(xdsclient.ListenerUpdateErrTuple) + dd[n] = u.(xdsresource.ListenerUpdateErrTuple) } gotUpdateCh.Send(updateErr{dd, md, nil}) case xdsclient.RouteConfigResource: - dd := make(map[string]xdsclient.RouteConfigUpdateErrTuple) + dd := make(map[string]xdsresource.RouteConfigUpdateErrTuple) for n, u := range d { - dd[n] = u.(xdsclient.RouteConfigUpdateErrTuple) + dd[n] = u.(xdsresource.RouteConfigUpdateErrTuple) } gotUpdateCh.Send(updateErr{dd, md, nil}) case xdsclient.ClusterResource: - dd := make(map[string]xdsclient.ClusterUpdateErrTuple) + dd := make(map[string]xdsresource.ClusterUpdateErrTuple) for n, u := range d { - dd[n] = u.(xdsclient.ClusterUpdateErrTuple) + dd[n] = u.(xdsresource.ClusterUpdateErrTuple) } gotUpdateCh.Send(updateErr{dd, md, nil}) case xdsclient.EndpointsResource: - dd := make(map[string]xdsclient.EndpointsUpdateErrTuple) + dd := make(map[string]xdsresource.EndpointsUpdateErrTuple) for n, u := range d { - dd[n] = u.(xdsclient.EndpointsUpdateErrTuple) + dd[n] = u.(xdsresource.EndpointsUpdateErrTuple) } gotUpdateCh.Send(updateErr{dd, md, nil}) } @@ -431,8 +432,8 @@ func testWatchHandle(t *testing.T, test *watchHandleTestcase) { wantUpdate := test.wantUpdate cmpOpts := cmp.Options{ cmpopts.EquateEmpty(), protocmp.Transform(), - cmpopts.IgnoreFields(xdsclient.UpdateMetadata{}, "Timestamp"), - cmpopts.IgnoreFields(xdsclient.UpdateErrorMetadata{}, "Timestamp"), + cmpopts.IgnoreFields(xdsresource.UpdateMetadata{}, "Timestamp"), + cmpopts.IgnoreFields(xdsresource.UpdateErrorMetadata{}, "Timestamp"), cmp.FilterValues(func(x, y error) bool { return true }, cmpopts.EquateErrors()), } uErr, err := gotUpdateCh.Receive(ctx) @@ -503,7 +504,7 @@ func (s) TestV2ClientBackoffAfterRecvError(t *testing.T) { callbackCh := make(chan struct{}) v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsclient.UpdateMetadata) { close(callbackCh) }, + f: func(xdsclient.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) { close(callbackCh) }, }, cc, goodNodeProto, clientBackoff, nil) if err != nil { t.Fatal(err) @@ -548,7 +549,7 @@ func (s) TestV2ClientRetriesAfterBrokenStream(t *testing.T) { callbackCh := testutils.NewChannel() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) { + f: 
func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { if rType == xdsclient.ListenerResource { if u, ok := d[goodLDSTarget1]; ok { t.Logf("Received LDS callback with ldsUpdate {%+v}", u) @@ -620,7 +621,7 @@ func (s) TestV2ClientWatchWithoutStream(t *testing.T) { callbackCh := testutils.NewChannel() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) { + f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { if rType == xdsclient.ListenerResource { if u, ok := d[goodLDSTarget1]; ok { t.Logf("Received LDS callback with ldsUpdate {%+v}", u) @@ -664,7 +665,7 @@ func (s) TestV2ClientWatchWithoutStream(t *testing.T) { if v, err := callbackCh.Receive(ctx); err != nil { t.Fatal("Timeout when expecting LDS update") - } else if _, ok := v.(xdsclient.ListenerUpdateErrTuple); !ok { + } else if _, ok := v.(xdsresource.ListenerUpdateErrTuple); !ok { t.Fatalf("Expect an LDS update from watcher, got %v", v) } } diff --git a/xds/internal/xdsclient/v2/eds_test.go b/xds/internal/xdsclient/v2/eds_test.go index 8176b6dfb93a..d0f355e337b7 100644 --- a/xds/internal/xdsclient/v2/eds_test.go +++ b/xds/internal/xdsclient/v2/eds_test.go @@ -30,6 +30,7 @@ import ( xtestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) var ( @@ -76,8 +77,8 @@ func (s) TestEDSHandleResponse(t *testing.T) { name string edsResponse *v2xdspb.DiscoveryResponse wantErr bool - wantUpdate map[string]xdsclient.EndpointsUpdateErrTuple - wantUpdateMD xdsclient.UpdateMetadata + wantUpdate map[string]xdsresource.EndpointsUpdateErrTuple + wantUpdateMD xdsresource.UpdateMetadata wantUpdateErr bool }{ // Any in resource is badly marshaled. 
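// A minimal sketch of the endpoints-update shape the EDS cases below assert
// on, built from the relocated xdsresource types; the addresses and subzone
// names mirror the "one-good-assignment" case rather than coming from a live
// server.
package main

import (
	"fmt"

	"google.golang.org/grpc/xds/internal"
	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)

func main() {
	u := xdsresource.EndpointsUpdate{
		Localities: []xdsresource.Locality{
			{
				// Priority 0 is the highest; this locality is served first.
				Endpoints: []xdsresource.Endpoint{{Address: "addr2:159"}},
				ID:        internal.LocalityID{SubZone: "locality-2"},
				Priority:  0,
				Weight:    1,
			},
			{
				Endpoints: []xdsresource.Endpoint{{Address: "addr1:314"}},
				ID:        internal.LocalityID{SubZone: "locality-1"},
				Priority:  1,
				Weight:    1,
			},
		},
	}
	fmt.Printf("%d localities, top-priority endpoint %s\n",
		len(u.Localities), u.Localities[0].Endpoints[0].Address)
}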
@@ -86,9 +87,9 @@ func (s) TestEDSHandleResponse(t *testing.T) { edsResponse: badlyMarshaledEDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -100,9 +101,9 @@ func (s) TestEDSHandleResponse(t *testing.T) { edsResponse: badResourceTypeInEDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -113,11 +114,11 @@ func (s) TestEDSHandleResponse(t *testing.T) { name: "one-uninterestring-assignment", edsResponse: goodEDSResponse2, wantErr: false, - wantUpdate: map[string]xdsclient.EndpointsUpdateErrTuple{ - "not-goodEDSName": {Update: xdsclient.EndpointsUpdate{ - Localities: []xdsclient.Locality{ + wantUpdate: map[string]xdsresource.EndpointsUpdateErrTuple{ + "not-goodEDSName": {Update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{{Address: "addr1:314"}}, + Endpoints: []xdsresource.Endpoint{{Address: "addr1:314"}}, ID: internal.LocalityID{SubZone: "locality-1"}, Priority: 0, Weight: 1, @@ -126,8 +127,8 @@ func (s) TestEDSHandleResponse(t *testing.T) { Raw: marshaledGoodCLA2, }}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -136,17 +137,17 @@ func (s) TestEDSHandleResponse(t *testing.T) { name: "one-good-assignment", edsResponse: goodEDSResponse1, wantErr: false, - wantUpdate: map[string]xdsclient.EndpointsUpdateErrTuple{ - goodEDSName: {Update: xdsclient.EndpointsUpdate{ - Localities: []xdsclient.Locality{ + wantUpdate: map[string]xdsresource.EndpointsUpdateErrTuple{ + goodEDSName: {Update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{{Address: "addr1:314"}}, + Endpoints: []xdsresource.Endpoint{{Address: "addr1:314"}}, ID: internal.LocalityID{SubZone: "locality-1"}, Priority: 1, Weight: 1, }, { - Endpoints: []xdsclient.Endpoint{{Address: "addr2:159"}}, + Endpoints: []xdsresource.Endpoint{{Address: "addr2:159"}}, ID: internal.LocalityID{SubZone: "locality-2"}, Priority: 0, Weight: 1, @@ -155,8 +156,8 @@ func (s) TestEDSHandleResponse(t *testing.T) { Raw: marshaledGoodCLA1, }}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -183,7 +184,7 @@ func (s) TestEDSHandleResponseWithoutWatch(t *testing.T) { defer cleanup() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsclient.UpdateMetadata) {}, + f: func(xdsclient.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/v2/lds_test.go b/xds/internal/xdsclient/v2/lds_test.go index a0600550095b..fdb2abeb5132 100644 --- a/xds/internal/xdsclient/v2/lds_test.go +++ 
b/xds/internal/xdsclient/v2/lds_test.go @@ -24,8 +24,8 @@ import ( v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // TestLDSHandleResponse starts a fake xDS server, makes a ClientConn to it, @@ -36,8 +36,8 @@ func (s) TestLDSHandleResponse(t *testing.T) { name string ldsResponse *v2xdspb.DiscoveryResponse wantErr bool - wantUpdate map[string]xdsclient.ListenerUpdateErrTuple - wantUpdateMD xdsclient.UpdateMetadata + wantUpdate map[string]xdsresource.ListenerUpdateErrTuple + wantUpdateMD xdsresource.UpdateMetadata wantUpdateErr bool }{ // Badly marshaled LDS response. @@ -46,9 +46,9 @@ func (s) TestLDSHandleResponse(t *testing.T) { ldsResponse: badlyMarshaledLDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -60,9 +60,9 @@ func (s) TestLDSHandleResponse(t *testing.T) { ldsResponse: badResourceTypeInLDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -75,12 +75,12 @@ func (s) TestLDSHandleResponse(t *testing.T) { name: "no-apiListener-in-response", ldsResponse: noAPIListenerLDSResponse, wantErr: true, - wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{ + wantUpdate: map[string]xdsresource.ListenerUpdateErrTuple{ goodLDSTarget1: {Err: cmpopts.AnyError}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -91,11 +91,11 @@ func (s) TestLDSHandleResponse(t *testing.T) { name: "one-good-listener", ldsResponse: goodLDSResponse1, wantErr: false, - wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{ - goodLDSTarget1: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, + wantUpdate: map[string]xdsresource.ListenerUpdateErrTuple{ + goodLDSTarget1: {Update: xdsresource.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -105,12 +105,12 @@ func (s) TestLDSHandleResponse(t *testing.T) { name: "multiple-good-listener", ldsResponse: ldsResponseWithMultipleResources, wantErr: false, - wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{ - goodLDSTarget1: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, - goodLDSTarget2: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener2}}, + wantUpdate: map[string]xdsresource.ListenerUpdateErrTuple{ + goodLDSTarget1: {Update: xdsresource.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, + goodLDSTarget2: {Update: 
xdsresource.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener2}}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -121,13 +121,13 @@ func (s) TestLDSHandleResponse(t *testing.T) { name: "good-bad-ugly-listeners", ldsResponse: goodBadUglyLDSResponse, wantErr: true, - wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{ - goodLDSTarget1: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, + wantUpdate: map[string]xdsresource.ListenerUpdateErrTuple{ + goodLDSTarget1: {Update: xdsresource.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, goodLDSTarget2: {Err: cmpopts.AnyError}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -138,11 +138,11 @@ func (s) TestLDSHandleResponse(t *testing.T) { name: "one-uninteresting-listener", ldsResponse: goodLDSResponse2, wantErr: false, - wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{ - goodLDSTarget2: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener2}}, + wantUpdate: map[string]xdsresource.ListenerUpdateErrTuple{ + goodLDSTarget2: {Update: xdsresource.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener2}}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -153,8 +153,8 @@ func (s) TestLDSHandleResponse(t *testing.T) { ldsResponse: emptyLDSResponse, wantErr: false, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -182,7 +182,7 @@ func (s) TestLDSHandleResponseWithoutWatch(t *testing.T) { defer cleanup() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsclient.UpdateMetadata) {}, + f: func(xdsclient.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/v2/rds_test.go b/xds/internal/xdsclient/v2/rds_test.go index 3389f0539469..79e51ab231ea 100644 --- a/xds/internal/xdsclient/v2/rds_test.go +++ b/xds/internal/xdsclient/v2/rds_test.go @@ -25,9 +25,9 @@ import ( xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // doLDS makes a LDS watch, and waits for the response and ack to finish. @@ -50,8 +50,8 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { name string rdsResponse *xdspb.DiscoveryResponse wantErr bool - wantUpdate map[string]xdsclient.RouteConfigUpdateErrTuple - wantUpdateMD xdsclient.UpdateMetadata + wantUpdate map[string]xdsresource.RouteConfigUpdateErrTuple + wantUpdateMD xdsresource.UpdateMetadata wantUpdateErr bool }{ // Badly marshaled RDS response. 
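// A minimal sketch of the route-configuration shape the RDS cases below
// assert on, using the relocated xdsresource types. newStringP is the tiny
// helper added to client_test.go earlier in this patch; the domain and
// cluster names here are placeholders, not values from the patch.
package main

import (
	"fmt"

	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)

func newStringP(s string) *string { return &s }

func main() {
	u := xdsresource.RouteConfigUpdate{
		VirtualHosts: []*xdsresource.VirtualHost{{
			Domains: []string{"example.com"},
			Routes: []*xdsresource.Route{{
				Prefix:           newStringP(""), // an empty prefix matches every path
				WeightedClusters: map[string]xdsresource.WeightedCluster{"cluster-a": {Weight: 1}},
				RouteAction:      xdsresource.RouteActionRoute,
			}},
		}},
	}
	fmt.Println(len(u.VirtualHosts), "virtual host(s),", len(u.VirtualHosts[0].Routes), "route(s)")
}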
@@ -60,9 +60,9 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { rdsResponse: badlyMarshaledRDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -74,9 +74,9 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { rdsResponse: badResourceTypeInRDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -89,14 +89,14 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { name: "no-virtual-hosts-in-response", rdsResponse: noVirtualHostsInRDSResponse, wantErr: false, - wantUpdate: map[string]xdsclient.RouteConfigUpdateErrTuple{ - goodRouteName1: {Update: xdsclient.RouteConfigUpdate{ + wantUpdate: map[string]xdsresource.RouteConfigUpdateErrTuple{ + goodRouteName1: {Update: xdsresource.RouteConfigUpdate{ VirtualHosts: nil, Raw: marshaledNoVirtualHostsRouteConfig, }}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -105,28 +105,28 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { name: "one-uninteresting-route-config", rdsResponse: goodRDSResponse2, wantErr: false, - wantUpdate: map[string]xdsclient.RouteConfigUpdateErrTuple{ - goodRouteName2: {Update: xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + wantUpdate: map[string]xdsresource.RouteConfigUpdateErrTuple{ + goodRouteName2: {Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{uninterestingDomain}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{uninterestingClusterName: {Weight: 1}}, - RouteAction: xdsclient.RouteActionRoute}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), + WeightedClusters: map[string]xdsresource.WeightedCluster{uninterestingClusterName: {Weight: 1}}, + RouteAction: xdsresource.RouteActionRoute}}, }, { Domains: []string{goodLDSTarget1}, - Routes: []*xdsclient.Route{{ + Routes: []*xdsresource.Route{{ Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{goodClusterName2: {Weight: 1}}, - RouteAction: xdsclient.RouteActionRoute}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{goodClusterName2: {Weight: 1}}, + RouteAction: xdsresource.RouteActionRoute}}, }, }, Raw: marshaledGoodRouteConfig2, }}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -135,28 +135,28 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { name: "one-good-route-config", rdsResponse: goodRDSResponse1, wantErr: false, - wantUpdate: map[string]xdsclient.RouteConfigUpdateErrTuple{ - goodRouteName1: {Update: xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + wantUpdate: map[string]xdsresource.RouteConfigUpdateErrTuple{ + goodRouteName1: {Update: 
xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{uninterestingDomain}, - Routes: []*xdsclient.Route{{ + Routes: []*xdsresource.Route{{ Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{uninterestingClusterName: {Weight: 1}}, - RouteAction: xdsclient.RouteActionRoute}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{uninterestingClusterName: {Weight: 1}}, + RouteAction: xdsresource.RouteActionRoute}}, }, { Domains: []string{goodLDSTarget1}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{goodClusterName1: {Weight: 1}}, - RouteAction: xdsclient.RouteActionRoute}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), + WeightedClusters: map[string]xdsresource.WeightedCluster{goodClusterName1: {Weight: 1}}, + RouteAction: xdsresource.RouteActionRoute}}, }, }, Raw: marshaledGoodRouteConfig1, }}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -183,7 +183,7 @@ func (s) TestRDSHandleResponseWithoutRDSWatch(t *testing.T) { defer cleanup() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsclient.UpdateMetadata) {}, + f: func(xdsclient.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/v3/client.go b/xds/internal/xdsclient/v3/client.go index 827c06b741b7..21d8809dd33b 100644 --- a/xds/internal/xdsclient/v3/client.go +++ b/xds/internal/xdsclient/v3/client.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" @@ -93,7 +94,7 @@ type client struct { // ClientConn to the xDS gRPC server. Owned by the parent xdsClient. cc *grpc.ClientConn nodeProto *v3corepb.Node - updateValidator xdsclient.UpdateValidatorFunc + updateValidator xdsresource.UpdateValidatorFunc } func (v3c *client) NewStream(ctx context.Context) (grpc.ClientStream, error) { @@ -164,16 +165,16 @@ func (v3c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, stri var err error url := resp.GetTypeUrl() switch { - case xdsclient.IsListenerResource(url): + case xdsresource.IsListenerResource(url): err = v3c.handleLDSResponse(resp) rType = xdsclient.ListenerResource - case xdsclient.IsRouteConfigResource(url): + case xdsresource.IsRouteConfigResource(url): err = v3c.handleRDSResponse(resp) rType = xdsclient.RouteConfigResource - case xdsclient.IsClusterResource(url): + case xdsresource.IsClusterResource(url): err = v3c.handleCDSResponse(resp) rType = xdsclient.ClusterResource - case xdsclient.IsEndpointsResource(url): + case xdsresource.IsEndpointsResource(url): err = v3c.handleEDSResponse(resp) rType = xdsclient.EndpointsResource default: @@ -188,7 +189,7 @@ func (v3c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, stri // server. On receipt of a good response, it also invokes the registered watcher // callback. 
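[Editor's note] The dispatch above routes each type URL to a per-type handler, and the handlers that follow all share one flow: build an xdsresource.UnmarshalOptions from the DiscoveryResponse and hand it to the matching Unmarshal* function. A minimal sketch of that flow for LDS, with propagation to the parent client elided; handleLDS and its error text are inventions of this sketch, and only the options fields the hunks populate are set.

    package main

    import (
        "fmt"

        v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
        "google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
    )

    // handleLDS mirrors handleLDSResponse below, minus the propagation to
    // the parent client: unmarshal, then inspect the updates and metadata.
    func handleLDS(resp *v3discoverypb.DiscoveryResponse) error {
        update, md, err := xdsresource.UnmarshalListener(&xdsresource.UnmarshalOptions{
            Version:   resp.GetVersionInfo(),
            Resources: resp.GetResources(),
        })
        if err != nil {
            // On failure md carries the NACK status and error state.
            return fmt.Errorf("LDS response rejected (status %v): %v", md.Status, err)
        }
        for name := range update {
            fmt.Println("validated listener:", name)
        }
        return nil
    }

    func main() {
        _ = handleLDS(&v3discoverypb.DiscoveryResponse{})
    }
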
func (v3c *client) handleLDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalListener(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalListener(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v3c.logger, @@ -202,7 +203,7 @@ func (v3c *client) handleLDSResponse(resp *v3discoverypb.DiscoveryResponse) erro // server. On receipt of a good response, it caches validated resources and also // invokes the registered watcher callback. func (v3c *client) handleRDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalRouteConfig(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalRouteConfig(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v3c.logger, @@ -216,7 +217,7 @@ func (v3c *client) handleRDSResponse(resp *v3discoverypb.DiscoveryResponse) erro // server. On receipt of a good response, it also invokes the registered watcher // callback. func (v3c *client) handleCDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalCluster(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalCluster(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v3c.logger, @@ -227,7 +228,7 @@ func (v3c *client) handleCDSResponse(resp *v3discoverypb.DiscoveryResponse) erro } func (v3c *client) handleEDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalEndpoints(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalEndpoints(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v3c.logger, diff --git a/xds/internal/xdsclient/watchers.go b/xds/internal/xdsclient/watchers.go index e26ed360308a..639a918627b8 100644 --- a/xds/internal/xdsclient/watchers.go +++ b/xds/internal/xdsclient/watchers.go @@ -24,6 +24,7 @@ import ( "time" "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) type watchInfoState int @@ -41,10 +42,10 @@ type watchInfo struct { rType ResourceType target string - ldsCallback func(ListenerUpdate, error) - rdsCallback func(RouteConfigUpdate, error) - cdsCallback func(ClusterUpdate, error) - edsCallback func(EndpointsUpdate, error) + ldsCallback func(xdsresource.ListenerUpdate, error) + rdsCallback func(xdsresource.RouteConfigUpdate, error) + cdsCallback func(xdsresource.ClusterUpdate, error) + edsCallback func(xdsresource.EndpointsUpdate, error) expiryTimer *time.Timer @@ -105,13 +106,13 @@ func (wi *watchInfo) sendErrorLocked(err error) { ) switch wi.rType { case ListenerResource: - u = ListenerUpdate{} + u = xdsresource.ListenerUpdate{} case RouteConfigResource: - u = RouteConfigUpdate{} + u = xdsresource.RouteConfigUpdate{} case ClusterResource: - u = ClusterUpdate{} + u = xdsresource.ClusterUpdate{} case EndpointsResource: - u = EndpointsUpdate{} + u = xdsresource.EndpointsUpdate{} } wi.c.scheduleCallback(wi, u, err) } @@ -132,7 +133,7 @@ func (c *clientImpl) watch(wi *watchInfo) (cancel func()) { c.logger.Debugf("new watch for type %v, resource name %v", wi.rType, wi.target) var ( watchers map[string]map[*watchInfo]bool - mds map[string]UpdateMetadata + mds map[string]xdsresource.UpdateMetadata ) switch wi.rType { case ListenerResource: @@ -163,7 +164,7 @@ func (c *clientImpl) watch(wi *watchInfo) 
(cancel func()) { c.logger.Debugf("first watch for type %v, resource name %v, will send a new xDS request", wi.rType, wi.target) s = make(map[*watchInfo]bool) watchers[resourceName] = s - mds[resourceName] = UpdateMetadata{Status: ServiceStatusRequested} + mds[resourceName] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested} c.apiClient.AddWatch(wi.rType, resourceName) } // No matter what, add the new watcher to the set, so it's callback will be @@ -233,7 +234,7 @@ func (c *clientImpl) watch(wi *watchInfo) (cancel func()) { // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. -func (c *clientImpl) WatchListener(serviceName string, cb func(ListenerUpdate, error)) (cancel func()) { +func (c *clientImpl) WatchListener(serviceName string, cb func(xdsresource.ListenerUpdate, error)) (cancel func()) { wi := &watchInfo{ c: c, rType: ListenerResource, @@ -252,7 +253,7 @@ func (c *clientImpl) WatchListener(serviceName string, cb func(ListenerUpdate, e // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. -func (c *clientImpl) WatchRouteConfig(routeName string, cb func(RouteConfigUpdate, error)) (cancel func()) { +func (c *clientImpl) WatchRouteConfig(routeName string, cb func(xdsresource.RouteConfigUpdate, error)) (cancel func()) { wi := &watchInfo{ c: c, rType: RouteConfigResource, @@ -275,7 +276,7 @@ func (c *clientImpl) WatchRouteConfig(routeName string, cb func(RouteConfigUpdat // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. -func (c *clientImpl) WatchCluster(clusterName string, cb func(ClusterUpdate, error)) (cancel func()) { +func (c *clientImpl) WatchCluster(clusterName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) { wi := &watchInfo{ c: c, rType: ClusterResource, @@ -297,7 +298,7 @@ func (c *clientImpl) WatchCluster(clusterName string, cb func(ClusterUpdate, err // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. 
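[Editor's note] All four Watch* methods now take callbacks typed against xdsresource, as the signatures above and below show. A minimal sketch of registering a listener watch through a narrow interface that clientImpl satisfies; ldsWatcher, watchListener, and the service name are inventions of this sketch, which again assumes a home inside the xds/internal tree.

    package main

    import (
        "log"

        "google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
    )

    // ldsWatcher is the narrow surface this sketch needs; clientImpl's
    // WatchListener method satisfies it.
    type ldsWatcher interface {
        WatchListener(serviceName string, cb func(xdsresource.ListenerUpdate, error)) (cancel func())
    }

    // watchListener registers a watch and logs the route config each update names.
    func watchListener(c ldsWatcher) (cancel func()) {
        return c.WatchListener("example-service", func(u xdsresource.ListenerUpdate, err error) {
            if err != nil {
                log.Printf("listener watch error: %v", err)
                return
            }
            log.Printf("listener points at route config %q", u.RouteConfigName)
        })
    }

    func main() {}
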
-func (c *clientImpl) WatchEndpoints(clusterName string, cb func(EndpointsUpdate, error)) (cancel func()) { +func (c *clientImpl) WatchEndpoints(clusterName string, cb func(xdsresource.EndpointsUpdate, error)) (cancel func()) { wi := &watchInfo{ c: c, rType: EndpointsResource, diff --git a/xds/internal/xdsclient/watchers_cluster_test.go b/xds/internal/xdsclient/watchers_cluster_test.go index c06319e959c6..7ddaf08637e4 100644 --- a/xds/internal/xdsclient/watchers_cluster_test.go +++ b/xds/internal/xdsclient/watchers_cluster_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc/internal/testutils" @@ -52,15 +53,15 @@ func (s) TestClusterWatch(t *testing.T) { apiClient := c.(*testAPIClient) clusterUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -70,17 +71,17 @@ func (s) TestClusterWatch(t *testing.T) { // new update is not considered equal to the old one. newUpdate := wantUpdate newUpdate.Raw = &anypb.Any{} - client.NewClusters(map[string]ClusterUpdateErrTuple{ + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ testCDSName: {Update: newUpdate}, "randomName": {}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, newUpdate, nil); err != nil { t.Fatal(err) } // Cancel watch, and send update again. 
cancelWatch() - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := clusterUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { @@ -114,8 +115,8 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { for i := 0; i < count; i++ { clusterUpdateCh := testutils.NewChannel() clusterUpdateChs = append(clusterUpdateChs, clusterUpdateCh) - cancelLastWatch = client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + cancelLastWatch = client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -127,8 +128,8 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { } } - wantUpdate := ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) @@ -139,7 +140,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { // be notified because one has been cancelled, and the other is receiving // the same update. cancelLastWatch() - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { func() { sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) @@ -153,8 +154,8 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { // Push a new update and make sure the uncancelled watcher is invoked. // Specify a non-nil raw proto to ensure that the new update is not // considered equal to the old one. 
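[Editor's note] The watcher tests in this file all use the same funnel: the callback wraps each (update, err) pair in an ErrTuple and pushes it onto a testutils.Channel, so the test body can receive and assert on it synchronously. A minimal sketch of the pattern in isolation; here the callback is invoked directly rather than by a client, and the cluster name is a placeholder.

    package main

    import (
        "context"
        "fmt"
        "time"

        "google.golang.org/grpc/internal/testutils"
        "google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
    )

    func main() {
        ch := testutils.NewChannel()

        // Stand-in for the callback the tests register via client.WatchCluster.
        cb := func(u xdsresource.ClusterUpdate, err error) {
            ch.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err})
        }
        cb(xdsresource.ClusterUpdate{ClusterName: "eds-service"}, nil)

        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()
        got, err := ch.Receive(ctx)
        if err != nil {
            fmt.Println("timed out waiting for an update:", err)
            return
        }
        fmt.Println(got.(xdsresource.ClusterUpdateErrTuple).Update.ClusterName)
    }
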
- newUpdate := ClusterUpdate{ClusterName: testEDSName, Raw: &anypb.Any{}} - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: newUpdate}}, UpdateMetadata{}) + newUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName, Raw: &anypb.Any{}} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateChs[0], newUpdate, nil); err != nil { t.Fatal(err) } @@ -186,8 +187,8 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { for i := 0; i < count; i++ { clusterUpdateCh := testutils.NewChannel() clusterUpdateChs = append(clusterUpdateChs, clusterUpdateCh) - client.WatchCluster(testCDSName+"1", func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + client.WatchCluster(testCDSName+"1", func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -201,19 +202,19 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { // Third watch for a different name. clusterUpdateCh2 := testutils.NewChannel() - client.WatchCluster(testCDSName+"2", func(update ClusterUpdate, err error) { - clusterUpdateCh2.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + client.WatchCluster(testCDSName+"2", func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := ClusterUpdate{ClusterName: testEDSName + "1"} - wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} - client.NewClusters(map[string]ClusterUpdateErrTuple{ + wantUpdate1 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"} + wantUpdate2 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ testCDSName + "1": {Update: wantUpdate1}, testCDSName + "2": {Update: wantUpdate2}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate1, nil); err != nil { @@ -246,25 +247,25 @@ func (s) TestClusterWatchAfterCache(t *testing.T) { apiClient := c.(*testAPIClient) clusterUpdateCh := testutils.NewChannel() - client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]ClusterUpdateErrTuple{ + wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ testCDSName: {Update: wantUpdate}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } // Another watch for the resource in cache. 
clusterUpdateCh2 := testutils.NewChannel() - client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh2.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() @@ -307,8 +308,8 @@ func (s) TestClusterWatchExpiryTimer(t *testing.T) { apiClient := c.(*testAPIClient) clusterUpdateCh := testutils.NewChannel() - client.WatchCluster(testCDSName, func(u ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: u, Err: err}) + client.WatchCluster(testCDSName, func(u xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -318,8 +319,8 @@ func (s) TestClusterWatchExpiryTimer(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for cluster update: %v", err) } - gotUpdate := u.(ClusterUpdateErrTuple) - if gotUpdate.Err == nil || !cmp.Equal(gotUpdate.Update, ClusterUpdate{}) { + gotUpdate := u.(xdsresource.ClusterUpdateErrTuple) + if gotUpdate.Err == nil || !cmp.Equal(gotUpdate.Update, xdsresource.ClusterUpdate{}) { t.Fatalf("unexpected clusterUpdate: (%v, %v), want: (ClusterUpdate{}, nil)", gotUpdate.Update, gotUpdate.Err) } } @@ -346,17 +347,17 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { apiClient := c.(*testAPIClient) clusterUpdateCh := testutils.NewChannel() - client.WatchCluster(testCDSName, func(u ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: u, Err: err}) + client.WatchCluster(testCDSName, func(u xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]ClusterUpdateErrTuple{ + wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ testCDSName: {Update: wantUpdate}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -394,8 +395,8 @@ func (s) TestClusterResourceRemoved(t *testing.T) { apiClient := c.(*testAPIClient) clusterUpdateCh1 := testutils.NewChannel() - client.WatchCluster(testCDSName+"1", func(update ClusterUpdate, err error) { - clusterUpdateCh1.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + client.WatchCluster(testCDSName+"1", func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -403,19 +404,19 @@ func (s) TestClusterResourceRemoved(t *testing.T) { // Another watch for a different name. 
clusterUpdateCh2 := testutils.NewChannel() - client.WatchCluster(testCDSName+"2", func(update ClusterUpdate, err error) { - clusterUpdateCh2.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + client.WatchCluster(testCDSName+"2", func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := ClusterUpdate{ClusterName: testEDSName + "1"} - wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} - client.NewClusters(map[string]ClusterUpdateErrTuple{ + wantUpdate1 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"} + wantUpdate2 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ testCDSName + "1": {Update: wantUpdate1}, testCDSName + "2": {Update: wantUpdate2}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh1, wantUpdate1, nil); err != nil { t.Fatal(err) } @@ -424,10 +425,10 @@ func (s) TestClusterResourceRemoved(t *testing.T) { } // Send another update to remove resource 1. - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{}) + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) // Watcher 1 should get an error. - if u, err := clusterUpdateCh1.Receive(ctx); err != nil || ErrType(u.(ClusterUpdateErrTuple).Err) != ErrorTypeResourceNotFound { + if u, err := clusterUpdateCh1.Receive(ctx); err != nil || ErrType(u.(xdsresource.ClusterUpdateErrTuple).Err) != ErrorTypeResourceNotFound { t.Errorf("unexpected clusterUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) } @@ -440,8 +441,8 @@ func (s) TestClusterResourceRemoved(t *testing.T) { // Send another update with resource 2 modified. Specify a non-nil raw proto // to ensure that the new update is not considered equal to the old one. - wantUpdate2 = ClusterUpdate{ClusterName: testEDSName + "2", Raw: &anypb.Any{}} - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{}) + wantUpdate2 = xdsresource.ClusterUpdate{ClusterName: testEDSName + "2", Raw: &anypb.Any{}} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) // Watcher 1 should not see an update. 
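[Editor's note] As the removal hunk above shows, a watcher whose resource disappears from an update is notified with a resource-not-found error, checked via ErrType. A minimal sketch of that check, assuming the error helpers keep their current home in package xdsclient, which is what the unqualified ErrType and ErrorTypeResourceNotFound in the test imply.

    package main

    import (
        "fmt"

        "google.golang.org/grpc/xds/internal/xdsclient"
    )

    // isRemoved reports whether a watch error means the server deleted the
    // resource, mirroring the check in the test above.
    func isRemoved(err error) bool {
        return xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound
    }

    func main() {
        fmt.Println(isRemoved(nil)) // false: a nil error is a normal update
    }
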
sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) @@ -477,8 +478,8 @@ func (s) TestClusterWatchNACKError(t *testing.T) { apiClient := c.(*testAPIClient) clusterUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) defer cancelWatch() if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { @@ -486,10 +487,10 @@ func (s) TestClusterWatchNACKError(t *testing.T) { } wantError := fmt.Errorf("testing error") - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: { + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: { Err: wantError, - }}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, ClusterUpdate{}, wantError); err != nil { + }}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) + if err := verifyClusterUpdate(ctx, clusterUpdateCh, xdsresource.ClusterUpdate{}, wantError); err != nil { t.Fatal(err) } } @@ -521,8 +522,8 @@ func (s) TestClusterWatchPartialValid(t *testing.T) { for _, name := range []string{testCDSName, badResourceName} { clusterUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchCluster(name, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchCluster(name, func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) defer func() { cancelWatch() @@ -538,18 +539,18 @@ func (s) TestClusterWatchPartialValid(t *testing.T) { wantError := fmt.Errorf("testing error") wantError2 := fmt.Errorf("individual error") - client.NewClusters(map[string]ClusterUpdateErrTuple{ - testCDSName: {Update: ClusterUpdate{ClusterName: testEDSName}}, + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ + testCDSName: {Update: xdsresource.ClusterUpdate{ClusterName: testEDSName}}, badResourceName: {Err: wantError2}, - }, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) // The valid resource should be sent to the watcher. - if err := verifyClusterUpdate(ctx, updateChs[testCDSName], ClusterUpdate{ClusterName: testEDSName}, nil); err != nil { + if err := verifyClusterUpdate(ctx, updateChs[testCDSName], xdsresource.ClusterUpdate{ClusterName: testEDSName}, nil); err != nil { t.Fatal(err) } // The failed watcher should receive an error. 
- if err := verifyClusterUpdate(ctx, updateChs[badResourceName], ClusterUpdate{}, wantError2); err != nil { + if err := verifyClusterUpdate(ctx, updateChs[badResourceName], xdsresource.ClusterUpdate{}, wantError2); err != nil { t.Fatal(err) } } diff --git a/xds/internal/xdsclient/watchers_endpoints_test.go b/xds/internal/xdsclient/watchers_endpoints_test.go index b87723e5086e..3db3c3efa755 100644 --- a/xds/internal/xdsclient/watchers_endpoints_test.go +++ b/xds/internal/xdsclient/watchers_endpoints_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc/internal/testutils" @@ -31,15 +32,15 @@ import ( ) var ( - testLocalities = []Locality{ + testLocalities = []xdsresource.Locality{ { - Endpoints: []Endpoint{{Address: "addr1:314"}}, + Endpoints: []xdsresource.Endpoint{{Address: "addr1:314"}}, ID: internal.LocalityID{SubZone: "locality-1"}, Priority: 1, Weight: 1, }, { - Endpoints: []Endpoint{{Address: "addr2:159"}}, + Endpoints: []xdsresource.Endpoint{{Address: "addr2:159"}}, ID: internal.LocalityID{SubZone: "locality-2"}, Priority: 0, Weight: 1, @@ -70,15 +71,15 @@ func (s) TestEndpointsWatch(t *testing.T) { apiClient := c.(*testAPIClient) endpointsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}} + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -88,17 +89,17 @@ func (s) TestEndpointsWatch(t *testing.T) { // new update is not considered equal to the old one. newUpdate := wantUpdate newUpdate.Raw = &anypb.Any{} - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{ + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{ testCDSName: {Update: newUpdate}, "randomName": {}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, newUpdate, nil); err != nil { t.Fatal(err) } // Cancel watch, and send update again. 
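[Editor's note] The endpoints fixtures go through the same rename: Locality and Endpoint now come from xdsresource, while LocalityID stays in the xds/internal package. A minimal sketch of that fixture shape; the address and sub-zone values are placeholders.

    package main

    import (
        "fmt"

        "google.golang.org/grpc/xds/internal"
        "google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
    )

    func main() {
        u := xdsresource.EndpointsUpdate{
            Localities: []xdsresource.Locality{{
                Endpoints: []xdsresource.Endpoint{{Address: "10.0.0.1:8080"}},
                ID:        internal.LocalityID{SubZone: "zone-a"},
                Priority:  0,
                Weight:    1,
            }},
        }
        fmt.Println(len(u.Localities), "locality(ies)")
    }
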
cancelWatch() - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := endpointsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { @@ -134,8 +135,8 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { for i := 0; i < count; i++ { endpointsUpdateCh := testutils.NewChannel() endpointsUpdateChs = append(endpointsUpdateChs, endpointsUpdateCh) - cancelLastWatch = client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + cancelLastWatch = client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -147,8 +148,8 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { } } - wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}} + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) @@ -159,7 +160,7 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { // be notified because one has been cancelled, and the other is receiving // the same update. cancelLastWatch() - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { func() { sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) @@ -173,8 +174,8 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { // Push a new update and make sure the uncancelled watcher is invoked. // Specify a non-nil raw proto to ensure that the new update is not // considered equal to the old one. 
- newUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}, Raw: &anypb.Any{}} - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: newUpdate}}, UpdateMetadata{}) + newUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}, Raw: &anypb.Any{}} + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[0], newUpdate, nil); err != nil { t.Fatal(err) } @@ -206,8 +207,8 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { for i := 0; i < count; i++ { endpointsUpdateCh := testutils.NewChannel() endpointsUpdateChs = append(endpointsUpdateChs, endpointsUpdateCh) - client.WatchEndpoints(testCDSName+"1", func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + client.WatchEndpoints(testCDSName+"1", func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -221,19 +222,19 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { // Third watch for a different name. endpointsUpdateCh2 := testutils.NewChannel() - client.WatchEndpoints(testCDSName+"2", func(update EndpointsUpdate, err error) { - endpointsUpdateCh2.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + client.WatchEndpoints(testCDSName+"2", func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} - wantUpdate2 := EndpointsUpdate{Localities: []Locality{testLocalities[1]}} - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{ + wantUpdate1 := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}} + wantUpdate2 := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[1]}} + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{ testCDSName + "1": {Update: wantUpdate1}, testCDSName + "2": {Update: wantUpdate2}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate1, nil); err != nil { @@ -266,23 +267,23 @@ func (s) TestEndpointsWatchAfterCache(t *testing.T) { apiClient := c.(*testAPIClient) endpointsUpdateCh := testutils.NewChannel() - client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}} + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: 
wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } // Another watch for the resource in cache. endpointsUpdateCh2 := testutils.NewChannel() - client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh2.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() @@ -325,8 +326,8 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) { apiClient := c.(*testAPIClient) endpointsUpdateCh := testutils.NewChannel() - client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -336,8 +337,8 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for endpoints update: %v", err) } - gotUpdate := u.(EndpointsUpdateErrTuple) - if gotUpdate.Err == nil || !cmp.Equal(gotUpdate.Update, EndpointsUpdate{}) { + gotUpdate := u.(xdsresource.EndpointsUpdateErrTuple) + if gotUpdate.Err == nil || !cmp.Equal(gotUpdate.Update, xdsresource.EndpointsUpdate{}) { t.Fatalf("unexpected endpointsUpdate: (%v, %v), want: (EndpointsUpdate{}, nil)", gotUpdate.Update, gotUpdate.Err) } } @@ -363,8 +364,8 @@ func (s) TestEndpointsWatchNACKError(t *testing.T) { apiClient := c.(*testAPIClient) endpointsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) defer cancelWatch() if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { @@ -372,8 +373,8 @@ func (s) TestEndpointsWatchNACKError(t *testing.T) { } wantError := fmt.Errorf("testing error") - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Err: wantError}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) - if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, EndpointsUpdate{}, wantError); err != nil { + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) + if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, xdsresource.EndpointsUpdate{}, wantError); err != nil { t.Fatal(err) } } @@ -405,8 +406,8 @@ func (s) TestEndpointsWatchPartialValid(t *testing.T) { for _, name := range []string{testCDSName, badResourceName} { endpointsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchEndpoints(name, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchEndpoints(name, func(update 
xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) defer func() { cancelWatch() @@ -422,18 +423,18 @@ func (s) TestEndpointsWatchPartialValid(t *testing.T) { wantError := fmt.Errorf("testing error") wantError2 := fmt.Errorf("individual error") - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{ - testCDSName: {Update: EndpointsUpdate{Localities: []Locality{testLocalities[0]}}}, + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{ + testCDSName: {Update: xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}}, badResourceName: {Err: wantError2}, - }, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) // The valid resource should be sent to the watcher. - if err := verifyEndpointsUpdate(ctx, updateChs[testCDSName], EndpointsUpdate{Localities: []Locality{testLocalities[0]}}, nil); err != nil { + if err := verifyEndpointsUpdate(ctx, updateChs[testCDSName], xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, nil); err != nil { t.Fatal(err) } // The failed watcher should receive an error. - if err := verifyEndpointsUpdate(ctx, updateChs[badResourceName], EndpointsUpdate{}, wantError2); err != nil { + if err := verifyEndpointsUpdate(ctx, updateChs[badResourceName], xdsresource.EndpointsUpdate{}, wantError2); err != nil { t.Fatal(err) } } diff --git a/xds/internal/xdsclient/watchers_listener_test.go b/xds/internal/xdsclient/watchers_listener_test.go index 176e6bbcb7b4..cd375639f8ee 100644 --- a/xds/internal/xdsclient/watchers_listener_test.go +++ b/xds/internal/xdsclient/watchers_listener_test.go @@ -26,6 +26,7 @@ import ( v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" ) @@ -52,15 +53,15 @@ func (s) TestLDSWatch(t *testing.T) { apiClient := c.(*testAPIClient) ldsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ListenerUpdate{RouteConfigName: testRDSName} - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -68,18 +69,18 @@ func (s) TestLDSWatch(t *testing.T) { // Push an update, with an extra resource for a different resource name. // Specify a non-nil raw proto in the original resource to ensure that the // new update is not considered equal to the old one. 
- newUpdate := ListenerUpdate{RouteConfigName: testRDSName, Raw: &anypb.Any{}} - client.NewListeners(map[string]ListenerUpdateErrTuple{ + newUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName, Raw: &anypb.Any{}} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{ testLDSName: {Update: newUpdate}, "randomName": {}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyListenerUpdate(ctx, ldsUpdateCh, newUpdate, nil); err != nil { t.Fatal(err) } // Cancel watch, and send update again. cancelWatch() - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := ldsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { @@ -116,8 +117,8 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { for i := 0; i < count; i++ { ldsUpdateCh := testutils.NewChannel() ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh) - cancelLastWatch = client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + cancelLastWatch = client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -129,8 +130,8 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { } } - wantUpdate := ListenerUpdate{RouteConfigName: testRDSName} - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) @@ -141,7 +142,7 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { // be notified because one has been cancelled, and the other is receiving // the same update. cancelLastWatch() - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { func() { sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) @@ -155,8 +156,8 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { // Push a new update and make sure the uncancelled watcher is invoked. // Specify a non-nil raw proto to ensure that the new update is not // considered equal to the old one. 
- newUpdate := ListenerUpdate{RouteConfigName: testRDSName, Raw: &anypb.Any{}} - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: newUpdate}}, UpdateMetadata{}) + newUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName, Raw: &anypb.Any{}} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyListenerUpdate(ctx, ldsUpdateChs[0], newUpdate, nil); err != nil { t.Fatal(err) } @@ -189,8 +190,8 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { for i := 0; i < count; i++ { ldsUpdateCh := testutils.NewChannel() ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh) - client.WatchListener(testLDSName+"1", func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + client.WatchListener(testLDSName+"1", func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -204,19 +205,19 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { // Third watch for a different name. ldsUpdateCh2 := testutils.NewChannel() - client.WatchListener(testLDSName+"2", func(update ListenerUpdate, err error) { - ldsUpdateCh2.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + client.WatchListener(testLDSName+"2", func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := ListenerUpdate{RouteConfigName: testRDSName + "1"} - wantUpdate2 := ListenerUpdate{RouteConfigName: testRDSName + "2"} - client.NewListeners(map[string]ListenerUpdateErrTuple{ + wantUpdate1 := xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "1"} + wantUpdate2 := xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "2"} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{ testLDSName + "1": {Update: wantUpdate1}, testLDSName + "2": {Update: wantUpdate2}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate1, nil); err != nil { @@ -249,23 +250,23 @@ func (s) TestLDSWatchAfterCache(t *testing.T) { apiClient := c.(*testAPIClient) ldsUpdateCh := testutils.NewChannel() - client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ListenerUpdate{RouteConfigName: testRDSName} - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } // Another watch for the resource in cache. 
ldsUpdateCh2 := testutils.NewChannel() - client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh2.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() @@ -311,27 +312,27 @@ func (s) TestLDSResourceRemoved(t *testing.T) { apiClient := c.(*testAPIClient) ldsUpdateCh1 := testutils.NewChannel() - client.WatchListener(testLDSName+"1", func(update ListenerUpdate, err error) { - ldsUpdateCh1.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + client.WatchListener(testLDSName+"1", func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } // Another watch for a different name. ldsUpdateCh2 := testutils.NewChannel() - client.WatchListener(testLDSName+"2", func(update ListenerUpdate, err error) { - ldsUpdateCh2.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + client.WatchListener(testLDSName+"2", func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := ListenerUpdate{RouteConfigName: testEDSName + "1"} - wantUpdate2 := ListenerUpdate{RouteConfigName: testEDSName + "2"} - client.NewListeners(map[string]ListenerUpdateErrTuple{ + wantUpdate1 := xdsresource.ListenerUpdate{RouteConfigName: testEDSName + "1"} + wantUpdate2 := xdsresource.ListenerUpdate{RouteConfigName: testEDSName + "2"} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{ testLDSName + "1": {Update: wantUpdate1}, testLDSName + "2": {Update: wantUpdate2}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyListenerUpdate(ctx, ldsUpdateCh1, wantUpdate1, nil); err != nil { t.Fatal(err) } @@ -340,10 +341,10 @@ func (s) TestLDSResourceRemoved(t *testing.T) { } // Send another update to remove resource 1. - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{}) + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) // Watcher 1 should get an error. - if u, err := ldsUpdateCh1.Receive(ctx); err != nil || ErrType(u.(ListenerUpdateErrTuple).Err) != ErrorTypeResourceNotFound { + if u, err := ldsUpdateCh1.Receive(ctx); err != nil || ErrType(u.(xdsresource.ListenerUpdateErrTuple).Err) != ErrorTypeResourceNotFound { t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) } @@ -356,8 +357,8 @@ func (s) TestLDSResourceRemoved(t *testing.T) { // Send another update with resource 2 modified. Specify a non-nil raw proto // to ensure that the new update is not considered equal to the old one. 
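[Editor's note] Several of these tests attach a fresh raw proto precisely so the new update does not compare equal to the previous one. A minimal sketch of why that works, assuming the duplicate check ultimately reduces to proto equality of the raw resources; the tests themselves only require that byte-identical raws be treated as duplicates and differing raws as new updates.

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/anypb"
    )

    func main() {
        a := &anypb.Any{TypeUrl: "custom.filter", Value: []byte{1, 2, 3}}
        b := &anypb.Any{TypeUrl: "custom.filter", Value: []byte{1, 2, 3}}
        c := &anypb.Any{TypeUrl: "custom.filter", Value: []byte{9}}

        fmt.Println(proto.Equal(a, b)) // true: byte-identical raw config
        fmt.Println(proto.Equal(a, c)) // false: a changed Value is a new update
    }
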
- wantUpdate2 = ListenerUpdate{RouteConfigName: testEDSName + "2", Raw: &anypb.Any{}} - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{}) + wantUpdate2 = xdsresource.ListenerUpdate{RouteConfigName: testEDSName + "2", Raw: &anypb.Any{}} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) // Watcher 1 should not see an update. sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) @@ -393,8 +394,8 @@ func (s) TestListenerWatchNACKError(t *testing.T) { apiClient := c.(*testAPIClient) ldsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) defer cancelWatch() if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { @@ -402,8 +403,8 @@ func (s) TestListenerWatchNACKError(t *testing.T) { } wantError := fmt.Errorf("testing error") - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Err: wantError}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh, ListenerUpdate{}, wantError); err != nil { + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) + if err := verifyListenerUpdate(ctx, ldsUpdateCh, xdsresource.ListenerUpdate{}, wantError); err != nil { t.Fatal(err) } } @@ -435,8 +436,8 @@ func (s) TestListenerWatchPartialValid(t *testing.T) { for _, name := range []string{testLDSName, badResourceName} { ldsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchListener(name, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchListener(name, func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) defer func() { cancelWatch() @@ -452,18 +453,18 @@ func (s) TestListenerWatchPartialValid(t *testing.T) { wantError := fmt.Errorf("testing error") wantError2 := fmt.Errorf("individual error") - client.NewListeners(map[string]ListenerUpdateErrTuple{ - testLDSName: {Update: ListenerUpdate{RouteConfigName: testEDSName}}, + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{ + testLDSName: {Update: xdsresource.ListenerUpdate{RouteConfigName: testEDSName}}, badResourceName: {Err: wantError2}, - }, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) // The valid resource should be sent to the watcher. - if err := verifyListenerUpdate(ctx, updateChs[testLDSName], ListenerUpdate{RouteConfigName: testEDSName}, nil); err != nil { + if err := verifyListenerUpdate(ctx, updateChs[testLDSName], xdsresource.ListenerUpdate{RouteConfigName: testEDSName}, nil); err != nil { t.Fatal(err) } // The failed watcher should receive an error. 
- if err := verifyListenerUpdate(ctx, updateChs[badResourceName], ListenerUpdate{}, wantError2); err != nil { + if err := verifyListenerUpdate(ctx, updateChs[badResourceName], xdsresource.ListenerUpdate{}, wantError2); err != nil { t.Fatal(err) } } @@ -489,8 +490,8 @@ func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) { apiClient := c.(*testAPIClient) ldsUpdateCh := testutils.NewChannel() - client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -515,8 +516,11 @@ func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) { }, HttpFilters: []*v3httppb.HttpFilter{ { - Name: "customFilter1", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: customFilterConfig}, + Name: "customFilter1", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: &anypb.Any{ + TypeUrl: "custom.filter", + Value: []byte{1, 2, 3}, + }}, }, }, }), @@ -531,8 +535,11 @@ func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) { }, HttpFilters: []*v3httppb.HttpFilter{ { - Name: "customFilter2", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: customFilterConfig}, + Name: "customFilter2", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: &anypb.Any{ + TypeUrl: "custom.filter", + Value: []byte{1, 2, 3}, + }}, }, }, }), @@ -540,42 +547,42 @@ func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) { }) tests := []struct { - update ListenerUpdate + update xdsresource.ListenerUpdate wantCallback bool }{ { // First update. Callback should be invoked. - update: ListenerUpdate{Raw: basicListener}, + update: xdsresource.ListenerUpdate{Raw: basicListener}, wantCallback: true, }, { // Same update as previous. Callback should be skipped. - update: ListenerUpdate{Raw: basicListener}, + update: xdsresource.ListenerUpdate{Raw: basicListener}, wantCallback: false, }, { // New update. Callback should be invoked. - update: ListenerUpdate{Raw: listenerWithFilter1}, + update: xdsresource.ListenerUpdate{Raw: listenerWithFilter1}, wantCallback: true, }, { // Same update as previous. Callback should be skipped. - update: ListenerUpdate{Raw: listenerWithFilter1}, + update: xdsresource.ListenerUpdate{Raw: listenerWithFilter1}, wantCallback: false, }, { // New update. Callback should be invoked. - update: ListenerUpdate{Raw: listenerWithFilter2}, + update: xdsresource.ListenerUpdate{Raw: listenerWithFilter2}, wantCallback: true, }, { // Same update as previous. Callback should be skipped. 
- update: ListenerUpdate{Raw: listenerWithFilter2}, + update: xdsresource.ListenerUpdate{Raw: listenerWithFilter2}, wantCallback: false, }, } for _, test := range tests { - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: test.update}}, UpdateMetadata{}) + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: test.update}}, xdsresource.UpdateMetadata{}) if test.wantCallback { if err := verifyListenerUpdate(ctx, ldsUpdateCh, test.update, nil); err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/watchers_route_test.go b/xds/internal/xdsclient/watchers_route_test.go index 70c8dd829e9e..9e4e7e43611c 100644 --- a/xds/internal/xdsclient/watchers_route_test.go +++ b/xds/internal/xdsclient/watchers_route_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc/internal/testutils" @@ -52,22 +53,22 @@ func (s) TestRDSWatch(t *testing.T) { apiClient := c.(*testAPIClient) rdsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ + wantUpdate := xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, }, }, } - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -77,17 +78,17 @@ func (s) TestRDSWatch(t *testing.T) { // new update is not considered equal to the old one. newUpdate := wantUpdate newUpdate.Raw = &anypb.Any{} - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{ + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{ testRDSName: {Update: newUpdate}, "randomName": {}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, newUpdate, nil); err != nil { t.Fatal(err) } // Cancel watch, and send update again. 
cancelWatch() - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := rdsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { @@ -123,8 +124,8 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { for i := 0; i < count; i++ { rdsUpdateCh := testutils.NewChannel() rdsUpdateChs = append(rdsUpdateChs, rdsUpdateCh) - cancelLastWatch = client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + cancelLastWatch = client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -136,15 +137,15 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { } } - wantUpdate := RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ + wantUpdate := xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, }, }, } - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) @@ -155,7 +156,7 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { // be notified because one has been cancelled, and the other is receiving // the same update. cancelLastWatch() - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { func() { sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) @@ -171,7 +172,7 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { // considered equal to the old one. 
newUpdate := wantUpdate newUpdate.Raw = &anypb.Any{} - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: newUpdate}}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[0], newUpdate, nil); err != nil { t.Fatal(err) } @@ -203,8 +204,8 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { for i := 0; i < count; i++ { rdsUpdateCh := testutils.NewChannel() rdsUpdateChs = append(rdsUpdateChs, rdsUpdateCh) - client.WatchRouteConfig(testRDSName+"1", func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + client.WatchRouteConfig(testRDSName+"1", func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -218,33 +219,33 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { // Third watch for a different name. rdsUpdateCh2 := testutils.NewChannel() - client.WatchRouteConfig(testRDSName+"2", func(update RouteConfigUpdate, err error) { - rdsUpdateCh2.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + client.WatchRouteConfig(testRDSName+"2", func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ + wantUpdate1 := xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName + "1": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName + "1": {Weight: 1}}}}, }, }, } - wantUpdate2 := RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ + wantUpdate2 := xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName + "2": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName + "2": {Weight: 1}}}}, }, }, } - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{ + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{ testRDSName + "1": {Update: wantUpdate1}, testRDSName + "2": {Update: wantUpdate2}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate1, nil); err != nil { @@ -277,30 +278,30 @@ func (s) TestRDSWatchAfterCache(t *testing.T) { apiClient := c.(*testAPIClient) rdsUpdateCh := testutils.NewChannel() - client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, 
got error %v", err) } - wantUpdate := RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ + wantUpdate := xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, }, }, } - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } // Another watch for the resource in cache. rdsUpdateCh2 := testutils.NewChannel() - client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh2.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() @@ -309,7 +310,7 @@ func (s) TestRDSWatchAfterCache(t *testing.T) { } // New watch should receives the update. - if u, err := rdsUpdateCh2.Receive(ctx); err != nil || !cmp.Equal(u, RouteConfigUpdateErrTuple{wantUpdate, nil}, cmp.AllowUnexported(RouteConfigUpdateErrTuple{})) { + if u, err := rdsUpdateCh2.Receive(ctx); err != nil || !cmp.Equal(u, xdsresource.RouteConfigUpdateErrTuple{Update: wantUpdate}, cmp.AllowUnexported(xdsresource.RouteConfigUpdateErrTuple{})) { t.Errorf("unexpected RouteConfigUpdate: %v, error receiving from channel: %v", u, err) } @@ -342,8 +343,8 @@ func (s) TestRouteWatchNACKError(t *testing.T) { apiClient := c.(*testAPIClient) rdsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchRouteConfig(testCDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchRouteConfig(testCDSName, func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) defer cancelWatch() if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { @@ -351,8 +352,8 @@ func (s) TestRouteWatchNACKError(t *testing.T) { } wantError := fmt.Errorf("testing error") - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testCDSName: {Err: wantError}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) - if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, RouteConfigUpdate{}, wantError); err != nil { + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testCDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) + if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, xdsresource.RouteConfigUpdate{}, wantError); err != nil { t.Fatal(err) } } @@ -384,8 +385,8 @@ func (s) TestRouteWatchPartialValid(t *testing.T) { for _, name := range []string{testRDSName, badResourceName} { rdsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchRouteConfig(name, func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + cancelWatch := 
client.WatchRouteConfig(name, func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) defer func() { cancelWatch() @@ -401,24 +402,24 @@ func (s) TestRouteWatchPartialValid(t *testing.T) { wantError := fmt.Errorf("testing error") wantError2 := fmt.Errorf("individual error") - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{ - testRDSName: {Update: RouteConfigUpdate{VirtualHosts: []*VirtualHost{{ + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{ + testRDSName: {Update: xdsresource.RouteConfigUpdate{VirtualHosts: []*xdsresource.VirtualHost{{ Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, }}}}, badResourceName: {Err: wantError2}, - }, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) // The valid resource should be sent to the watcher. - if err := verifyRouteConfigUpdate(ctx, updateChs[testRDSName], RouteConfigUpdate{VirtualHosts: []*VirtualHost{{ + if err := verifyRouteConfigUpdate(ctx, updateChs[testRDSName], xdsresource.RouteConfigUpdate{VirtualHosts: []*xdsresource.VirtualHost{{ Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, }}}, nil); err != nil { t.Fatal(err) } // The failed watcher should receive an error. - if err := verifyRouteConfigUpdate(ctx, updateChs[badResourceName], RouteConfigUpdate{}, wantError2); err != nil { + if err := verifyRouteConfigUpdate(ctx, updateChs[badResourceName], xdsresource.RouteConfigUpdate{}, wantError2); err != nil { t.Fatal(err) } } diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go deleted file mode 100644 index 4b4f0680de67..000000000000 --- a/xds/internal/xdsclient/xds.go +++ /dev/null @@ -1,1345 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- *
- */
-
-package xdsclient
-
-import (
-	"errors"
-	"fmt"
-	"net"
-	"regexp"
-	"strconv"
-	"strings"
-	"time"
-
-	v1udpatypepb "github.com/cncf/udpa/go/udpa/type/v1"
-	v3cncftypepb "github.com/cncf/xds/go/xds/type/v3"
-	v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
-	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
-	v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
-	v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
-	v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
-	v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3"
-	v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
-	v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
-	v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3"
-	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
-	"google.golang.org/protobuf/types/known/anypb"
-
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/internal/pretty"
-	"google.golang.org/grpc/internal/xds/matcher"
-
-	"google.golang.org/grpc/internal/grpclog"
-	"google.golang.org/grpc/internal/xds/env"
-	"google.golang.org/grpc/xds/internal"
-	"google.golang.org/grpc/xds/internal/httpfilter"
-	"google.golang.org/grpc/xds/internal/version"
-)
-
-// TransportSocket proto message has a `name` field which is expected to be set
-// to this value by the management server.
-const transportSocketName = "envoy.transport_sockets.tls"
-
-// UnmarshalOptions wraps the input parameters for `UnmarshalXxx` functions.
-type UnmarshalOptions struct {
-	// Version is the version of the received response.
-	Version string
-	// Resources are the xDS resources in the received response.
-	Resources []*anypb.Any
-	// Logger is the prefix logger to be used during unmarshaling.
-	Logger *grpclog.PrefixLogger
-	// UpdateValidator is a post unmarshal validation check provided by the
-	// upper layer.
-	UpdateValidator UpdateValidatorFunc
-}
-
-// UnmarshalListener processes resources received in an LDS response, validates
-// them, and transforms them into a native struct which contains only fields we
-// are interested in.
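For orientation before the function itself, a rough sketch of how a transport client drives this entry point (resp and logger are hypothetical stand-ins for a received DiscoveryResponse and a prefix logger):

	updates, md, err := UnmarshalListener(&UnmarshalOptions{
		Version:   resp.GetVersionInfo(),
		Resources: resp.GetResources(),
		Logger:    logger,
	})
	if err != nil {
		// The response as a whole is NACKed; md carries the error details.
	}
	for name, tuple := range updates {
		if tuple.Err != nil {
			continue // resource `name` was present but failed validation
		}
		_ = tuple.Update // a validated ListenerUpdate
		_ = name
	}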
-func UnmarshalListener(opts *UnmarshalOptions) (map[string]ListenerUpdateErrTuple, UpdateMetadata, error) {
-	update := make(map[string]ListenerUpdateErrTuple)
-	md, err := processAllResources(opts, update)
-	return update, md, err
-}
-
-func unmarshalListenerResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ListenerUpdate, error) {
-	if !IsListenerResource(r.GetTypeUrl()) {
-		return "", ListenerUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl())
-	}
-	// TODO: Pass version.TransportAPI instead of relying upon the type URL
-	v2 := r.GetTypeUrl() == version.V2ListenerURL
-	lis := &v3listenerpb.Listener{}
-	if err := proto.Unmarshal(r.GetValue(), lis); err != nil {
-		return "", ListenerUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err)
-	}
-	logger.Infof("Resource with name: %v, type: %T, contains: %v", lis.GetName(), lis, pretty.ToJSON(lis))
-
-	lu, err := processListener(lis, logger, v2)
-	if err != nil {
-		return lis.GetName(), ListenerUpdate{}, err
-	}
-	if f != nil {
-		if err := f(*lu); err != nil {
-			return lis.GetName(), ListenerUpdate{}, err
-		}
-	}
-	lu.Raw = r
-	return lis.GetName(), *lu, nil
-}
-
-func processListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger, v2 bool) (*ListenerUpdate, error) {
-	if lis.GetApiListener() != nil {
-		return processClientSideListener(lis, logger, v2)
-	}
-	return processServerSideListener(lis)
-}
-
-// processClientSideListener checks if the provided Listener proto meets
-// the expected criteria. If so, it returns a non-empty routeConfigName.
-func processClientSideListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger, v2 bool) (*ListenerUpdate, error) {
-	update := &ListenerUpdate{}
-
-	apiLisAny := lis.GetApiListener().GetApiListener()
-	if !IsHTTPConnManagerResource(apiLisAny.GetTypeUrl()) {
-		return nil, fmt.Errorf("unexpected resource type: %q", apiLisAny.GetTypeUrl())
-	}
-	apiLis := &v3httppb.HttpConnectionManager{}
-	if err := proto.Unmarshal(apiLisAny.GetValue(), apiLis); err != nil {
-		return nil, fmt.Errorf("failed to unmarshal api_listener: %v", err)
-	}
-	// "HttpConnectionManager.xff_num_trusted_hops must be unset or zero and
-	// HttpConnectionManager.original_ip_detection_extensions must be empty. If
-	// either field has an incorrect value, the Listener must be NACKed."
- A41 - if apiLis.XffNumTrustedHops != 0 { - return nil, fmt.Errorf("xff_num_trusted_hops must be unset or zero %+v", apiLis) - } - if len(apiLis.OriginalIpDetectionExtensions) != 0 { - return nil, fmt.Errorf("original_ip_detection_extensions must be empty %+v", apiLis) - } - - switch apiLis.RouteSpecifier.(type) { - case *v3httppb.HttpConnectionManager_Rds: - if apiLis.GetRds().GetConfigSource().GetAds() == nil { - return nil, fmt.Errorf("ConfigSource is not ADS: %+v", lis) - } - name := apiLis.GetRds().GetRouteConfigName() - if name == "" { - return nil, fmt.Errorf("empty route_config_name: %+v", lis) - } - update.RouteConfigName = name - case *v3httppb.HttpConnectionManager_RouteConfig: - routeU, err := generateRDSUpdateFromRouteConfiguration(apiLis.GetRouteConfig(), logger, v2) - if err != nil { - return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) - } - update.InlineRouteConfig = &routeU - case nil: - return nil, fmt.Errorf("no RouteSpecifier: %+v", apiLis) - default: - return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", apiLis.RouteSpecifier) - } - - if v2 { - return update, nil - } - - // The following checks and fields only apply to xDS protocol versions v3+. - - update.MaxStreamDuration = apiLis.GetCommonHttpProtocolOptions().GetMaxStreamDuration().AsDuration() - - var err error - if update.HTTPFilters, err = processHTTPFilters(apiLis.GetHttpFilters(), false); err != nil { - return nil, err - } - - return update, nil -} - -func unwrapHTTPFilterConfig(config *anypb.Any) (proto.Message, string, error) { - switch { - case ptypes.Is(config, &v3cncftypepb.TypedStruct{}): - // The real type name is inside the new TypedStruct message. - s := new(v3cncftypepb.TypedStruct) - if err := ptypes.UnmarshalAny(config, s); err != nil { - return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) - } - return s, s.GetTypeUrl(), nil - case ptypes.Is(config, &v1udpatypepb.TypedStruct{}): - // The real type name is inside the old TypedStruct message. 
- s := new(v1udpatypepb.TypedStruct) - if err := ptypes.UnmarshalAny(config, s); err != nil { - return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) - } - return s, s.GetTypeUrl(), nil - default: - return config, config.GetTypeUrl(), nil - } -} - -func validateHTTPFilterConfig(cfg *anypb.Any, lds, optional bool) (httpfilter.Filter, httpfilter.FilterConfig, error) { - config, typeURL, err := unwrapHTTPFilterConfig(cfg) - if err != nil { - return nil, nil, err - } - filterBuilder := httpfilter.Get(typeURL) - if filterBuilder == nil { - if optional { - return nil, nil, nil - } - return nil, nil, fmt.Errorf("no filter implementation found for %q", typeURL) - } - parseFunc := filterBuilder.ParseFilterConfig - if !lds { - parseFunc = filterBuilder.ParseFilterConfigOverride - } - filterConfig, err := parseFunc(config) - if err != nil { - return nil, nil, fmt.Errorf("error parsing config for filter %q: %v", typeURL, err) - } - return filterBuilder, filterConfig, nil -} - -func processHTTPFilterOverrides(cfgs map[string]*anypb.Any) (map[string]httpfilter.FilterConfig, error) { - if len(cfgs) == 0 { - return nil, nil - } - m := make(map[string]httpfilter.FilterConfig) - for name, cfg := range cfgs { - optional := false - s := new(v3routepb.FilterConfig) - if ptypes.Is(cfg, s) { - if err := ptypes.UnmarshalAny(cfg, s); err != nil { - return nil, fmt.Errorf("filter override %q: error unmarshalling FilterConfig: %v", name, err) - } - cfg = s.GetConfig() - optional = s.GetIsOptional() - } - - httpFilter, config, err := validateHTTPFilterConfig(cfg, false, optional) - if err != nil { - return nil, fmt.Errorf("filter override %q: %v", name, err) - } - if httpFilter == nil { - // Optional configs are ignored. - continue - } - m[name] = config - } - return m, nil -} - -func processHTTPFilters(filters []*v3httppb.HttpFilter, server bool) ([]HTTPFilter, error) { - ret := make([]HTTPFilter, 0, len(filters)) - seenNames := make(map[string]bool, len(filters)) - for _, filter := range filters { - name := filter.GetName() - if name == "" { - return nil, errors.New("filter missing name field") - } - if seenNames[name] { - return nil, fmt.Errorf("duplicate filter name %q", name) - } - seenNames[name] = true - - httpFilter, config, err := validateHTTPFilterConfig(filter.GetTypedConfig(), true, filter.GetIsOptional()) - if err != nil { - return nil, err - } - if httpFilter == nil { - // Optional configs are ignored. - continue - } - if server { - if _, ok := httpFilter.(httpfilter.ServerInterceptorBuilder); !ok { - if filter.GetIsOptional() { - continue - } - return nil, fmt.Errorf("HTTP filter %q not supported server-side", name) - } - } else if _, ok := httpFilter.(httpfilter.ClientInterceptorBuilder); !ok { - if filter.GetIsOptional() { - continue - } - return nil, fmt.Errorf("HTTP filter %q not supported client-side", name) - } - - // Save name/config - ret = append(ret, HTTPFilter{Name: name, Filter: httpFilter, Config: config}) - } - // "Validation will fail if a terminal filter is not the last filter in the - // chain or if a non-terminal filter is the last filter in the chain." 
- A39
-	if len(ret) == 0 {
-		return nil, fmt.Errorf("http filters list is empty")
-	}
-	var i int
-	for ; i < len(ret)-1; i++ {
-		if ret[i].Filter.IsTerminal() {
-			return nil, fmt.Errorf("http filter %q is a terminal filter but it is not last in the filter chain", ret[i].Name)
-		}
-	}
-	if !ret[i].Filter.IsTerminal() {
-		return nil, fmt.Errorf("http filter %q is not a terminal filter", ret[len(ret)-1].Name)
-	}
-	return ret, nil
-}
-
-func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) {
-	if n := len(lis.ListenerFilters); n != 0 {
-		return nil, fmt.Errorf("unsupported field 'listener_filters' contains %d entries", n)
-	}
-	if useOrigDst := lis.GetUseOriginalDst(); useOrigDst != nil && useOrigDst.GetValue() {
-		return nil, errors.New("unsupported field 'use_original_dst' is present and set to true")
-	}
-	addr := lis.GetAddress()
-	if addr == nil {
-		return nil, fmt.Errorf("no address field in LDS response: %+v", lis)
-	}
-	sockAddr := addr.GetSocketAddress()
-	if sockAddr == nil {
-		return nil, fmt.Errorf("no socket_address field in LDS response: %+v", lis)
-	}
-	lu := &ListenerUpdate{
-		InboundListenerCfg: &InboundListenerConfig{
-			Address: sockAddr.GetAddress(),
-			Port:    strconv.Itoa(int(sockAddr.GetPortValue())),
-		},
-	}
-
-	fcMgr, err := NewFilterChainManager(lis)
-	if err != nil {
-		return nil, err
-	}
-	lu.InboundListenerCfg.FilterChains = fcMgr
-	return lu, nil
-}
-
-// UnmarshalRouteConfig processes resources received in an RDS response,
-// validates them, and transforms them into a native struct which contains only
-// fields we are interested in. The provided hostname determines the route
-// configuration resources of interest.
-func UnmarshalRouteConfig(opts *UnmarshalOptions) (map[string]RouteConfigUpdateErrTuple, UpdateMetadata, error) {
-	update := make(map[string]RouteConfigUpdateErrTuple)
-	md, err := processAllResources(opts, update)
-	return update, md, err
-}
-
-func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, RouteConfigUpdate, error) {
-	if !IsRouteConfigResource(r.GetTypeUrl()) {
-		return "", RouteConfigUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl())
-	}
-	rc := &v3routepb.RouteConfiguration{}
-	if err := proto.Unmarshal(r.GetValue(), rc); err != nil {
-		return "", RouteConfigUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err)
-	}
-	logger.Infof("Resource with name: %v, type: %T, contains: %v.", rc.GetName(), rc, pretty.ToJSON(rc))
-
-	// TODO: Pass version.TransportAPI instead of relying upon the type URL
-	v2 := r.GetTypeUrl() == version.V2RouteConfigURL
-	u, err := generateRDSUpdateFromRouteConfiguration(rc, logger, v2)
-	if err != nil {
-		return rc.GetName(), RouteConfigUpdate{}, err
-	}
-	u.Raw = r
-	return rc.GetName(), u, nil
-}
-
-// generateRDSUpdateFromRouteConfiguration checks if the provided
-// RouteConfiguration meets the expected criteria. If so, it returns a
-// RouteConfigUpdate with nil error.
-//
-// A RouteConfiguration resource is considered valid only if it contains a
-// VirtualHost whose domain field matches the server name from the URI passed
-// to the gRPC channel, and it contains a clusterName or a weighted cluster.
-//
-// The RouteConfiguration includes a list of virtualHosts, which may have zero
-// or more elements. We are interested in the element whose domains field
-// matches the server name specified in the "xds:" URI. The only field in the
-// VirtualHost proto that we are interested in is the list of routes.
We -// only look at the last route in the list (the default route), whose match -// field must be empty and whose route field must be set. Inside that route -// message, the cluster field will contain the clusterName or weighted clusters -// we are looking for. -func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, logger *grpclog.PrefixLogger, v2 bool) (RouteConfigUpdate, error) { - vhs := make([]*VirtualHost, 0, len(rc.GetVirtualHosts())) - for _, vh := range rc.GetVirtualHosts() { - routes, err := routesProtoToSlice(vh.Routes, logger, v2) - if err != nil { - return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) - } - rc, err := generateRetryConfig(vh.GetRetryPolicy()) - if err != nil { - return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) - } - vhOut := &VirtualHost{ - Domains: vh.GetDomains(), - Routes: routes, - RetryConfig: rc, - } - if !v2 { - cfgs, err := processHTTPFilterOverrides(vh.GetTypedPerFilterConfig()) - if err != nil { - return RouteConfigUpdate{}, fmt.Errorf("virtual host %+v: %v", vh, err) - } - vhOut.HTTPFilterConfigOverride = cfgs - } - vhs = append(vhs, vhOut) - } - return RouteConfigUpdate{VirtualHosts: vhs}, nil -} - -func generateRetryConfig(rp *v3routepb.RetryPolicy) (*RetryConfig, error) { - if !env.RetrySupport || rp == nil { - return nil, nil - } - - cfg := &RetryConfig{RetryOn: make(map[codes.Code]bool)} - for _, s := range strings.Split(rp.GetRetryOn(), ",") { - switch strings.TrimSpace(strings.ToLower(s)) { - case "cancelled": - cfg.RetryOn[codes.Canceled] = true - case "deadline-exceeded": - cfg.RetryOn[codes.DeadlineExceeded] = true - case "internal": - cfg.RetryOn[codes.Internal] = true - case "resource-exhausted": - cfg.RetryOn[codes.ResourceExhausted] = true - case "unavailable": - cfg.RetryOn[codes.Unavailable] = true - } - } - - if rp.NumRetries == nil { - cfg.NumRetries = 1 - } else { - cfg.NumRetries = rp.GetNumRetries().Value - if cfg.NumRetries < 1 { - return nil, fmt.Errorf("retry_policy.num_retries = %v; must be >= 1", cfg.NumRetries) - } - } - - backoff := rp.GetRetryBackOff() - if backoff == nil { - cfg.RetryBackoff.BaseInterval = 25 * time.Millisecond - } else { - cfg.RetryBackoff.BaseInterval = backoff.GetBaseInterval().AsDuration() - if cfg.RetryBackoff.BaseInterval <= 0 { - return nil, fmt.Errorf("retry_policy.base_interval = %v; must be > 0", cfg.RetryBackoff.BaseInterval) - } - } - if max := backoff.GetMaxInterval(); max == nil { - cfg.RetryBackoff.MaxInterval = 10 * cfg.RetryBackoff.BaseInterval - } else { - cfg.RetryBackoff.MaxInterval = max.AsDuration() - if cfg.RetryBackoff.MaxInterval <= 0 { - return nil, fmt.Errorf("retry_policy.max_interval = %v; must be > 0", cfg.RetryBackoff.MaxInterval) - } - } - - if len(cfg.RetryOn) == 0 { - return &RetryConfig{}, nil - } - return cfg, nil -} - -func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, v2 bool) ([]*Route, error) { - var routesRet []*Route - for _, r := range routes { - match := r.GetMatch() - if match == nil { - return nil, fmt.Errorf("route %+v doesn't have a match", r) - } - - if len(match.GetQueryParameters()) != 0 { - // Ignore route with query parameters. 
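As an aside on generateRetryConfig above, its defaulting rules work out as follows for a RetryPolicy that sets only retry_on (a worked sketch using this file's struct shapes):

	// retry_on: "cancelled,unavailable"; num_retries and retry_back_off unset.
	cfg := &RetryConfig{
		RetryOn:    map[codes.Code]bool{codes.Canceled: true, codes.Unavailable: true},
		NumRetries: 1, // num_retries defaults to 1 when unset
	}
	cfg.RetryBackoff.BaseInterval = 25 * time.Millisecond // default base interval
	cfg.RetryBackoff.MaxInterval = 250 * time.Millisecond // default max is 10x the base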
- logger.Warningf("route %+v has query parameter matchers, the route will be ignored", r) - continue - } - - pathSp := match.GetPathSpecifier() - if pathSp == nil { - return nil, fmt.Errorf("route %+v doesn't have a path specifier", r) - } - - var route Route - switch pt := pathSp.(type) { - case *v3routepb.RouteMatch_Prefix: - route.Prefix = &pt.Prefix - case *v3routepb.RouteMatch_Path: - route.Path = &pt.Path - case *v3routepb.RouteMatch_SafeRegex: - regex := pt.SafeRegex.GetRegex() - re, err := regexp.Compile(regex) - if err != nil { - return nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) - } - route.Regex = re - default: - return nil, fmt.Errorf("route %+v has an unrecognized path specifier: %+v", r, pt) - } - - if caseSensitive := match.GetCaseSensitive(); caseSensitive != nil { - route.CaseInsensitive = !caseSensitive.Value - } - - for _, h := range match.GetHeaders() { - var header HeaderMatcher - switch ht := h.GetHeaderMatchSpecifier().(type) { - case *v3routepb.HeaderMatcher_ExactMatch: - header.ExactMatch = &ht.ExactMatch - case *v3routepb.HeaderMatcher_SafeRegexMatch: - regex := ht.SafeRegexMatch.GetRegex() - re, err := regexp.Compile(regex) - if err != nil { - return nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) - } - header.RegexMatch = re - case *v3routepb.HeaderMatcher_RangeMatch: - header.RangeMatch = &Int64Range{ - Start: ht.RangeMatch.Start, - End: ht.RangeMatch.End, - } - case *v3routepb.HeaderMatcher_PresentMatch: - header.PresentMatch = &ht.PresentMatch - case *v3routepb.HeaderMatcher_PrefixMatch: - header.PrefixMatch = &ht.PrefixMatch - case *v3routepb.HeaderMatcher_SuffixMatch: - header.SuffixMatch = &ht.SuffixMatch - default: - return nil, fmt.Errorf("route %+v has an unrecognized header matcher: %+v", r, ht) - } - header.Name = h.GetName() - invert := h.GetInvertMatch() - header.InvertMatch = &invert - route.Headers = append(route.Headers, &header) - } - - if fr := match.GetRuntimeFraction(); fr != nil { - d := fr.GetDefaultValue() - n := d.GetNumerator() - switch d.GetDenominator() { - case v3typepb.FractionalPercent_HUNDRED: - n *= 10000 - case v3typepb.FractionalPercent_TEN_THOUSAND: - n *= 100 - case v3typepb.FractionalPercent_MILLION: - } - route.Fraction = &n - } - - switch r.GetAction().(type) { - case *v3routepb.Route_Route: - route.WeightedClusters = make(map[string]WeightedCluster) - action := r.GetRoute() - - // Hash Policies are only applicable for a Ring Hash LB. 
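A worked example of the runtime_fraction handling just above: the switch rescales every FractionalPercent numerator to a parts-per-million basis, so a default_value of {numerator: 25, denominator: HUNDRED} is stored as n = 25 * 10000 = 250000, i.e. 250000 out of one MILLION (25%).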
-			if env.RingHashSupport {
-				hp, err := hashPoliciesProtoToSlice(action.HashPolicy, logger)
-				if err != nil {
-					return nil, err
-				}
-				route.HashPolicies = hp
-			}
-
-			switch a := action.GetClusterSpecifier().(type) {
-			case *v3routepb.RouteAction_Cluster:
-				route.WeightedClusters[a.Cluster] = WeightedCluster{Weight: 1}
-			case *v3routepb.RouteAction_WeightedClusters:
-				wcs := a.WeightedClusters
-				var totalWeight uint32
-				for _, c := range wcs.Clusters {
-					w := c.GetWeight().GetValue()
-					if w == 0 {
-						continue
-					}
-					wc := WeightedCluster{Weight: w}
-					if !v2 {
-						cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig())
-						if err != nil {
-							return nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err)
-						}
-						wc.HTTPFilterConfigOverride = cfgs
-					}
-					route.WeightedClusters[c.GetName()] = wc
-					totalWeight += w
-				}
-				// envoy xds doc
-				// default TotalWeight https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto.html#envoy-v3-api-field-config-route-v3-weightedcluster-total-weight
-				wantTotalWeight := uint32(100)
-				if tw := wcs.GetTotalWeight(); tw != nil {
-					wantTotalWeight = tw.GetValue()
-				}
-				if totalWeight != wantTotalWeight {
-					return nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total weight, got: %v, expected total weight from response: %v", r, a, totalWeight, wantTotalWeight)
-				}
-				if totalWeight == 0 {
-					return nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a)
-				}
-			case *v3routepb.RouteAction_ClusterHeader:
-				continue
-			default:
-				return nil, fmt.Errorf("route %+v has an unknown ClusterSpecifier: %+v", r, a)
-			}
-
-			msd := action.GetMaxStreamDuration()
-			// Prefer grpc_timeout_header_max, if set.
-			dur := msd.GetGrpcTimeoutHeaderMax()
-			if dur == nil {
-				dur = msd.GetMaxStreamDuration()
-			}
-			if dur != nil {
-				d := dur.AsDuration()
-				route.MaxStreamDuration = &d
-			}
-
-			var err error
-			route.RetryConfig, err = generateRetryConfig(action.GetRetryPolicy())
-			if err != nil {
-				return nil, fmt.Errorf("route %+v, action %+v: %v", r, action, err)
-			}
-
-			route.RouteAction = RouteActionRoute
-
-		case *v3routepb.Route_NonForwardingAction:
-			// Expected to be used on server side.
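A worked example of the weighted-cluster check above, assuming the default total_weight of 100: weights {A: 60, B: 40} are accepted, while {A: 60, B: 30} are rejected because 90 != 100. Zero-weight entries are skipped when summing:

	route.WeightedClusters = map[string]WeightedCluster{
		"A": {Weight: 60},
		"B": {Weight: 40}, // 60 + 40 == 100, the default wantTotalWeight
	}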
-			route.RouteAction = RouteActionNonForwardingAction
-		default:
-			route.RouteAction = RouteActionUnsupported
-		}
-
-		if !v2 {
-			cfgs, err := processHTTPFilterOverrides(r.GetTypedPerFilterConfig())
-			if err != nil {
-				return nil, fmt.Errorf("route %+v: %v", r, err)
-			}
-			route.HTTPFilterConfigOverride = cfgs
-		}
-		routesRet = append(routesRet, &route)
-	}
-	return routesRet, nil
-}
-
-func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy, logger *grpclog.PrefixLogger) ([]*HashPolicy, error) {
-	var hashPoliciesRet []*HashPolicy
-	for _, p := range policies {
-		policy := HashPolicy{Terminal: p.Terminal}
-		switch p.GetPolicySpecifier().(type) {
-		case *v3routepb.RouteAction_HashPolicy_Header_:
-			policy.HashPolicyType = HashPolicyTypeHeader
-			policy.HeaderName = p.GetHeader().GetHeaderName()
-			if rr := p.GetHeader().GetRegexRewrite(); rr != nil {
-				regex := rr.GetPattern().GetRegex()
-				re, err := regexp.Compile(regex)
-				if err != nil {
-					return nil, fmt.Errorf("hash policy %+v contains an invalid regex %q", p, regex)
-				}
-				policy.Regex = re
-				policy.RegexSubstitution = rr.GetSubstitution()
-			}
-		case *v3routepb.RouteAction_HashPolicy_FilterState_:
-			if p.GetFilterState().GetKey() != "io.grpc.channel_id" {
-				logger.Infof("hash policy %+v contains an invalid key for filter state policy %q", p, p.GetFilterState().GetKey())
-				continue
-			}
-			policy.HashPolicyType = HashPolicyTypeChannelID
-		default:
-			logger.Infof("hash policy %T is an unsupported hash policy", p.GetPolicySpecifier())
-			continue
-		}
-
-		hashPoliciesRet = append(hashPoliciesRet, &policy)
-	}
-	return hashPoliciesRet, nil
-}
-
-// UnmarshalCluster processes resources received in a CDS response, validates
-// them, and transforms them into a native struct which contains only fields we
-// are interested in.
-func UnmarshalCluster(opts *UnmarshalOptions) (map[string]ClusterUpdateErrTuple, UpdateMetadata, error) {
-	update := make(map[string]ClusterUpdateErrTuple)
-	md, err := processAllResources(opts, update)
-	return update, md, err
-}
-
-func unmarshalClusterResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ClusterUpdate, error) {
-	if !IsClusterResource(r.GetTypeUrl()) {
-		return "", ClusterUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl())
-	}
-
-	cluster := &v3clusterpb.Cluster{}
-	if err := proto.Unmarshal(r.GetValue(), cluster); err != nil {
-		return "", ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err)
-	}
-	logger.Infof("Resource with name: %v, type: %T, contains: %v", cluster.GetName(), cluster, pretty.ToJSON(cluster))
-	cu, err := validateClusterAndConstructClusterUpdate(cluster)
-	if err != nil {
-		return cluster.GetName(), ClusterUpdate{}, err
-	}
-	cu.Raw = r
-	if f != nil {
-		if err := f(cu); err != nil {
-			return "", ClusterUpdate{}, err
-		}
-	}
-
-	return cluster.GetName(), cu, nil
-}
-
-const (
-	defaultRingHashMinSize = 1024
-	defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M
-	ringHashSizeUpperBound = 8 * 1024 * 1024 // 8M
-)
-
-func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) {
-	var lbPolicy *ClusterLBPolicyRingHash
-	switch cluster.GetLbPolicy() {
-	case v3clusterpb.Cluster_ROUND_ROBIN:
-		lbPolicy = nil // The default is round_robin, and there's no config to set.
-	case v3clusterpb.Cluster_RING_HASH:
-		if !env.RingHashSupport {
-			return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster)
-		}
-		rhc := cluster.GetRingHashLbConfig()
-		if rhc.GetHashFunction() != v3clusterpb.Cluster_RingHashLbConfig_XX_HASH {
-			return ClusterUpdate{}, fmt.Errorf("unsupported ring_hash hash function %v in response: %+v", rhc.GetHashFunction(), cluster)
-		}
-		// Minimum defaults to 1024 entries, and is limited to 8M entries.
-		// Maximum defaults to 8M entries, and is limited to 8M entries.
-		var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize
-		if min := rhc.GetMinimumRingSize(); min != nil {
-			if min.GetValue() > ringHashSizeUpperBound {
-				return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash minimum ring size %v in response: %+v", min.GetValue(), cluster)
-			}
-			minSize = min.GetValue()
-		}
-		if max := rhc.GetMaximumRingSize(); max != nil {
-			if max.GetValue() > ringHashSizeUpperBound {
-				return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash maximum ring size %v in response: %+v", max.GetValue(), cluster)
-			}
-			maxSize = max.GetValue()
-		}
-		if minSize > maxSize {
-			return ClusterUpdate{}, fmt.Errorf("ring_hash config min size %v is greater than max %v", minSize, maxSize)
-		}
-		lbPolicy = &ClusterLBPolicyRingHash{MinimumRingSize: minSize, MaximumRingSize: maxSize}
-	default:
-		return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster)
-	}
-
-	// Process security configuration received from the control plane iff the
-	// corresponding environment variable is set.
-	var sc *SecurityConfig
-	if env.ClientSideSecuritySupport {
-		var err error
-		if sc, err = securityConfigFromCluster(cluster); err != nil {
-			return ClusterUpdate{}, err
-		}
-	}
-
-	ret := ClusterUpdate{
-		ClusterName: cluster.GetName(),
-		EnableLRS:   cluster.GetLrsServer().GetSelf() != nil,
-		SecurityCfg: sc,
-		MaxRequests: circuitBreakersFromCluster(cluster),
-		LBPolicy:    lbPolicy,
-	}
-
-	// Validate and set cluster type from the response.
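Concretely, a RING_HASH cluster that leaves both ring sizes unset comes out of the validation above with the defaults, and any configured size above the 8M upper bound is rejected:

	lbPolicy = &ClusterLBPolicyRingHash{
		MinimumRingSize: 1024,            // defaultRingHashMinSize
		MaximumRingSize: 8 * 1024 * 1024, // defaultRingHashMaxSize
	}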
-	switch {
-	case cluster.GetType() == v3clusterpb.Cluster_EDS:
-		if cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil {
-			return ClusterUpdate{}, fmt.Errorf("unexpected edsConfig in response: %+v", cluster)
-		}
-		ret.ClusterType = ClusterTypeEDS
-		ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName()
-		return ret, nil
-	case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS:
-		if !env.AggregateAndDNSSupportEnv {
-			return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster)
-		}
-		ret.ClusterType = ClusterTypeLogicalDNS
-		dnsHN, err := dnsHostNameFromCluster(cluster)
-		if err != nil {
-			return ClusterUpdate{}, err
-		}
-		ret.DNSHostName = dnsHN
-		return ret, nil
-	case cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate":
-		if !env.AggregateAndDNSSupportEnv {
-			return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster)
-		}
-		clusters := &v3aggregateclusterpb.ClusterConfig{}
-		if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil {
-			return ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err)
-		}
-		ret.ClusterType = ClusterTypeAggregate
-		ret.PrioritizedClusterNames = clusters.Clusters
-		return ret, nil
-	default:
-		return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster)
-	}
-}
-
-// dnsHostNameFromCluster extracts the DNS host name from the cluster's load
-// assignment.
-//
-// There should be exactly one locality, with one endpoint, whose address
-// contains the address and port.
-func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) {
-	loadAssignment := cluster.GetLoadAssignment()
-	if loadAssignment == nil {
-		return "", fmt.Errorf("load_assignment not present for LOGICAL_DNS cluster")
-	}
-	if len(loadAssignment.GetEndpoints()) != 1 {
-		return "", fmt.Errorf("load_assignment for LOGICAL_DNS cluster must have exactly one locality, got: %+v", loadAssignment)
-	}
-	endpoints := loadAssignment.GetEndpoints()[0].GetLbEndpoints()
-	if len(endpoints) != 1 {
-		return "", fmt.Errorf("locality for LOGICAL_DNS cluster must have exactly one endpoint, got: %+v", endpoints)
-	}
-	endpoint := endpoints[0].GetEndpoint()
-	if endpoint == nil {
-		return "", fmt.Errorf("endpoint for LOGICAL_DNS cluster not set")
-	}
-	socketAddr := endpoint.GetAddress().GetSocketAddress()
-	if socketAddr == nil {
-		return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster not set")
-	}
-	if socketAddr.GetResolverName() != "" {
-		return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster has unexpected custom resolver name: %v", socketAddr.GetResolverName())
-	}
-	host := socketAddr.GetAddress()
-	if host == "" {
-		return "", fmt.Errorf("host for endpoint for LOGICAL_DNS cluster not set")
-	}
-	port := socketAddr.GetPortValue()
-	if port == 0 {
-		return "", fmt.Errorf("port for endpoint for LOGICAL_DNS cluster not set")
-	}
-	return net.JoinHostPort(host, strconv.Itoa(int(port))), nil
-}
-
-// securityConfigFromCluster extracts the relevant security configuration from
-// the received Cluster resource.
-func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) {
-	if tsm := cluster.GetTransportSocketMatches(); len(tsm) != 0 {
-		return nil, fmt.Errorf("unsupported transport_socket_matches field is non-empty: %+v", tsm)
-	}
-	// The Cluster resource contains a `transport_socket` field, which contains
-	// a oneof `typed_config` field of type `protobuf.Any`. The any proto
-	// contains a marshaled representation of an `UpstreamTlsContext` message.
-	ts := cluster.GetTransportSocket()
-	if ts == nil {
-		return nil, nil
-	}
-	if name := ts.GetName(); name != transportSocketName {
-		return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name)
-	}
-	any := ts.GetTypedConfig()
-	if any == nil || any.TypeUrl != version.V3UpstreamTLSContextURL {
-		return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", any.TypeUrl)
-	}
-	upstreamCtx := &v3tlspb.UpstreamTlsContext{}
-	if err := proto.Unmarshal(any.GetValue(), upstreamCtx); err != nil {
-		return nil, fmt.Errorf("failed to unmarshal UpstreamTlsContext in CDS response: %v", err)
-	}
-	// The following fields from `UpstreamTlsContext` are ignored:
-	// - sni
-	// - allow_renegotiation
-	// - max_session_keys
-	if upstreamCtx.GetCommonTlsContext() == nil {
-		return nil, errors.New("UpstreamTlsContext in CDS response does not contain a CommonTlsContext")
-	}
-
-	return securityConfigFromCommonTLSContext(upstreamCtx.GetCommonTlsContext(), false)
-}
-
-// common is expected to be not nil.
-// The `alpn_protocols` field is ignored.
-func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) {
-	if common.GetTlsParams() != nil {
-		return nil, fmt.Errorf("unsupported tls_params field in CommonTlsContext message: %+v", common)
-	}
-	if common.GetCustomHandshaker() != nil {
-		return nil, fmt.Errorf("unsupported custom_handshaker field in CommonTlsContext message: %+v", common)
-	}
-
-	// For now, if we can't get a valid security config from the new fields, we
-	// fall back to the old deprecated fields.
-	// TODO: Drop support for deprecated fields. NACK if err != nil here.
-	sc, _ := securityConfigFromCommonTLSContextUsingNewFields(common, server)
-	if sc == nil || sc.Equal(&SecurityConfig{}) {
-		var err error
-		sc, err = securityConfigFromCommonTLSContextWithDeprecatedFields(common, server)
-		if err != nil {
-			return nil, err
-		}
-	}
-	if sc != nil {
-		// sc == nil is a valid case where the control plane has not sent us any
-		// security configuration. xDS creds will use fallback creds.
-		if server {
-			if sc.IdentityInstanceName == "" {
-				return nil, errors.New("security configuration on the server-side does not contain identity certificate provider instance name")
-			}
-		} else {
-			if sc.RootInstanceName == "" {
-				return nil, errors.New("security configuration on the client-side does not contain root certificate provider instance name")
-			}
-		}
-	}
-	return sc, nil
-}
-
-func securityConfigFromCommonTLSContextWithDeprecatedFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) {
-	// The `CommonTlsContext` contains a
-	// `tls_certificate_certificate_provider_instance` field of type
-	// `CertificateProviderInstance`, which contains the provider instance name
-	// and the certificate name to fetch identity certs.
- sc := &SecurityConfig{} - if identity := common.GetTlsCertificateCertificateProviderInstance(); identity != nil { - sc.IdentityInstanceName = identity.GetInstanceName() - sc.IdentityCertName = identity.GetCertificateName() - } - - // The `CommonTlsContext` contains a `validation_context_type` field which - // is a oneof. We can get the values that we are interested in from two of - // those possible values: - // - combined validation context: - // - contains a default validation context which holds the list of - // matchers for accepted SANs. - // - contains certificate provider instance configuration - // - certificate provider instance configuration - // - in this case, we do not get a list of accepted SANs. - switch t := common.GetValidationContextType().(type) { - case *v3tlspb.CommonTlsContext_CombinedValidationContext: - combined := common.GetCombinedValidationContext() - var matchers []matcher.StringMatcher - if def := combined.GetDefaultValidationContext(); def != nil { - for _, m := range def.GetMatchSubjectAltNames() { - matcher, err := matcher.StringMatcherFromProto(m) - if err != nil { - return nil, err - } - matchers = append(matchers, matcher) - } - } - if server && len(matchers) != 0 { - return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common) - } - sc.SubjectAltNameMatchers = matchers - if pi := combined.GetValidationContextCertificateProviderInstance(); pi != nil { - sc.RootInstanceName = pi.GetInstanceName() - sc.RootCertName = pi.GetCertificateName() - } - case *v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance: - pi := common.GetValidationContextCertificateProviderInstance() - sc.RootInstanceName = pi.GetInstanceName() - sc.RootCertName = pi.GetCertificateName() - case nil: - // It is valid for the validation context to be nil on the server side. - default: - return nil, fmt.Errorf("validation context contains unexpected type: %T", t) - } - return sc, nil -} - -// gRFC A29 https://github.com/grpc/proposal/blob/master/A29-xds-tls-security.md -// specifies the new way to fetch security configuration and says the following: -// -// Although there are various ways to obtain certificates as per this proto -// (which are supported by Envoy), gRPC supports only one of them and that is -// the `CertificateProviderPluginInstance` proto. -// -// This helper function attempts to fetch security configuration from the -// `CertificateProviderPluginInstance` message, given a CommonTlsContext. -func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { - // The `tls_certificate_provider_instance` field of type - // `CertificateProviderPluginInstance` is used to fetch the identity - // certificate provider. 
- sc := &SecurityConfig{} - identity := common.GetTlsCertificateProviderInstance() - if identity == nil && len(common.GetTlsCertificates()) != 0 { - return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificates is set in CommonTlsContext message: %+v", common) - } - if identity == nil && common.GetTlsCertificateSdsSecretConfigs() != nil { - return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificate_sds_secret_configs is set in CommonTlsContext message: %+v", common) - } - sc.IdentityInstanceName = identity.GetInstanceName() - sc.IdentityCertName = identity.GetCertificateName() - - // The `CommonTlsContext` contains a oneof field `validation_context_type`, - // which contains the `CertificateValidationContext` message in one of the - // following ways: - // - `validation_context` field - // - this is directly of type `CertificateValidationContext` - // - `combined_validation_context` field - // - this is of type `CombinedCertificateValidationContext` and contains - // a `default validation context` field of type - // `CertificateValidationContext` - // - // The `CertificateValidationContext` message has the following fields that - // we are interested in: - // - `ca_certificate_provider_instance` - // - this is of type `CertificateProviderPluginInstance` - // - `match_subject_alt_names` - // - this is a list of string matchers - // - // The `CertificateProviderPluginInstance` message contains two fields - // - instance_name - // - this is the certificate provider instance name to be looked up in - // the bootstrap configuration - // - certificate_name - // - this is an opaque name passed to the certificate provider - var validationCtx *v3tlspb.CertificateValidationContext - switch typ := common.GetValidationContextType().(type) { - case *v3tlspb.CommonTlsContext_ValidationContext: - validationCtx = common.GetValidationContext() - case *v3tlspb.CommonTlsContext_CombinedValidationContext: - validationCtx = common.GetCombinedValidationContext().GetDefaultValidationContext() - case nil: - // It is valid for the validation context to be nil on the server side. - return sc, nil - default: - return nil, fmt.Errorf("validation context contains unexpected type: %T", typ) - } - // If we get here, it means that the `CertificateValidationContext` message - // was found through one of the supported ways. It is an error if the - // validation context is specified, but it does not contain the - // ca_certificate_provider_instance field which contains information about - // the certificate provider to be used for the root certificates. 
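A sketch of the smallest CommonTlsContext that this new-fields path accepts (field values are illustrative; the instance names must refer to certificate providers defined in the bootstrap configuration):

	common := &v3tlspb.CommonTlsContext{
		TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{
			InstanceName:    "default",  // looked up in the bootstrap config
			CertificateName: "identity", // opaque name passed to the provider
		},
		ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{
			ValidationContext: &v3tlspb.CertificateValidationContext{
				CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{
					InstanceName: "default",
				},
			},
		},
	}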
-	if validationCtx.GetCaCertificateProviderInstance() == nil {
-		return nil, fmt.Errorf("expected field ca_certificate_provider_instance is missing in CommonTlsContext message: %+v", common)
-	}
-	// The following fields are ignored:
-	// - trusted_ca
-	// - watched_directory
-	// - allow_expired_certificate
-	// - trust_chain_verification
-	switch {
-	case len(validationCtx.GetVerifyCertificateSpki()) != 0:
-		return nil, fmt.Errorf("unsupported verify_certificate_spki field in CommonTlsContext message: %+v", common)
-	case len(validationCtx.GetVerifyCertificateHash()) != 0:
-		return nil, fmt.Errorf("unsupported verify_certificate_hash field in CommonTlsContext message: %+v", common)
-	case validationCtx.GetRequireSignedCertificateTimestamp().GetValue():
-		return nil, fmt.Errorf("unsupported require_signed_certificate_timestamp field in CommonTlsContext message: %+v", common)
-	case validationCtx.GetCrl() != nil:
-		return nil, fmt.Errorf("unsupported crl field in CommonTlsContext message: %+v", common)
-	case validationCtx.GetCustomValidatorConfig() != nil:
-		return nil, fmt.Errorf("unsupported custom_validator_config field in CommonTlsContext message: %+v", common)
-	}
-
-	if rootProvider := validationCtx.GetCaCertificateProviderInstance(); rootProvider != nil {
-		sc.RootInstanceName = rootProvider.GetInstanceName()
-		sc.RootCertName = rootProvider.GetCertificateName()
-	}
-	var matchers []matcher.StringMatcher
-	for _, m := range validationCtx.GetMatchSubjectAltNames() {
-		matcher, err := matcher.StringMatcherFromProto(m)
-		if err != nil {
-			return nil, err
-		}
-		matchers = append(matchers, matcher)
-	}
-	if server && len(matchers) != 0 {
-		return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common)
-	}
-	sc.SubjectAltNameMatchers = matchers
-	return sc, nil
-}
-
-// circuitBreakersFromCluster extracts the circuit breakers configuration from
-// the received cluster resource. Returns nil if no CircuitBreakers or no
-// Thresholds in CircuitBreakers.
-func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 {
-	for _, threshold := range cluster.GetCircuitBreakers().GetThresholds() {
-		if threshold.GetPriority() != v3corepb.RoutingPriority_DEFAULT {
-			continue
-		}
-		maxRequestsPb := threshold.GetMaxRequests()
-		if maxRequestsPb == nil {
-			return nil
-		}
-		maxRequests := maxRequestsPb.GetValue()
-		return &maxRequests
-	}
-	return nil
-}
-
-// UnmarshalEndpoints processes resources received in an EDS response,
-// validates them, and transforms them into a native struct which contains only
-// fields we are interested in.
-func UnmarshalEndpoints(opts *UnmarshalOptions) (map[string]EndpointsUpdateErrTuple, UpdateMetadata, error) { - update := make(map[string]EndpointsUpdateErrTuple) - md, err := processAllResources(opts, update) - return update, md, err -} - -func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, EndpointsUpdate, error) { - if !IsEndpointsResource(r.GetTypeUrl()) { - return "", EndpointsUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) - } - - cla := &v3endpointpb.ClusterLoadAssignment{} - if err := proto.Unmarshal(r.GetValue(), cla); err != nil { - return "", EndpointsUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) - } - logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, pretty.ToJSON(cla)) - - u, err := parseEDSRespProto(cla) - if err != nil { - return cla.GetClusterName(), EndpointsUpdate{}, err - } - u.Raw = r - return cla.GetClusterName(), u, nil -} - -func parseAddress(socketAddress *v3corepb.SocketAddress) string { - return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue()))) -} - -func parseDropPolicy(dropPolicy *v3endpointpb.ClusterLoadAssignment_Policy_DropOverload) OverloadDropConfig { - percentage := dropPolicy.GetDropPercentage() - var ( - numerator = percentage.GetNumerator() - denominator uint32 - ) - switch percentage.GetDenominator() { - case v3typepb.FractionalPercent_HUNDRED: - denominator = 100 - case v3typepb.FractionalPercent_TEN_THOUSAND: - denominator = 10000 - case v3typepb.FractionalPercent_MILLION: - denominator = 1000000 - } - return OverloadDropConfig{ - Category: dropPolicy.GetCategory(), - Numerator: numerator, - Denominator: denominator, - } -} - -func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint) []Endpoint { - endpoints := make([]Endpoint, 0, len(lbEndpoints)) - for _, lbEndpoint := range lbEndpoints { - endpoints = append(endpoints, Endpoint{ - HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), - Address: parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()), - Weight: lbEndpoint.GetLoadBalancingWeight().GetValue(), - }) - } - return endpoints -} - -func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, error) { - ret := EndpointsUpdate{} - for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { - ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) - } - priorities := make(map[uint32]struct{}) - for _, locality := range m.Endpoints { - l := locality.GetLocality() - if l == nil { - return EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) - } - lid := internal.LocalityID{ - Region: l.Region, - Zone: l.Zone, - SubZone: l.SubZone, - } - priority := locality.GetPriority() - priorities[priority] = struct{}{} - ret.Localities = append(ret.Localities, Locality{ - ID: lid, - Endpoints: parseEndpoints(locality.GetLbEndpoints()), - Weight: locality.GetLoadBalancingWeight().GetValue(), - Priority: priority, - }) - } - for i := 0; i < len(priorities); i++ { - if _, ok := priorities[uint32(i)]; !ok { - return EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) - } - } - return ret, nil -} - -// ListenerUpdateErrTuple is a tuple with the update and error. It contains the -// results from unmarshal functions. It's used to pass unmarshal results of -// multiple resources together, e.g. in maps like `map[string]{Update,error}`. 
-type ListenerUpdateErrTuple struct { - Update ListenerUpdate - Err error -} - -// RouteConfigUpdateErrTuple is a tuple with the update and error. It contains -// the results from unmarshal functions. It's used to pass unmarshal results of -// multiple resources together, e.g. in maps like `map[string]{Update,error}`. -type RouteConfigUpdateErrTuple struct { - Update RouteConfigUpdate - Err error -} - -// ClusterUpdateErrTuple is a tuple with the update and error. It contains the -// results from unmarshal functions. It's used to pass unmarshal results of -// multiple resources together, e.g. in maps like `map[string]{Update,error}`. -type ClusterUpdateErrTuple struct { - Update ClusterUpdate - Err error -} - -// EndpointsUpdateErrTuple is a tuple with the update and error. It contains the -// results from unmarshal functions. It's used to pass unmarshal results of -// multiple resources together, e.g. in maps like `map[string]{Update,error}`. -type EndpointsUpdateErrTuple struct { - Update EndpointsUpdate - Err error -} - -// processAllResources unmarshals and validates the resources, populates the -// provided ret (a map), and returns metadata and error. -// -// After this function, the ret map will be populated with both valid and -// invalid updates. Invalid resources will have an entry with the key as the -// resource name, value as an empty update. -// -// The type of the resource is determined by the type of ret. E.g. -// map[string]ListenerUpdate means this is for LDS. -func processAllResources(opts *UnmarshalOptions, ret interface{}) (UpdateMetadata, error) { - timestamp := time.Now() - md := UpdateMetadata{ - Version: opts.Version, - Timestamp: timestamp, - } - var topLevelErrors []error - perResourceErrors := make(map[string]error) - - for _, r := range opts.Resources { - switch ret2 := ret.(type) { - case map[string]ListenerUpdateErrTuple: - name, update, err := unmarshalListenerResource(r, opts.UpdateValidator, opts.Logger) - if err == nil { - ret2[name] = ListenerUpdateErrTuple{Update: update} - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. - ret2[name] = ListenerUpdateErrTuple{Err: err} - case map[string]RouteConfigUpdateErrTuple: - name, update, err := unmarshalRouteConfigResource(r, opts.Logger) - if err == nil { - ret2[name] = RouteConfigUpdateErrTuple{Update: update} - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. - ret2[name] = RouteConfigUpdateErrTuple{Err: err} - case map[string]ClusterUpdateErrTuple: - name, update, err := unmarshalClusterResource(r, opts.UpdateValidator, opts.Logger) - if err == nil { - ret2[name] = ClusterUpdateErrTuple{Update: update} - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. 
- ret2[name] = ClusterUpdateErrTuple{Err: err} - case map[string]EndpointsUpdateErrTuple: - name, update, err := unmarshalEndpointsResource(r, opts.Logger) - if err == nil { - ret2[name] = EndpointsUpdateErrTuple{Update: update} - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. - ret2[name] = EndpointsUpdateErrTuple{Err: err} - } - } - - if len(topLevelErrors) == 0 && len(perResourceErrors) == 0 { - md.Status = ServiceStatusACKed - return md, nil - } - - var typeStr string - switch ret.(type) { - case map[string]ListenerUpdate: - typeStr = "LDS" - case map[string]RouteConfigUpdate: - typeStr = "RDS" - case map[string]ClusterUpdate: - typeStr = "CDS" - case map[string]EndpointsUpdate: - typeStr = "EDS" - } - - md.Status = ServiceStatusNACKed - errRet := combineErrors(typeStr, topLevelErrors, perResourceErrors) - md.ErrState = &UpdateErrorMetadata{ - Version: opts.Version, - Err: errRet, - Timestamp: timestamp, - } - return md, errRet -} - -func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { - var errStrB strings.Builder - errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType)) - if len(topLevelErrors) > 0 { - errStrB.WriteString("top level errors: ") - for i, err := range topLevelErrors { - if i != 0 { - errStrB.WriteString(";\n") - } - errStrB.WriteString(err.Error()) - } - } - if len(perResourceErrors) > 0 { - var i int - for name, err := range perResourceErrors { - if i != 0 { - errStrB.WriteString(";\n") - } - i++ - errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error())) - } - } - return errors.New(errStrB.String()) -} diff --git a/xds/internal/xdsclient/filter_chain.go b/xds/internal/xdsclient/xdsresource/filter_chain.go similarity index 95% rename from xds/internal/xdsclient/filter_chain.go rename to xds/internal/xdsclient/xdsresource/filter_chain.go index 7503e0e48761..10c779229622 100644 --- a/xds/internal/xdsclient/filter_chain.go +++ b/xds/internal/xdsclient/xdsresource/filter_chain.go @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xdsclient +package xdsresource import ( "errors" @@ -28,6 +27,7 @@ import ( v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" @@ -61,12 +61,12 @@ type FilterChain struct { HTTPFilters []HTTPFilter // RouteConfigName is the route configuration name for this FilterChain. // - // Only one of RouteConfigName and InlineRouteConfig is set. + // Exactly one of RouteConfigName and InlineRouteConfig is set. RouteConfigName string // InlineRouteConfig is the inline route configuration (RDS response) // returned for this filter chain. // - // Only one of RouteConfigName and InlineRouteConfig is set. + // Exactly one of RouteConfigName and InlineRouteConfig is set. InlineRouteConfig *RouteConfigUpdate } @@ -177,6 +177,7 @@ const ( // 7. Source IP address. // 8. Source port. 
type FilterChainManager struct { + logger *grpclog.PrefixLogger // Destination prefix is the first match criteria that we support. // Therefore, this multi-stage map is indexed on destination prefixes // specified in the match criteria. @@ -247,9 +248,10 @@ type sourcePrefixEntry struct { // // This function is only exported so that tests outside of this package can // create a FilterChainManager. -func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, error) { +func NewFilterChainManager(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger) (*FilterChainManager, error) { // Parse all the filter chains and build the internal data structures. fci := &FilterChainManager{ + logger: logger, dstPrefixMap: make(map[string]*destPrefixEntry), RouteConfigNames: make(map[string]bool), } @@ -303,7 +305,7 @@ func (fci *FilterChainManager) addFilterChains(fcs []*v3listenerpb.FilterChain) if fcm.GetDestinationPort().GetValue() != 0 { // Destination port is the first match criteria and we do not // support filter chains which contains this match criteria. - logger.Warningf("Dropping filter chain %+v since it contains unsupported destination_port match field", fc) + fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported destination_port match field", fc) continue } @@ -352,7 +354,7 @@ func (fci *FilterChainManager) addFilterChainsForServerNames(dstEntry *destPrefi // Filter chains specifying server names in their match criteria always fail // a match at connection time. So, these filter chains can be dropped now. if len(fc.GetFilterChainMatch().GetServerNames()) != 0 { - logger.Warningf("Dropping filter chain %+v since it contains unsupported server_names match field", fc) + fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported server_names match field", fc) return nil } @@ -365,13 +367,13 @@ func (fci *FilterChainManager) addFilterChainsForTransportProtocols(dstEntry *de case tp != "" && tp != "raw_buffer": // Only allow filter chains with transport protocol set to empty string // or "raw_buffer". - logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) return nil case tp == "" && dstEntry.rawBufferSeen: // If we have already seen filter chains with transport protocol set to // "raw_buffer", we can drop filter chains with transport protocol set // to empty string, since the former takes precedence. - logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) return nil case tp != "" && !dstEntry.rawBufferSeen: // This is the first "raw_buffer" that we are seeing. 
Set the bit and @@ -385,7 +387,7 @@ func (fci *FilterChainManager) addFilterChainsForTransportProtocols(dstEntry *de func (fci *FilterChainManager) addFilterChainsForApplicationProtocols(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { if len(fc.GetFilterChainMatch().GetApplicationProtocols()) != 0 { - logger.Warningf("Dropping filter chain %+v since it contains unsupported application_protocols match field", fc) + fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported application_protocols match field", fc) return nil } return fci.addFilterChainsForSourceType(dstEntry, fc) @@ -551,6 +553,25 @@ func (fci *FilterChainManager) filterChainFromProto(fc *v3listenerpb.FilterChain return filterChain, nil } +// Validate takes a function to validate the FilterChains in this manager. +func (fci *FilterChainManager) Validate(f func(fc *FilterChain) error) error { + for _, dst := range fci.dstPrefixMap { + for _, srcType := range dst.srcTypeArr { + if srcType == nil { + continue + } + for _, src := range srcType.srcPrefixMap { + for _, fc := range src.srcPortMap { + if err := f(fc); err != nil { + return err + } + } + } + } + } + return f(fci.def) +} + func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) { filterChain := &FilterChain{} seenNames := make(map[string]bool, len(filters)) diff --git a/xds/internal/xdsclient/filter_chain_test.go b/xds/internal/xdsclient/xdsresource/filter_chain_test.go similarity index 98% rename from xds/internal/xdsclient/filter_chain_test.go rename to xds/internal/xdsclient/xdsresource/filter_chain_test.go index ae1035e76409..dc1ea75778bf 100644 --- a/xds/internal/xdsclient/filter_chain_test.go +++ b/xds/internal/xdsclient/xdsresource/filter_chain_test.go @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xdsclient +package xdsresource import ( "context" @@ -99,7 +98,7 @@ var ( // TestNewFilterChainImpl_Failure_BadMatchFields verifies cases where we have a // single filter chain with match criteria that contains unsupported fields. -func TestNewFilterChainImpl_Failure_BadMatchFields(t *testing.T) { +func (s) TestNewFilterChainImpl_Failure_BadMatchFields(t *testing.T) { tests := []struct { desc string lis *v3listenerpb.Listener @@ -188,7 +187,7 @@ func TestNewFilterChainImpl_Failure_BadMatchFields(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - if fci, err := NewFilterChainManager(test.lis); err == nil { + if fci, err := NewFilterChainManager(test.lis, nil); err == nil { t.Fatalf("NewFilterChainManager() returned %v when expected to fail", fci) } }) @@ -197,7 +196,7 @@ func TestNewFilterChainImpl_Failure_BadMatchFields(t *testing.T) { // TestNewFilterChainImpl_Failure_OverlappingMatchingRules verifies cases where // there are multiple filter chains and they have overlapping match rules. 
-func TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { +func (s) TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { tests := []struct { desc string lis *v3listenerpb.Listener @@ -287,7 +286,7 @@ func TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { const wantErr = "multiple filter chains with overlapping matching rules are defined" for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - if _, err := NewFilterChainManager(test.lis); err == nil || !strings.Contains(err.Error(), wantErr) { + if _, err := NewFilterChainManager(test.lis, nil); err == nil || !strings.Contains(err.Error(), wantErr) { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, wantErr) } }) @@ -296,7 +295,7 @@ func TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { // TestNewFilterChainImpl_Failure_BadSecurityConfig verifies cases where the // security configuration in the filter chain is invalid. -func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { +func (s) TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { tests := []struct { desc string lis *v3listenerpb.Listener @@ -509,7 +508,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - _, err := NewFilterChainManager(test.lis) + _, err := NewFilterChainManager(test.lis, nil) if err == nil || !strings.Contains(err.Error(), test.wantErr) { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, test.wantErr) } @@ -519,7 +518,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { // TestNewFilterChainImpl_Success_RouteUpdate tests the construction of the // filter chain with valid HTTP Filters present. -func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { +func (s) TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { oldRBAC := env.RBACSupport env.RBACSupport = true defer func() { @@ -746,7 +745,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - gotFC, err := NewFilterChainManager(test.lis) + gotFC, err := NewFilterChainManager(test.lis, nil) if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } @@ -759,7 +758,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { // TestNewFilterChainImpl_Failure_BadRouteUpdate verifies cases where the Route // Update in the filter chain are invalid. -func TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { +func (s) TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { oldRBAC := env.RBACSupport env.RBACSupport = true defer func() { @@ -887,7 +886,7 @@ func TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - _, err := NewFilterChainManager(test.lis) + _, err := NewFilterChainManager(test.lis, nil) if err == nil || !strings.Contains(err.Error(), test.wantErr) { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, test.wantErr) } @@ -897,7 +896,7 @@ func TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { // TestNewFilterChainImpl_Failure_BadHTTPFilters verifies cases where the HTTP // Filters in the filter chain are invalid. 
-func TestNewFilterChainImpl_Failure_BadHTTPFilters(t *testing.T) { +func (s) TestNewFilterChainImpl_Failure_BadHTTPFilters(t *testing.T) { tests := []struct { name string lis *v3listenerpb.Listener @@ -961,7 +960,7 @@ func TestNewFilterChainImpl_Failure_BadHTTPFilters(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - _, err := NewFilterChainManager(test.lis) + _, err := NewFilterChainManager(test.lis, nil) if err == nil || !strings.Contains(err.Error(), test.wantErr) { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, test.wantErr) } @@ -971,7 +970,7 @@ func TestNewFilterChainImpl_Failure_BadHTTPFilters(t *testing.T) { // TestNewFilterChainImpl_Success_HTTPFilters tests the construction of the // filter chain with valid HTTP Filters present. -func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { +func (s) TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { oldRBAC := env.RBACSupport env.RBACSupport = true defer func() { @@ -1281,7 +1280,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - gotFC, err := NewFilterChainManager(test.lis) + gotFC, err := NewFilterChainManager(test.lis, nil) if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } @@ -1294,7 +1293,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { // TestNewFilterChainImpl_Success_SecurityConfig verifies cases where the // security configuration in the filter chain contains valid data. -func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { +func (s) TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { oldRBAC := env.RBACSupport env.RBACSupport = true defer func() { @@ -1510,7 +1509,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - gotFC, err := NewFilterChainManager(test.lis) + gotFC, err := NewFilterChainManager(test.lis, nil) if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } @@ -1526,7 +1525,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { // contains unsupported match fields. These configurations should lead to // success at config validation time and the filter chains which contains // unsupported match fields will be skipped at lookup time. -func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { +func (s) TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { oldRBAC := env.RBACSupport env.RBACSupport = true defer func() { @@ -1683,7 +1682,7 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - gotFC, err := NewFilterChainManager(test.lis) + gotFC, err := NewFilterChainManager(test.lis, nil) if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } @@ -1696,7 +1695,7 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { // TestNewFilterChainImpl_Success_AllCombinations verifies different // combinations of the supported match criteria. 
-func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { +func (s) TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { oldRBAC := env.RBACSupport env.RBACSupport = true defer func() { @@ -2184,7 +2183,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - gotFC, err := NewFilterChainManager(test.lis) + gotFC, err := NewFilterChainManager(test.lis, nil) if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } @@ -2195,7 +2194,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { } } -func TestLookup_Failures(t *testing.T) { +func (s) TestLookup_Failures(t *testing.T) { tests := []struct { desc string lis *v3listenerpb.Listener @@ -2335,7 +2334,7 @@ func TestLookup_Failures(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - fci, err := NewFilterChainManager(test.lis) + fci, err := NewFilterChainManager(test.lis, nil) if err != nil { t.Fatalf("NewFilterChainManager() failed: %v", err) } @@ -2347,7 +2346,7 @@ func TestLookup_Failures(t *testing.T) { } } -func TestLookup_Successes(t *testing.T) { +func (s) TestLookup_Successes(t *testing.T) { oldRBAC := env.RBACSupport env.RBACSupport = true defer func() { @@ -2569,7 +2568,7 @@ func TestLookup_Successes(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - fci, err := NewFilterChainManager(test.lis) + fci, err := NewFilterChainManager(test.lis, nil) if err != nil { t.Fatalf("NewFilterChainManager() failed: %v", err) } @@ -2615,7 +2614,7 @@ func (si *serverInterceptor) AllowRPC(context.Context) error { return errors.New(si.level) } -func TestHTTPFilterInstantiation(t *testing.T) { +func (s) TestHTTPFilterInstantiation(t *testing.T) { tests := []struct { name string filters []HTTPFilter diff --git a/xds/internal/xdsclient/matcher.go b/xds/internal/xdsclient/xdsresource/matcher.go similarity index 99% rename from xds/internal/xdsclient/matcher.go rename to xds/internal/xdsclient/xdsresource/matcher.go index 85fff30638e6..d7da32a750e0 100644 --- a/xds/internal/xdsclient/matcher.go +++ b/xds/internal/xdsclient/xdsresource/matcher.go @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xdsclient +package xdsresource import ( "fmt" diff --git a/xds/internal/xdsclient/matcher_path.go b/xds/internal/xdsclient/xdsresource/matcher_path.go similarity index 99% rename from xds/internal/xdsclient/matcher_path.go rename to xds/internal/xdsclient/xdsresource/matcher_path.go index 2ca0e4bbcc44..da487e20c58e 100644 --- a/xds/internal/xdsclient/matcher_path.go +++ b/xds/internal/xdsclient/xdsresource/matcher_path.go @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * */ -package xdsclient +package xdsresource import ( "regexp" diff --git a/xds/internal/xdsclient/matcher_path_test.go b/xds/internal/xdsclient/xdsresource/matcher_path_test.go similarity index 94% rename from xds/internal/xdsclient/matcher_path_test.go rename to xds/internal/xdsclient/xdsresource/matcher_path_test.go index 003d6db72e29..507cf15bed85 100644 --- a/xds/internal/xdsclient/matcher_path_test.go +++ b/xds/internal/xdsclient/xdsresource/matcher_path_test.go @@ -13,17 +13,16 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xdsclient +package xdsresource import ( "regexp" "testing" ) -func TestPathFullMatcherMatch(t *testing.T) { +func (s) TestPathFullMatcherMatch(t *testing.T) { tests := []struct { name string fullPath string @@ -47,7 +46,7 @@ func TestPathFullMatcherMatch(t *testing.T) { } } -func TestPathPrefixMatcherMatch(t *testing.T) { +func (s) TestPathPrefixMatcherMatch(t *testing.T) { tests := []struct { name string prefix string @@ -71,7 +70,7 @@ func TestPathPrefixMatcherMatch(t *testing.T) { } } -func TestPathRegexMatcherMatch(t *testing.T) { +func (s) TestPathRegexMatcherMatch(t *testing.T) { tests := []struct { name string regexPath string diff --git a/xds/internal/xdsclient/matcher_test.go b/xds/internal/xdsclient/xdsresource/matcher_test.go similarity index 98% rename from xds/internal/xdsclient/matcher_test.go rename to xds/internal/xdsclient/xdsresource/matcher_test.go index 724fa8269582..2746e58e6c77 100644 --- a/xds/internal/xdsclient/matcher_test.go +++ b/xds/internal/xdsclient/xdsresource/matcher_test.go @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xdsclient +package xdsresource import ( "context" @@ -29,7 +28,7 @@ import ( "google.golang.org/grpc/metadata" ) -func TestAndMatcherMatch(t *testing.T) { +func (s) TestAndMatcherMatch(t *testing.T) { tests := []struct { name string pm pathMatcher @@ -114,7 +113,7 @@ func TestAndMatcherMatch(t *testing.T) { } } -func TestFractionMatcherMatch(t *testing.T) { +func (s) TestFractionMatcherMatch(t *testing.T) { const fraction = 500000 fm := newFractionMatcher(fraction) defer func() { diff --git a/xds/internal/xdsclient/xdsresource/test_utils_test.go b/xds/internal/xdsclient/xdsresource/test_utils_test.go new file mode 100644 index 000000000000..b352caa23b75 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/test_utils_test.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package xdsresource
+
+import (
+	"testing"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	"google.golang.org/grpc/internal/grpctest"
+	"google.golang.org/protobuf/testing/protocmp"
+)
+
+type s struct {
+	grpctest.Tester
+}
+
+func Test(t *testing.T) {
+	grpctest.RunSubTests(t, s{})
+}
+
+var (
+	cmpOpts = cmp.Options{
+		cmpopts.EquateEmpty(),
+		cmp.FilterValues(func(x, y error) bool { return true }, cmpopts.EquateErrors()),
+		cmp.Comparer(func(a, b time.Time) bool { return true }),
+		protocmp.Transform(),
+	}
+
+	cmpOptsIgnoreDetails = cmp.Options{
+		cmp.Comparer(func(a, b time.Time) bool { return true }),
+		cmp.Comparer(func(x, y error) bool {
+			return (x == nil) == (y == nil)
+		}),
+	}
+)
diff --git a/xds/internal/xdsclient/xdsresource/type.go b/xds/internal/xdsclient/xdsresource/type.go
new file mode 100644
index 000000000000..3e01d77e4e02
--- /dev/null
+++ b/xds/internal/xdsclient/xdsresource/type.go
@@ -0,0 +1,107 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package xdsresource
+
+import (
+	"time"
+
+	"google.golang.org/grpc/xds/internal/version"
+)
+
+// UpdateValidatorFunc performs validations on update structs using
+// context/logic available at the xdsClient layer. Since these validations are
+// performed on internal update structs, they can be shared between different
+// API clients.
+type UpdateValidatorFunc func(interface{}) error
+
+// UpdateMetadata contains the metadata for each update, including timestamp,
+// raw message, and so on.
+type UpdateMetadata struct {
+	// Status is the status of this resource, e.g. ACKed, NACKed, or
+	// Not_exist(removed).
+	Status ServiceStatus
+	// Version is the version of the xds response. Note that this is the version
+	// of the resource in use (previous ACKed). If a response is NACKed, the
+	// NACKed version is in ErrState.
+	Version string
+	// Timestamp is when the response is received.
+	Timestamp time.Time
+	// ErrState is set when the update is NACKed.
+	ErrState *UpdateErrorMetadata
+}
+
+// IsListenerResource returns true if the provided URL corresponds to an xDS
+// Listener resource.
+func IsListenerResource(url string) bool {
+	return url == version.V2ListenerURL || url == version.V3ListenerURL
+}
+
+// IsHTTPConnManagerResource returns true if the provided URL corresponds to an xDS
+// HTTPConnManager resource.
+func IsHTTPConnManagerResource(url string) bool {
+	return url == version.V2HTTPConnManagerURL || url == version.V3HTTPConnManagerURL
+}
+
+// IsRouteConfigResource returns true if the provided URL corresponds to an xDS
+// RouteConfig resource.
+func IsRouteConfigResource(url string) bool {
+	return url == version.V2RouteConfigURL || url == version.V3RouteConfigURL
+}
+
+// IsClusterResource returns true if the provided URL corresponds to an xDS
+// Cluster resource.
+func IsClusterResource(url string) bool {
+	return url == version.V2ClusterURL || url == version.V3ClusterURL
+}
+
+// IsEndpointsResource returns true if the provided URL corresponds to an xDS
+// Endpoints resource.
+func IsEndpointsResource(url string) bool {
+	return url == version.V2EndpointsURL || url == version.V3EndpointsURL
+}
+
+// ServiceStatus is the status of the update.
+type ServiceStatus int
+
+const (
+	// ServiceStatusUnknown is the default state, before a watch is started for
+	// the resource.
+	ServiceStatusUnknown ServiceStatus = iota
+	// ServiceStatusRequested is when the watch is started, but before any
+	// response is received.
+	ServiceStatusRequested
+	// ServiceStatusNotExist is when the resource doesn't exist in
+	// state-of-the-world responses (e.g. LDS and CDS), which means the resource
+	// is removed by the management server.
+	ServiceStatusNotExist // Resource is removed in the server, in LDS/CDS.
+	// ServiceStatusACKed is when the resource is ACKed.
+	ServiceStatusACKed
+	// ServiceStatusNACKed is when the resource is NACKed.
+	ServiceStatusNACKed
+)
+
+// UpdateErrorMetadata is part of UpdateMetadata. It contains the error state
+// when a response is NACKed.
+type UpdateErrorMetadata struct {
+	// Version is the version of the NACKed response.
+	Version string
+	// Err contains why the response was NACKed.
+	Err error
+	// Timestamp is when the NACKed response was received.
+	Timestamp time.Time
+}
diff --git a/xds/internal/xdsclient/xdsresource/type_cds.go b/xds/internal/xdsclient/xdsresource/type_cds.go
new file mode 100644
index 000000000000..c200380be26f
--- /dev/null
+++ b/xds/internal/xdsclient/xdsresource/type_cds.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package xdsresource
+
+import "google.golang.org/protobuf/types/known/anypb"
+
+// ClusterType is the type of cluster from a received CDS response.
+type ClusterType int
+
+const (
+	// ClusterTypeEDS represents the EDS cluster type, which will delegate endpoint
+	// discovery to the management server.
+	ClusterTypeEDS ClusterType = iota
+	// ClusterTypeLogicalDNS represents the Logical DNS cluster type, which essentially
+	// maps to the gRPC behavior of using the DNS resolver with pick_first LB policy.
+	ClusterTypeLogicalDNS
+	// ClusterTypeAggregate represents the Aggregate Cluster type, which provides a
+	// prioritized list of clusters to use. It is used for failover between clusters
+	// with a different configuration.
+	ClusterTypeAggregate
+)
+
+// ClusterLBPolicyRingHash represents ring_hash lb policy, and also contains its
+// config.
+type ClusterLBPolicyRingHash struct {
+	MinimumRingSize uint64
+	MaximumRingSize uint64
+}
+
+// ClusterUpdate contains information from a received CDS response, which is of
+// interest to the registered CDS watcher.
+type ClusterUpdate struct {
+	ClusterType ClusterType
+	// ClusterName is the clusterName being watched for through CDS.
+	ClusterName string
+	// EDSServiceName is an optional name for EDS. If it's not set, the balancer
+	// should watch ClusterName for the EDS resources.
+	EDSServiceName string
+	// EnableLRS indicates whether or not load should be reported through LRS.
+	EnableLRS bool
+	// SecurityCfg contains security configuration sent by the control plane.
+	SecurityCfg *SecurityConfig
+	// MaxRequests for circuit breaking, if any (otherwise nil).
+	MaxRequests *uint32
+	// DNSHostName is used only for cluster type DNS. It's the DNS name to
+	// resolve in "host:port" form.
+	DNSHostName string
+	// PrioritizedClusterNames is used only for cluster type aggregate. It represents
+	// a prioritized list of cluster names.
+	PrioritizedClusterNames []string
+
+	// LBPolicy is the lb policy for this cluster.
+	//
+	// This only supports round_robin and ring_hash.
+	// - if it's nil, the lb policy is round_robin
+	// - if it's not nil, the lb policy is ring_hash, and this field has the config.
+	//
+	// When we add support for more policies, this can be made an interface, and
+	// will be set to different types based on the policy type.
+	LBPolicy *ClusterLBPolicyRingHash
+
+	// Raw is the resource from the xds response.
+	Raw *anypb.Any
+}
+
+// ClusterUpdateErrTuple is a tuple with the update and error. It contains the
+// results from unmarshal functions. It's used to pass unmarshal results of
+// multiple resources together, e.g. in maps like `map[string]{Update,error}`.
+type ClusterUpdateErrTuple struct {
+	Update ClusterUpdate
+	Err    error
+}
diff --git a/xds/internal/xdsclient/xdsresource/type_eds.go b/xds/internal/xdsclient/xdsresource/type_eds.go
new file mode 100644
index 000000000000..ad590160f6af
--- /dev/null
+++ b/xds/internal/xdsclient/xdsresource/type_eds.go
@@ -0,0 +1,80 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package xdsresource
+
+import (
+	"google.golang.org/grpc/xds/internal"
+	"google.golang.org/protobuf/types/known/anypb"
+)
+
+// OverloadDropConfig contains the config to drop overloads.
+type OverloadDropConfig struct {
+	Category    string
+	Numerator   uint32
+	Denominator uint32
+}
+
+// EndpointHealthStatus represents the health status of an endpoint.
+type EndpointHealthStatus int32
+
+const (
+	// EndpointHealthStatusUnknown represents HealthStatus UNKNOWN.
+	EndpointHealthStatusUnknown EndpointHealthStatus = iota
+	// EndpointHealthStatusHealthy represents HealthStatus HEALTHY.
+	EndpointHealthStatusHealthy
+	// EndpointHealthStatusUnhealthy represents HealthStatus UNHEALTHY.
+	EndpointHealthStatusUnhealthy
+	// EndpointHealthStatusDraining represents HealthStatus DRAINING.
+	EndpointHealthStatusDraining
+	// EndpointHealthStatusTimeout represents HealthStatus TIMEOUT.
+	EndpointHealthStatusTimeout
+	// EndpointHealthStatusDegraded represents HealthStatus DEGRADED.
+	EndpointHealthStatusDegraded
+)
+
+// Endpoint contains information of an endpoint.
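+// It mirrors the fields of interest from the EDS LbEndpoint proto: Address is
+// rendered as a "host:port" string from the endpoint's socket address, and
+// Weight comes from the endpoint's load_balancing_weight.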
+type Endpoint struct {
+	Address      string
+	HealthStatus EndpointHealthStatus
+	Weight       uint32
+}
+
+// Locality contains information of a locality.
+type Locality struct {
+	Endpoints []Endpoint
+	ID        internal.LocalityID
+	Priority  uint32
+	Weight    uint32
+}
+
+// EndpointsUpdate contains an EDS update.
+type EndpointsUpdate struct {
+	Drops      []OverloadDropConfig
+	Localities []Locality
+
+	// Raw is the resource from the xds response.
+	Raw *anypb.Any
+}
+
+// EndpointsUpdateErrTuple is a tuple with the update and error. It contains the
+// results from unmarshal functions. It's used to pass unmarshal results of
+// multiple resources together, e.g. in maps like `map[string]{Update,error}`.
+type EndpointsUpdateErrTuple struct {
+	Update EndpointsUpdate
+	Err    error
+}
diff --git a/xds/internal/xdsclient/xdsresource/type_lds.go b/xds/internal/xdsclient/xdsresource/type_lds.go
new file mode 100644
index 000000000000..a2742fb4371a
--- /dev/null
+++ b/xds/internal/xdsclient/xdsresource/type_lds.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package xdsresource
+
+import (
+	"time"
+
+	"google.golang.org/grpc/xds/internal/httpfilter"
+	"google.golang.org/protobuf/types/known/anypb"
+)
+
+// ListenerUpdate contains information received in an LDS response, which is of
+// interest to the registered LDS watcher.
+type ListenerUpdate struct {
+	// RouteConfigName is the route configuration name corresponding to the
+	// target which is being watched through LDS.
+	//
+	// Exactly one of RouteConfigName and InlineRouteConfig is set.
+	RouteConfigName string
+	// InlineRouteConfig is the inline route configuration (RDS response)
+	// returned inside LDS.
+	//
+	// Exactly one of RouteConfigName and InlineRouteConfig is set.
+	InlineRouteConfig *RouteConfigUpdate
+
+	// MaxStreamDuration contains the HTTP connection manager's
+	// common_http_protocol_options.max_stream_duration field, or zero if
+	// unset.
+	MaxStreamDuration time.Duration
+	// HTTPFilters is a list of HTTP filters (name, config) from the LDS
+	// response.
+	HTTPFilters []HTTPFilter
+	// InboundListenerCfg contains inbound listener configuration.
+	InboundListenerCfg *InboundListenerConfig
+
+	// Raw is the resource from the xds response.
+	Raw *anypb.Any
+}
+
+// HTTPFilter represents one HTTP filter from an LDS response's HTTP connection
+// manager field.
+type HTTPFilter struct {
+	// Name is an arbitrary name of the filter. Used for applying override
+	// settings in virtual host / route / weighted cluster configuration (not
+	// yet supported).
+	Name string
+	// Filter is the HTTP filter found in the registry for the config type.
+	Filter httpfilter.Filter
+	// Config contains the filter's configuration.
+	Config httpfilter.FilterConfig
+}
+
+// InboundListenerConfig contains information about the inbound listener, i.e.,
+// the server-side listener.
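+// It is derived from the server-side Listener resource: Address and Port come
+// from the Listener's address field, and FilterChains is built from its
+// filter_chains field (see FilterChainManager).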
+type InboundListenerConfig struct { + // Address is the local address on which the inbound listener is expected to + // accept incoming connections. + Address string + // Port is the local port on which the inbound listener is expected to + // accept incoming connections. + Port string + // FilterChains is the list of filter chains associated with this listener. + FilterChains *FilterChainManager +} + +// ListenerUpdateErrTuple is a tuple with the update and error. It contains the +// results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. +type ListenerUpdateErrTuple struct { + Update ListenerUpdate + Err error +} diff --git a/xds/internal/xdsclient/xdsresource/type_rds.go b/xds/internal/xdsclient/xdsresource/type_rds.go new file mode 100644 index 000000000000..3c4d971cd245 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/type_rds.go @@ -0,0 +1,245 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "regexp" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/types/known/anypb" +) + +// RouteConfigUpdate contains information received in an RDS response, which is +// of interest to the registered RDS watcher. +type RouteConfigUpdate struct { + VirtualHosts []*VirtualHost + // Raw is the resource from the xds response. + Raw *anypb.Any +} + +// VirtualHost contains the routes for a list of Domains. +// +// Note that the domains in this slice can be a wildcard, not an exact string. +// The consumer of this struct needs to find the best match for its hostname. +type VirtualHost struct { + Domains []string + // Routes contains a list of routes, each containing matchers and + // corresponding action. + Routes []*Route + // HTTPFilterConfigOverride contains any HTTP filter config overrides for + // the virtual host which may be present. An individual filter's override + // may be unused if the matching Route contains an override for that + // filter. + HTTPFilterConfigOverride map[string]httpfilter.FilterConfig + RetryConfig *RetryConfig +} + +// RetryConfig contains all retry-related configuration in either a VirtualHost +// or Route. +type RetryConfig struct { + // RetryOn is a set of status codes on which to retry. Only Canceled, + // DeadlineExceeded, Internal, ResourceExhausted, and Unavailable are + // supported; any other values will be omitted. + RetryOn map[codes.Code]bool + NumRetries uint32 // maximum number of retry attempts + RetryBackoff RetryBackoff // retry backoff policy +} + +// RetryBackoff describes the backoff policy for retries. 
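+// BaseInterval and MaxInterval correspond to the base_interval and
+// max_interval fields of the xDS RetryPolicy's retry_back_off, and bound the
+// exponential backoff applied between retry attempts.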
+type RetryBackoff struct { + BaseInterval time.Duration // initial backoff duration between attempts + MaxInterval time.Duration // maximum backoff duration +} + +// HashPolicyType specifies the type of HashPolicy from a received RDS Response. +type HashPolicyType int + +const ( + // HashPolicyTypeHeader specifies to hash a Header in the incoming request. + HashPolicyTypeHeader HashPolicyType = iota + // HashPolicyTypeChannelID specifies to hash a unique Identifier of the + // Channel. In grpc-go, this will be done using the ClientConn pointer. + HashPolicyTypeChannelID +) + +// HashPolicy specifies the HashPolicy if the upstream cluster uses a hashing +// load balancer. +type HashPolicy struct { + HashPolicyType HashPolicyType + Terminal bool + // Fields used for type HEADER. + HeaderName string + Regex *regexp.Regexp + RegexSubstitution string +} + +// RouteAction is the action of the route from a received RDS response. +type RouteAction int + +const ( + // RouteActionUnsupported are routing types currently unsupported by grpc. + // According to A36, "A Route with an inappropriate action causes RPCs + // matching that route to fail." + RouteActionUnsupported RouteAction = iota + // RouteActionRoute is the expected route type on the client side. Route + // represents routing a request to some upstream cluster. On the client + // side, if an RPC matches to a route that is not RouteActionRoute, the RPC + // will fail according to A36. + RouteActionRoute + // RouteActionNonForwardingAction is the expected route type on the server + // side. NonForwardingAction represents when a route will generate a + // response directly, without forwarding to an upstream host. + RouteActionNonForwardingAction +) + +// Route is both a specification of how to match a request as well as an +// indication of the action to take upon match. +type Route struct { + Path *string + Prefix *string + Regex *regexp.Regexp + // Indicates if prefix/path matching should be case insensitive. The default + // is false (case sensitive). + CaseInsensitive bool + Headers []*HeaderMatcher + Fraction *uint32 + + HashPolicies []*HashPolicy + + // If the matchers above indicate a match, the below configuration is used. + WeightedClusters map[string]WeightedCluster + // If MaxStreamDuration is nil, it indicates neither of the route action's + // max_stream_duration fields (grpc_timeout_header_max nor + // max_stream_duration) were set. In this case, the ListenerUpdate's + // MaxStreamDuration field should be used. If MaxStreamDuration is set to + // an explicit zero duration, the application's deadline should be used. + MaxStreamDuration *time.Duration + // HTTPFilterConfigOverride contains any HTTP filter config overrides for + // the route which may be present. An individual filter's override may be + // unused if the matching WeightedCluster contains an override for that + // filter. + HTTPFilterConfigOverride map[string]httpfilter.FilterConfig + RetryConfig *RetryConfig + + RouteAction RouteAction +} + +// WeightedCluster contains settings for an xds RouteAction.WeightedCluster. +type WeightedCluster struct { + // Weight is the relative weight of the cluster. It will never be zero. + Weight uint32 + // HTTPFilterConfigOverride contains any HTTP filter config overrides for + // the weighted cluster which may be present. + HTTPFilterConfigOverride map[string]httpfilter.FilterConfig +} + +// HeaderMatcher represents header matchers. 
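+// Name identifies the header to match on, InvertMatch inverts the result, and
+// at most one of the remaining match variants is set, mirroring the oneof in
+// the xDS HeaderMatcher proto.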
+type HeaderMatcher struct { + Name string + InvertMatch *bool + ExactMatch *string + RegexMatch *regexp.Regexp + PrefixMatch *string + SuffixMatch *string + RangeMatch *Int64Range + PresentMatch *bool +} + +// Int64Range is a range for header range match. +type Int64Range struct { + Start int64 + End int64 +} + +// SecurityConfig contains the security configuration received as part of the +// Cluster resource on the client-side, and as part of the Listener resource on +// the server-side. +type SecurityConfig struct { + // RootInstanceName identifies the certProvider plugin to be used to fetch + // root certificates. This instance name will be resolved to the plugin name + // and its associated configuration from the certificate_providers field of + // the bootstrap file. + RootInstanceName string + // RootCertName is the certificate name to be passed to the plugin (looked + // up from the bootstrap file) while fetching root certificates. + RootCertName string + // IdentityInstanceName identifies the certProvider plugin to be used to + // fetch identity certificates. This instance name will be resolved to the + // plugin name and its associated configuration from the + // certificate_providers field of the bootstrap file. + IdentityInstanceName string + // IdentityCertName is the certificate name to be passed to the plugin + // (looked up from the bootstrap file) while fetching identity certificates. + IdentityCertName string + // SubjectAltNameMatchers is an optional list of match criteria for SANs + // specified on the peer certificate. Used only on the client-side. + // + // Some intricacies: + // - If this field is empty, then any peer certificate is accepted. + // - If the peer certificate contains a wildcard DNS SAN, and an `exact` + // matcher is configured, a wildcard DNS match is performed instead of a + // regular string comparison. + SubjectAltNameMatchers []matcher.StringMatcher + // RequireClientCert indicates if the server handshake process expects the + // client to present a certificate. Set to true when performing mTLS. Used + // only on the server-side. + RequireClientCert bool +} + +// Equal returns true if sc is equal to other. +func (sc *SecurityConfig) Equal(other *SecurityConfig) bool { + switch { + case sc == nil && other == nil: + return true + case (sc != nil) != (other != nil): + return false + } + switch { + case sc.RootInstanceName != other.RootInstanceName: + return false + case sc.RootCertName != other.RootCertName: + return false + case sc.IdentityInstanceName != other.IdentityInstanceName: + return false + case sc.IdentityCertName != other.IdentityCertName: + return false + case sc.RequireClientCert != other.RequireClientCert: + return false + default: + if len(sc.SubjectAltNameMatchers) != len(other.SubjectAltNameMatchers) { + return false + } + for i := 0; i < len(sc.SubjectAltNameMatchers); i++ { + if !sc.SubjectAltNameMatchers[i].Equal(other.SubjectAltNameMatchers[i]) { + return false + } + } + } + return true +} + +// RouteConfigUpdateErrTuple is a tuple with the update and error. It contains +// the results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. 
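+// A nil Err means Update holds a successfully unmarshaled resource; a non-nil
+// Err means the resource was NACKed and Update is left as its zero value.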
+type RouteConfigUpdateErrTuple struct {
+	Update RouteConfigUpdate
+	Err    error
+}
diff --git a/xds/internal/xdsclient/xdsresource/unmarshal.go b/xds/internal/xdsclient/xdsresource/unmarshal.go
new file mode 100644
index 000000000000..7cd9d32dd6c8
--- /dev/null
+++ b/xds/internal/xdsclient/xdsresource/unmarshal.go
@@ -0,0 +1,174 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package xdsresource contains types for the xDS resource updates, and
+// functions to unmarshal the updates from the received xDS response protos.
+package xdsresource
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/protobuf/types/known/anypb"
+)
+
+// UnmarshalOptions wraps the input parameters for `UnmarshalXxx` functions.
+type UnmarshalOptions struct {
+	// Version is the version of the received response.
+	Version string
+	// Resources are the xDS resources in the received response.
+	Resources []*anypb.Any
+	// Logger is the prefix logger to be used during unmarshaling.
+	Logger *grpclog.PrefixLogger
+	// UpdateValidator is a post-unmarshal validation check provided by the
+	// upper layer.
+	UpdateValidator UpdateValidatorFunc
+}
+
+// processAllResources unmarshals and validates the resources, populates the
+// provided ret (a map), and returns metadata and error.
+//
+// After this function, the ret map will be populated with both valid and
+// invalid updates. Invalid resources will have an entry with the key as the
+// resource name, value as an empty update.
+//
+// The type of the resource is determined by the type of ret. E.g.
+// map[string]ListenerUpdateErrTuple means this is for LDS.
+func processAllResources(opts *UnmarshalOptions, ret interface{}) (UpdateMetadata, error) {
+	timestamp := time.Now()
+	md := UpdateMetadata{
+		Version:   opts.Version,
+		Timestamp: timestamp,
+	}
+	var topLevelErrors []error
+	perResourceErrors := make(map[string]error)
+
+	for _, r := range opts.Resources {
+		switch ret2 := ret.(type) {
+		case map[string]ListenerUpdateErrTuple:
+			name, update, err := unmarshalListenerResource(r, opts.UpdateValidator, opts.Logger)
+			if err == nil {
+				ret2[name] = ListenerUpdateErrTuple{Update: update}
+				continue
+			}
+			if name == "" {
+				topLevelErrors = append(topLevelErrors, err)
+				continue
+			}
+			perResourceErrors[name] = err
+			// Add a placeholder in the map so we know this resource name was in
+			// the response.
+			ret2[name] = ListenerUpdateErrTuple{Err: err}
+		case map[string]RouteConfigUpdateErrTuple:
+			name, update, err := unmarshalRouteConfigResource(r, opts.Logger)
+			if err == nil {
+				ret2[name] = RouteConfigUpdateErrTuple{Update: update}
+				continue
+			}
+			if name == "" {
+				topLevelErrors = append(topLevelErrors, err)
+				continue
+			}
+			perResourceErrors[name] = err
+			// Add a placeholder in the map so we know this resource name was in
+			// the response.
+			ret2[name] = RouteConfigUpdateErrTuple{Err: err}
+		case map[string]ClusterUpdateErrTuple:
+			name, update, err := unmarshalClusterResource(r, opts.UpdateValidator, opts.Logger)
+			if err == nil {
+				ret2[name] = ClusterUpdateErrTuple{Update: update}
+				continue
+			}
+			if name == "" {
+				topLevelErrors = append(topLevelErrors, err)
+				continue
+			}
+			perResourceErrors[name] = err
+			// Add a placeholder in the map so we know this resource name was in
+			// the response.
+			ret2[name] = ClusterUpdateErrTuple{Err: err}
+		case map[string]EndpointsUpdateErrTuple:
+			name, update, err := unmarshalEndpointsResource(r, opts.Logger)
+			if err == nil {
+				ret2[name] = EndpointsUpdateErrTuple{Update: update}
+				continue
+			}
+			if name == "" {
+				topLevelErrors = append(topLevelErrors, err)
+				continue
+			}
+			perResourceErrors[name] = err
+			// Add a placeholder in the map so we know this resource name was in
+			// the response.
+			ret2[name] = EndpointsUpdateErrTuple{Err: err}
+		}
+	}
+
+	if len(topLevelErrors) == 0 && len(perResourceErrors) == 0 {
+		md.Status = ServiceStatusACKed
+		return md, nil
+	}
+
+	var typeStr string
+	switch ret.(type) {
+	case map[string]ListenerUpdateErrTuple:
+		typeStr = "LDS"
+	case map[string]RouteConfigUpdateErrTuple:
+		typeStr = "RDS"
+	case map[string]ClusterUpdateErrTuple:
+		typeStr = "CDS"
+	case map[string]EndpointsUpdateErrTuple:
+		typeStr = "EDS"
+	}
+
+	md.Status = ServiceStatusNACKed
+	errRet := combineErrors(typeStr, topLevelErrors, perResourceErrors)
+	md.ErrState = &UpdateErrorMetadata{
+		Version:   opts.Version,
+		Err:       errRet,
+		Timestamp: timestamp,
+	}
+	return md, errRet
+}
+
+func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error {
+	var errStrB strings.Builder
+	errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType))
+	if len(topLevelErrors) > 0 {
+		errStrB.WriteString("top level errors: ")
+		for i, err := range topLevelErrors {
+			if i != 0 {
+				errStrB.WriteString(";\n")
+			}
+			errStrB.WriteString(err.Error())
+		}
+	}
+	if len(perResourceErrors) > 0 {
+		var i int
+		for name, err := range perResourceErrors {
+			if i != 0 {
+				errStrB.WriteString(";\n")
+			}
+			i++
+			errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error()))
+		}
+	}
+	return errors.New(errStrB.String())
+}
diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go
new file mode 100644
index 000000000000..a1c6c3ea7a62
--- /dev/null
+++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go
@@ -0,0 +1,456 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package xdsresource
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"strconv"
+
+	v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
+	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+	v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3"
+	v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/internal/xds/env"
+	"google.golang.org/grpc/internal/xds/matcher"
+	"google.golang.org/grpc/xds/internal/version"
+	"google.golang.org/protobuf/types/known/anypb"
+)
+
+// TransportSocket proto message has a `name` field which is expected to be set
+// to this value by the management server.
+const transportSocketName = "envoy.transport_sockets.tls"
+
+// UnmarshalCluster processes resources received in a CDS response, validates
+// them, and transforms them into a native struct which contains only fields we
+// are interested in.
+func UnmarshalCluster(opts *UnmarshalOptions) (map[string]ClusterUpdateErrTuple, UpdateMetadata, error) {
+	update := make(map[string]ClusterUpdateErrTuple)
+	md, err := processAllResources(opts, update)
+	return update, md, err
+}
+
+func unmarshalClusterResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ClusterUpdate, error) {
+	if !IsClusterResource(r.GetTypeUrl()) {
+		return "", ClusterUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl())
+	}
+
+	cluster := &v3clusterpb.Cluster{}
+	if err := proto.Unmarshal(r.GetValue(), cluster); err != nil {
+		return "", ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err)
+	}
+	logger.Infof("Resource with name: %v, type: %T, contains: %v", cluster.GetName(), cluster, pretty.ToJSON(cluster))
+	cu, err := validateClusterAndConstructClusterUpdate(cluster)
+	if err != nil {
+		return cluster.GetName(), ClusterUpdate{}, err
+	}
+	cu.Raw = r
+	if f != nil {
+		if err := f(cu); err != nil {
+			return "", ClusterUpdate{}, err
+		}
+	}
+
+	return cluster.GetName(), cu, nil
+}
+
+const (
+	defaultRingHashMinSize = 1024
+	defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M
+	ringHashSizeUpperBound = 8 * 1024 * 1024 // 8M
+)
+
+func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) {
+	var lbPolicy *ClusterLBPolicyRingHash
+	switch cluster.GetLbPolicy() {
+	case v3clusterpb.Cluster_ROUND_ROBIN:
+		lbPolicy = nil // The default is round_robin, and there's no config to set.
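+	// Ring hash is the only other LB policy accepted here, and only when the
+	// ring hash environment variable is set; any other policy is rejected by
+	// the default case below.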
+	case v3clusterpb.Cluster_RING_HASH:
+		if !env.RingHashSupport {
+			return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster)
+		}
+		rhc := cluster.GetRingHashLbConfig()
+		if rhc.GetHashFunction() != v3clusterpb.Cluster_RingHashLbConfig_XX_HASH {
+			return ClusterUpdate{}, fmt.Errorf("unsupported ring_hash hash function %v in response: %+v", rhc.GetHashFunction(), cluster)
+		}
+		// Minimum defaults to 1024 entries, and is limited to 8M entries.
+		// Maximum defaults to 8M entries, and is limited to 8M entries.
+		var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize
+		if min := rhc.GetMinimumRingSize(); min != nil {
+			if min.GetValue() > ringHashSizeUpperBound {
+				return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash minimum ring size %v in response: %+v", min.GetValue(), cluster)
+			}
+			minSize = min.GetValue()
+		}
+		if max := rhc.GetMaximumRingSize(); max != nil {
+			if max.GetValue() > ringHashSizeUpperBound {
+				return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash maximum ring size %v in response: %+v", max.GetValue(), cluster)
+			}
+			maxSize = max.GetValue()
+		}
+		if minSize > maxSize {
+			return ClusterUpdate{}, fmt.Errorf("ring_hash config min size %v is greater than max %v", minSize, maxSize)
+		}
+		lbPolicy = &ClusterLBPolicyRingHash{MinimumRingSize: minSize, MaximumRingSize: maxSize}
+	default:
+		return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster)
+	}
+
+	// Process security configuration received from the control plane iff the
+	// corresponding environment variable is set.
+	var sc *SecurityConfig
+	if env.ClientSideSecuritySupport {
+		var err error
+		if sc, err = securityConfigFromCluster(cluster); err != nil {
+			return ClusterUpdate{}, err
+		}
+	}
+
+	ret := ClusterUpdate{
+		ClusterName: cluster.GetName(),
+		EnableLRS:   cluster.GetLrsServer().GetSelf() != nil,
+		SecurityCfg: sc,
+		MaxRequests: circuitBreakersFromCluster(cluster),
+		LBPolicy:    lbPolicy,
+	}
+
+	// Validate and set cluster type from the response.
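+	// Only three cluster types are supported: EDS (the common case), and
+	// LOGICAL_DNS and aggregate clusters, both of which are gated behind an
+	// environment variable.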
+	switch {
+	case cluster.GetType() == v3clusterpb.Cluster_EDS:
+		if cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil {
+			return ClusterUpdate{}, fmt.Errorf("unexpected edsConfig in response: %+v", cluster)
+		}
+		ret.ClusterType = ClusterTypeEDS
+		ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName()
+		return ret, nil
+	case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS:
+		if !env.AggregateAndDNSSupportEnv {
+			return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster)
+		}
+		ret.ClusterType = ClusterTypeLogicalDNS
+		dnsHN, err := dnsHostNameFromCluster(cluster)
+		if err != nil {
+			return ClusterUpdate{}, err
+		}
+		ret.DNSHostName = dnsHN
+		return ret, nil
+	case cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate":
+		if !env.AggregateAndDNSSupportEnv {
+			return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster)
+		}
+		clusters := &v3aggregateclusterpb.ClusterConfig{}
+		if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil {
+			return ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err)
+		}
+		ret.ClusterType = ClusterTypeAggregate
+		ret.PrioritizedClusterNames = clusters.Clusters
+		return ret, nil
+	default:
+		return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster)
+	}
+}
+
+// dnsHostNameFromCluster extracts the DNS host name from the cluster's load
+// assignment.
+//
+// There should be exactly one locality, with one endpoint, whose address
+// contains the address and port.
+func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) {
+	loadAssignment := cluster.GetLoadAssignment()
+	if loadAssignment == nil {
+		return "", fmt.Errorf("load_assignment not present for LOGICAL_DNS cluster")
+	}
+	if len(loadAssignment.GetEndpoints()) != 1 {
+		return "", fmt.Errorf("load_assignment for LOGICAL_DNS cluster must have exactly one locality, got: %+v", loadAssignment)
+	}
+	endpoints := loadAssignment.GetEndpoints()[0].GetLbEndpoints()
+	if len(endpoints) != 1 {
+		return "", fmt.Errorf("locality for LOGICAL_DNS cluster must have exactly one endpoint, got: %+v", endpoints)
+	}
+	endpoint := endpoints[0].GetEndpoint()
+	if endpoint == nil {
+		return "", fmt.Errorf("endpoint for LOGICAL_DNS cluster not set")
+	}
+	socketAddr := endpoint.GetAddress().GetSocketAddress()
+	if socketAddr == nil {
+		return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster not set")
+	}
+	if socketAddr.GetResolverName() != "" {
+		return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster has an unexpected custom resolver name: %v", socketAddr.GetResolverName())
+	}
+	host := socketAddr.GetAddress()
+	if host == "" {
+		return "", fmt.Errorf("host for endpoint for LOGICAL_DNS cluster not set")
+	}
+	port := socketAddr.GetPortValue()
+	if port == 0 {
+		return "", fmt.Errorf("port for endpoint for LOGICAL_DNS cluster not set")
+	}
+	return net.JoinHostPort(host, strconv.Itoa(int(port))), nil
+}
+
+// securityConfigFromCluster extracts the relevant security configuration from
+// the received Cluster resource.
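+// A non-nil error returned from here (or from the helpers below) causes the
+// whole Cluster resource to be rejected, and contributes to a NACK of the
+// containing response.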
+func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) {
+	if tsm := cluster.GetTransportSocketMatches(); len(tsm) != 0 {
+		return nil, fmt.Errorf("unsupported transport_socket_matches field is non-empty: %+v", tsm)
+	}
+	// The Cluster resource contains a `transport_socket` field, which contains
+	// a oneof `typed_config` field of type `protobuf.Any`. The any proto
+	// contains a marshaled representation of an `UpstreamTlsContext` message.
+	ts := cluster.GetTransportSocket()
+	if ts == nil {
+		return nil, nil
+	}
+	if name := ts.GetName(); name != transportSocketName {
+		return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name)
+	}
+	any := ts.GetTypedConfig()
+	if any == nil || any.TypeUrl != version.V3UpstreamTLSContextURL {
+		return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", any.TypeUrl)
+	}
+	upstreamCtx := &v3tlspb.UpstreamTlsContext{}
+	if err := proto.Unmarshal(any.GetValue(), upstreamCtx); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal UpstreamTlsContext in CDS response: %v", err)
+	}
+	// The following fields from `UpstreamTlsContext` are ignored:
+	// - sni
+	// - allow_renegotiation
+	// - max_session_keys
+	if upstreamCtx.GetCommonTlsContext() == nil {
+		return nil, errors.New("UpstreamTlsContext in CDS response does not contain a CommonTlsContext")
+	}
+
+	return securityConfigFromCommonTLSContext(upstreamCtx.GetCommonTlsContext(), false)
+}
+
+// common is expected to be not nil.
+// The `alpn_protocols` field is ignored.
+func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) {
+	if common.GetTlsParams() != nil {
+		return nil, fmt.Errorf("unsupported tls_params field in CommonTlsContext message: %+v", common)
+	}
+	if common.GetCustomHandshaker() != nil {
+		return nil, fmt.Errorf("unsupported custom_handshaker field in CommonTlsContext message: %+v", common)
+	}
+
+	// For now, if we can't get a valid security config from the new fields, we
+	// fall back to the old deprecated fields.
+	// TODO: Drop support for deprecated fields. NACK if err != nil here.
+	sc, _ := securityConfigFromCommonTLSContextUsingNewFields(common, server)
+	if sc == nil || sc.Equal(&SecurityConfig{}) {
+		var err error
+		sc, err = securityConfigFromCommonTLSContextWithDeprecatedFields(common, server)
+		if err != nil {
+			return nil, err
+		}
+	}
+	// sc == nil is a valid case where the control plane has not sent us any
+	// security configuration. xDS creds will use fallback creds.
+	if sc != nil {
+		if server {
+			if sc.IdentityInstanceName == "" {
+				return nil, errors.New("security configuration on the server-side does not contain identity certificate provider instance name")
+			}
+		} else {
+			if sc.RootInstanceName == "" {
+				return nil, errors.New("security configuration on the client-side does not contain root certificate provider instance name")
+			}
+		}
+	}
+	return sc, nil
+}
+
+func securityConfigFromCommonTLSContextWithDeprecatedFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) {
+	// The `CommonTlsContext` contains a
+	// `tls_certificate_certificate_provider_instance` field of type
+	// `CertificateProviderInstance`, which contains the provider instance name
+	// and the certificate name to fetch identity certs.
+ sc := &SecurityConfig{} + if identity := common.GetTlsCertificateCertificateProviderInstance(); identity != nil { + sc.IdentityInstanceName = identity.GetInstanceName() + sc.IdentityCertName = identity.GetCertificateName() + } + + // The `CommonTlsContext` contains a `validation_context_type` field which + // is a oneof. We can get the values that we are interested in from two of + // those possible values: + // - combined validation context: + // - contains a default validation context which holds the list of + // matchers for accepted SANs. + // - contains certificate provider instance configuration + // - certificate provider instance configuration + // - in this case, we do not get a list of accepted SANs. + switch t := common.GetValidationContextType().(type) { + case *v3tlspb.CommonTlsContext_CombinedValidationContext: + combined := common.GetCombinedValidationContext() + var matchers []matcher.StringMatcher + if def := combined.GetDefaultValidationContext(); def != nil { + for _, m := range def.GetMatchSubjectAltNames() { + matcher, err := matcher.StringMatcherFromProto(m) + if err != nil { + return nil, err + } + matchers = append(matchers, matcher) + } + } + if server && len(matchers) != 0 { + return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common) + } + sc.SubjectAltNameMatchers = matchers + if pi := combined.GetValidationContextCertificateProviderInstance(); pi != nil { + sc.RootInstanceName = pi.GetInstanceName() + sc.RootCertName = pi.GetCertificateName() + } + case *v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance: + pi := common.GetValidationContextCertificateProviderInstance() + sc.RootInstanceName = pi.GetInstanceName() + sc.RootCertName = pi.GetCertificateName() + case nil: + // It is valid for the validation context to be nil on the server side. + default: + return nil, fmt.Errorf("validation context contains unexpected type: %T", t) + } + return sc, nil +} + +// gRFC A29 https://github.com/grpc/proposal/blob/master/A29-xds-tls-security.md +// specifies the new way to fetch security configuration and says the following: +// +// Although there are various ways to obtain certificates as per this proto +// (which are supported by Envoy), gRPC supports only one of them and that is +// the `CertificateProviderPluginInstance` proto. +// +// This helper function attempts to fetch security configuration from the +// `CertificateProviderPluginInstance` message, given a CommonTlsContext. +func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + // The `tls_certificate_provider_instance` field of type + // `CertificateProviderPluginInstance` is used to fetch the identity + // certificate provider. 
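+	// If that field is absent while one of the unsupported legacy identity
+	// fields is set, the resource is rejected below; if everything is absent,
+	// the returned config simply carries empty identity fields.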
+ sc := &SecurityConfig{} + identity := common.GetTlsCertificateProviderInstance() + if identity == nil && len(common.GetTlsCertificates()) != 0 { + return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificates is set in CommonTlsContext message: %+v", common) + } + if identity == nil && common.GetTlsCertificateSdsSecretConfigs() != nil { + return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificate_sds_secret_configs is set in CommonTlsContext message: %+v", common) + } + sc.IdentityInstanceName = identity.GetInstanceName() + sc.IdentityCertName = identity.GetCertificateName() + + // The `CommonTlsContext` contains a oneof field `validation_context_type`, + // which contains the `CertificateValidationContext` message in one of the + // following ways: + // - `validation_context` field + // - this is directly of type `CertificateValidationContext` + // - `combined_validation_context` field + // - this is of type `CombinedCertificateValidationContext` and contains + // a `default validation context` field of type + // `CertificateValidationContext` + // + // The `CertificateValidationContext` message has the following fields that + // we are interested in: + // - `ca_certificate_provider_instance` + // - this is of type `CertificateProviderPluginInstance` + // - `match_subject_alt_names` + // - this is a list of string matchers + // + // The `CertificateProviderPluginInstance` message contains two fields + // - instance_name + // - this is the certificate provider instance name to be looked up in + // the bootstrap configuration + // - certificate_name + // - this is an opaque name passed to the certificate provider + var validationCtx *v3tlspb.CertificateValidationContext + switch typ := common.GetValidationContextType().(type) { + case *v3tlspb.CommonTlsContext_ValidationContext: + validationCtx = common.GetValidationContext() + case *v3tlspb.CommonTlsContext_CombinedValidationContext: + validationCtx = common.GetCombinedValidationContext().GetDefaultValidationContext() + case nil: + // It is valid for the validation context to be nil on the server side. + return sc, nil + default: + return nil, fmt.Errorf("validation context contains unexpected type: %T", typ) + } + // If we get here, it means that the `CertificateValidationContext` message + // was found through one of the supported ways. It is an error if the + // validation context is specified, but it does not contain the + // ca_certificate_provider_instance field which contains information about + // the certificate provider to be used for the root certificates. 
+	if validationCtx.GetCaCertificateProviderInstance() == nil {
+		return nil, fmt.Errorf("expected field ca_certificate_provider_instance is missing in CommonTlsContext message: %+v", common)
+	}
+	// The following fields are ignored:
+	// - trusted_ca
+	// - watched_directory
+	// - allow_expired_certificate
+	// - trust_chain_verification
+	switch {
+	case len(validationCtx.GetVerifyCertificateSpki()) != 0:
+		return nil, fmt.Errorf("unsupported verify_certificate_spki field in CommonTlsContext message: %+v", common)
+	case len(validationCtx.GetVerifyCertificateHash()) != 0:
+		return nil, fmt.Errorf("unsupported verify_certificate_hash field in CommonTlsContext message: %+v", common)
+	case validationCtx.GetRequireSignedCertificateTimestamp().GetValue():
+		return nil, fmt.Errorf("unsupported require_signed_certificate_timestamp field in CommonTlsContext message: %+v", common)
+	case validationCtx.GetCrl() != nil:
+		return nil, fmt.Errorf("unsupported crl field in CommonTlsContext message: %+v", common)
+	case validationCtx.GetCustomValidatorConfig() != nil:
+		return nil, fmt.Errorf("unsupported custom_validator_config field in CommonTlsContext message: %+v", common)
+	}
+
+	if rootProvider := validationCtx.GetCaCertificateProviderInstance(); rootProvider != nil {
+		sc.RootInstanceName = rootProvider.GetInstanceName()
+		sc.RootCertName = rootProvider.GetCertificateName()
+	}
+	var matchers []matcher.StringMatcher
+	for _, m := range validationCtx.GetMatchSubjectAltNames() {
+		matcher, err := matcher.StringMatcherFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		matchers = append(matchers, matcher)
+	}
+	if server && len(matchers) != 0 {
+		return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common)
+	}
+	sc.SubjectAltNameMatchers = matchers
+	return sc, nil
+}
+
+// circuitBreakersFromCluster extracts the circuit breakers configuration from
+// the received cluster resource. Returns nil if no CircuitBreakers or no
+// Thresholds in CircuitBreakers.
+func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 {
+	for _, threshold := range cluster.GetCircuitBreakers().GetThresholds() {
+		if threshold.GetPriority() != v3corepb.RoutingPriority_DEFAULT {
+			continue
+		}
+		maxRequestsPb := threshold.GetMaxRequests()
+		if maxRequestsPb == nil {
+			return nil
+		}
+		maxRequests := maxRequestsPb.GetValue()
+		return &maxRequests
+	}
+	return nil
+}
diff --git a/xds/internal/xdsclient/cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go
similarity index 99%
rename from xds/internal/xdsclient/cds_test.go
rename to xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go
index 21e3b05b9089..3a56965bdc4e 100644
--- a/xds/internal/xdsclient/cds_test.go
+++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2020 gRPC authors.
+ * Copyright 2021 gRPC authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,10 +13,9 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
- * */ -package xdsclient +package xdsresource import ( "regexp" diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go new file mode 100644 index 000000000000..f1774dedae43 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -0,0 +1,131 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "fmt" + "net" + "strconv" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal" + "google.golang.org/protobuf/types/known/anypb" +) + +// UnmarshalEndpoints processes resources received in an EDS response, +// validates them, and transforms them into a native struct which contains only +// fields we are interested in. +func UnmarshalEndpoints(opts *UnmarshalOptions) (map[string]EndpointsUpdateErrTuple, UpdateMetadata, error) { + update := make(map[string]EndpointsUpdateErrTuple) + md, err := processAllResources(opts, update) + return update, md, err +} + +func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, EndpointsUpdate, error) { + if !IsEndpointsResource(r.GetTypeUrl()) { + return "", EndpointsUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + + cla := &v3endpointpb.ClusterLoadAssignment{} + if err := proto.Unmarshal(r.GetValue(), cla); err != nil { + return "", EndpointsUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, pretty.ToJSON(cla)) + + u, err := parseEDSRespProto(cla) + if err != nil { + return cla.GetClusterName(), EndpointsUpdate{}, err + } + u.Raw = r + return cla.GetClusterName(), u, nil +} + +func parseAddress(socketAddress *v3corepb.SocketAddress) string { + return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue()))) +} + +func parseDropPolicy(dropPolicy *v3endpointpb.ClusterLoadAssignment_Policy_DropOverload) OverloadDropConfig { + percentage := dropPolicy.GetDropPercentage() + var ( + numerator = percentage.GetNumerator() + denominator uint32 + ) + switch percentage.GetDenominator() { + case v3typepb.FractionalPercent_HUNDRED: + denominator = 100 + case v3typepb.FractionalPercent_TEN_THOUSAND: + denominator = 10000 + case v3typepb.FractionalPercent_MILLION: + denominator = 1000000 + } + return OverloadDropConfig{ + Category: dropPolicy.GetCategory(), + Numerator: numerator, + Denominator: denominator, + } +} + +func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint) []Endpoint { + endpoints := make([]Endpoint, 0, len(lbEndpoints)) + for _, lbEndpoint := range lbEndpoints { + 
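+		// Only the socket address, health status and load balancing weight of
+		// each endpoint are carried over into the native Endpoint struct.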
endpoints = append(endpoints, Endpoint{ + HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), + Address: parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()), + Weight: lbEndpoint.GetLoadBalancingWeight().GetValue(), + }) + } + return endpoints +} + +func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, error) { + ret := EndpointsUpdate{} + for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { + ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) + } + priorities := make(map[uint32]struct{}) + for _, locality := range m.Endpoints { + l := locality.GetLocality() + if l == nil { + return EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) + } + lid := internal.LocalityID{ + Region: l.Region, + Zone: l.Zone, + SubZone: l.SubZone, + } + priority := locality.GetPriority() + priorities[priority] = struct{}{} + ret.Localities = append(ret.Localities, Locality{ + ID: lid, + Endpoints: parseEndpoints(locality.GetLbEndpoints()), + Weight: locality.GetLoadBalancingWeight().GetValue(), + Priority: priority, + }) + } + for i := 0; i < len(priorities); i++ { + if _, ok := priorities[uint32(i)]; !ok { + return EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) + } + } + return ret, nil +} diff --git a/xds/internal/xdsclient/eds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go similarity index 99% rename from xds/internal/xdsclient/eds_test.go rename to xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go index d0af8a988d83..770dbf4c5253 100644 --- a/xds/internal/xdsclient/eds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xdsclient +package xdsresource import ( "fmt" diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go new file mode 100644 index 000000000000..3a1d0f63156f --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go @@ -0,0 +1,297 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package xdsresource
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+
+	v1udpatypepb "github.com/cncf/udpa/go/udpa/type/v1"
+	v3cncftypepb "github.com/cncf/xds/go/xds/type/v3"
+	v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
+	v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
+	v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/xds/internal/httpfilter"
+	"google.golang.org/grpc/xds/internal/version"
+	"google.golang.org/protobuf/types/known/anypb"
+)
+
+// UnmarshalListener processes resources received in an LDS response, validates
+// them, and transforms them into a native struct which contains only fields we
+// are interested in.
+func UnmarshalListener(opts *UnmarshalOptions) (map[string]ListenerUpdateErrTuple, UpdateMetadata, error) {
+	update := make(map[string]ListenerUpdateErrTuple)
+	md, err := processAllResources(opts, update)
+	return update, md, err
+}
+
+func unmarshalListenerResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ListenerUpdate, error) {
+	if !IsListenerResource(r.GetTypeUrl()) {
+		return "", ListenerUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl())
+	}
+	// TODO: Pass version.TransportAPI instead of relying upon the type URL
+	v2 := r.GetTypeUrl() == version.V2ListenerURL
+	lis := &v3listenerpb.Listener{}
+	if err := proto.Unmarshal(r.GetValue(), lis); err != nil {
+		return "", ListenerUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err)
+	}
+	logger.Infof("Resource with name: %v, type: %T, contains: %v", lis.GetName(), lis, pretty.ToJSON(lis))
+
+	lu, err := processListener(lis, logger, v2)
+	if err != nil {
+		return lis.GetName(), ListenerUpdate{}, err
+	}
+	if f != nil {
+		if err := f(*lu); err != nil {
+			return lis.GetName(), ListenerUpdate{}, err
+		}
+	}
+	lu.Raw = r
+	return lis.GetName(), *lu, nil
+}
+
+func processListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger, v2 bool) (*ListenerUpdate, error) {
+	if lis.GetApiListener() != nil {
+		return processClientSideListener(lis, logger, v2)
+	}
+	return processServerSideListener(lis, logger)
+}
+
+// processClientSideListener checks if the provided Listener proto meets
+// the expected criteria. If so, it returns a non-empty routeConfigName.
+func processClientSideListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger, v2 bool) (*ListenerUpdate, error) {
+	update := &ListenerUpdate{}
+
+	apiLisAny := lis.GetApiListener().GetApiListener()
+	if !IsHTTPConnManagerResource(apiLisAny.GetTypeUrl()) {
+		return nil, fmt.Errorf("unexpected resource type: %q", apiLisAny.GetTypeUrl())
+	}
+	apiLis := &v3httppb.HttpConnectionManager{}
+	if err := proto.Unmarshal(apiLisAny.GetValue(), apiLis); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal api_listener: %v", err)
+	}
+	// "HttpConnectionManager.xff_num_trusted_hops must be unset or zero and
+	// HttpConnectionManager.original_ip_detection_extensions must be empty. If
+	// either field has an incorrect value, the Listener must be NACKed." - A41
+	if apiLis.XffNumTrustedHops != 0 {
+		return nil, fmt.Errorf("xff_num_trusted_hops must be unset or zero %+v", apiLis)
+	}
+	if len(apiLis.OriginalIpDetectionExtensions) != 0 {
+		return nil, fmt.Errorf("original_ip_detection_extensions must be empty %+v", apiLis)
+	}
+
+	switch apiLis.RouteSpecifier.(type) {
+	case *v3httppb.HttpConnectionManager_Rds:
+		if apiLis.GetRds().GetConfigSource().GetAds() == nil {
+			return nil, fmt.Errorf("ConfigSource is not ADS: %+v", lis)
+		}
+		name := apiLis.GetRds().GetRouteConfigName()
+		if name == "" {
+			return nil, fmt.Errorf("empty route_config_name: %+v", lis)
+		}
+		update.RouteConfigName = name
+	case *v3httppb.HttpConnectionManager_RouteConfig:
+		routeU, err := generateRDSUpdateFromRouteConfiguration(apiLis.GetRouteConfig(), logger, v2)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err)
+		}
+		update.InlineRouteConfig = &routeU
+	case nil:
+		return nil, fmt.Errorf("no RouteSpecifier: %+v", apiLis)
+	default:
+		return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", apiLis.RouteSpecifier)
+	}
+
+	if v2 {
+		return update, nil
+	}
+
+	// The following checks and fields only apply to xDS protocol versions v3+.
+
+	update.MaxStreamDuration = apiLis.GetCommonHttpProtocolOptions().GetMaxStreamDuration().AsDuration()
+
+	var err error
+	if update.HTTPFilters, err = processHTTPFilters(apiLis.GetHttpFilters(), false); err != nil {
+		return nil, err
+	}
+
+	return update, nil
+}
+
+func unwrapHTTPFilterConfig(config *anypb.Any) (proto.Message, string, error) {
+	switch {
+	case ptypes.Is(config, &v3cncftypepb.TypedStruct{}):
+		// The real type name is inside the new TypedStruct message.
+		s := new(v3cncftypepb.TypedStruct)
+		if err := ptypes.UnmarshalAny(config, s); err != nil {
+			return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err)
+		}
+		return s, s.GetTypeUrl(), nil
+	case ptypes.Is(config, &v1udpatypepb.TypedStruct{}):
+		// The real type name is inside the old TypedStruct message.
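+		// (The older udpa.type.v1 TypedStruct and the newer xds.type.v3
+		// TypedStruct have the same shape: an opaque struct value alongside
+		// the type URL of the config it wraps.)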
+ s := new(v1udpatypepb.TypedStruct) + if err := ptypes.UnmarshalAny(config, s); err != nil { + return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) + } + return s, s.GetTypeUrl(), nil + default: + return config, config.GetTypeUrl(), nil + } +} + +func validateHTTPFilterConfig(cfg *anypb.Any, lds, optional bool) (httpfilter.Filter, httpfilter.FilterConfig, error) { + config, typeURL, err := unwrapHTTPFilterConfig(cfg) + if err != nil { + return nil, nil, err + } + filterBuilder := httpfilter.Get(typeURL) + if filterBuilder == nil { + if optional { + return nil, nil, nil + } + return nil, nil, fmt.Errorf("no filter implementation found for %q", typeURL) + } + parseFunc := filterBuilder.ParseFilterConfig + if !lds { + parseFunc = filterBuilder.ParseFilterConfigOverride + } + filterConfig, err := parseFunc(config) + if err != nil { + return nil, nil, fmt.Errorf("error parsing config for filter %q: %v", typeURL, err) + } + return filterBuilder, filterConfig, nil +} + +func processHTTPFilterOverrides(cfgs map[string]*anypb.Any) (map[string]httpfilter.FilterConfig, error) { + if len(cfgs) == 0 { + return nil, nil + } + m := make(map[string]httpfilter.FilterConfig) + for name, cfg := range cfgs { + optional := false + s := new(v3routepb.FilterConfig) + if ptypes.Is(cfg, s) { + if err := ptypes.UnmarshalAny(cfg, s); err != nil { + return nil, fmt.Errorf("filter override %q: error unmarshalling FilterConfig: %v", name, err) + } + cfg = s.GetConfig() + optional = s.GetIsOptional() + } + + httpFilter, config, err := validateHTTPFilterConfig(cfg, false, optional) + if err != nil { + return nil, fmt.Errorf("filter override %q: %v", name, err) + } + if httpFilter == nil { + // Optional configs are ignored. + continue + } + m[name] = config + } + return m, nil +} + +func processHTTPFilters(filters []*v3httppb.HttpFilter, server bool) ([]HTTPFilter, error) { + ret := make([]HTTPFilter, 0, len(filters)) + seenNames := make(map[string]bool, len(filters)) + for _, filter := range filters { + name := filter.GetName() + if name == "" { + return nil, errors.New("filter missing name field") + } + if seenNames[name] { + return nil, fmt.Errorf("duplicate filter name %q", name) + } + seenNames[name] = true + + httpFilter, config, err := validateHTTPFilterConfig(filter.GetTypedConfig(), true, filter.GetIsOptional()) + if err != nil { + return nil, err + } + if httpFilter == nil { + // Optional configs are ignored. + continue + } + if server { + if _, ok := httpFilter.(httpfilter.ServerInterceptorBuilder); !ok { + if filter.GetIsOptional() { + continue + } + return nil, fmt.Errorf("HTTP filter %q not supported server-side", name) + } + } else if _, ok := httpFilter.(httpfilter.ClientInterceptorBuilder); !ok { + if filter.GetIsOptional() { + continue + } + return nil, fmt.Errorf("HTTP filter %q not supported client-side", name) + } + + // Save name/config + ret = append(ret, HTTPFilter{Name: name, Filter: httpFilter, Config: config}) + } + // "Validation will fail if a terminal filter is not the last filter in the + // chain or if a non-terminal filter is the last filter in the chain." 
- A39 + if len(ret) == 0 { + return nil, fmt.Errorf("http filters list is empty") + } + var i int + for ; i < len(ret)-1; i++ { + if ret[i].Filter.IsTerminal() { + return nil, fmt.Errorf("http filter %q is a terminal filter but it is not last in the filter chain", ret[i].Name) + } + } + if !ret[i].Filter.IsTerminal() { + return nil, fmt.Errorf("http filter %q is not a terminal filter", ret[len(ret)-1].Name) + } + return ret, nil +} + +func processServerSideListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger) (*ListenerUpdate, error) { + if n := len(lis.ListenerFilters); n != 0 { + return nil, fmt.Errorf("unsupported field 'listener_filters' contains %d entries", n) + } + if useOrigDst := lis.GetUseOriginalDst(); useOrigDst != nil && useOrigDst.GetValue() { + return nil, errors.New("unsupported field 'use_original_dst' is present and set to true") + } + addr := lis.GetAddress() + if addr == nil { + return nil, fmt.Errorf("no address field in LDS response: %+v", lis) + } + sockAddr := addr.GetSocketAddress() + if sockAddr == nil { + return nil, fmt.Errorf("no socket_address field in LDS response: %+v", lis) + } + lu := &ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: sockAddr.GetAddress(), + Port: strconv.Itoa(int(sockAddr.GetPortValue())), + }, + } + + fcMgr, err := NewFilterChainManager(lis, logger) + if err != nil { + return nil, err + } + lu.InboundListenerCfg.FilterChains = fcMgr + return lu, nil +} diff --git a/xds/internal/xdsclient/lds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go similarity index 99% rename from xds/internal/xdsclient/lds_test.go rename to xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go index f889e380eab3..138a8928a684 100644 --- a/xds/internal/xdsclient/lds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xdsclient +package xdsresource import ( "fmt" diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go new file mode 100644 index 000000000000..0642500f303b --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -0,0 +1,373 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package xdsresource
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+	"time"
+
+	v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
+	v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/internal/xds/env"
+	"google.golang.org/grpc/xds/internal/version"
+	"google.golang.org/protobuf/types/known/anypb"
+)
+
+// UnmarshalRouteConfig processes resources received in an RDS response,
+// validates them, and transforms them into a native struct which contains only
+// fields we are interested in.
+func UnmarshalRouteConfig(opts *UnmarshalOptions) (map[string]RouteConfigUpdateErrTuple, UpdateMetadata, error) {
+	update := make(map[string]RouteConfigUpdateErrTuple)
+	md, err := processAllResources(opts, update)
+	return update, md, err
+}
+
+func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, RouteConfigUpdate, error) {
+	if !IsRouteConfigResource(r.GetTypeUrl()) {
+		return "", RouteConfigUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl())
+	}
+	rc := &v3routepb.RouteConfiguration{}
+	if err := proto.Unmarshal(r.GetValue(), rc); err != nil {
+		return "", RouteConfigUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err)
+	}
+	logger.Infof("Resource with name: %v, type: %T, contains: %v.", rc.GetName(), rc, pretty.ToJSON(rc))
+
+	// TODO: Pass version.TransportAPI instead of relying upon the type URL
+	v2 := r.GetTypeUrl() == version.V2RouteConfigURL
+	u, err := generateRDSUpdateFromRouteConfiguration(rc, logger, v2)
+	if err != nil {
+		return rc.GetName(), RouteConfigUpdate{}, err
+	}
+	u.Raw = r
+	return rc.GetName(), u, nil
+}
+
+// generateRDSUpdateFromRouteConfiguration checks if the provided
+// RouteConfiguration meets the expected criteria. If so, it returns a
+// RouteConfigUpdate with nil error.
+//
+// A RouteConfiguration resource is considered valid only if it contains a
+// VirtualHost whose domain field matches the server name from the URI passed
+// to the gRPC channel, and it contains a clusterName or a weighted cluster.
+//
+// The RouteConfiguration includes a list of virtualHosts, which may have zero
+// or more elements. We are interested in the elements whose domains field
+// matches the server name specified in the "xds:" URI. For each virtual host,
+// the routes field carries the match conditions and actions which are
+// validated and converted below; inside each route's action, the cluster
+// field contains the clusterName or weighted clusters we are looking for.
+func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, logger *grpclog.PrefixLogger, v2 bool) (RouteConfigUpdate, error) { + vhs := make([]*VirtualHost, 0, len(rc.GetVirtualHosts())) + for _, vh := range rc.GetVirtualHosts() { + routes, err := routesProtoToSlice(vh.Routes, logger, v2) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) + } + rc, err := generateRetryConfig(vh.GetRetryPolicy()) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) + } + vhOut := &VirtualHost{ + Domains: vh.GetDomains(), + Routes: routes, + RetryConfig: rc, + } + if !v2 { + cfgs, err := processHTTPFilterOverrides(vh.GetTypedPerFilterConfig()) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("virtual host %+v: %v", vh, err) + } + vhOut.HTTPFilterConfigOverride = cfgs + } + vhs = append(vhs, vhOut) + } + return RouteConfigUpdate{VirtualHosts: vhs}, nil +} + +func generateRetryConfig(rp *v3routepb.RetryPolicy) (*RetryConfig, error) { + if !env.RetrySupport || rp == nil { + return nil, nil + } + + cfg := &RetryConfig{RetryOn: make(map[codes.Code]bool)} + for _, s := range strings.Split(rp.GetRetryOn(), ",") { + switch strings.TrimSpace(strings.ToLower(s)) { + case "cancelled": + cfg.RetryOn[codes.Canceled] = true + case "deadline-exceeded": + cfg.RetryOn[codes.DeadlineExceeded] = true + case "internal": + cfg.RetryOn[codes.Internal] = true + case "resource-exhausted": + cfg.RetryOn[codes.ResourceExhausted] = true + case "unavailable": + cfg.RetryOn[codes.Unavailable] = true + } + } + + if rp.NumRetries == nil { + cfg.NumRetries = 1 + } else { + cfg.NumRetries = rp.GetNumRetries().Value + if cfg.NumRetries < 1 { + return nil, fmt.Errorf("retry_policy.num_retries = %v; must be >= 1", cfg.NumRetries) + } + } + + backoff := rp.GetRetryBackOff() + if backoff == nil { + cfg.RetryBackoff.BaseInterval = 25 * time.Millisecond + } else { + cfg.RetryBackoff.BaseInterval = backoff.GetBaseInterval().AsDuration() + if cfg.RetryBackoff.BaseInterval <= 0 { + return nil, fmt.Errorf("retry_policy.base_interval = %v; must be > 0", cfg.RetryBackoff.BaseInterval) + } + } + if max := backoff.GetMaxInterval(); max == nil { + cfg.RetryBackoff.MaxInterval = 10 * cfg.RetryBackoff.BaseInterval + } else { + cfg.RetryBackoff.MaxInterval = max.AsDuration() + if cfg.RetryBackoff.MaxInterval <= 0 { + return nil, fmt.Errorf("retry_policy.max_interval = %v; must be > 0", cfg.RetryBackoff.MaxInterval) + } + } + + if len(cfg.RetryOn) == 0 { + return &RetryConfig{}, nil + } + return cfg, nil +} + +func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, v2 bool) ([]*Route, error) { + var routesRet []*Route + for _, r := range routes { + match := r.GetMatch() + if match == nil { + return nil, fmt.Errorf("route %+v doesn't have a match", r) + } + + if len(match.GetQueryParameters()) != 0 { + // Ignore route with query parameters. 
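+			// Such a route is skipped rather than rejected: gRPC requests
+			// carry no query parameters, so the route could never match.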
+ logger.Warningf("route %+v has query parameter matchers, the route will be ignored", r) + continue + } + + pathSp := match.GetPathSpecifier() + if pathSp == nil { + return nil, fmt.Errorf("route %+v doesn't have a path specifier", r) + } + + var route Route + switch pt := pathSp.(type) { + case *v3routepb.RouteMatch_Prefix: + route.Prefix = &pt.Prefix + case *v3routepb.RouteMatch_Path: + route.Path = &pt.Path + case *v3routepb.RouteMatch_SafeRegex: + regex := pt.SafeRegex.GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) + } + route.Regex = re + default: + return nil, fmt.Errorf("route %+v has an unrecognized path specifier: %+v", r, pt) + } + + if caseSensitive := match.GetCaseSensitive(); caseSensitive != nil { + route.CaseInsensitive = !caseSensitive.Value + } + + for _, h := range match.GetHeaders() { + var header HeaderMatcher + switch ht := h.GetHeaderMatchSpecifier().(type) { + case *v3routepb.HeaderMatcher_ExactMatch: + header.ExactMatch = &ht.ExactMatch + case *v3routepb.HeaderMatcher_SafeRegexMatch: + regex := ht.SafeRegexMatch.GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) + } + header.RegexMatch = re + case *v3routepb.HeaderMatcher_RangeMatch: + header.RangeMatch = &Int64Range{ + Start: ht.RangeMatch.Start, + End: ht.RangeMatch.End, + } + case *v3routepb.HeaderMatcher_PresentMatch: + header.PresentMatch = &ht.PresentMatch + case *v3routepb.HeaderMatcher_PrefixMatch: + header.PrefixMatch = &ht.PrefixMatch + case *v3routepb.HeaderMatcher_SuffixMatch: + header.SuffixMatch = &ht.SuffixMatch + default: + return nil, fmt.Errorf("route %+v has an unrecognized header matcher: %+v", r, ht) + } + header.Name = h.GetName() + invert := h.GetInvertMatch() + header.InvertMatch = &invert + route.Headers = append(route.Headers, &header) + } + + if fr := match.GetRuntimeFraction(); fr != nil { + d := fr.GetDefaultValue() + n := d.GetNumerator() + switch d.GetDenominator() { + case v3typepb.FractionalPercent_HUNDRED: + n *= 10000 + case v3typepb.FractionalPercent_TEN_THOUSAND: + n *= 100 + case v3typepb.FractionalPercent_MILLION: + } + route.Fraction = &n + } + + switch r.GetAction().(type) { + case *v3routepb.Route_Route: + route.WeightedClusters = make(map[string]WeightedCluster) + action := r.GetRoute() + + // Hash Policies are only applicable for a Ring Hash LB. 
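+			// They are therefore parsed only when ring hash support is
+			// enabled via its environment variable.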
+			if env.RingHashSupport {
+				hp, err := hashPoliciesProtoToSlice(action.HashPolicy, logger)
+				if err != nil {
+					return nil, err
+				}
+				route.HashPolicies = hp
+			}
+
+			switch a := action.GetClusterSpecifier().(type) {
+			case *v3routepb.RouteAction_Cluster:
+				route.WeightedClusters[a.Cluster] = WeightedCluster{Weight: 1}
+			case *v3routepb.RouteAction_WeightedClusters:
+				wcs := a.WeightedClusters
+				var totalWeight uint32
+				for _, c := range wcs.Clusters {
+					w := c.GetWeight().GetValue()
+					if w == 0 {
+						continue
+					}
+					wc := WeightedCluster{Weight: w}
+					if !v2 {
+						cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig())
+						if err != nil {
+							return nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err)
+						}
+						wc.HTTPFilterConfigOverride = cfgs
+					}
+					route.WeightedClusters[c.GetName()] = wc
+					totalWeight += w
+				}
+				// envoy xds doc
+				// default TotalWeight https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto.html#envoy-v3-api-field-config-route-v3-weightedcluster-total-weight
+				wantTotalWeight := uint32(100)
+				if tw := wcs.GetTotalWeight(); tw != nil {
+					wantTotalWeight = tw.GetValue()
+				}
+				if totalWeight != wantTotalWeight {
+					return nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to the total weight, got: %v, expected total weight from response: %v", r, a, totalWeight, wantTotalWeight)
+				}
+				if totalWeight == 0 {
+					return nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a)
+				}
+			case *v3routepb.RouteAction_ClusterHeader:
+				continue
+			default:
+				return nil, fmt.Errorf("route %+v, has an unknown ClusterSpecifier: %+v", r, a)
+			}
+
+			msd := action.GetMaxStreamDuration()
+			// Prefer grpc_timeout_header_max, if set.
+			dur := msd.GetGrpcTimeoutHeaderMax()
+			if dur == nil {
+				dur = msd.GetMaxStreamDuration()
+			}
+			if dur != nil {
+				d := dur.AsDuration()
+				route.MaxStreamDuration = &d
+			}
+
+			var err error
+			route.RetryConfig, err = generateRetryConfig(action.GetRetryPolicy())
+			if err != nil {
+				return nil, fmt.Errorf("route %+v, action %+v: %v", r, action, err)
+			}
+
+			route.RouteAction = RouteActionRoute
+
+		case *v3routepb.Route_NonForwardingAction:
+			// Expected to be used on server side.
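+			// The action type is only recorded here; it is enforced where the
+			// route is used (see the xds/server.go change below, which fails
+			// RPCs matching a route whose action is not NonForwardingAction).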
+ route.RouteAction = RouteActionNonForwardingAction + default: + route.RouteAction = RouteActionUnsupported + } + + if !v2 { + cfgs, err := processHTTPFilterOverrides(r.GetTypedPerFilterConfig()) + if err != nil { + return nil, fmt.Errorf("route %+v: %v", r, err) + } + route.HTTPFilterConfigOverride = cfgs + } + routesRet = append(routesRet, &route) + } + return routesRet, nil +} + +func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy, logger *grpclog.PrefixLogger) ([]*HashPolicy, error) { + var hashPoliciesRet []*HashPolicy + for _, p := range policies { + policy := HashPolicy{Terminal: p.Terminal} + switch p.GetPolicySpecifier().(type) { + case *v3routepb.RouteAction_HashPolicy_Header_: + policy.HashPolicyType = HashPolicyTypeHeader + policy.HeaderName = p.GetHeader().GetHeaderName() + if rr := p.GetHeader().GetRegexRewrite(); rr != nil { + regex := rr.GetPattern().GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, fmt.Errorf("hash policy %+v contains an invalid regex %q", p, regex) + } + policy.Regex = re + policy.RegexSubstitution = rr.GetSubstitution() + } + case *v3routepb.RouteAction_HashPolicy_FilterState_: + if p.GetFilterState().GetKey() != "io.grpc.channel_id" { + logger.Infof("hash policy %+v contains an invalid key for filter state policy %q", p, p.GetFilterState().GetKey()) + continue + } + policy.HashPolicyType = HashPolicyTypeChannelID + default: + logger.Infof("hash policy %T is an unsupported hash policy", p.GetPolicySpecifier()) + continue + } + + hashPoliciesRet = append(hashPoliciesRet, &policy) + } + return hashPoliciesRet, nil +} diff --git a/xds/internal/xdsclient/rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go similarity index 99% rename from xds/internal/xdsclient/rds_test.go rename to xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index 8b419244d672..38a7e99a9ede 100644 --- a/xds/internal/xdsclient/rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xdsclient +package xdsresource import ( "fmt" diff --git a/xds/server.go b/xds/server.go index b36fa64b5008..28abaf84f5f8 100644 --- a/xds/server.go +++ b/xds/server.go @@ -42,6 +42,7 @@ import ( "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/server" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const serverPrefix = "[xds-server %p] " @@ -330,7 +331,7 @@ func (s *GRPCServer) GracefulStop() { func routeAndProcess(ctx context.Context) error { conn := transport.GetConnection(ctx) cw, ok := conn.(interface { - VirtualHosts() []xdsclient.VirtualHostWithInterceptors + VirtualHosts() []xdsresource.VirtualHostWithInterceptors }) if !ok { return errors.New("missing virtual hosts in incoming context") @@ -347,12 +348,12 @@ func routeAndProcess(ctx context.Context) error { // the RPC gets to this point, there will be a single, unambiguous authority // present in the header map. 
authority := md.Get(":authority") - vh := xdsclient.FindBestMatchingVirtualHostServer(authority[0], cw.VirtualHosts()) + vh := xdsresource.FindBestMatchingVirtualHostServer(authority[0], cw.VirtualHosts()) if vh == nil { return status.Error(codes.Unavailable, "the incoming RPC did not match a configured Virtual Host") } - var rwi *xdsclient.RouteWithInterceptors + var rwi *xdsresource.RouteWithInterceptors rpcInfo := iresolver.RPCInfo{ Context: ctx, Method: mn, @@ -361,7 +362,7 @@ func routeAndProcess(ctx context.Context) error { if r.M.Match(rpcInfo) { // "NonForwardingAction is expected for all Routes used on server-side; a route with an inappropriate action causes // RPCs matching that route to fail with UNAVAILABLE." - A36 - if r.RouteAction != xdsclient.RouteActionNonForwardingAction { + if r.RouteAction != xdsresource.RouteActionNonForwardingAction { return status.Error(codes.Unavailable, "the incoming RPC matched to a route that was not of action type non forwarding") } rwi = &r diff --git a/xds/server_test.go b/xds/server_test.go index 501b8ba76e20..492a2fa6d6ed 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -41,6 +41,7 @@ import ( "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" @@ -429,7 +430,7 @@ func (s) TestServeSuccess(t *testing.T) { // Push an error to the registered listener watch callback and make sure // that Serve does not return. - client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{}, xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "LDS resource not found")) + client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{}, xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "LDS resource not found")) sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() if _, err := serveDone.Receive(sCtx); err != context.DeadlineExceeded { @@ -447,14 +448,14 @@ func (s) TestServeSuccess(t *testing.T) { // Push a good LDS response, and wait for Serve() to be invoked on the // underlying grpc.Server. - fcm, err := xdsclient.NewFilterChainManager(listenerWithFilterChains) + fcm, err := xdsresource.NewFilterChainManager(listenerWithFilterChains, nil) if err != nil { t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) } addr, port := splitHostPort(lis.Addr().String()) - client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ + client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ RouteConfigName: "routeconfig", - InboundListenerCfg: &xdsclient.InboundListenerConfig{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ Address: addr, Port: port, FilterChains: fcm, @@ -476,9 +477,9 @@ func (s) TestServeSuccess(t *testing.T) { // Push an update to the registered listener watch callback with a Listener // resource whose host:port does not match the actual listening address and // port. This will push the listener to "not-serving" mode. 
- client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ + client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ RouteConfigName: "routeconfig", - InboundListenerCfg: &xdsclient.InboundListenerConfig{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ Address: "10.20.30.40", Port: "666", FilterChains: fcm, @@ -749,7 +750,7 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { // Push a good LDS response with security config, and wait for Serve() to be // invoked on the underlying grpc.Server. Also make sure that certificate // providers are not created. - fcm, err := xdsclient.NewFilterChainManager(&v3listenerpb.Listener{ + fcm, err := xdsresource.NewFilterChainManager(&v3listenerpb.Listener{ FilterChains: []*v3listenerpb.FilterChain{ { TransportSocket: &v3corepb.TransportSocket{ @@ -789,14 +790,14 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { }, }, }, - }) + }, nil) if err != nil { t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) } addr, port := splitHostPort(lis.Addr().String()) - client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ + client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ RouteConfigName: "routeconfig", - InboundListenerCfg: &xdsclient.InboundListenerConfig{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ Address: addr, Port: port, FilterChains: fcm, @@ -862,7 +863,7 @@ func (s) TestHandleListenerUpdate_ErrorUpdate(t *testing.T) { // Push an error to the registered listener watch callback and make sure // that Serve does not return. - client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{}, errors.New("LDS error")) + client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{}, errors.New("LDS error")) sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() if _, err := serveDone.Receive(sCtx); err != context.DeadlineExceeded {
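After this move, code inside grpc-go consumes the unmarshal functions through the new xdsresource package instead of xdsclient. The sketch below illustrates the call pattern for the relocated API; it is not part of the patch. The handleCDSResponse function and its printed output are hypothetical, the package is internal to grpc-go (so external modules cannot import it), and the Logger and UpdateValidator options are deliberately left unset, which the code above handles (the validator is nil-checked, and the prefix logger is believed to be nil-safe).

	// A hypothetical consumer inside grpc-go; names here are illustrative.
	package xdsclient

	import (
		"fmt"

		"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
		"google.golang.org/protobuf/types/known/anypb"
	)

	// handleCDSResponse sketches how a CDS response would be unmarshaled
	// after this change.
	func handleCDSResponse(version string, resources []*anypb.Any) {
		// UnmarshalCluster returns every resource in the response, valid or
		// invalid, keyed by resource name, plus metadata recording whether
		// the response should be ACKed or NACKed.
		updates, md, err := xdsresource.UnmarshalCluster(&xdsresource.UnmarshalOptions{
			Version:   version,
			Resources: resources,
		})
		if err != nil {
			// At least one resource was invalid; md.ErrState carries the
			// same combined error for the CSDS config dump.
			fmt.Println("NACKing response:", err)
		}
		for name, u := range updates {
			if u.Err != nil {
				fmt.Printf("resource %q rejected: %v\n", name, u.Err)
				continue
			}
			fmt.Printf("resource %q accepted, cluster type: %v\n", name, u.Update.ClusterType)
		}
		_ = md.Status // ServiceStatusACKed or ServiceStatusNACKed.
	}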