From f8b681389c74bddbaf694a3167b245b00a62493f Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Tue, 17 Jan 2023 17:45:30 +0800 Subject: [PATCH 01/14] move server definition to package protocols --- pkg/filters/grpcproxy/pool.go | 2 +- pkg/filters/grpcproxy/proxy.go | 5 ++ pkg/filters/grpcproxy/server.go | 64 ----------------- pkg/filters/proxy/basepool.go | 2 +- pkg/filters/proxy/loadbalance.go | 2 +- pkg/filters/proxy/pool.go | 2 +- pkg/filters/proxy/proxy.go | 4 ++ pkg/filters/proxy/server_test.go | 71 ------------------- pkg/{filters/proxy => protocols}/server.go | 28 ++++---- .../grpcproxy => protocols}/server_test.go | 30 ++++---- 10 files changed, 42 insertions(+), 168 deletions(-) delete mode 100644 pkg/filters/grpcproxy/server.go delete mode 100644 pkg/filters/proxy/server_test.go rename pkg/{filters/proxy => protocols}/server.go (71%) rename pkg/{filters/grpcproxy => protocols}/server_test.go (67%) diff --git a/pkg/filters/grpcproxy/pool.go b/pkg/filters/grpcproxy/pool.go index e285a40565..e93898b0e5 100644 --- a/pkg/filters/grpcproxy/pool.go +++ b/pkg/filters/grpcproxy/pool.go @@ -192,7 +192,7 @@ func (sp *ServerPool) LoadBalancer() LoadBalancer { func (sp *ServerPool) createLoadBalancer(servers []*Server) { for _, server := range servers { - server.checkAddrPattern() + server.CheckAddrPattern() } spec := sp.spec.LoadBalance diff --git a/pkg/filters/grpcproxy/proxy.go b/pkg/filters/grpcproxy/proxy.go index d5207863ec..3244eef72f 100644 --- a/pkg/filters/grpcproxy/proxy.go +++ b/pkg/filters/grpcproxy/proxy.go @@ -19,8 +19,10 @@ package grpcprxoy import ( "fmt" + "github.com/megaease/easegress/pkg/context" "github.com/megaease/easegress/pkg/filters" + "github.com/megaease/easegress/pkg/protocols" "github.com/megaease/easegress/pkg/protocols/grpcprot" "github.com/megaease/easegress/pkg/resilience" "github.com/megaease/easegress/pkg/supervisor" @@ -80,6 +82,9 @@ type ( filters.BaseSpec `json:",inline"` Pools []*ServerPoolSpec `json:"pools" jsonschema:"required"` } + + // Server is the backend server. + Server = protocols.Server ) // Validate validates Spec. diff --git a/pkg/filters/grpcproxy/server.go b/pkg/filters/grpcproxy/server.go deleted file mode 100644 index 5d1153f14d..0000000000 --- a/pkg/filters/grpcproxy/server.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package grpcprxoy - -import ( - "fmt" - "net" - "net/url" - "strings" -) - -// Server is proxy server. -type Server struct { - URL string `json:"url" jsonschema:"required,format=url"` - Tags []string `json:"tags" jsonschema:"omitempty,uniqueItems=true"` - Weight int `json:"weight" jsonschema:"omitempty,minimum=0,maximum=100"` - KeepHost bool `json:"keepHost" jsonschema:"omitempty,default=false"` - addrIsHostName bool -} - -// String implements the Stringer interface. 
-func (s *Server) String() string { - return fmt.Sprintf("%s,%v,%d", s.URL, s.Tags, s.Weight) -} - -// checkAddrPattern checks whether the server address is host name or ip:port, -// not all error cases are handled. -func (s *Server) checkAddrPattern() { - u, err := url.Parse(s.URL) - if err != nil { - return - } - host := u.Host - - square := strings.LastIndexByte(host, ']') - colon := strings.LastIndexByte(host, ':') - - // There is a port number, remove it. - if colon > square { - host = host[:colon] - } - - // IPv6 - if square != -1 && host[0] == '[' { - host = host[1:square] - } - - s.addrIsHostName = net.ParseIP(host) == nil -} diff --git a/pkg/filters/proxy/basepool.go b/pkg/filters/proxy/basepool.go index e493e21f83..fa46b98608 100644 --- a/pkg/filters/proxy/basepool.go +++ b/pkg/filters/proxy/basepool.go @@ -124,7 +124,7 @@ func (bsp *BaseServerPool) LoadBalancer() LoadBalancer { func (bsp *BaseServerPool) createLoadBalancer(spec *LoadBalanceSpec, servers []*Server) { for _, server := range servers { - server.checkAddrPattern() + server.CheckAddrPattern() } if spec == nil { diff --git a/pkg/filters/proxy/loadbalance.go b/pkg/filters/proxy/loadbalance.go index 94b4b0e586..e1c38e1ab8 100644 --- a/pkg/filters/proxy/loadbalance.go +++ b/pkg/filters/proxy/loadbalance.go @@ -231,7 +231,7 @@ func (blb *BaseLoadBalancer) probeServers() { healthyServers := make([]*Server, 0, len(blb.Servers)) for _, svr := range blb.Servers { pass := blb.probeHTTP(svr.URL) - healthy, change := svr.recordHealth(pass, blb.spec.HealthCheck.Passes, blb.spec.HealthCheck.Fails) + healthy, change := svr.RecordHealth(pass, blb.spec.HealthCheck.Passes, blb.spec.HealthCheck.Fails) if change { statusChange = true } diff --git a/pkg/filters/proxy/pool.go b/pkg/filters/proxy/pool.go index f3378af98f..4d64acfa3c 100644 --- a/pkg/filters/proxy/pool.go +++ b/pkg/filters/proxy/pool.go @@ -146,7 +146,7 @@ func (spCtx *serverPoolContext) prepareRequest(svr *Server, ctx stdcontext.Conte // only set host when server address is not host name OR // server is explicitly told to keep the host of the request. - if !svr.addrIsHostName || svr.KeepHost { + if !svr.AddrIsHostName || svr.KeepHost { stdr.Host = req.Host() } diff --git a/pkg/filters/proxy/proxy.go b/pkg/filters/proxy/proxy.go index 0b79e1647e..2ad3fdbd79 100644 --- a/pkg/filters/proxy/proxy.go +++ b/pkg/filters/proxy/proxy.go @@ -29,6 +29,7 @@ import ( "github.com/megaease/easegress/pkg/context" "github.com/megaease/easegress/pkg/filters" "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/protocols" "github.com/megaease/easegress/pkg/protocols/httpprot" "github.com/megaease/easegress/pkg/resilience" "github.com/megaease/easegress/pkg/supervisor" @@ -126,6 +127,9 @@ type ( KeyBase64 string `json:"keyBase64" jsonschema:"required,format=base64"` RootCertBase64 string `json:"rootCertBase64" jsonschema:"required,format=base64"` } + + // Server is the backend server. + Server = protocols.Server ) // Validate validates Spec. diff --git a/pkg/filters/proxy/server_test.go b/pkg/filters/proxy/server_test.go deleted file mode 100644 index 487fbf316a..0000000000 --- a/pkg/filters/proxy/server_test.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package proxy - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestString(t *testing.T) { - assert := assert.New(t) - - svr := Server{ - URL: "abc", - Tags: []string{"test", "standby"}, - Weight: 10, - } - - assert.Equal("abc,[test standby],10", svr.String()) -} - -func TestCheckAddrPattern(t *testing.T) { - assert := assert.New(t) - - server := Server{} - - // regard invalid url as IP:port - server.URL = "@@+=%^httpsidfssjflsdkjfsjf" - server.checkAddrPattern() - assert.False(server.addrIsHostName, "address should be IP:port") - - server.URL = "http://127.0.0.1:1111" - server.checkAddrPattern() - assert.False(server.addrIsHostName, "address should be IP:port") - - server.URL = "https://127.0.0.1:1111" - server.checkAddrPattern() - assert.False(server.addrIsHostName, "address should be IP:port") - - server.URL = "https://[FE80:CD00:0000:0CDE:1257:0000:211E:729C]:1111" - server.checkAddrPattern() - assert.False(server.addrIsHostName, "address should be IP:port") - - server.URL = "https://www.megaease.com:1111" - server.checkAddrPattern() - assert.True(server.addrIsHostName, "address should be host name") - - server.URL = "https://www.megaease.com" - server.checkAddrPattern() - assert.True(server.addrIsHostName, "address should be host name") - - server.URL = "faas-func-name.default.example.com" - server.checkAddrPattern() - assert.True(server.addrIsHostName, "address should not be IP:port") -} diff --git a/pkg/filters/proxy/server.go b/pkg/protocols/server.go similarity index 71% rename from pkg/filters/proxy/server.go rename to pkg/protocols/server.go index d78970fe04..791fcd487e 100644 --- a/pkg/filters/proxy/server.go +++ b/pkg/protocols/server.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package protocols import ( "fmt" @@ -26,17 +26,17 @@ import ( "github.com/megaease/easegress/pkg/logger" ) -// Server is proxy server. +// Server is a backend proxy server. type Server struct { - URL string `json:"url" jsonschema:"required,format=url"` - Tags []string `json:"tags" jsonschema:"omitempty,uniqueItems=true"` - Weight int `json:"weight" jsonschema:"omitempty,minimum=0,maximum=100"` - KeepHost bool `json:"keepHost" jsonschema:"omitempty,default=false"` - addrIsHostName bool - health *ServerHealth + URL string `json:"url" jsonschema:"required,format=url"` + Tags []string `json:"tags" jsonschema:"omitempty,uniqueItems=true"` + Weight int `json:"weight" jsonschema:"omitempty,minimum=0,maximum=100"` + KeepHost bool `json:"keepHost" jsonschema:"omitempty,default=false"` + AddrIsHostName bool `json:"-"` + health *ServerHealth `json:"-"` } -// ServerHealth is health status of server +// ServerHealth is health status of a Server type ServerHealth struct { healthy bool fails int @@ -53,9 +53,9 @@ func (s *Server) ID() string { return s.URL } -// checkAddrPattern checks whether the server address is host name or ip:port, +// CheckAddrPattern checks whether the server address is host name or ip:port, // not all error cases are handled. 
-func (s *Server) checkAddrPattern() { +func (s *Server) CheckAddrPattern() { u, err := url.Parse(s.URL) if err != nil { return @@ -75,11 +75,11 @@ func (s *Server) checkAddrPattern() { host = host[1:square] } - s.addrIsHostName = net.ParseIP(host) == nil + s.AddrIsHostName = net.ParseIP(host) == nil } -// recordHealth records health status, return healthy status and true if status changes -func (s *Server) recordHealth(pass bool, passThreshold, failThreshold int) (bool, bool) { +// RecordHealth records health status, return healthy status and true if status changes +func (s *Server) RecordHealth(pass bool, passThreshold, failThreshold int) (bool, bool) { if s.health == nil { s.health = &ServerHealth{healthy: true} } diff --git a/pkg/filters/grpcproxy/server_test.go b/pkg/protocols/server_test.go similarity index 67% rename from pkg/filters/grpcproxy/server_test.go rename to pkg/protocols/server_test.go index 78cf32e32c..3304537fa6 100644 --- a/pkg/filters/grpcproxy/server_test.go +++ b/pkg/protocols/server_test.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package grpcprxoy +package protocols import ( "testing" @@ -42,30 +42,30 @@ func TestCheckAddrPattern(t *testing.T) { // regard invalid url as IP:port server.URL = "@@+=%^httpsidfssjflsdkjfsjf" - server.checkAddrPattern() - assert.False(server.addrIsHostName, "address should be IP:port") + server.CheckAddrPattern() + assert.False(server.AddrIsHostName, "address should be IP:port") server.URL = "http://127.0.0.1:1111" - server.checkAddrPattern() - assert.False(server.addrIsHostName, "address should be IP:port") + server.CheckAddrPattern() + assert.False(server.AddrIsHostName, "address should be IP:port") server.URL = "https://127.0.0.1:1111" - server.checkAddrPattern() - assert.False(server.addrIsHostName, "address should be IP:port") + server.CheckAddrPattern() + assert.False(server.AddrIsHostName, "address should be IP:port") server.URL = "https://[FE80:CD00:0000:0CDE:1257:0000:211E:729C]:1111" - server.checkAddrPattern() - assert.False(server.addrIsHostName, "address should be IP:port") + server.CheckAddrPattern() + assert.False(server.AddrIsHostName, "address should be IP:port") server.URL = "https://www.megaease.com:1111" - server.checkAddrPattern() - assert.True(server.addrIsHostName, "address should be host name") + server.CheckAddrPattern() + assert.True(server.AddrIsHostName, "address should be host name") server.URL = "https://www.megaease.com" - server.checkAddrPattern() - assert.True(server.addrIsHostName, "address should be host name") + server.CheckAddrPattern() + assert.True(server.AddrIsHostName, "address should be host name") server.URL = "faas-func-name.default.example.com" - server.checkAddrPattern() - assert.True(server.addrIsHostName, "address should not be IP:port") + server.CheckAddrPattern() + assert.True(server.AddrIsHostName, "address should not be IP:port") } From 34fe51c81c4ece70055acad5ac9563a1ee638034 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Mon, 30 Jan 2023 09:19:26 +0800 Subject: [PATCH 02/14] refactor: folder structure for proxies --- pkg/filters/meshadaptor/meshadaptor.go | 2 +- pkg/filters/proxies/basepool.go | 163 +++++++++++ pkg/filters/{ => proxies}/grpcproxy/codec.go | 2 +- .../{ => proxies}/grpcproxy/loadbalance.go | 2 +- .../grpcproxy/loadbalance_test.go | 5 +- pkg/filters/{ => proxies}/grpcproxy/pool.go | 2 +- pkg/filters/{ => proxies}/grpcproxy/proxy.go | 6 +- .../{ => proxies}/grpcproxy/proxy_test.go | 2 +- .../{ => proxies}/grpcproxy/requestmatch.go | 2 +- 
.../grpcproxy/requestmatch_test.go | 7 +- .../{proxy => proxies/httpproxy}/basepool.go | 18 +- .../httpproxy}/compression.go | 2 +- .../httpproxy}/compression_test.go | 2 +- .../httpproxy}/loadbalance.go | 2 +- .../httpproxy}/loadbalance_test.go | 2 +- .../httpproxy}/memorycache.go | 2 +- .../httpproxy}/memorycache_test.go | 2 +- .../{proxy => proxies/httpproxy}/pool.go | 20 +- .../{proxy => proxies/httpproxy}/pool_test.go | 2 +- .../{proxy => proxies/httpproxy}/proxy.go | 6 +- .../httpproxy}/proxy_test.go | 2 +- .../httpproxy}/requestmatch.go | 2 +- .../httpproxy}/requestmatch_test.go | 2 +- .../{proxy => proxies/httpproxy}/wspool.go | 7 +- .../httpproxy}/wspool_test.go | 2 +- .../{proxy => proxies/httpproxy}/wsproxy.go | 2 +- .../httpproxy}/wsproxy_test.go | 2 +- pkg/filters/proxies/loadbalance.go | 277 ++++++++++++++++++ pkg/{protocols => filters/proxies}/server.go | 2 +- .../proxies}/server_test.go | 2 +- pkg/object/function/worker/ingress.go | 5 +- pkg/object/grpcserver/runtime.go | 4 +- pkg/object/ingresscontroller/translator.go | 2 +- pkg/object/meshcontroller/spec/builder.go | 10 +- pkg/object/meshcontroller/spec/spec.go | 2 +- pkg/object/meshcontroller/spec/spec_test.go | 2 +- pkg/registry/registry.go | 4 +- 37 files changed, 514 insertions(+), 66 deletions(-) create mode 100644 pkg/filters/proxies/basepool.go rename pkg/filters/{ => proxies}/grpcproxy/codec.go (98%) rename pkg/filters/{ => proxies}/grpcproxy/loadbalance.go (99%) rename pkg/filters/{ => proxies}/grpcproxy/loadbalance_test.go (98%) rename pkg/filters/{ => proxies}/grpcproxy/pool.go (99%) rename pkg/filters/{ => proxies}/grpcproxy/proxy.go (97%) rename pkg/filters/{ => proxies}/grpcproxy/proxy_test.go (99%) rename pkg/filters/{ => proxies}/grpcproxy/requestmatch.go (99%) rename pkg/filters/{ => proxies}/grpcproxy/requestmatch_test.go (99%) rename pkg/filters/{proxy => proxies/httpproxy}/basepool.go (87%) rename pkg/filters/{proxy => proxies/httpproxy}/compression.go (99%) rename pkg/filters/{proxy => proxies/httpproxy}/compression_test.go (99%) rename pkg/filters/{proxy => proxies/httpproxy}/loadbalance.go (99%) rename pkg/filters/{proxy => proxies/httpproxy}/loadbalance_test.go (99%) rename pkg/filters/{proxy => proxies/httpproxy}/memorycache.go (99%) rename pkg/filters/{proxy => proxies/httpproxy}/memorycache_test.go (99%) rename pkg/filters/{proxy => proxies/httpproxy}/pool.go (96%) rename pkg/filters/{proxy => proxies/httpproxy}/pool_test.go (99%) rename pkg/filters/{proxy => proxies/httpproxy}/proxy.go (98%) rename pkg/filters/{proxy => proxies/httpproxy}/proxy_test.go (99%) rename pkg/filters/{proxy => proxies/httpproxy}/requestmatch.go (99%) rename pkg/filters/{proxy => proxies/httpproxy}/requestmatch_test.go (99%) rename pkg/filters/{proxy => proxies/httpproxy}/wspool.go (96%) rename pkg/filters/{proxy => proxies/httpproxy}/wspool_test.go (99%) rename pkg/filters/{proxy => proxies/httpproxy}/wsproxy.go (99%) rename pkg/filters/{proxy => proxies/httpproxy}/wsproxy_test.go (99%) create mode 100644 pkg/filters/proxies/loadbalance.go rename pkg/{protocols => filters/proxies}/server.go (99%) rename pkg/{protocols => filters/proxies}/server_test.go (99%) diff --git a/pkg/filters/meshadaptor/meshadaptor.go b/pkg/filters/meshadaptor/meshadaptor.go index 4dba756363..528f278ca8 100644 --- a/pkg/filters/meshadaptor/meshadaptor.go +++ b/pkg/filters/meshadaptor/meshadaptor.go @@ -20,7 +20,7 @@ package meshadaptor import ( "github.com/megaease/easegress/pkg/context" "github.com/megaease/easegress/pkg/filters" - 
"github.com/megaease/easegress/pkg/filters/proxy" + proxy "github.com/megaease/easegress/pkg/filters/proxies/httpproxy" "github.com/megaease/easegress/pkg/protocols/httpprot" "github.com/megaease/easegress/pkg/protocols/httpprot/httpheader" "github.com/megaease/easegress/pkg/util/pathadaptor" diff --git a/pkg/filters/proxies/basepool.go b/pkg/filters/proxies/basepool.go new file mode 100644 index 0000000000..428bed13fb --- /dev/null +++ b/pkg/filters/proxies/basepool.go @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package proxies + +/* +// BaseServerPool defines a server pool. +type BaseServerPool struct { + name string + done chan struct{} + wg sync.WaitGroup + loadBalancer atomic.Value +} + +// BaseServerPoolSpec is the spec for a base server pool. +type BaseServerPoolSpec struct { + ServerTags []string `json:"serverTags" jsonschema:"omitempty,uniqueItems=true"` + Servers []*Server `json:"servers" jsonschema:"omitempty"` + ServiceRegistry string `json:"serviceRegistry" jsonschema:"omitempty"` + ServiceName string `json:"serviceName" jsonschema:"omitempty"` + LoadBalance *LoadBalanceSpec `json:"loadBalance" jsonschema:"omitempty"` +} + +// Validate validates ServerPoolSpec. +func (sps *BaseServerPoolSpec) Validate() error { + if sps.ServiceName == "" && len(sps.Servers) == 0 { + return fmt.Errorf("both serviceName and servers are empty") + } + + serversGotWeight := 0 + for _, server := range sps.Servers { + if server.Weight > 0 { + serversGotWeight++ + } + } + if serversGotWeight > 0 && serversGotWeight < len(sps.Servers) { + msgFmt := "not all servers have weight(%d/%d)" + return fmt.Errorf(msgFmt, serversGotWeight, len(sps.Servers)) + } + + if sps.ServiceName != "" && sps.LoadBalance.HealthCheck != nil { + return fmt.Errorf("can not open health check for service discovery") + } + + return nil +} + +// Init initialize the base server pool according to the spec. 
+func (bsp *BaseServerPool) Init(super *supervisor.Supervisor, name string, spec *BaseServerPoolSpec) { + bsp.name = name + bsp.done = make(chan struct{}) + + if spec.ServiceRegistry == "" || spec.ServiceName == "" { + bsp.createLoadBalancer(spec.LoadBalance, spec.Servers) + return + } + + // watch service registry + entity := super.MustGetSystemController(serviceregistry.Kind) + registry := entity.Instance().(*serviceregistry.ServiceRegistry) + + instances, err := registry.ListServiceInstances(spec.ServiceRegistry, spec.ServiceName) + if err != nil { + msgFmt := "first try to use service %s/%s failed(will try again): %v" + logger.Warnf(msgFmt, spec.ServiceRegistry, spec.ServiceName, err) + bsp.createLoadBalancer(spec.LoadBalance, spec.Servers) + } + + bsp.useService(spec, instances) + + watcher := registry.NewServiceWatcher(spec.ServiceRegistry, spec.ServiceName) + bsp.wg.Add(1) + go func() { + for { + select { + case <-bsp.done: + watcher.Stop() + bsp.wg.Done() + return + case event := <-watcher.Watch(): + bsp.useService(spec, event.Instances) + } + } + }() +} + +// LoadBalancer returns the load balancer of the server pool. +func (bsp *BaseServerPool) LoadBalancer() LoadBalancer { + if v := bsp.loadBalancer.Load(); v != nil { + return v.(LoadBalancer) + } + return nil +} + +func (bsp *BaseServerPool) createLoadBalancer(spec *LoadBalanceSpec, servers []*Server) { + for _, server := range servers { + server.CheckAddrPattern() + } + + if spec == nil { + spec = &LoadBalanceSpec{} + } + + lb := NewLoadBalancer(spec, servers) + if old := bsp.loadBalancer.Swap(lb); old != nil { + old.(LoadBalancer).Close() + } +} + +func (bsp *BaseServerPool) useService(spec *BaseServerPoolSpec, instances map[string]*serviceregistry.ServiceInstanceSpec) { + servers := make([]*Server, 0) + + for _, instance := range instances { + // default to true in case of sp.spec.ServerTags is empty + match := true + + for _, tag := range spec.ServerTags { + if match = stringtool.StrInSlice(tag, instance.Tags); match { + break + } + } + + if match { + servers = append(servers, &Server{ + URL: instance.URL(), + Tags: instance.Tags, + Weight: instance.Weight, + }) + } + } + + if len(servers) == 0 { + msgFmt := "%s/%s: no service instance satisfy tags: %v" + logger.Warnf(msgFmt, spec.ServiceRegistry, spec.ServiceName, spec.ServerTags) + servers = spec.Servers + } + + bsp.createLoadBalancer(spec.LoadBalance, servers) +} + +func (bsp *BaseServerPool) close() { + close(bsp.done) + bsp.wg.Wait() + if lb := bsp.LoadBalancer(); lb != nil { + lb.Close() + } +} + +*/ diff --git a/pkg/filters/grpcproxy/codec.go b/pkg/filters/proxies/grpcproxy/codec.go similarity index 98% rename from pkg/filters/grpcproxy/codec.go rename to pkg/filters/proxies/grpcproxy/codec.go index 859617f173..501598ef18 100644 --- a/pkg/filters/grpcproxy/codec.go +++ b/pkg/filters/proxies/grpcproxy/codec.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package grpcprxoy +package grpcproxy import ( "google.golang.org/protobuf/proto" diff --git a/pkg/filters/grpcproxy/loadbalance.go b/pkg/filters/proxies/grpcproxy/loadbalance.go similarity index 99% rename from pkg/filters/grpcproxy/loadbalance.go rename to pkg/filters/proxies/grpcproxy/loadbalance.go index 93644b0cd6..568cac6938 100644 --- a/pkg/filters/grpcproxy/loadbalance.go +++ b/pkg/filters/proxies/grpcproxy/loadbalance.go @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package grpcprxoy +package grpcproxy import ( "fmt" diff --git a/pkg/filters/grpcproxy/loadbalance_test.go b/pkg/filters/proxies/grpcproxy/loadbalance_test.go similarity index 98% rename from pkg/filters/grpcproxy/loadbalance_test.go rename to pkg/filters/proxies/grpcproxy/loadbalance_test.go index 1788fae103..ec6af1d2ed 100644 --- a/pkg/filters/grpcproxy/loadbalance_test.go +++ b/pkg/filters/proxies/grpcproxy/loadbalance_test.go @@ -15,14 +15,15 @@ * limitations under the License. */ -package grpcprxoy +package grpcproxy import ( "context" + "testing" + "github.com/megaease/easegress/pkg/protocols/grpcprot" "github.com/stretchr/testify/assert" "google.golang.org/grpc/metadata" - "testing" ) func TestForwardLB(t *testing.T) { diff --git a/pkg/filters/grpcproxy/pool.go b/pkg/filters/proxies/grpcproxy/pool.go similarity index 99% rename from pkg/filters/grpcproxy/pool.go rename to pkg/filters/proxies/grpcproxy/pool.go index e93898b0e5..4635254d70 100644 --- a/pkg/filters/grpcproxy/pool.go +++ b/pkg/filters/proxies/grpcproxy/pool.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package grpcprxoy +package grpcproxy import ( stdcontext "context" diff --git a/pkg/filters/grpcproxy/proxy.go b/pkg/filters/proxies/grpcproxy/proxy.go similarity index 97% rename from pkg/filters/grpcproxy/proxy.go rename to pkg/filters/proxies/grpcproxy/proxy.go index 3244eef72f..2d019241b2 100644 --- a/pkg/filters/grpcproxy/proxy.go +++ b/pkg/filters/proxies/grpcproxy/proxy.go @@ -15,14 +15,14 @@ * limitations under the License. */ -package grpcprxoy +package grpcproxy import ( "fmt" "github.com/megaease/easegress/pkg/context" "github.com/megaease/easegress/pkg/filters" - "github.com/megaease/easegress/pkg/protocols" + "github.com/megaease/easegress/pkg/filters/proxies" "github.com/megaease/easegress/pkg/protocols/grpcprot" "github.com/megaease/easegress/pkg/resilience" "github.com/megaease/easegress/pkg/supervisor" @@ -84,7 +84,7 @@ type ( } // Server is the backend server. - Server = protocols.Server + Server = proxies.Server ) // Validate validates Spec. diff --git a/pkg/filters/grpcproxy/proxy_test.go b/pkg/filters/proxies/grpcproxy/proxy_test.go similarity index 99% rename from pkg/filters/grpcproxy/proxy_test.go rename to pkg/filters/proxies/grpcproxy/proxy_test.go index b44c14b3ed..b72c22aafe 100644 --- a/pkg/filters/grpcproxy/proxy_test.go +++ b/pkg/filters/proxies/grpcproxy/proxy_test.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package grpcprxoy +package grpcproxy import ( "os" diff --git a/pkg/filters/grpcproxy/requestmatch.go b/pkg/filters/proxies/grpcproxy/requestmatch.go similarity index 99% rename from pkg/filters/grpcproxy/requestmatch.go rename to pkg/filters/proxies/grpcproxy/requestmatch.go index 852807262c..9f7ecfe517 100644 --- a/pkg/filters/grpcproxy/requestmatch.go +++ b/pkg/filters/proxies/grpcproxy/requestmatch.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package grpcprxoy +package grpcproxy import ( "fmt" diff --git a/pkg/filters/grpcproxy/requestmatch_test.go b/pkg/filters/proxies/grpcproxy/requestmatch_test.go similarity index 99% rename from pkg/filters/grpcproxy/requestmatch_test.go rename to pkg/filters/proxies/grpcproxy/requestmatch_test.go index 1a9c9bf8e3..1b9e6e6d08 100644 --- a/pkg/filters/grpcproxy/requestmatch_test.go +++ b/pkg/filters/proxies/grpcproxy/requestmatch_test.go @@ -15,17 +15,18 @@ * limitations under the License. 
*/ -package grpcprxoy +package grpcproxy import ( "context" "fmt" - "github.com/megaease/easegress/pkg/protocols/grpcprot" - "google.golang.org/grpc/metadata" "math/rand" "strconv" "testing" + "github.com/megaease/easegress/pkg/protocols/grpcprot" + "google.golang.org/grpc/metadata" + "github.com/stretchr/testify/assert" ) diff --git a/pkg/filters/proxy/basepool.go b/pkg/filters/proxies/httpproxy/basepool.go similarity index 87% rename from pkg/filters/proxy/basepool.go rename to pkg/filters/proxies/httpproxy/basepool.go index fa46b98608..eb3ccc5978 100644 --- a/pkg/filters/proxy/basepool.go +++ b/pkg/filters/proxies/httpproxy/basepool.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package httpproxy import ( "fmt" @@ -33,18 +33,16 @@ type BaseServerPool struct { name string done chan struct{} wg sync.WaitGroup - filter RequestMatcher loadBalancer atomic.Value } // BaseServerPoolSpec is the spec for a base server pool. type BaseServerPoolSpec struct { - Filter *RequestMatcherSpec `json:"filter" jsonschema:"omitempty"` - ServerTags []string `json:"serverTags" jsonschema:"omitempty,uniqueItems=true"` - Servers []*Server `json:"servers" jsonschema:"omitempty"` - ServiceRegistry string `json:"serviceRegistry" jsonschema:"omitempty"` - ServiceName string `json:"serviceName" jsonschema:"omitempty"` - LoadBalance *LoadBalanceSpec `json:"loadBalance" jsonschema:"omitempty"` + ServerTags []string `json:"serverTags" jsonschema:"omitempty,uniqueItems=true"` + Servers []*Server `json:"servers" jsonschema:"omitempty"` + ServiceRegistry string `json:"serviceRegistry" jsonschema:"omitempty"` + ServiceName string `json:"serviceName" jsonschema:"omitempty"` + LoadBalance *LoadBalanceSpec `json:"loadBalance" jsonschema:"omitempty"` } // Validate validates ServerPoolSpec. @@ -76,10 +74,6 @@ func (bsp *BaseServerPool) Init(super *supervisor.Supervisor, name string, spec bsp.name = name bsp.done = make(chan struct{}) - if spec.Filter != nil { - bsp.filter = NewRequestMatcher(spec.Filter) - } - if spec.ServiceRegistry == "" || spec.ServiceName == "" { bsp.createLoadBalancer(spec.LoadBalance, spec.Servers) return diff --git a/pkg/filters/proxy/compression.go b/pkg/filters/proxies/httpproxy/compression.go similarity index 99% rename from pkg/filters/proxy/compression.go rename to pkg/filters/proxies/httpproxy/compression.go index e6edfa468f..4854c4a8bf 100644 --- a/pkg/filters/proxy/compression.go +++ b/pkg/filters/proxies/httpproxy/compression.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package httpproxy import ( "net/http" diff --git a/pkg/filters/proxy/compression_test.go b/pkg/filters/proxies/httpproxy/compression_test.go similarity index 99% rename from pkg/filters/proxy/compression_test.go rename to pkg/filters/proxies/httpproxy/compression_test.go index 7df132f65d..2832406e5b 100644 --- a/pkg/filters/proxy/compression_test.go +++ b/pkg/filters/proxies/httpproxy/compression_test.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package httpproxy import ( "io" diff --git a/pkg/filters/proxy/loadbalance.go b/pkg/filters/proxies/httpproxy/loadbalance.go similarity index 99% rename from pkg/filters/proxy/loadbalance.go rename to pkg/filters/proxies/httpproxy/loadbalance.go index e1c38e1ab8..f82b1d7fbd 100644 --- a/pkg/filters/proxy/loadbalance.go +++ b/pkg/filters/proxies/httpproxy/loadbalance.go @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package proxy +package httpproxy import ( "crypto/hmac" diff --git a/pkg/filters/proxy/loadbalance_test.go b/pkg/filters/proxies/httpproxy/loadbalance_test.go similarity index 99% rename from pkg/filters/proxy/loadbalance_test.go rename to pkg/filters/proxies/httpproxy/loadbalance_test.go index 2785157f3e..8feb0ab966 100644 --- a/pkg/filters/proxy/loadbalance_test.go +++ b/pkg/filters/proxies/httpproxy/loadbalance_test.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package httpproxy import ( "fmt" diff --git a/pkg/filters/proxy/memorycache.go b/pkg/filters/proxies/httpproxy/memorycache.go similarity index 99% rename from pkg/filters/proxy/memorycache.go rename to pkg/filters/proxies/httpproxy/memorycache.go index b0da99e5d2..3ddd25a397 100644 --- a/pkg/filters/proxy/memorycache.go +++ b/pkg/filters/proxies/httpproxy/memorycache.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package httpproxy import ( "net/http" diff --git a/pkg/filters/proxy/memorycache_test.go b/pkg/filters/proxies/httpproxy/memorycache_test.go similarity index 99% rename from pkg/filters/proxy/memorycache_test.go rename to pkg/filters/proxies/httpproxy/memorycache_test.go index 35a1667fbd..dd780ebfb5 100644 --- a/pkg/filters/proxy/memorycache_test.go +++ b/pkg/filters/proxies/httpproxy/memorycache_test.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package httpproxy import ( "net/http" diff --git a/pkg/filters/proxy/pool.go b/pkg/filters/proxies/httpproxy/pool.go similarity index 96% rename from pkg/filters/proxy/pool.go rename to pkg/filters/proxies/httpproxy/pool.go index 4d64acfa3c..685fbbca95 100644 --- a/pkg/filters/proxy/pool.go +++ b/pkg/filters/proxies/httpproxy/pool.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package httpproxy import ( stdcontext "context" @@ -162,6 +162,7 @@ func (spCtx *serverPoolContext) prepareRequest(svr *Server, ctx stdcontext.Conte type ServerPool struct { BaseServerPool + filter RequestMatcher proxy *Proxy spec *ServerPoolSpec failureCodes map[int]struct{} @@ -179,12 +180,13 @@ type ServerPool struct { type ServerPoolSpec struct { BaseServerPoolSpec `json:",inline"` - SpanName string `json:"spanName" jsonschema:"omitempty"` - ServerMaxBodySize int64 `json:"serverMaxBodySize" jsonschema:"omitempty"` - Timeout string `json:"timeout" jsonschema:"omitempty,format=duration"` - RetryPolicy string `json:"retryPolicy" jsonschema:"omitempty"` - CircuitBreakerPolicy string `json:"circuitBreakerPolicy" jsonschema:"omitempty"` - MemoryCache *MemoryCacheSpec `json:"memoryCache,omitempty" jsonschema:"omitempty"` + Filter *RequestMatcherSpec `json:"filter" jsonschema:"omitempty"` + SpanName string `json:"spanName" jsonschema:"omitempty"` + ServerMaxBodySize int64 `json:"serverMaxBodySize" jsonschema:"omitempty"` + Timeout string `json:"timeout" jsonschema:"omitempty,format=duration"` + RetryPolicy string `json:"retryPolicy" jsonschema:"omitempty"` + CircuitBreakerPolicy string `json:"circuitBreakerPolicy" jsonschema:"omitempty"` + MemoryCache *MemoryCacheSpec `json:"memoryCache,omitempty" jsonschema:"omitempty"` // FailureCodes would be 5xx if it isn't assigned any value. 
FailureCodes []int `json:"failureCodes" jsonschema:"omitempty,uniqueItems=true"` @@ -202,6 +204,10 @@ func NewServerPool(proxy *Proxy, spec *ServerPoolSpec, name string) *ServerPool spec: spec, httpStat: httpstat.New(), } + if spec.Filter != nil { + sp.filter = NewRequestMatcher(spec.Filter) + } + sp.BaseServerPool.Init(proxy.super, name, &spec.BaseServerPoolSpec) if spec.MemoryCache != nil { diff --git a/pkg/filters/proxy/pool_test.go b/pkg/filters/proxies/httpproxy/pool_test.go similarity index 99% rename from pkg/filters/proxy/pool_test.go rename to pkg/filters/proxies/httpproxy/pool_test.go index fb9b3a8b1d..4e6921ea2c 100644 --- a/pkg/filters/proxy/pool_test.go +++ b/pkg/filters/proxies/httpproxy/pool_test.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package httpproxy import ( "net/http" diff --git a/pkg/filters/proxy/proxy.go b/pkg/filters/proxies/httpproxy/proxy.go similarity index 98% rename from pkg/filters/proxy/proxy.go rename to pkg/filters/proxies/httpproxy/proxy.go index 2ad3fdbd79..beabc64d4f 100644 --- a/pkg/filters/proxy/proxy.go +++ b/pkg/filters/proxies/httpproxy/proxy.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package httpproxy import ( "crypto/tls" @@ -28,8 +28,8 @@ import ( "github.com/megaease/easegress/pkg/context" "github.com/megaease/easegress/pkg/filters" + "github.com/megaease/easegress/pkg/filters/proxies" "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/protocols" "github.com/megaease/easegress/pkg/protocols/httpprot" "github.com/megaease/easegress/pkg/resilience" "github.com/megaease/easegress/pkg/supervisor" @@ -129,7 +129,7 @@ type ( } // Server is the backend server. - Server = protocols.Server + Server = proxies.Server ) // Validate validates Spec. diff --git a/pkg/filters/proxy/proxy_test.go b/pkg/filters/proxies/httpproxy/proxy_test.go similarity index 99% rename from pkg/filters/proxy/proxy_test.go rename to pkg/filters/proxies/httpproxy/proxy_test.go index ee5b80fe9e..c348dfb164 100644 --- a/pkg/filters/proxy/proxy_test.go +++ b/pkg/filters/proxies/httpproxy/proxy_test.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package httpproxy import ( "fmt" diff --git a/pkg/filters/proxy/requestmatch.go b/pkg/filters/proxies/httpproxy/requestmatch.go similarity index 99% rename from pkg/filters/proxy/requestmatch.go rename to pkg/filters/proxies/httpproxy/requestmatch.go index 33c872cc78..7637f08662 100644 --- a/pkg/filters/proxy/requestmatch.go +++ b/pkg/filters/proxies/httpproxy/requestmatch.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package httpproxy import ( "fmt" diff --git a/pkg/filters/proxy/requestmatch_test.go b/pkg/filters/proxies/httpproxy/requestmatch_test.go similarity index 99% rename from pkg/filters/proxy/requestmatch_test.go rename to pkg/filters/proxies/httpproxy/requestmatch_test.go index 6609ed166e..e65bc4471c 100644 --- a/pkg/filters/proxy/requestmatch_test.go +++ b/pkg/filters/proxies/httpproxy/requestmatch_test.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package httpproxy import ( "fmt" diff --git a/pkg/filters/proxy/wspool.go b/pkg/filters/proxies/httpproxy/wspool.go similarity index 96% rename from pkg/filters/proxy/wspool.go rename to pkg/filters/proxies/httpproxy/wspool.go index a437d00560..361b94c187 100644 --- a/pkg/filters/proxy/wspool.go +++ b/pkg/filters/proxies/httpproxy/wspool.go @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package proxy +package httpproxy import ( "fmt" @@ -37,6 +37,7 @@ import ( type WebSocketServerPool struct { BaseServerPool + filter RequestMatcher proxy *WebSocketProxy spec *WebSocketServerPoolSpec httpStat *httpstat.HTTPStat @@ -45,6 +46,7 @@ type WebSocketServerPool struct { // WebSocketServerPoolSpec is the spec for a server pool. type WebSocketServerPoolSpec struct { BaseServerPoolSpec `json:",inline"` + Filter *RequestMatcherSpec `json:"filter" jsonschema:"omitempty"` } // NewWebSocketServerPool creates a new server pool according to spec. @@ -54,6 +56,9 @@ func NewWebSocketServerPool(proxy *WebSocketProxy, spec *WebSocketServerPoolSpec spec: spec, httpStat: httpstat.New(), } + if spec.Filter != nil { + sp.filter = NewRequestMatcher(spec.Filter) + } sp.Init(proxy.super, name, &spec.BaseServerPoolSpec) return sp } diff --git a/pkg/filters/proxy/wspool_test.go b/pkg/filters/proxies/httpproxy/wspool_test.go similarity index 99% rename from pkg/filters/proxy/wspool_test.go rename to pkg/filters/proxies/httpproxy/wspool_test.go index c64906861e..8d8cfea3a6 100644 --- a/pkg/filters/proxy/wspool_test.go +++ b/pkg/filters/proxies/httpproxy/wspool_test.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package httpproxy import ( "crypto/tls" diff --git a/pkg/filters/proxy/wsproxy.go b/pkg/filters/proxies/httpproxy/wsproxy.go similarity index 99% rename from pkg/filters/proxy/wsproxy.go rename to pkg/filters/proxies/httpproxy/wsproxy.go index 0d8a00d92c..0736cb0391 100644 --- a/pkg/filters/proxy/wsproxy.go +++ b/pkg/filters/proxies/httpproxy/wsproxy.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package httpproxy import ( "fmt" diff --git a/pkg/filters/proxy/wsproxy_test.go b/pkg/filters/proxies/httpproxy/wsproxy_test.go similarity index 99% rename from pkg/filters/proxy/wsproxy_test.go rename to pkg/filters/proxies/httpproxy/wsproxy_test.go index efe6a659f5..448501fce2 100644 --- a/pkg/filters/proxy/wsproxy_test.go +++ b/pkg/filters/proxies/httpproxy/wsproxy_test.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package proxy +package httpproxy import ( "net/http" diff --git a/pkg/filters/proxies/loadbalance.go b/pkg/filters/proxies/loadbalance.go new file mode 100644 index 0000000000..caaad1e6d4 --- /dev/null +++ b/pkg/filters/proxies/loadbalance.go @@ -0,0 +1,277 @@ +package proxies + +import "github.com/megaease/easegress/pkg/protocols" + +// LoadBalancer is the interface of a load balancer. +type LoadBalancer[Request protocols.Request, Response protocols.Response] interface { + ChooseServer(req Request) *Server + ReturnServer(server *Server, req Request, resp Response) + Close() +} + +// StickySessionSpec is the spec for sticky session. +type StickySessionSpec struct { + Mode string `json:"mode" jsonschema:"required,enum=CookieConsistentHash,enum=DurationBased,enum=ApplicationBased"` + // AppCookieName is the user-defined cookie name in CookieConsistentHash and ApplicationBased mode. + AppCookieName string `json:"appCookieName" jsonschema:"omitempty"` + // LBCookieName is the generated cookie name in DurationBased and ApplicationBased mode. + LBCookieName string `json:"lbCookieName" jsonschema:"omitempty"` + // LBCookieExpire is the expire seconds of generated cookie in DurationBased and ApplicationBased mode. + LBCookieExpire string `json:"lbCookieExpire" jsonschema:"omitempty,format=duration"` +} + +// HealthCheckSpec is the spec for health check. 
+type HealthCheckSpec struct { + // Interval is the interval duration for health check. + Interval string `json:"interval" jsonschema:"omitempty,format=duration"` + // Path is the health check path for server + Path string `json:"path" jsonschema:"omitempty"` + // Timeout is the timeout duration for health check, default is 3. + Timeout string `json:"timeout" jsonschema:"omitempty,format=duration"` + // Fails is the consecutive fails count for assert fail, default is 1. + Fails int `json:"fails" jsonschema:"omitempty,minimum=1"` + // Passes is the consecutive passes count for assert pass, default is 1. + Passes int `json:"passes" jsonschema:"omitempty,minimum=1"` +} + +// LoadBalanceSpec is the spec to create a load balancer. +type LoadBalanceSpec struct { + Policy string `json:"policy" jsonschema:"omitempty,enum=,enum=roundRobin,enum=random,enum=weightedRandom,enum=ipHash,enum=headerHash"` + HeaderHashKey string `json:"headerHashKey" jsonschema:"omitempty"` + StickySession *StickySessionSpec `json:"stickySession" jsonschema:"omitempty"` + HealthCheck *HealthCheckSpec `json:"healthCheck" jsonschema:"omitempty"` +} + +/* +// BaseLoadBalancer implement the common part of load balancer. +type BaseLoadBalancer[TRequest Request, TResponse Response] struct { + spec *LoadBalanceSpec + Servers []*Server + healthyServers atomic.Value + consistentHash *consistent.Consistent + cookieExpire time.Duration + done chan bool + probeClient *http.Client + probeInterval time.Duration + probeTimeout time.Duration +} + +// HealthyServers return healthy servers +func (blb *BaseLoadBalancer[TRequest, TResponse]) HealthyServers() []*Server { + return blb.healthyServers.Load().([]*Server) +} + +// init initializes load balancer +func (blb *BaseLoadBalancer[TRequest, TResponse]) init(spec *LoadBalanceSpec, servers []*Server) { + blb.spec = spec + blb.Servers = servers + blb.healthyServers.Store(servers) + + blb.initStickySession(spec.StickySession, blb.HealthyServers()) + blb.initHealthCheck(spec.HealthCheck, servers) +} + +// initStickySession initializes for sticky session +func (blb *BaseLoadBalancer[TRequest, TResponse]) initStickySession(spec *StickySessionSpec, servers []*Server) { + if spec == nil || len(servers) == 0 { + return + } + + switch spec.Mode { + case StickySessionModeCookieConsistentHash: + blb.initConsistentHash() + case StickySessionModeDurationBased, StickySessionModeApplicationBased: + blb.configLBCookie() + } +} + +// initHealthCheck initializes for health check +func (blb *BaseLoadBalancer[TRequest, TResponse]) initHealthCheck(spec *HealthCheckSpec, servers []*Server) { + if spec == nil || len(servers) == 0 { + return + } + + blb.probeInterval, _ = time.ParseDuration(spec.Interval) + if blb.probeInterval <= 0 { + blb.probeInterval = HealthCheckDefaultInterval + } + blb.probeTimeout, _ = time.ParseDuration(spec.Timeout) + if blb.probeTimeout <= 0 { + blb.probeTimeout = HealthCheckDefaultTimeout + } + if spec.Fails == 0 { + spec.Fails = HealthCheckDefaultFailThreshold + } + if spec.Passes == 0 { + spec.Passes = HealthCheckDefaultPassThreshold + } + blb.probeClient = &http.Client{Timeout: blb.probeTimeout} + ticker := time.NewTicker(blb.probeInterval) + blb.done = make(chan bool) + go func() { + for { + select { + case <-blb.done: + ticker.Stop() + return + case <-ticker.C: + blb.probeServers() + } + } + }() +} + +// probeServers checks health status of servers +func (blb *BaseLoadBalancer[TRequest, TResponse]) probeServers() { + statusChange := false + healthyServers := make([]*Server, 0, 
len(blb.Servers)) + for _, svr := range blb.Servers { + pass := blb.probeHTTP(svr.URL) + healthy, change := svr.RecordHealth(pass, blb.spec.HealthCheck.Passes, blb.spec.HealthCheck.Fails) + if change { + statusChange = true + } + if healthy { + healthyServers = append(healthyServers, svr) + } + } + if statusChange { + blb.healthyServers.Store(healthyServers) + // init consistent hash in sticky session when servers change + blb.initStickySession(blb.spec.StickySession, blb.HealthyServers()) + } +} + +// probeHTTP checks http url status +func (blb *BaseLoadBalancer[TRequest, TResponse]) probeHTTP(url string) bool { + if blb.spec.HealthCheck.Path != "" { + url += blb.spec.HealthCheck.Path + } + res, err := blb.probeClient.Get(url) + if err != nil || res.StatusCode > 500 { + return false + } + return true +} + +// initConsistentHash initializes for consistent hash mode +func (blb *BaseLoadBalancer[TRequest, TResponse]) initConsistentHash() { + members := make([]consistent.Member, len(blb.HealthyServers())) + for i, s := range blb.HealthyServers() { + members[i] = hashMember{server: s} + } + + cfg := consistent.Config{ + PartitionCount: 1024, + ReplicationFactor: 50, + Load: 1.25, + Hasher: hasher{}, + } + blb.consistentHash = consistent.New(members, cfg) +} + +// configLBCookie configures properties for load balancer-generated cookie +func (blb *BaseLoadBalancer[TRequest, TResponse]) configLBCookie() { + if blb.spec.StickySession.LBCookieName == "" { + blb.spec.StickySession.LBCookieName = StickySessionDefaultLBCookieName + } + + blb.cookieExpire, _ = time.ParseDuration(blb.spec.StickySession.LBCookieExpire) + if blb.cookieExpire <= 0 { + blb.cookieExpire = StickySessionDefaultLBCookieExpire + } +} + +// ChooseServer chooses the sticky server if enable +func (blb *BaseLoadBalancer[TRequest, TResponse]) ChooseServer(req *httpprot.Request) *Server { + if blb.spec.StickySession == nil { + return nil + } + + switch blb.spec.StickySession.Mode { + case StickySessionModeCookieConsistentHash: + return blb.chooseServerByConsistentHash(req) + case StickySessionModeDurationBased, StickySessionModeApplicationBased: + return blb.chooseServerByLBCookie(req) + } + + return nil +} + +// chooseServerByConsistentHash chooses server using consistent hash on cookie +func (blb *BaseLoadBalancer[TRequest, TResponse]) chooseServerByConsistentHash(req *httpprot.Request) *Server { + cookie, err := req.Cookie(blb.spec.StickySession.AppCookieName) + if err != nil { + return nil + } + + m := blb.consistentHash.LocateKey([]byte(cookie.Value)) + if m != nil { + return m.(hashMember).server + } + + return nil +} + +// chooseServerByLBCookie chooses server by load balancer-generated cookie +func (blb *BaseLoadBalancer[TRequest, TResponse]) chooseServerByLBCookie(req *httpprot.Request) *Server { + cookie, err := req.Cookie(blb.spec.StickySession.LBCookieName) + if err != nil { + return nil + } + + signed, err := hex.DecodeString(cookie.Value) + if err != nil || len(signed) != KeyLen+sha256.Size { + return nil + } + + key := signed[:KeyLen] + macBytes := signed[KeyLen:] + for _, s := range blb.HealthyServers() { + mac := hmac.New(sha256.New, key) + mac.Write([]byte(s.ID())) + expected := mac.Sum(nil) + if hmac.Equal(expected, macBytes) { + return s + } + } + + return nil +} + +// ReturnServer does some custom work before return server +func (blb *BaseLoadBalancer[TRequest, TResponse]) ReturnServer(server *Server, req *httpprot.Request, resp *httpprot.Response) { + if blb.spec.StickySession == nil { + return + } + + setCookie 
:= false + switch blb.spec.StickySession.Mode { + case StickySessionModeDurationBased: + setCookie = true + case StickySessionModeApplicationBased: + for _, c := range resp.Cookies() { + if c.Name == blb.spec.StickySession.AppCookieName { + setCookie = true + break + } + } + } + if setCookie { + cookie := &http.Cookie{ + Name: blb.spec.StickySession.LBCookieName, + Value: sign([]byte(server.ID())), + Expires: time.Now().Add(blb.cookieExpire), + } + resp.SetCookie(cookie) + } +} + +// Close closes resources +func (blb *BaseLoadBalancer[TRequest, TResponse]) Close() { + if blb.done != nil { + close(blb.done) + } +} + +*/ diff --git a/pkg/protocols/server.go b/pkg/filters/proxies/server.go similarity index 99% rename from pkg/protocols/server.go rename to pkg/filters/proxies/server.go index 791fcd487e..1fa0032bbc 100644 --- a/pkg/protocols/server.go +++ b/pkg/filters/proxies/server.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package protocols +package proxies import ( "fmt" diff --git a/pkg/protocols/server_test.go b/pkg/filters/proxies/server_test.go similarity index 99% rename from pkg/protocols/server_test.go rename to pkg/filters/proxies/server_test.go index 3304537fa6..607dd22d01 100644 --- a/pkg/protocols/server_test.go +++ b/pkg/filters/proxies/server_test.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package protocols +package proxies import ( "testing" diff --git a/pkg/object/function/worker/ingress.go b/pkg/object/function/worker/ingress.go index 67fa4e5665..c49fb8b3e6 100644 --- a/pkg/object/function/worker/ingress.go +++ b/pkg/object/function/worker/ingress.go @@ -19,10 +19,11 @@ package worker import ( "fmt" - "github.com/megaease/easegress/pkg/object/httpserver/routers" "sync" - "github.com/megaease/easegress/pkg/filters/proxy" + "github.com/megaease/easegress/pkg/object/httpserver/routers" + + proxy "github.com/megaease/easegress/pkg/filters/proxies/httpproxy" "github.com/megaease/easegress/pkg/filters/requestadaptor" "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/object/function/spec" diff --git a/pkg/object/grpcserver/runtime.go b/pkg/object/grpcserver/runtime.go index 705180a6b2..67bb79e0fa 100644 --- a/pkg/object/grpcserver/runtime.go +++ b/pkg/object/grpcserver/runtime.go @@ -24,7 +24,7 @@ import ( "time" "github.com/megaease/easegress/pkg/context" - "github.com/megaease/easegress/pkg/filters/grpcproxy" + "github.com/megaease/easegress/pkg/filters/proxies/grpcproxy" "github.com/megaease/easegress/pkg/graceupdate" "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/supervisor" @@ -242,7 +242,7 @@ func (r *runtime) startServer() { r.setError(err) return } - opts := []grpc.ServerOption{grpc.UnknownServiceHandler(r.mux.handler), grpc.CustomCodec(&grpcprxoy.GrpcCodec{})} + opts := []grpc.ServerOption{grpc.UnknownServiceHandler(r.mux.handler), grpc.CustomCodec(&grpcproxy.GrpcCodec{})} keepaliveOpts := r.buildServerKeepaliveOpt() if len(keepaliveOpts) != 0 { diff --git a/pkg/object/ingresscontroller/translator.go b/pkg/object/ingresscontroller/translator.go index ce2edc8555..c4f5bd9aff 100644 --- a/pkg/object/ingresscontroller/translator.go +++ b/pkg/object/ingresscontroller/translator.go @@ -26,7 +26,7 @@ import ( "github.com/megaease/easegress/pkg/object/httpserver/routers" - "github.com/megaease/easegress/pkg/filters/proxy" + proxy "github.com/megaease/easegress/pkg/filters/proxies/httpproxy" "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/object/httpserver" 
"github.com/megaease/easegress/pkg/object/pipeline" diff --git a/pkg/object/meshcontroller/spec/builder.go b/pkg/object/meshcontroller/spec/builder.go index d8d35689c7..7473f297e4 100644 --- a/pkg/object/meshcontroller/spec/builder.go +++ b/pkg/object/meshcontroller/spec/builder.go @@ -23,7 +23,7 @@ import ( "github.com/megaease/easegress/pkg/filters" "github.com/megaease/easegress/pkg/filters/meshadaptor" "github.com/megaease/easegress/pkg/filters/mock" - "github.com/megaease/easegress/pkg/filters/proxy" + proxy "github.com/megaease/easegress/pkg/filters/proxies/httpproxy" "github.com/megaease/easegress/pkg/filters/ratelimiter" "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/object/pipeline" @@ -262,12 +262,12 @@ func (b *pipelineSpecBuilder) appendProxyWithCanary(param *proxyParam) *pipeline } candidatePools[i] = &proxy.ServerPoolSpec{ BaseServerPoolSpec: proxy.BaseServerPoolSpec{ - Filter: &proxy.RequestMatcherSpec{ - MatchAllHeaders: true, - Headers: headers, - }, LoadBalance: param.lb, }, + Filter: &proxy.RequestMatcherSpec{ + MatchAllHeaders: true, + Headers: headers, + }, Timeout: param.timeout, RetryPolicy: param.retryPolicy, CircuitBreakerPolicy: param.circuitBreakerPolicy, diff --git a/pkg/object/meshcontroller/spec/spec.go b/pkg/object/meshcontroller/spec/spec.go index 1a43c09d26..c66558c8aa 100644 --- a/pkg/object/meshcontroller/spec/spec.go +++ b/pkg/object/meshcontroller/spec/spec.go @@ -23,7 +23,7 @@ import ( "github.com/megaease/easegress/pkg/cluster/customdata" "github.com/megaease/easegress/pkg/filters/mock" - "github.com/megaease/easegress/pkg/filters/proxy" + proxy "github.com/megaease/easegress/pkg/filters/proxies/httpproxy" "github.com/megaease/easegress/pkg/filters/ratelimiter" "github.com/megaease/easegress/pkg/resilience" "github.com/megaease/easegress/pkg/util/urlrule" diff --git a/pkg/object/meshcontroller/spec/spec_test.go b/pkg/object/meshcontroller/spec/spec_test.go index 88ce25da7d..ad449d1137 100644 --- a/pkg/object/meshcontroller/spec/spec_test.go +++ b/pkg/object/meshcontroller/spec/spec_test.go @@ -23,7 +23,7 @@ import ( "testing" "github.com/megaease/easegress/pkg/filters/mock" - "github.com/megaease/easegress/pkg/filters/proxy" + proxy "github.com/megaease/easegress/pkg/filters/proxies/httpproxy" "github.com/megaease/easegress/pkg/filters/ratelimiter" "github.com/megaease/easegress/pkg/logger" _ "github.com/megaease/easegress/pkg/object/httpserver" diff --git a/pkg/registry/registry.go b/pkg/registry/registry.go index d13dbb56a0..ab236c8c9c 100644 --- a/pkg/registry/registry.go +++ b/pkg/registry/registry.go @@ -24,7 +24,6 @@ import ( _ "github.com/megaease/easegress/pkg/filters/connectcontrol" _ "github.com/megaease/easegress/pkg/filters/corsadaptor" _ "github.com/megaease/easegress/pkg/filters/fallback" - _ "github.com/megaease/easegress/pkg/filters/grpcproxy" _ "github.com/megaease/easegress/pkg/filters/headerlookup" _ "github.com/megaease/easegress/pkg/filters/headertojson" _ "github.com/megaease/easegress/pkg/filters/kafka" @@ -34,7 +33,8 @@ import ( _ "github.com/megaease/easegress/pkg/filters/mqttclientauth" _ "github.com/megaease/easegress/pkg/filters/oidcadaptor" _ "github.com/megaease/easegress/pkg/filters/opafilter" - _ "github.com/megaease/easegress/pkg/filters/proxy" + _ "github.com/megaease/easegress/pkg/filters/proxies/grpcproxy" + _ "github.com/megaease/easegress/pkg/filters/proxies/httpproxy" _ "github.com/megaease/easegress/pkg/filters/ratelimiter" _ "github.com/megaease/easegress/pkg/filters/redirector" _ 
"github.com/megaease/easegress/pkg/filters/remotefilter" From 5d4d882eceadab4741e780453e5207cf911be124 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Wed, 1 Feb 2023 18:22:20 +0800 Subject: [PATCH 03/14] refactor load balancer (WIP) --- pkg/filters/proxies/basepool.go | 78 ++-- pkg/filters/proxies/grpcproxy/loadbalance.go | 15 +- pkg/filters/proxies/grpcproxy/pool.go | 118 +----- pkg/filters/proxies/grpcproxy/proxy.go | 10 +- pkg/filters/proxies/healthcheck.go | 74 ++++ pkg/filters/proxies/httpproxy/basepool.go | 171 -------- pkg/filters/proxies/httpproxy/loadbalance.go | 18 +- pkg/filters/proxies/httpproxy/pool.go | 27 +- pkg/filters/proxies/httpproxy/proxy.go | 12 +- pkg/filters/proxies/httpproxy/wspool.go | 19 +- pkg/filters/proxies/httpproxy/wsproxy.go | 4 +- pkg/filters/proxies/loadbalance.go | 390 +++++++++---------- pkg/filters/proxies/server.go | 63 ++- pkg/filters/proxies/stickysession.go | 228 +++++++++++ 14 files changed, 627 insertions(+), 600 deletions(-) create mode 100644 pkg/filters/proxies/healthcheck.go delete mode 100644 pkg/filters/proxies/httpproxy/basepool.go create mode 100644 pkg/filters/proxies/stickysession.go diff --git a/pkg/filters/proxies/basepool.go b/pkg/filters/proxies/basepool.go index 428bed13fb..db5c3a8a9d 100644 --- a/pkg/filters/proxies/basepool.go +++ b/pkg/filters/proxies/basepool.go @@ -17,17 +17,33 @@ package proxies -/* -// BaseServerPool defines a server pool. -type BaseServerPool struct { - name string +import ( + "fmt" + "sync" + "sync/atomic" + + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/object/serviceregistry" + "github.com/megaease/easegress/pkg/supervisor" + "github.com/megaease/easegress/pkg/util/stringtool" +) + +// ServerPoolImpl is the interface for server pool. +type ServerPoolImpl interface { + CreateLoadBalancer(spec *LoadBalanceSpec, servers []*Server) LoadBalancer +} + +// ServerPoolBase defines a base server pool. +type ServerPoolBase struct { + spImpl ServerPoolImpl + Name string done chan struct{} wg sync.WaitGroup loadBalancer atomic.Value } -// BaseServerPoolSpec is the spec for a base server pool. -type BaseServerPoolSpec struct { +// ServerPoolBaseSpec is the spec for a base server pool. +type ServerPoolBaseSpec struct { ServerTags []string `json:"serverTags" jsonschema:"omitempty,uniqueItems=true"` Servers []*Server `json:"servers" jsonschema:"omitempty"` ServiceRegistry string `json:"serviceRegistry" jsonschema:"omitempty"` @@ -36,7 +52,7 @@ type BaseServerPoolSpec struct { } // Validate validates ServerPoolSpec. -func (sps *BaseServerPoolSpec) Validate() error { +func (sps *ServerPoolBaseSpec) Validate() error { if sps.ServiceName == "" && len(sps.Servers) == 0 { return fmt.Errorf("both serviceName and servers are empty") } @@ -60,12 +76,12 @@ func (sps *BaseServerPoolSpec) Validate() error { } // Init initialize the base server pool according to the spec. 
-func (bsp *BaseServerPool) Init(super *supervisor.Supervisor, name string, spec *BaseServerPoolSpec) { - bsp.name = name - bsp.done = make(chan struct{}) +func (spb *ServerPoolBase) Init(spImpl ServerPoolImpl, super *supervisor.Supervisor, name string, spec *ServerPoolBaseSpec) { + spb.Name = name + spb.done = make(chan struct{}) if spec.ServiceRegistry == "" || spec.ServiceName == "" { - bsp.createLoadBalancer(spec.LoadBalance, spec.Servers) + spb.createLoadBalancer(spec.LoadBalance, spec.Servers) return } @@ -77,36 +93,36 @@ func (bsp *BaseServerPool) Init(super *supervisor.Supervisor, name string, spec if err != nil { msgFmt := "first try to use service %s/%s failed(will try again): %v" logger.Warnf(msgFmt, spec.ServiceRegistry, spec.ServiceName, err) - bsp.createLoadBalancer(spec.LoadBalance, spec.Servers) + spb.createLoadBalancer(spec.LoadBalance, spec.Servers) } - bsp.useService(spec, instances) + spb.useService(spec, instances) watcher := registry.NewServiceWatcher(spec.ServiceRegistry, spec.ServiceName) - bsp.wg.Add(1) + spb.wg.Add(1) go func() { for { select { - case <-bsp.done: + case <-spb.done: watcher.Stop() - bsp.wg.Done() + spb.wg.Done() return case event := <-watcher.Watch(): - bsp.useService(spec, event.Instances) + spb.useService(spec, event.Instances) } } }() } // LoadBalancer returns the load balancer of the server pool. -func (bsp *BaseServerPool) LoadBalancer() LoadBalancer { - if v := bsp.loadBalancer.Load(); v != nil { +func (spb *ServerPoolBase) LoadBalancer() LoadBalancer { + if v := spb.loadBalancer.Load(); v != nil { return v.(LoadBalancer) } return nil } -func (bsp *BaseServerPool) createLoadBalancer(spec *LoadBalanceSpec, servers []*Server) { +func (spb *ServerPoolBase) createLoadBalancer(spec *LoadBalanceSpec, servers []*Server) { for _, server := range servers { server.CheckAddrPattern() } @@ -115,13 +131,13 @@ func (bsp *BaseServerPool) createLoadBalancer(spec *LoadBalanceSpec, servers []* spec = &LoadBalanceSpec{} } - lb := NewLoadBalancer(spec, servers) - if old := bsp.loadBalancer.Swap(lb); old != nil { + lb := spb.spImpl.CreateLoadBalancer(spec, servers) + if old := spb.loadBalancer.Swap(lb); old != nil { old.(LoadBalancer).Close() } } -func (bsp *BaseServerPool) useService(spec *BaseServerPoolSpec, instances map[string]*serviceregistry.ServiceInstanceSpec) { +func (spb *ServerPoolBase) useService(spec *ServerPoolBaseSpec, instances map[string]*serviceregistry.ServiceInstanceSpec) { servers := make([]*Server, 0) for _, instance := range instances { @@ -149,15 +165,17 @@ func (bsp *BaseServerPool) useService(spec *BaseServerPoolSpec, instances map[st servers = spec.Servers } - bsp.createLoadBalancer(spec.LoadBalance, servers) + spb.createLoadBalancer(spec.LoadBalance, servers) } -func (bsp *BaseServerPool) close() { - close(bsp.done) - bsp.wg.Wait() - if lb := bsp.LoadBalancer(); lb != nil { +func (spb *ServerPoolBase) Done() <-chan struct{} { + return spb.done +} + +func (spb *ServerPoolBase) Close() { + close(spb.done) + spb.wg.Wait() + if lb := spb.LoadBalancer(); lb != nil { lb.Close() } } - -*/ diff --git a/pkg/filters/proxies/grpcproxy/loadbalance.go b/pkg/filters/proxies/grpcproxy/loadbalance.go index 568cac6938..f891a9db1a 100644 --- a/pkg/filters/proxies/grpcproxy/loadbalance.go +++ b/pkg/filters/proxies/grpcproxy/loadbalance.go @@ -17,19 +17,7 @@ package grpcproxy -import ( - "fmt" - "hash/fnv" - "math/rand" - "net" - "sync" - "sync/atomic" - - "github.com/megaease/easegress/pkg/protocols/grpcprot" - - 
"github.com/megaease/easegress/pkg/logger" -) - +/* // LoadBalancer is the interface of an gRPC load balancer. type LoadBalancer interface { ChooseServer(req *grpcprot.Request) *Server @@ -250,3 +238,4 @@ func (f *forwardLoadBalancer) ChooseServer(req *grpcprot.Request) *Server { func (f *forwardLoadBalancer) ReturnServer(s *Server) { f.server.Put(s) } +*/ diff --git a/pkg/filters/proxies/grpcproxy/pool.go b/pkg/filters/proxies/grpcproxy/pool.go index 4635254d70..0e4e1456fb 100644 --- a/pkg/filters/proxies/grpcproxy/pool.go +++ b/pkg/filters/proxies/grpcproxy/pool.go @@ -21,8 +21,6 @@ import ( stdcontext "context" "fmt" "io" - "sync" - "sync/atomic" "time" "github.com/megaease/easegress/pkg/protocols/grpcprot" @@ -36,9 +34,7 @@ import ( "github.com/megaease/easegress/pkg/context" "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/object/serviceregistry" "github.com/megaease/easegress/pkg/resilience" - "github.com/megaease/easegress/pkg/util/stringtool" ) const ( @@ -99,14 +95,12 @@ var ( // ServerPool defines a server pool. type ServerPool struct { + BaseServerPool + proxy *Proxy spec *ServerPoolSpec - done chan struct{} - wg sync.WaitGroup - name string filter RequestMatcher - loadBalancer atomic.Value timeout time.Duration connectTimeout time.Duration circuitBreakerWrapper resilience.Wrapper @@ -115,13 +109,10 @@ type ServerPool struct { // ServerPoolSpec is the spec for a server pool. type ServerPoolSpec struct { + BaseServerPoolSpec `json:",inline"` + SpanName string `json:"spanName" jsonschema:"omitempty"` Filter *RequestMatcherSpec `json:"filter" jsonschema:"omitempty"` - ServerTags []string `json:"serverTags" jsonschema:"omitempty,uniqueItems=true"` - Servers []*Server `json:"servers" jsonschema:"omitempty"` - ServiceRegistry string `json:"serviceRegistry" jsonschema:"omitempty"` - ServiceName string `json:"serviceName" jsonschema:"omitempty"` - LoadBalance *LoadBalanceSpec `json:"loadBalance" jsonschema:"omitempty"` Timeout string `json:"timeout" jsonschema:"omitempty,format=duration"` ConnectTimeout string `json:"connectTimeout" jsonschema:"omitempty,format=duration"` CircuitBreakerPolicy string `json:"circuitBreakerPolicy" jsonschema:"omitempty"` @@ -158,19 +149,13 @@ func NewServerPool(proxy *Proxy, spec *ServerPoolSpec, name string) *ServerPool sp := &ServerPool{ proxy: proxy, spec: spec, - done: make(chan struct{}), - name: name, } if spec.Filter != nil { sp.filter = NewRequestMatcher(spec.Filter) } - if spec.ServiceRegistry == "" || spec.ServiceName == "" { - sp.createLoadBalancer(sp.spec.Servers) - } else { - sp.watchServers() - } + sp.BaseServerPool.Init(sp, proxy.super, name, &spec.BaseServerPoolSpec) if spec.Timeout != "" { sp.timeout, _ = time.ParseDuration(spec.Timeout) @@ -185,83 +170,8 @@ func NewServerPool(proxy *Proxy, spec *ServerPoolSpec, name string) *ServerPool return sp } -// LoadBalancer returns the load balancer of the server pool. 
-func (sp *ServerPool) LoadBalancer() LoadBalancer { - return sp.loadBalancer.Load().(LoadBalancer) -} - -func (sp *ServerPool) createLoadBalancer(servers []*Server) { - for _, server := range servers { - server.CheckAddrPattern() - } - - spec := sp.spec.LoadBalance - if spec == nil { - spec = &LoadBalanceSpec{} - } - - lb := NewLoadBalancer(spec, servers) - sp.loadBalancer.Store(lb) -} - -func (sp *ServerPool) watchServers() { - entity := sp.proxy.super.MustGetSystemController(serviceregistry.Kind) - registry := entity.Instance().(*serviceregistry.ServiceRegistry) - - instances, err := registry.ListServiceInstances(sp.spec.ServiceRegistry, sp.spec.ServiceName) - if err != nil { - msgFmt := "first try to use service %s/%s failed(will try again): %v" - logger.Warnf(msgFmt, sp.spec.ServiceRegistry, sp.spec.ServiceName, err) - sp.createLoadBalancer(sp.spec.Servers) - } - - sp.useService(instances) - - watcher := registry.NewServiceWatcher(sp.spec.ServiceRegistry, sp.spec.ServiceName) - sp.wg.Add(1) - go func() { - for { - select { - case <-sp.done: - watcher.Stop() - sp.wg.Done() - return - case event := <-watcher.Watch(): - sp.useService(event.Instances) - } - } - }() -} - -func (sp *ServerPool) useService(instances map[string]*serviceregistry.ServiceInstanceSpec) { - servers := make([]*Server, 0) - - for _, instance := range instances { - // default to true in case of sp.spec.ServerTags is empty - match := true - - for _, tag := range sp.spec.ServerTags { - if match = stringtool.StrInSlice(tag, instance.Tags); match { - break - } - } - - if match { - servers = append(servers, &Server{ - URL: instance.URL(), - Tags: instance.Tags, - Weight: instance.Weight, - }) - } - } - - if len(servers) == 0 { - msgFmt := "%s/%s: no service instance satisfy tags: %v" - logger.Warnf(msgFmt, sp.spec.ServiceRegistry, sp.spec.ServiceName, sp.spec.ServerTags) - servers = sp.spec.Servers - } - - sp.createLoadBalancer(servers) +func (sp *ServerPool) CreateLoadBalancer(spec *LoadBalanceSpec, servers []*Server) LoadBalancer { + return nil } // InjectResiliencePolicy injects resilience policies to the server pool. @@ -326,7 +236,7 @@ func (sp *ServerPool) handle(ctx *context.Context) string { // CircuitBreaker is the most outside resiliencer, if the error // is ErrShortCircuited, we are sure the response is nil. if err == resilience.ErrShortCircuited { - logger.Debugf("%s: short circuited by circuit break policy", sp.name) + logger.Debugf("%s: short circuited by circuit break policy", sp.Name) spCtx.AddTag("short circuited") sp.buildOutputResponse(spCtx, status.Newf(codes.Unavailable, "short circuited by circuit break policy")) return resultShortCircuited @@ -348,12 +258,11 @@ func (sp *ServerPool) doHandle(ctx stdcontext.Context, spCtx *serverPoolContext) svr := lb.ChooseServer(spCtx.req) // if there's no available server. 
if svr == nil { - logger.Debugf("%s: no available server", sp.name) + logger.Debugf("%s: no available server", sp.Name) return serverPoolError{status.New(codes.InvalidArgument, "no available server"), resultClientError} } - if f, ok := lb.(ReusableServerLB); ok { - defer f.ReturnServer(svr) - } + defer lb.ReturnServer(svr, spCtx.req, spCtx.resp) + // maybe be rewrite by grpcserver.MuxPath#rewrite fullMethodName := spCtx.req.FullMethod() if fullMethodName == "" { @@ -428,11 +337,6 @@ func (sp *ServerPool) buildOutputResponse(spCtx *serverPoolContext, s *status.St spCtx.SetOutputResponse(spCtx.resp) } -func (sp *ServerPool) close() { - close(sp.done) - sp.wg.Wait() -} - func (sp *ServerPool) forwardE2E(src grpc.Stream, dst grpc.Stream, header *grpcprot.Header) chan error { ret := make(chan error, 1) go func() { diff --git a/pkg/filters/proxies/grpcproxy/proxy.go b/pkg/filters/proxies/grpcproxy/proxy.go index 2d019241b2..d1a78854c3 100644 --- a/pkg/filters/proxies/grpcproxy/proxy.go +++ b/pkg/filters/proxies/grpcproxy/proxy.go @@ -84,7 +84,11 @@ type ( } // Server is the backend server. - Server = proxies.Server + Server = proxies.Server + LoadBalancer = proxies.LoadBalancer + LoadBalanceSpec = proxies.LoadBalanceSpec + BaseServerPool = proxies.ServerPoolBase + BaseServerPoolSpec = proxies.ServerPoolBaseSpec ) // Validate validates Spec. @@ -157,10 +161,10 @@ func (p *Proxy) Status() interface{} { // Close closes Proxy. func (p *Proxy) Close() { - p.mainPool.close() + p.mainPool.Close() for _, v := range p.candidatePools { - v.close() + v.Close() } } diff --git a/pkg/filters/proxies/healthcheck.go b/pkg/filters/proxies/healthcheck.go new file mode 100644 index 0000000000..d1ab47a261 --- /dev/null +++ b/pkg/filters/proxies/healthcheck.go @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package proxies + +import ( + "net/http" + "time" +) + +// HealthCheckSpec is the spec for health check. +type HealthCheckSpec struct { + // Interval is the interval duration for health check. + Interval string `json:"interval" jsonschema:"omitempty,format=duration"` + // Path is the health check path for server + Path string `json:"path" jsonschema:"omitempty"` + // Timeout is the timeout duration for health check, default is 3. + Timeout string `json:"timeout" jsonschema:"omitempty,format=duration"` + // Fails is the consecutive fails count for assert fail, default is 1. + Fails int `json:"fails" jsonschema:"omitempty,minimum=1"` + // Passes is the consecutive passes count for assert pass, default is 1. + Passes int `json:"passes" jsonschema:"omitempty,minimum=1"` +} + +// HealthChecker checks whether a server is healthy or not. +type HealthChecker interface { + Check(svr *Server) bool + Close() +} + +// HTTPHealthChecker is a health checker for HTTP protocol. +type HTTPHealthChecker struct { + path string + client *http.Client +} + +// NewHTTPHealthChecker creates a new HTTPHealthChecker. 
+func NewHTTPHealthChecker(spec *HealthCheckSpec) HealthChecker { + timeout, _ := time.ParseDuration(spec.Timeout) + if timeout <= 0 { + timeout = 3 * time.Second + } + + return &HTTPHealthChecker{ + path: spec.Path, + client: &http.Client{Timeout: timeout}, + } +} + +// Check checks whether a server is healthy or not. +func (hc *HTTPHealthChecker) Check(svr *Server) bool { + // TODO: should use url.JoinPath? + url := svr.URL + hc.path + resp, err := hc.client.Get(url) + return err == nil && resp.StatusCode < 500 +} + +// Close closes the health checker +func (hc *HTTPHealthChecker) Close() { +} diff --git a/pkg/filters/proxies/httpproxy/basepool.go b/pkg/filters/proxies/httpproxy/basepool.go deleted file mode 100644 index eb3ccc5978..0000000000 --- a/pkg/filters/proxies/httpproxy/basepool.go +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package httpproxy - -import ( - "fmt" - "sync" - "sync/atomic" - - "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/object/serviceregistry" - "github.com/megaease/easegress/pkg/supervisor" - "github.com/megaease/easegress/pkg/util/stringtool" -) - -// BaseServerPool defines a server pool. -type BaseServerPool struct { - name string - done chan struct{} - wg sync.WaitGroup - loadBalancer atomic.Value -} - -// BaseServerPoolSpec is the spec for a base server pool. -type BaseServerPoolSpec struct { - ServerTags []string `json:"serverTags" jsonschema:"omitempty,uniqueItems=true"` - Servers []*Server `json:"servers" jsonschema:"omitempty"` - ServiceRegistry string `json:"serviceRegistry" jsonschema:"omitempty"` - ServiceName string `json:"serviceName" jsonschema:"omitempty"` - LoadBalance *LoadBalanceSpec `json:"loadBalance" jsonschema:"omitempty"` -} - -// Validate validates ServerPoolSpec. -func (sps *BaseServerPoolSpec) Validate() error { - if sps.ServiceName == "" && len(sps.Servers) == 0 { - return fmt.Errorf("both serviceName and servers are empty") - } - - serversGotWeight := 0 - for _, server := range sps.Servers { - if server.Weight > 0 { - serversGotWeight++ - } - } - if serversGotWeight > 0 && serversGotWeight < len(sps.Servers) { - msgFmt := "not all servers have weight(%d/%d)" - return fmt.Errorf(msgFmt, serversGotWeight, len(sps.Servers)) - } - - if sps.ServiceName != "" && sps.LoadBalance.HealthCheck != nil { - return fmt.Errorf("can not open health check for service discovery") - } - - return nil -} - -// Init initialize the base server pool according to the spec. 
-func (bsp *BaseServerPool) Init(super *supervisor.Supervisor, name string, spec *BaseServerPoolSpec) { - bsp.name = name - bsp.done = make(chan struct{}) - - if spec.ServiceRegistry == "" || spec.ServiceName == "" { - bsp.createLoadBalancer(spec.LoadBalance, spec.Servers) - return - } - - // watch service registry - entity := super.MustGetSystemController(serviceregistry.Kind) - registry := entity.Instance().(*serviceregistry.ServiceRegistry) - - instances, err := registry.ListServiceInstances(spec.ServiceRegistry, spec.ServiceName) - if err != nil { - msgFmt := "first try to use service %s/%s failed(will try again): %v" - logger.Warnf(msgFmt, spec.ServiceRegistry, spec.ServiceName, err) - bsp.createLoadBalancer(spec.LoadBalance, spec.Servers) - } - - bsp.useService(spec, instances) - - watcher := registry.NewServiceWatcher(spec.ServiceRegistry, spec.ServiceName) - bsp.wg.Add(1) - go func() { - for { - select { - case <-bsp.done: - watcher.Stop() - bsp.wg.Done() - return - case event := <-watcher.Watch(): - bsp.useService(spec, event.Instances) - } - } - }() -} - -// LoadBalancer returns the load balancer of the server pool. -func (bsp *BaseServerPool) LoadBalancer() LoadBalancer { - if v := bsp.loadBalancer.Load(); v != nil { - return v.(LoadBalancer) - } - return nil -} - -func (bsp *BaseServerPool) createLoadBalancer(spec *LoadBalanceSpec, servers []*Server) { - for _, server := range servers { - server.CheckAddrPattern() - } - - if spec == nil { - spec = &LoadBalanceSpec{} - } - - lb := NewLoadBalancer(spec, servers) - if old := bsp.loadBalancer.Swap(lb); old != nil { - old.(LoadBalancer).Close() - } -} - -func (bsp *BaseServerPool) useService(spec *BaseServerPoolSpec, instances map[string]*serviceregistry.ServiceInstanceSpec) { - servers := make([]*Server, 0) - - for _, instance := range instances { - // default to true in case of sp.spec.ServerTags is empty - match := true - - for _, tag := range spec.ServerTags { - if match = stringtool.StrInSlice(tag, instance.Tags); match { - break - } - } - - if match { - servers = append(servers, &Server{ - URL: instance.URL(), - Tags: instance.Tags, - Weight: instance.Weight, - }) - } - } - - if len(servers) == 0 { - msgFmt := "%s/%s: no service instance satisfy tags: %v" - logger.Warnf(msgFmt, spec.ServiceRegistry, spec.ServiceName, spec.ServerTags) - servers = spec.Servers - } - - bsp.createLoadBalancer(spec.LoadBalance, servers) -} - -func (bsp *BaseServerPool) close() { - close(bsp.done) - bsp.wg.Wait() - if lb := bsp.LoadBalancer(); lb != nil { - lb.Close() - } -} diff --git a/pkg/filters/proxies/httpproxy/loadbalance.go b/pkg/filters/proxies/httpproxy/loadbalance.go index f82b1d7fbd..b31edd7cb3 100644 --- a/pkg/filters/proxies/httpproxy/loadbalance.go +++ b/pkg/filters/proxies/httpproxy/loadbalance.go @@ -18,22 +18,7 @@ package httpproxy import ( - "crypto/hmac" - "crypto/sha256" - "encoding/binary" - "encoding/hex" - "fmt" - "hash/fnv" - "hash/maphash" - "math/rand" - "net/http" - "sync/atomic" "time" - - "github.com/buraksezer/consistent" - "github.com/megaease/easegress/pkg/logger" - "github.com/megaease/easegress/pkg/protocols/httpprot" - "github.com/spaolacci/murmur3" ) const ( @@ -69,6 +54,7 @@ const ( HealthCheckDefaultPassThreshold = 1 ) +/* // LoadBalancer is the interface of an HTTP load balancer. 
type LoadBalancer interface { ChooseServer(req *httpprot.Request) *Server @@ -534,3 +520,5 @@ func (lb *headerHashLoadBalancer) ChooseServer(req *httpprot.Request) *Server { hash.Write([]byte(v)) return lb.HealthyServers()[hash.Sum32()%uint32(len(lb.HealthyServers()))] } + +*/ diff --git a/pkg/filters/proxies/httpproxy/pool.go b/pkg/filters/proxies/httpproxy/pool.go index 685fbbca95..7faea87a26 100644 --- a/pkg/filters/proxies/httpproxy/pool.go +++ b/pkg/filters/proxies/httpproxy/pool.go @@ -29,6 +29,7 @@ import ( gohttpstat "github.com/tcnksm/go-httpstat" "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/filters/proxies" "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/protocols/httpprot" "github.com/megaease/easegress/pkg/protocols/httpprot/httpstat" @@ -208,7 +209,7 @@ func NewServerPool(proxy *Proxy, spec *ServerPoolSpec, name string) *ServerPool sp.filter = NewRequestMatcher(spec.Filter) } - sp.BaseServerPool.Init(proxy.super, name, &spec.BaseServerPoolSpec) + sp.BaseServerPool.Init(sp, proxy.super, name, &spec.BaseServerPoolSpec) if spec.MemoryCache != nil { sp.memoryCache = NewMemoryCache(spec.MemoryCache) @@ -227,6 +228,12 @@ func NewServerPool(proxy *Proxy, spec *ServerPoolSpec, name string) *ServerPool return sp } +func (sp *ServerPool) CreateLoadBalancer(spec *LoadBalanceSpec, servers []*Server) LoadBalancer { + lb := proxies.NewGeneralLoadBalancer(spec, servers) + lb.Init(proxies.NewHTTPSessionSticker, proxies.NewHTTPHealthChecker, nil) + return lb +} + func (sp *ServerPool) status() *ServerPoolStatus { s := &ServerPoolStatus{Stat: sp.httpStat.Status()} return s @@ -274,7 +281,7 @@ func (sp *ServerPool) collectMetrics(spCtx *serverPoolContext) { sp.httpStat.Stat(metric) sp.exportPrometheusMetrics(metric) spCtx.LazyAddTag(func() string { - return sp.name + "#duration: " + metric.Duration.String() + return sp.Name + "#duration: " + metric.Duration.String() }) } @@ -310,7 +317,7 @@ func (sp *ServerPool) handleMirror(spCtx *serverPoolContext) { err := spCtx.prepareRequest(svr, spCtx.req.Context(), true) if err != nil { - logger.Errorf("%s: failed to prepare request: %v", sp.name, err) + logger.Errorf("%s: failed to prepare request: %v", sp.Name, err) return } @@ -362,7 +369,7 @@ func (sp *ServerPool) handle(ctx *context.Context, mirror bool) string { spanName := sp.spec.SpanName if spanName == "" { - spanName = sp.name + spanName = sp.Name } spCtx.span = ctx.Span().NewChild(spanName) defer spCtx.span.End() @@ -388,7 +395,7 @@ func (sp *ServerPool) handle(ctx *context.Context, mirror bool) string { // CircuitBreaker is the most outside resiliencer, if the error // is ErrShortCircuited, we are sure the response is nil. if err == resilience.ErrShortCircuited { - logger.Errorf("%s: short circuited by circuit break policy", sp.name) + logger.Errorf("%s: short circuited by circuit break policy", sp.Name) spCtx.AddTag("short circuited") sp.buildFailureResponse(spCtx, http.StatusServiceUnavailable) return resultShortCircuited @@ -412,7 +419,7 @@ func (sp *ServerPool) doHandle(stdctx stdcontext.Context, spCtx *serverPoolConte // if there's no available server. 
if svr == nil { - logger.Errorf("%s: no available server", sp.name) + logger.Errorf("%s: no available server", sp.Name) return serverPoolError{http.StatusServiceUnavailable, resultInternalError} } @@ -420,13 +427,13 @@ func (sp *ServerPool) doHandle(stdctx stdcontext.Context, spCtx *serverPoolConte statResult := &gohttpstat.Result{} stdctx = gohttpstat.WithHTTPStat(stdctx, statResult) if err := spCtx.prepareRequest(svr, stdctx, false); err != nil { - logger.Errorf("%s: failed to prepare request: %v", sp.name, err) + logger.Errorf("%s: failed to prepare request: %v", sp.Name, err) return serverPoolError{http.StatusInternalServerError, resultInternalError} } resp, err := fnSendRequest(spCtx.stdReq, sp.proxy.client) if err != nil { - logger.Errorf("%s: failed to send request: %v", sp.name, err) + logger.Errorf("%s: failed to send request: %v", sp.Name, err) statResult.End(fasttime.Now()) spCtx.LazyAddTag(func() string { @@ -494,7 +501,7 @@ func (sp *ServerPool) buildResponse(spCtx *serverPoolContext) (err error) { resp, err := httpprot.NewResponse(spCtx.stdResp) if err != nil { - logger.Errorf("%s: NewResponse returns an error: %v", sp.name, err) + logger.Errorf("%s: NewResponse returns an error: %v", sp.Name, err) body.Close() return err } @@ -504,7 +511,7 @@ func (sp *ServerPool) buildResponse(spCtx *serverPoolContext) (err error) { maxBodySize = sp.proxy.spec.ServerMaxBodySize } if err = resp.FetchPayload(maxBodySize); err != nil { - logger.Errorf("%s: failed to fetch response payload: %v", sp.name, err) + logger.Errorf("%s: failed to fetch response payload: %v", sp.Name, err) body.Close() return err } diff --git a/pkg/filters/proxies/httpproxy/proxy.go b/pkg/filters/proxies/httpproxy/proxy.go index beabc64d4f..db684b2ecd 100644 --- a/pkg/filters/proxies/httpproxy/proxy.go +++ b/pkg/filters/proxies/httpproxy/proxy.go @@ -129,7 +129,11 @@ type ( } // Server is the backend server. - Server = proxies.Server + Server = proxies.Server + LoadBalancer = proxies.LoadBalancer + LoadBalanceSpec = proxies.LoadBalanceSpec + BaseServerPool = proxies.ServerPoolBase + BaseServerPoolSpec = proxies.ServerPoolBaseSpec ) // Validate validates Spec. @@ -284,14 +288,14 @@ func (p *Proxy) Status() interface{} { // Close closes Proxy. func (p *Proxy) Close() { - p.mainPool.close() + p.mainPool.Close() for _, v := range p.candidatePools { - v.close() + v.Close() } if p.mirrorPool != nil { - p.mirrorPool.close() + p.mirrorPool.Close() } } diff --git a/pkg/filters/proxies/httpproxy/wspool.go b/pkg/filters/proxies/httpproxy/wspool.go index 361b94c187..a89121c203 100644 --- a/pkg/filters/proxies/httpproxy/wspool.go +++ b/pkg/filters/proxies/httpproxy/wspool.go @@ -59,10 +59,14 @@ func NewWebSocketServerPool(proxy *WebSocketProxy, spec *WebSocketServerPoolSpec if spec.Filter != nil { sp.filter = NewRequestMatcher(spec.Filter) } - sp.Init(proxy.super, name, &spec.BaseServerPoolSpec) + sp.Init(sp, proxy.super, name, &spec.BaseServerPoolSpec) return sp } +func (sp *WebSocketServerPool) CreateLoadBalancer(spec *LoadBalanceSpec, servers []*Server) LoadBalancer { + return nil +} + func (sp *WebSocketServerPool) buildFailureResponse(ctx *context.Context, statusCode int) { resp, _ := ctx.GetOutputResponse().(*httpprot.Response) if resp == nil { @@ -153,7 +157,7 @@ func (sp *WebSocketServerPool) handle(ctx *context.Context) (result string) { // if there's no available server. 
if svr == nil { - logger.Errorf("%s: no available server", sp.name) + logger.Errorf("%s: no available server", sp.Name) sp.buildFailureResponse(ctx, http.StatusServiceUnavailable) metric.StatusCode = http.StatusServiceUnavailable return resultInternalError @@ -161,7 +165,7 @@ func (sp *WebSocketServerPool) handle(ctx *context.Context) (result string) { stdw, _ := ctx.GetData("HTTP_RESPONSE_WRITER").(http.ResponseWriter) if stdw == nil { - logger.Errorf("%s: cannot get response writer from context", sp.name) + logger.Errorf("%s: cannot get response writer from context", sp.Name) sp.buildFailureResponse(ctx, http.StatusInternalServerError) metric.StatusCode = http.StatusInternalServerError return resultInternalError @@ -172,7 +176,7 @@ func (sp *WebSocketServerPool) handle(ctx *context.Context) (result string) { // dial to the server svrConn, err := sp.dialServer(svr, req) if err != nil { - logger.Errorf("%s: dial to %s failed: %v", sp.name, svr.URL, err) + logger.Errorf("%s: dial to %s failed: %v", sp.Name, svr.URL, err) return } @@ -201,7 +205,7 @@ func (sp *WebSocketServerPool) handle(ctx *context.Context) (result string) { select { case <-stop: break - case <-sp.done: + case <-sp.Done(): svrConn.Close() clntConn.Close() } @@ -231,8 +235,3 @@ func (sp *WebSocketServerPool) status() *ServerPoolStatus { Stat: sp.httpStat.Status(), } } - -func (sp *WebSocketServerPool) close() { - close(sp.done) - sp.wg.Wait() -} diff --git a/pkg/filters/proxies/httpproxy/wsproxy.go b/pkg/filters/proxies/httpproxy/wsproxy.go index 0736cb0391..0e20463465 100644 --- a/pkg/filters/proxies/httpproxy/wsproxy.go +++ b/pkg/filters/proxies/httpproxy/wsproxy.go @@ -155,10 +155,10 @@ func (p *WebSocketProxy) Status() interface{} { // Close closes WebSocketProxy. func (p *WebSocketProxy) Close() { - p.mainPool.close() + p.mainPool.Close() for _, v := range p.candidatePools { - v.close() + v.Close() } } diff --git a/pkg/filters/proxies/loadbalance.go b/pkg/filters/proxies/loadbalance.go index caaad1e6d4..ba94cf8a5e 100644 --- a/pkg/filters/proxies/loadbalance.go +++ b/pkg/filters/proxies/loadbalance.go @@ -1,39 +1,40 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package proxies -import "github.com/megaease/easegress/pkg/protocols" +import ( + "fmt" + "hash/fnv" + "math/rand" + "sync/atomic" + "time" + + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/protocols" +) // LoadBalancer is the interface of a load balancer. -type LoadBalancer[Request protocols.Request, Response protocols.Response] interface { - ChooseServer(req Request) *Server - ReturnServer(server *Server, req Request, resp Response) +type LoadBalancer interface { + ChooseServer(req protocols.Request) *Server + ReturnServer(server *Server, req protocols.Request, resp protocols.Response) Close() } -// StickySessionSpec is the spec for sticky session. 
-type StickySessionSpec struct { - Mode string `json:"mode" jsonschema:"required,enum=CookieConsistentHash,enum=DurationBased,enum=ApplicationBased"` - // AppCookieName is the user-defined cookie name in CookieConsistentHash and ApplicationBased mode. - AppCookieName string `json:"appCookieName" jsonschema:"omitempty"` - // LBCookieName is the generated cookie name in DurationBased and ApplicationBased mode. - LBCookieName string `json:"lbCookieName" jsonschema:"omitempty"` - // LBCookieExpire is the expire seconds of generated cookie in DurationBased and ApplicationBased mode. - LBCookieExpire string `json:"lbCookieExpire" jsonschema:"omitempty,format=duration"` -} - -// HealthCheckSpec is the spec for health check. -type HealthCheckSpec struct { - // Interval is the interval duration for health check. - Interval string `json:"interval" jsonschema:"omitempty,format=duration"` - // Path is the health check path for server - Path string `json:"path" jsonschema:"omitempty"` - // Timeout is the timeout duration for health check, default is 3. - Timeout string `json:"timeout" jsonschema:"omitempty,format=duration"` - // Fails is the consecutive fails count for assert fail, default is 1. - Fails int `json:"fails" jsonschema:"omitempty,minimum=1"` - // Passes is the consecutive passes count for assert pass, default is 1. - Passes int `json:"passes" jsonschema:"omitempty,minimum=1"` -} - // LoadBalanceSpec is the spec to create a load balancer. type LoadBalanceSpec struct { Policy string `json:"policy" jsonschema:"omitempty,enum=,enum=roundRobin,enum=random,enum=weightedRandom,enum=ipHash,enum=headerHash"` @@ -42,236 +43,231 @@ type LoadBalanceSpec struct { HealthCheck *HealthCheckSpec `json:"healthCheck" jsonschema:"omitempty"` } -/* -// BaseLoadBalancer implement the common part of load balancer. -type BaseLoadBalancer[TRequest Request, TResponse Response] struct { - spec *LoadBalanceSpec - Servers []*Server - healthyServers atomic.Value - consistentHash *consistent.Consistent - cookieExpire time.Duration - done chan bool - probeClient *http.Client - probeInterval time.Duration - probeTimeout time.Duration +// LoadBalancePolicy is the interface of a load balance policy. +type LoadBalancePolicy interface { + ChooseServer(req protocols.Request, sg *ServerGroup) *Server + Close() } -// HealthyServers return healthy servers -func (blb *BaseLoadBalancer[TRequest, TResponse]) HealthyServers() []*Server { - return blb.healthyServers.Load().([]*Server) -} +// GeneralLoadBalancer implements a general purpose load balancer. +type GeneralLoadBalancer struct { + spec *LoadBalanceSpec + servers []*Server + healthyServers atomic.Pointer[ServerGroup] -// init initializes load balancer -func (blb *BaseLoadBalancer[TRequest, TResponse]) init(spec *LoadBalanceSpec, servers []*Server) { - blb.spec = spec - blb.Servers = servers - blb.healthyServers.Store(servers) + done chan struct{} - blb.initStickySession(spec.StickySession, blb.HealthyServers()) - blb.initHealthCheck(spec.HealthCheck, servers) + lbp LoadBalancePolicy + ss SessionSticker + hc HealthChecker } -// initStickySession initializes for sticky session -func (blb *BaseLoadBalancer[TRequest, TResponse]) initStickySession(spec *StickySessionSpec, servers []*Server) { - if spec == nil || len(servers) == 0 { - return +// NewGeneralLoadBalancer creates a new GeneralLoadBalancer. 
+func NewGeneralLoadBalancer(spec *LoadBalanceSpec, servers []*Server) *GeneralLoadBalancer {
+	lb := &GeneralLoadBalancer{
+		spec:    spec,
+		servers: servers,
 	}
+	lb.healthyServers.Store(newServerGroup(servers))
+	return lb
+}
 
-	switch spec.Mode {
-	case StickySessionModeCookieConsistentHash:
-		blb.initConsistentHash()
-	case StickySessionModeDurationBased, StickySessionModeApplicationBased:
-		blb.configLBCookie()
+// Init initializes the load balancer.
+func (glb *GeneralLoadBalancer) Init(
+	fnNewSessionSticker func(*StickySessionSpec) SessionSticker,
+	fnNewHealthChecker func(*HealthCheckSpec) HealthChecker,
+	aa any,
+) {
+	// glb.lbp = lbp
+
+	if glb.spec.StickySession != nil {
+		ss := fnNewSessionSticker(glb.spec.StickySession)
+		ss.UpdateServers(glb.servers)
+		glb.ss = ss
 	}
-}
 
-// initHealthCheck initializes for health check
-func (blb *BaseLoadBalancer[TRequest, TResponse]) initHealthCheck(spec *HealthCheckSpec, servers []*Server) {
-	if spec == nil || len(servers) == 0 {
+	if glb.spec.HealthCheck == nil {
 		return
 	}
 
-	blb.probeInterval, _ = time.ParseDuration(spec.Interval)
-	if blb.probeInterval <= 0 {
-		blb.probeInterval = HealthCheckDefaultInterval
-	}
-	blb.probeTimeout, _ = time.ParseDuration(spec.Timeout)
-	if blb.probeTimeout <= 0 {
-		blb.probeTimeout = HealthCheckDefaultTimeout
+	if glb.spec.HealthCheck.Fails <= 0 {
+		glb.spec.HealthCheck.Fails = 1
 	}
-	if spec.Fails == 0 {
-		spec.Fails = HealthCheckDefaultFailThreshold
+
+	if glb.spec.HealthCheck.Passes <= 0 {
+		glb.spec.HealthCheck.Passes = 1
 	}
-	if spec.Passes == 0 {
-		spec.Passes = HealthCheckDefaultPassThreshold
+
+	glb.hc = fnNewHealthChecker(glb.spec.HealthCheck)
+
+	interval, _ := time.ParseDuration(glb.spec.HealthCheck.Interval)
+	if interval <= 0 {
+		interval = time.Minute
 	}
-	blb.probeClient = &http.Client{Timeout: blb.probeTimeout}
-	ticker := time.NewTicker(blb.probeInterval)
-	blb.done = make(chan bool)
+
+	ticker := time.NewTicker(interval)
+	glb.done = make(chan struct{})
 	go func() {
 		for {
 			select {
-			case <-blb.done:
+			case <-glb.done:
 				ticker.Stop()
 				return
 			case <-ticker.C:
-				blb.probeServers()
+				glb.checkServers()
 			}
 		}
 	}()
 }
 
-// probeServers checks health status of servers
-func (blb *BaseLoadBalancer[TRequest, TResponse]) probeServers() {
-	statusChange := false
-	healthyServers := make([]*Server, 0, len(blb.Servers))
-	for _, svr := range blb.Servers {
-		pass := blb.probeHTTP(svr.URL)
-		healthy, change := svr.RecordHealth(pass, blb.spec.HealthCheck.Passes, blb.spec.HealthCheck.Fails)
-		if change {
-			statusChange = true
+func (glb *GeneralLoadBalancer) checkServers() {
+	changed := false
+
+	servers := make([]*Server, 0, len(glb.servers))
+	for _, svr := range glb.servers {
+		succ := glb.hc.Check(svr)
+		if succ {
+			if svr.HealthCounter < 0 {
+				svr.HealthCounter = 0
+			}
+			svr.HealthCounter++
+			if svr.Unhealth && svr.HealthCounter >= glb.spec.HealthCheck.Passes {
+				logger.Warnf("server:%v becomes healthy.", svr.ID())
+				svr.Unhealth = false
+				changed = true
+			}
+		} else {
+			if svr.HealthCounter > 0 {
+				svr.HealthCounter = 0
+			}
+			svr.HealthCounter--
+			if svr.Healthy() && svr.HealthCounter <= -glb.spec.HealthCheck.Fails {
+				logger.Warnf("server:%v becomes unhealthy!", svr.ID())
+				svr.Unhealth = true
+				changed = true
+			}
 		}
-		if healthy {
-			healthyServers = append(healthyServers, svr)
+
+		if svr.Healthy() {
+			servers = append(servers, svr)
 		}
 	}
-	if statusChange {
-		blb.healthyServers.Store(healthyServers)
-		// init consistent hash in sticky session when servers change
-		blb.initStickySession(blb.spec.StickySession, 
blb.HealthyServers()) - } -} -// probeHTTP checks http url status -func (blb *BaseLoadBalancer[TRequest, TResponse]) probeHTTP(url string) bool { - if blb.spec.HealthCheck.Path != "" { - url += blb.spec.HealthCheck.Path + if !changed { + return } - res, err := blb.probeClient.Get(url) - if err != nil || res.StatusCode > 500 { - return false + + glb.healthyServers.Store(newServerGroup(servers)) + if glb.ss != nil { + glb.ss.UpdateServers(servers) } - return true } -// initConsistentHash initializes for consistent hash mode -func (blb *BaseLoadBalancer[TRequest, TResponse]) initConsistentHash() { - members := make([]consistent.Member, len(blb.HealthyServers())) - for i, s := range blb.HealthyServers() { - members[i] = hashMember{server: s} +// ChooseServer chooses a server according to the load balancing spec. +func (glb *GeneralLoadBalancer) ChooseServer(req protocols.Request) *Server { + sg := glb.healthyServers.Load() + if len(sg.Servers) == 0 { + return nil } - cfg := consistent.Config{ - PartitionCount: 1024, - ReplicationFactor: 50, - Load: 1.25, - Hasher: hasher{}, + if glb.ss != nil { + if svr := glb.ss.GetServer(req, sg); svr != nil { + return svr + } } - blb.consistentHash = consistent.New(members, cfg) -} -// configLBCookie configures properties for load balancer-generated cookie -func (blb *BaseLoadBalancer[TRequest, TResponse]) configLBCookie() { - if blb.spec.StickySession.LBCookieName == "" { - blb.spec.StickySession.LBCookieName = StickySessionDefaultLBCookieName - } + return glb.lbp.ChooseServer(req, sg) +} - blb.cookieExpire, _ = time.ParseDuration(blb.spec.StickySession.LBCookieExpire) - if blb.cookieExpire <= 0 { - blb.cookieExpire = StickySessionDefaultLBCookieExpire +// ReturnServer returns a server to the load balancer. +func (glb *GeneralLoadBalancer) ReturnServer(server *Server, req protocols.Request, resp protocols.Response) { + if glb.ss != nil { + glb.ss.ReturnServer(server, req, resp) } } -// ChooseServer chooses the sticky server if enable -func (blb *BaseLoadBalancer[TRequest, TResponse]) ChooseServer(req *httpprot.Request) *Server { - if blb.spec.StickySession == nil { - return nil +// Close closes the load balancer +func (glb *GeneralLoadBalancer) Close() { + if glb.hc != nil { + glb.hc.Close() } - - switch blb.spec.StickySession.Mode { - case StickySessionModeCookieConsistentHash: - return blb.chooseServerByConsistentHash(req) - case StickySessionModeDurationBased, StickySessionModeApplicationBased: - return blb.chooseServerByLBCookie(req) + if glb.ss != nil { + glb.ss.Close() } - - return nil + glb.lbp.Close() } -// chooseServerByConsistentHash chooses server using consistent hash on cookie -func (blb *BaseLoadBalancer[TRequest, TResponse]) chooseServerByConsistentHash(req *httpprot.Request) *Server { - cookie, err := req.Cookie(blb.spec.StickySession.AppCookieName) - if err != nil { - return nil - } +// RandomLoadBalancePolicy is a load balance policy that chooses a server randomly. +type RandomLoadBalancePolicy struct { +} - m := blb.consistentHash.LocateKey([]byte(cookie.Value)) - if m != nil { - return m.(hashMember).server - } +// ChooseServer chooses a server randomly. +func (lbp *RandomLoadBalancePolicy) ChooseServer(req protocols.Request, sg *ServerGroup) *Server { + return sg.Servers[rand.Intn(len(sg.Servers))] +} - return nil +// RoundRobinLoadBalancePolicy is a load balance policy that chooses a server by round robin. 
+type RoundRobinLoadBalancePolicy struct { + counter uint64 } -// chooseServerByLBCookie chooses server by load balancer-generated cookie -func (blb *BaseLoadBalancer[TRequest, TResponse]) chooseServerByLBCookie(req *httpprot.Request) *Server { - cookie, err := req.Cookie(blb.spec.StickySession.LBCookieName) - if err != nil { - return nil - } +// ChooseServer chooses a server by round robin. +func (lbp *RoundRobinLoadBalancePolicy) ChooseServer(req protocols.Request, sg *ServerGroup) *Server { + counter := atomic.AddUint64(&lbp.counter, 1) - 1 + return sg.Servers[int(counter)%len(sg.Servers)] +} - signed, err := hex.DecodeString(cookie.Value) - if err != nil || len(signed) != KeyLen+sha256.Size { - return nil - } +// WeightedRandomLoadBalancePolicy is a load balance policy that chooses a server randomly by weight. +type WeightedRandomLoadBalancePolicy struct { +} - key := signed[:KeyLen] - macBytes := signed[KeyLen:] - for _, s := range blb.HealthyServers() { - mac := hmac.New(sha256.New, key) - mac.Write([]byte(s.ID())) - expected := mac.Sum(nil) - if hmac.Equal(expected, macBytes) { - return s +// ChooseServer chooses a server randomly by weight. +func (lbp *WeightedRandomLoadBalancePolicy) ChooseServer(req protocols.Request, sg *ServerGroup) *Server { + w := rand.Intn(sg.TotalWeight) + for _, svr := range sg.Servers { + w -= svr.Weight + if w < 0 { + return svr } } - return nil + panic(fmt.Errorf("BUG: should not run to here, total weight=%d", sg.TotalWeight)) } -// ReturnServer does some custom work before return server -func (blb *BaseLoadBalancer[TRequest, TResponse]) ReturnServer(server *Server, req *httpprot.Request, resp *httpprot.Response) { - if blb.spec.StickySession == nil { - return - } +// IPHashLoadBalancePolicy is a load balance policy that chooses a server by ip hash. +type IPHashLoadBalancePolicy struct { +} - setCookie := false - switch blb.spec.StickySession.Mode { - case StickySessionModeDurationBased: - setCookie = true - case StickySessionModeApplicationBased: - for _, c := range resp.Cookies() { - if c.Name == blb.spec.StickySession.AppCookieName { - setCookie = true - break - } - } +// ChooseServer chooses a server by ip hash. +func (lbp *IPHashLoadBalancePolicy) ChooseServer(req protocols.Request, sg *ServerGroup) *Server { + type realIPer interface { + RealIP() string } - if setCookie { - cookie := &http.Cookie{ - Name: blb.spec.StickySession.LBCookieName, - Value: sign([]byte(server.ID())), - Expires: time.Now().Add(blb.cookieExpire), - } - resp.SetCookie(cookie) + + ri, ok := req.(realIPer) + if !ok { + panic("IPHashLoadBalancePolicy only support request with RealIP()") } + + ip := ri.RealIP() + hash := fnv.New32() + hash.Write([]byte(ip)) + return sg.Servers[hash.Sum32()%uint32(len(sg.Servers))] } -// Close closes resources -func (blb *BaseLoadBalancer[TRequest, TResponse]) Close() { - if blb.done != nil { - close(blb.done) - } +// HeaderHashLoadBalancePolicy is a load balance policy that chooses a server by header hash. +type HeaderHashLoadBalancePolicy struct { + spec *LoadBalanceSpec } -*/ +// ChooseServer chooses a server by header hash. 
+func (lbp *HeaderHashLoadBalancePolicy) ChooseServer(req protocols.Request, sg *ServerGroup) *Server { + v, ok := req.Header().Get(lbp.spec.HeaderHashKey).(string) + if !ok { + panic("HeaderHashLoadBalancePolicy only support headers with string values") + } + + hash := fnv.New32() + hash.Write([]byte(v)) + return sg.Servers[hash.Sum32()%uint32(len(sg.Servers))] +} diff --git a/pkg/filters/proxies/server.go b/pkg/filters/proxies/server.go index 1fa0032bbc..6ea5ff2a28 100644 --- a/pkg/filters/proxies/server.go +++ b/pkg/filters/proxies/server.go @@ -22,25 +22,19 @@ import ( "net" "net/url" "strings" - - "github.com/megaease/easegress/pkg/logger" ) // Server is a backend proxy server. type Server struct { - URL string `json:"url" jsonschema:"required,format=url"` - Tags []string `json:"tags" jsonschema:"omitempty,uniqueItems=true"` - Weight int `json:"weight" jsonschema:"omitempty,minimum=0,maximum=100"` - KeepHost bool `json:"keepHost" jsonschema:"omitempty,default=false"` - AddrIsHostName bool `json:"-"` - health *ServerHealth `json:"-"` -} - -// ServerHealth is health status of a Server -type ServerHealth struct { - healthy bool - fails int - passes int + URL string `json:"url" jsonschema:"required,format=url"` + Tags []string `json:"tags" jsonschema:"omitempty,uniqueItems=true"` + Weight int `json:"weight" jsonschema:"omitempty,minimum=0,maximum=100"` + KeepHost bool `json:"keepHost" jsonschema:"omitempty,default=false"` + AddrIsHostName bool `json:"-"` + Unhealth bool `json:"-"` + // HealthCounter is used to count the number of successive health checks + // result, positive for healthy, negative for unhealthy + HealthCounter int `json:"-"` } // String implements the Stringer interface. @@ -78,28 +72,21 @@ func (s *Server) CheckAddrPattern() { s.AddrIsHostName = net.ParseIP(host) == nil } -// RecordHealth records health status, return healthy status and true if status changes -func (s *Server) RecordHealth(pass bool, passThreshold, failThreshold int) (bool, bool) { - if s.health == nil { - s.health = &ServerHealth{healthy: true} - } - h := s.health - if pass { - h.passes++ - h.fails = 0 - } else { - h.passes = 0 - h.fails++ - } - change := false - if h.passes >= passThreshold && !h.healthy { - h.healthy = true - logger.Warnf("server:%v becomes healthy.", s.ID()) - change = true - } else if h.fails >= failThreshold && h.healthy { - logger.Warnf("server:%v becomes unhealthy!", s.ID()) - h.healthy = false - change = true +// Healthy returns whether the server is healthy +func (s *Server) Healthy() bool { + return !s.Unhealth +} + +// ServerGroup is a group of servers. +type ServerGroup struct { + TotalWeight int + Servers []*Server +} + +func newServerGroup(servers []*Server) *ServerGroup { + sg := &ServerGroup{Servers: servers} + for _, s := range servers { + sg.TotalWeight += s.Weight } - return h.healthy, change + return sg } diff --git a/pkg/filters/proxies/stickysession.go b/pkg/filters/proxies/stickysession.go new file mode 100644 index 0000000000..760fd3f903 --- /dev/null +++ b/pkg/filters/proxies/stickysession.go @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package proxies + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/binary" + "encoding/hex" + "hash/maphash" + "net/http" + "sync/atomic" + "time" + + "github.com/buraksezer/consistent" + "github.com/megaease/easegress/pkg/protocols" + "github.com/megaease/easegress/pkg/protocols/httpprot" + "github.com/spaolacci/murmur3" +) + +const ( + // StickySessionModeCookieConsistentHash is the sticky session mode of consistent hash on app cookie. + StickySessionModeCookieConsistentHash = "CookieConsistentHash" + // StickySessionModeDurationBased uses a load balancer-generated cookie for stickiness. + StickySessionModeDurationBased = "DurationBased" + // StickySessionModeApplicationBased uses a load balancer-generated cookie depends on app cookie for stickiness. + StickySessionModeApplicationBased = "ApplicationBased" + + // KeyLen is the key length used by HMAC. + KeyLen = 8 +) + +// StickySessionSpec is the spec for sticky session. +type StickySessionSpec struct { + Mode string `json:"mode" jsonschema:"required,enum=CookieConsistentHash,enum=DurationBased,enum=ApplicationBased"` + // AppCookieName is the user-defined cookie name in CookieConsistentHash and ApplicationBased mode. + AppCookieName string `json:"appCookieName" jsonschema:"omitempty"` + // LBCookieName is the generated cookie name in DurationBased and ApplicationBased mode. + LBCookieName string `json:"lbCookieName" jsonschema:"omitempty"` + // LBCookieExpire is the expire seconds of generated cookie in DurationBased and ApplicationBased mode. + LBCookieExpire string `json:"lbCookieExpire" jsonschema:"omitempty,format=duration"` +} + +// SessionSticker is the interface for session stickiness. +type SessionSticker interface { + UpdateServers(servers []*Server) + GetServer(req protocols.Request, sg *ServerGroup) *Server + ReturnServer(server *Server, req protocols.Request, resp protocols.Response) + Close() +} + +// hashMember is member used for hash +type hashMember struct { + server *Server +} + +// String implements consistent.Member interface +func (m hashMember) String() string { + return m.server.ID() +} + +// hasher is used for hash +type hasher struct{} + +// Sum64 implement hash function using murmur3 +func (h hasher) Sum64(data []byte) uint64 { + return murmur3.Sum64(data) +} + +// HTTPSessionSticker implements sticky session for HTTP. +type HTTPSessionSticker struct { + spec *StickySessionSpec + consistentHash atomic.Pointer[consistent.Consistent] + cookieExpire time.Duration +} + +// NewHTTPSessionSticker creates a new HTTPSessionSticker. +func NewHTTPSessionSticker(spec *StickySessionSpec) SessionSticker { + ss := &HTTPSessionSticker{spec: spec} + + ss.cookieExpire, _ = time.ParseDuration(spec.LBCookieExpire) + if ss.cookieExpire <= 0 { + ss.cookieExpire = time.Hour * 2 + } + + return ss +} + +// UpdateServers update the servers for the HTTPSessionSticker. 
+func (ss *HTTPSessionSticker) UpdateServers(servers []*Server) { + if ss.spec.Mode != StickySessionModeCookieConsistentHash { + return + } + + members := make([]consistent.Member, len(servers)) + for i, s := range servers { + members[i] = hashMember{server: s} + } + + cfg := consistent.Config{ + PartitionCount: 1024, + ReplicationFactor: 50, + Load: 1.25, + Hasher: hasher{}, + } + + ss.consistentHash.Store(consistent.New(members, cfg)) +} + +func (ss *HTTPSessionSticker) getServerByConsistentHash(req *httpprot.Request) *Server { + cookie, err := req.Cookie(ss.spec.AppCookieName) + if err != nil { + return nil + } + + m := ss.consistentHash.Load().LocateKey([]byte(cookie.Value)) + if m != nil { + return m.(hashMember).server + } + + return nil +} + +func (ss *HTTPSessionSticker) getServerByLBCookie(req *httpprot.Request, sg *ServerGroup) *Server { + cookie, err := req.Cookie(ss.spec.LBCookieName) + if err != nil { + return nil + } + + signed, err := hex.DecodeString(cookie.Value) + if err != nil || len(signed) != KeyLen+sha256.Size { + return nil + } + + key := signed[:KeyLen] + macBytes := signed[KeyLen:] + for _, s := range sg.Servers { + mac := hmac.New(sha256.New, key) + mac.Write([]byte(s.ID())) + expected := mac.Sum(nil) + if hmac.Equal(expected, macBytes) { + return s + } + } + + return nil +} + +// GetServer returns the server for the request. +func (ss *HTTPSessionSticker) GetServer(req protocols.Request, sg *ServerGroup) *Server { + httpreq, ok := req.(*httpprot.Request) + if !ok { + panic("not http request") + } + + switch ss.spec.Mode { + case StickySessionModeCookieConsistentHash: + return ss.getServerByConsistentHash(httpreq) + case StickySessionModeDurationBased, StickySessionModeApplicationBased: + return ss.getServerByLBCookie(httpreq, sg) + } + + return nil +} + +// sign signs plain text byte array to encoded string +func sign(plain []byte) string { + signed := make([]byte, KeyLen+sha256.Size) + key := signed[:KeyLen] + macBytes := signed[KeyLen:] + + // use maphash to generate random key fast + binary.LittleEndian.PutUint64(key, new(maphash.Hash).Sum64()) + mac := hmac.New(sha256.New, key) + mac.Write(plain) + mac.Sum(macBytes[:0]) + + return hex.EncodeToString(signed) +} + +// ReturnServer returns the server to the session sticker. +func (ss *HTTPSessionSticker) ReturnServer(server *Server, req protocols.Request, resp protocols.Response) { + httpresp, ok := resp.(*httpprot.Response) + if !ok { + panic("not http response") + } + + setCookie := false + switch ss.spec.Mode { + case StickySessionModeDurationBased: + setCookie = true + case StickySessionModeApplicationBased: + for _, c := range httpresp.Cookies() { + if c.Name == ss.spec.AppCookieName { + setCookie = true + break + } + } + } + + if setCookie { + cookie := &http.Cookie{ + Name: ss.spec.LBCookieName, + Value: sign([]byte(server.ID())), + Expires: time.Now().Add(ss.cookieExpire), + } + httpresp.SetCookie(cookie) + } +} + +// Close closes the HTTPSessionSticker. 
+func (ss *HTTPSessionSticker) Close() { +} From 200992feaa3eda611fbd8290c67db8cbe8f53d9a Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Thu, 2 Feb 2023 09:59:51 +0800 Subject: [PATCH 04/14] continue refactor load balancer --- pkg/filters/proxies/grpcproxy/loadbalance.go | 222 ++------ pkg/filters/proxies/grpcproxy/pool.go | 10 +- pkg/filters/proxies/httpproxy/loadbalance.go | 524 ------------------ pkg/filters/proxies/httpproxy/pool.go | 1 + pkg/filters/proxies/httpproxy/wspool.go | 6 +- pkg/filters/proxies/loadbalance.go | 39 +- .../{httpproxy => }/loadbalance_test.go | 2 +- 7 files changed, 82 insertions(+), 722 deletions(-) delete mode 100644 pkg/filters/proxies/httpproxy/loadbalance.go rename pkg/filters/proxies/{httpproxy => }/loadbalance_test.go (99%) diff --git a/pkg/filters/proxies/grpcproxy/loadbalance.go b/pkg/filters/proxies/grpcproxy/loadbalance.go index f891a9db1a..b8915c8b67 100644 --- a/pkg/filters/proxies/grpcproxy/loadbalance.go +++ b/pkg/filters/proxies/grpcproxy/loadbalance.go @@ -17,216 +17,55 @@ package grpcproxy -/* -// LoadBalancer is the interface of an gRPC load balancer. -type LoadBalancer interface { - ChooseServer(req *grpcprot.Request) *Server -} - -// ReusableServerLB is the interface of an HTTP load balancer with reusable servers . -type ReusableServerLB interface { - LoadBalancer - ReturnServer(server *Server) -} - -// LoadBalanceSpec is the spec to create a load balancer. -type LoadBalanceSpec struct { - Policy string `json:"policy" jsonschema:"enum=roundRobin,enum=random,enum=weightedRandom,enum=ipHash,enum=headerHash,enum=forward"` - HeaderHashKey string `json:"headerHashKey" jsonschema:"omitempty"` - ForwardKey string `json:"forwardKey" jsonschema:"omitempty"` -} - -// NewLoadBalancer creates a load balancer for servers according to spec. -func NewLoadBalancer(spec *LoadBalanceSpec, servers []*Server) LoadBalancer { - switch spec.Policy { - case "roundRobin", "": - return newRoundRobinLoadBalancer(servers) - case "random": - return newRandomLoadBalancer(servers) - case "weightedRandom": - return newWeightedRandomLoadBalancer(servers) - case "ipHash": - return newIPHashLoadBalancer(servers) - case "headerHash": - return newHeaderHashLoadBalancer(servers, spec.HeaderHashKey) - case "forward": - return newForwardLoadBalancer(spec.ForwardKey) - default: - logger.Errorf("unsupported load balancing policy: %s", spec.Policy) - return newRoundRobinLoadBalancer(servers) - } -} - -// BaseLoadBalancer implement the common part of load balancer. -type BaseLoadBalancer struct { - Servers []*Server -} - -// randomLoadBalancer does load balancing in a random manner. -type randomLoadBalancer struct { - BaseLoadBalancer -} - -func newRandomLoadBalancer(servers []*Server) *randomLoadBalancer { - return &randomLoadBalancer{ - BaseLoadBalancer: BaseLoadBalancer{ - Servers: servers, - }, - } -} - -// ChooseServer implements the LoadBalancer interface. -func (lb *randomLoadBalancer) ChooseServer(req *grpcprot.Request) *Server { - if len(lb.Servers) == 0 { - return nil - } - return lb.Servers[rand.Intn(len(lb.Servers))] -} - -// roundRobinLoadBalancer does load balancing in a round robin manner. -type roundRobinLoadBalancer struct { - BaseLoadBalancer - counter uint64 -} - -func newRoundRobinLoadBalancer(servers []*Server) *roundRobinLoadBalancer { - return &roundRobinLoadBalancer{ - BaseLoadBalancer: BaseLoadBalancer{ - Servers: servers, - }, - } -} - -// ChooseServer implements the LoadBalancer interface. 
-func (lb *roundRobinLoadBalancer) ChooseServer(req *grpcprot.Request) *Server { - if len(lb.Servers) == 0 { - return nil - } - counter := atomic.AddUint64(&lb.counter, 1) - 1 - return lb.Servers[int(counter)%len(lb.Servers)] -} - -// WeightedRandomLoadBalancer does load balancing in a weighted random manner. -type WeightedRandomLoadBalancer struct { - BaseLoadBalancer - totalWeight int -} - -func newWeightedRandomLoadBalancer(servers []*Server) *WeightedRandomLoadBalancer { - lb := &WeightedRandomLoadBalancer{ - BaseLoadBalancer: BaseLoadBalancer{ - Servers: servers, - }, - } - for _, server := range servers { - lb.totalWeight += server.Weight - } - return lb -} - -// ChooseServer implements the LoadBalancer interface. -func (lb *WeightedRandomLoadBalancer) ChooseServer(req *grpcprot.Request) *Server { - if len(lb.Servers) == 0 { - return nil - } - - randomWeight := rand.Intn(lb.totalWeight) - for _, server := range lb.Servers { - randomWeight -= server.Weight - if randomWeight < 0 { - return server - } - } - - panic(fmt.Errorf("BUG: should not run to here, total weight=%d", lb.totalWeight)) -} - -// ipHashLoadBalancer does load balancing based on IP hash. -type ipHashLoadBalancer struct { - BaseLoadBalancer -} - -func newIPHashLoadBalancer(servers []*Server) *ipHashLoadBalancer { - return &ipHashLoadBalancer{ - BaseLoadBalancer: BaseLoadBalancer{ - Servers: servers, - }, - } -} +import ( + "net" + "sync" -// ChooseServer implements the LoadBalancer interface. -func (lb *ipHashLoadBalancer) ChooseServer(req *grpcprot.Request) *Server { - if len(lb.Servers) == 0 { - return nil - } - ip := req.RealIP() - hash := fnv.New32() - hash.Write([]byte(ip)) - return lb.Servers[hash.Sum32()%uint32(len(lb.Servers))] -} + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/protocols" + "github.com/megaease/easegress/pkg/protocols/grpcprot" +) -// headerHashLoadBalancer does load balancing based on header hash. -type headerHashLoadBalancer struct { - BaseLoadBalancer - key string -} - -func newHeaderHashLoadBalancer(servers []*Server, key string) *headerHashLoadBalancer { - return &headerHashLoadBalancer{ - BaseLoadBalancer: BaseLoadBalancer{ - Servers: servers, - }, - key: key, - } -} - -// ChooseServer implements the LoadBalancer interface. -func (lb *headerHashLoadBalancer) ChooseServer(req *grpcprot.Request) *Server { - if len(lb.Servers) == 0 { - return nil - } - v := req.RawHeader().RawGet(lb.key) - var vv string - if v == nil { - vv = "" - } else { - vv = v[0] - } - hash := fnv.New32() - hash.Write([]byte(vv)) - return lb.Servers[hash.Sum32()%uint32(len(lb.Servers))] -} +const ( + // LoadBalancePolicyForward is the load balance policy of forward. 
+ LoadBalancePolicyForward = "forward" +) type forwardLoadBalancer struct { - server *sync.Pool + servers sync.Pool forwardKey string } -func newForwardLoadBalancer(forwardKey string) *forwardLoadBalancer { +func newForwardLoadBalancer(spec *LoadBalanceSpec) *forwardLoadBalancer { return &forwardLoadBalancer{ - server: &sync.Pool{ + servers: sync.Pool{ New: func() interface{} { return &Server{} }, }, - forwardKey: forwardKey, + forwardKey: spec.ForwardKey, } } // ChooseServer implements the LoadBalancer interface -func (f *forwardLoadBalancer) ChooseServer(req *grpcprot.Request) *Server { - target := req.RawHeader().GetFirst(f.forwardKey) +func (f *forwardLoadBalancer) ChooseServer(req protocols.Request) *Server { + grpcreq, ok := req.(*grpcprot.Request) + if !ok { + panic("not a gRPC request") + } + + target := grpcreq.RawHeader().GetFirst(f.forwardKey) if target == "" { - logger.Debugf("request %v from %v context no target address %s", req.FullMethod(), req.RealIP(), target) + logger.Debugf("request %v from %v context no target address %s", grpcreq.FullMethod(), grpcreq.RealIP(), target) return nil } if _, _, err := net.SplitHostPort(target); err != nil { - logger.Debugf("request %v from %v context target address %s invalid", req.FullMethod(), req.RealIP(), target) + logger.Debugf("request %v from %v context target address %s invalid", grpcreq.FullMethod(), grpcreq.RealIP(), target) return nil } - if s, ok := f.server.Get().(*Server); ok { + if s, ok := f.servers.Get().(*Server); ok { s.URL = target return s } @@ -234,8 +73,11 @@ func (f *forwardLoadBalancer) ChooseServer(req *grpcprot.Request) *Server { return nil } -// ReturnServer implements the ReusableServerLB interface -func (f *forwardLoadBalancer) ReturnServer(s *Server) { - f.server.Put(s) +// ReturnServer returns the server to the load balancer. +func (f *forwardLoadBalancer) ReturnServer(s *Server, req protocols.Request, resp protocols.Response) { + f.servers.Put(s) +} + +// Close closes the load balancer. +func (lb *forwardLoadBalancer) Close() { } -*/ diff --git a/pkg/filters/proxies/grpcproxy/pool.go b/pkg/filters/proxies/grpcproxy/pool.go index 0e4e1456fb..8ae5eb3627 100644 --- a/pkg/filters/proxies/grpcproxy/pool.go +++ b/pkg/filters/proxies/grpcproxy/pool.go @@ -23,6 +23,7 @@ import ( "io" "time" + "github.com/megaease/easegress/pkg/filters/proxies" "github.com/megaease/easegress/pkg/protocols/grpcprot" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -170,8 +171,15 @@ func NewServerPool(proxy *Proxy, spec *ServerPoolSpec, name string) *ServerPool return sp } +// CreateLoadBalancer creates a load balancer according to spec. func (sp *ServerPool) CreateLoadBalancer(spec *LoadBalanceSpec, servers []*Server) LoadBalancer { - return nil + if spec.Policy == "forward" { + return newForwardLoadBalancer(spec) + } + + lb := proxies.NewGeneralLoadBalancer(spec, servers) + lb.Init(nil, nil, nil) + return lb } // InjectResiliencePolicy injects resilience policies to the server pool. diff --git a/pkg/filters/proxies/httpproxy/loadbalance.go b/pkg/filters/proxies/httpproxy/loadbalance.go deleted file mode 100644 index b31edd7cb3..0000000000 --- a/pkg/filters/proxies/httpproxy/loadbalance.go +++ /dev/null @@ -1,524 +0,0 @@ -/* - * Copyright (c) 2017, MegaEase - * All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package httpproxy - -import ( - "time" -) - -const ( - // LoadBalancePolicyRoundRobin is the load balance policy of round robin. - LoadBalancePolicyRoundRobin = "roundRobin" - // LoadBalancePolicyRandom is the load balance policy of random. - LoadBalancePolicyRandom = "random" - // LoadBalancePolicyWeightedRandom is the load balance policy of weighted random. - LoadBalancePolicyWeightedRandom = "weightedRandom" - // LoadBalancePolicyIPHash is the load balance policy of IP hash. - LoadBalancePolicyIPHash = "ipHash" - // LoadBalancePolicyHeaderHash is the load balance policy of HTTP header hash. - LoadBalancePolicyHeaderHash = "headerHash" - // StickySessionModeCookieConsistentHash is the sticky session mode of consistent hash on app cookie. - StickySessionModeCookieConsistentHash = "CookieConsistentHash" - // StickySessionModeDurationBased uses a load balancer-generated cookie for stickiness. - StickySessionModeDurationBased = "DurationBased" - // StickySessionModeApplicationBased uses a load balancer-generated cookie depends on app cookie for stickiness. - StickySessionModeApplicationBased = "ApplicationBased" - // StickySessionDefaultLBCookieName is the default name of the load balancer-generated cookie. - StickySessionDefaultLBCookieName = "EG_SESSION" - // StickySessionDefaultLBCookieExpire is the default expiration duration of the load balancer-generated cookie. - StickySessionDefaultLBCookieExpire = time.Hour * 2 - // KeyLen is the key length used by HMAC. - KeyLen = 8 - // HealthCheckDefaultInterval is the default interval for health check - HealthCheckDefaultInterval = time.Second * 60 - // HealthCheckDefaultTimeout is the default timeout for health check - HealthCheckDefaultTimeout = time.Second * 3 - // HealthCheckDefaultFailThreshold is the default fail threshold for health check - HealthCheckDefaultFailThreshold = 1 - // HealthCheckDefaultPassThreshold is the default pass threshold for health check - HealthCheckDefaultPassThreshold = 1 -) - -/* -// LoadBalancer is the interface of an HTTP load balancer. -type LoadBalancer interface { - ChooseServer(req *httpprot.Request) *Server - ReturnServer(server *Server, req *httpprot.Request, resp *httpprot.Response) - HealthyServers() []*Server - Close() -} - -// StickySessionSpec is the spec for sticky session. -type StickySessionSpec struct { - Mode string `json:"mode" jsonschema:"required,enum=CookieConsistentHash,enum=DurationBased,enum=ApplicationBased"` - // AppCookieName is the user-defined cookie name in CookieConsistentHash and ApplicationBased mode. - AppCookieName string `json:"appCookieName" jsonschema:"omitempty"` - // LBCookieName is the generated cookie name in DurationBased and ApplicationBased mode. - LBCookieName string `json:"lbCookieName" jsonschema:"omitempty"` - // LBCookieExpire is the expire seconds of generated cookie in DurationBased and ApplicationBased mode. - LBCookieExpire string `json:"lbCookieExpire" jsonschema:"omitempty,format=duration"` -} - -// HealthCheckSpec is the spec for health check. -type HealthCheckSpec struct { - // Interval is the interval duration for health check. 
- Interval string `json:"interval" jsonschema:"omitempty,format=duration"` - // Path is the health check path for server - Path string `json:"path" jsonschema:"omitempty"` - // Timeout is the timeout duration for health check, default is 3. - Timeout string `json:"timeout" jsonschema:"omitempty,format=duration"` - // Fails is the consecutive fails count for assert fail, default is 1. - Fails int `json:"fails" jsonschema:"omitempty,minimum=1"` - // Passes is the consecutive passes count for assert pass, default is 1. - Passes int `json:"passes" jsonschema:"omitempty,minimum=1"` -} - -// LoadBalanceSpec is the spec to create a load balancer. -type LoadBalanceSpec struct { - Policy string `json:"policy" jsonschema:"omitempty,enum=,enum=roundRobin,enum=random,enum=weightedRandom,enum=ipHash,enum=headerHash"` - HeaderHashKey string `json:"headerHashKey" jsonschema:"omitempty"` - StickySession *StickySessionSpec `json:"stickySession" jsonschema:"omitempty"` - HealthCheck *HealthCheckSpec `json:"healthCheck" jsonschema:"omitempty"` -} - -// NewLoadBalancer creates a load balancer for servers according to spec. -func NewLoadBalancer(spec *LoadBalanceSpec, servers []*Server) LoadBalancer { - switch spec.Policy { - case LoadBalancePolicyRoundRobin, "": - return newRoundRobinLoadBalancer(spec, servers) - case LoadBalancePolicyRandom: - return newRandomLoadBalancer(spec, servers) - case LoadBalancePolicyWeightedRandom: - return newWeightedRandomLoadBalancer(spec, servers) - case LoadBalancePolicyIPHash: - return newIPHashLoadBalancer(spec, servers) - case LoadBalancePolicyHeaderHash: - return newHeaderHashLoadBalancer(spec, servers) - default: - logger.Errorf("unsupported load balancing policy: %s", spec.Policy) - return newRoundRobinLoadBalancer(spec, servers) - } -} - -// hashMember is member used for hash -type hashMember struct { - server *Server -} - -// String implements consistent.Member interface -func (m hashMember) String() string { - return m.server.ID() -} - -// hasher is used for hash -type hasher struct{} - -// Sum64 implement hash function using murmur3 -func (h hasher) Sum64(data []byte) uint64 { - return murmur3.Sum64(data) -} - -// BaseLoadBalancer implement the common part of load balancer. 
-type BaseLoadBalancer struct { - spec *LoadBalanceSpec - Servers []*Server - healthyServers atomic.Value - consistentHash *consistent.Consistent - cookieExpire time.Duration - done chan bool - probeClient *http.Client - probeInterval time.Duration - probeTimeout time.Duration -} - -// HealthyServers return healthy servers -func (blb *BaseLoadBalancer) HealthyServers() []*Server { - return blb.healthyServers.Load().([]*Server) -} - -// init initializes load balancer -func (blb *BaseLoadBalancer) init(spec *LoadBalanceSpec, servers []*Server) { - blb.spec = spec - blb.Servers = servers - blb.healthyServers.Store(servers) - - blb.initStickySession(spec.StickySession, blb.HealthyServers()) - blb.initHealthCheck(spec.HealthCheck, servers) -} - -// initStickySession initializes for sticky session -func (blb *BaseLoadBalancer) initStickySession(spec *StickySessionSpec, servers []*Server) { - if spec == nil || len(servers) == 0 { - return - } - - switch spec.Mode { - case StickySessionModeCookieConsistentHash: - blb.initConsistentHash() - case StickySessionModeDurationBased, StickySessionModeApplicationBased: - blb.configLBCookie() - } -} - -// initHealthCheck initializes for health check -func (blb *BaseLoadBalancer) initHealthCheck(spec *HealthCheckSpec, servers []*Server) { - if spec == nil || len(servers) == 0 { - return - } - - blb.probeInterval, _ = time.ParseDuration(spec.Interval) - if blb.probeInterval <= 0 { - blb.probeInterval = HealthCheckDefaultInterval - } - blb.probeTimeout, _ = time.ParseDuration(spec.Timeout) - if blb.probeTimeout <= 0 { - blb.probeTimeout = HealthCheckDefaultTimeout - } - if spec.Fails == 0 { - spec.Fails = HealthCheckDefaultFailThreshold - } - if spec.Passes == 0 { - spec.Passes = HealthCheckDefaultPassThreshold - } - blb.probeClient = &http.Client{Timeout: blb.probeTimeout} - ticker := time.NewTicker(blb.probeInterval) - blb.done = make(chan bool) - go func() { - for { - select { - case <-blb.done: - ticker.Stop() - return - case <-ticker.C: - blb.probeServers() - } - } - }() -} - -// probeServers checks health status of servers -func (blb *BaseLoadBalancer) probeServers() { - statusChange := false - healthyServers := make([]*Server, 0, len(blb.Servers)) - for _, svr := range blb.Servers { - pass := blb.probeHTTP(svr.URL) - healthy, change := svr.RecordHealth(pass, blb.spec.HealthCheck.Passes, blb.spec.HealthCheck.Fails) - if change { - statusChange = true - } - if healthy { - healthyServers = append(healthyServers, svr) - } - } - if statusChange { - blb.healthyServers.Store(healthyServers) - // init consistent hash in sticky session when servers change - blb.initStickySession(blb.spec.StickySession, blb.HealthyServers()) - } -} - -// probeHTTP checks http url status -func (blb *BaseLoadBalancer) probeHTTP(url string) bool { - if blb.spec.HealthCheck.Path != "" { - url += blb.spec.HealthCheck.Path - } - res, err := blb.probeClient.Get(url) - if err != nil || res.StatusCode > 500 { - return false - } - return true -} - -// initConsistentHash initializes for consistent hash mode -func (blb *BaseLoadBalancer) initConsistentHash() { - members := make([]consistent.Member, len(blb.HealthyServers())) - for i, s := range blb.HealthyServers() { - members[i] = hashMember{server: s} - } - - cfg := consistent.Config{ - PartitionCount: 1024, - ReplicationFactor: 50, - Load: 1.25, - Hasher: hasher{}, - } - blb.consistentHash = consistent.New(members, cfg) -} - -// configLBCookie configures properties for load balancer-generated cookie -func (blb *BaseLoadBalancer) 
configLBCookie() { - if blb.spec.StickySession.LBCookieName == "" { - blb.spec.StickySession.LBCookieName = StickySessionDefaultLBCookieName - } - - blb.cookieExpire, _ = time.ParseDuration(blb.spec.StickySession.LBCookieExpire) - if blb.cookieExpire <= 0 { - blb.cookieExpire = StickySessionDefaultLBCookieExpire - } -} - -// ChooseServer chooses the sticky server if enable -func (blb *BaseLoadBalancer) ChooseServer(req *httpprot.Request) *Server { - if blb.spec.StickySession == nil { - return nil - } - - switch blb.spec.StickySession.Mode { - case StickySessionModeCookieConsistentHash: - return blb.chooseServerByConsistentHash(req) - case StickySessionModeDurationBased, StickySessionModeApplicationBased: - return blb.chooseServerByLBCookie(req) - } - - return nil -} - -// chooseServerByConsistentHash chooses server using consistent hash on cookie -func (blb *BaseLoadBalancer) chooseServerByConsistentHash(req *httpprot.Request) *Server { - cookie, err := req.Cookie(blb.spec.StickySession.AppCookieName) - if err != nil { - return nil - } - - m := blb.consistentHash.LocateKey([]byte(cookie.Value)) - if m != nil { - return m.(hashMember).server - } - - return nil -} - -// chooseServerByLBCookie chooses server by load balancer-generated cookie -func (blb *BaseLoadBalancer) chooseServerByLBCookie(req *httpprot.Request) *Server { - cookie, err := req.Cookie(blb.spec.StickySession.LBCookieName) - if err != nil { - return nil - } - - signed, err := hex.DecodeString(cookie.Value) - if err != nil || len(signed) != KeyLen+sha256.Size { - return nil - } - - key := signed[:KeyLen] - macBytes := signed[KeyLen:] - for _, s := range blb.HealthyServers() { - mac := hmac.New(sha256.New, key) - mac.Write([]byte(s.ID())) - expected := mac.Sum(nil) - if hmac.Equal(expected, macBytes) { - return s - } - } - - return nil -} - -// ReturnServer does some custom work before return server -func (blb *BaseLoadBalancer) ReturnServer(server *Server, req *httpprot.Request, resp *httpprot.Response) { - if blb.spec.StickySession == nil { - return - } - - setCookie := false - switch blb.spec.StickySession.Mode { - case StickySessionModeDurationBased: - setCookie = true - case StickySessionModeApplicationBased: - for _, c := range resp.Cookies() { - if c.Name == blb.spec.StickySession.AppCookieName { - setCookie = true - break - } - } - } - if setCookie { - cookie := &http.Cookie{ - Name: blb.spec.StickySession.LBCookieName, - Value: sign([]byte(server.ID())), - Expires: time.Now().Add(blb.cookieExpire), - } - resp.SetCookie(cookie) - } -} - -// Close closes resources -func (blb *BaseLoadBalancer) Close() { - if blb.done != nil { - close(blb.done) - } -} - -// sign signs plain text byte array to encoded string -func sign(plain []byte) string { - signed := make([]byte, KeyLen+sha256.Size) - key := signed[:KeyLen] - macBytes := signed[KeyLen:] - - // use maphash to generate random key fast - binary.LittleEndian.PutUint64(key, new(maphash.Hash).Sum64()) - mac := hmac.New(sha256.New, key) - mac.Write(plain) - mac.Sum(macBytes[:0]) - - return hex.EncodeToString(signed) -} - -// randomLoadBalancer does load balancing in a random manner. -type randomLoadBalancer struct { - BaseLoadBalancer -} - -func newRandomLoadBalancer(spec *LoadBalanceSpec, servers []*Server) *randomLoadBalancer { - lb := &randomLoadBalancer{} - lb.init(spec, servers) - return lb -} - -// ChooseServer implements the LoadBalancer interface. 
-func (lb *randomLoadBalancer) ChooseServer(req *httpprot.Request) *Server { - if len(lb.HealthyServers()) == 0 { - return nil - } - - if server := lb.BaseLoadBalancer.ChooseServer(req); server != nil { - return server - } - - return lb.HealthyServers()[rand.Intn(len(lb.HealthyServers()))] -} - -// roundRobinLoadBalancer does load balancing in a round robin manner. -type roundRobinLoadBalancer struct { - BaseLoadBalancer - counter uint64 -} - -func newRoundRobinLoadBalancer(spec *LoadBalanceSpec, servers []*Server) *roundRobinLoadBalancer { - lb := &roundRobinLoadBalancer{} - lb.init(spec, servers) - return lb -} - -// ChooseServer implements the LoadBalancer interface. -func (lb *roundRobinLoadBalancer) ChooseServer(req *httpprot.Request) *Server { - if len(lb.HealthyServers()) == 0 { - return nil - } - - if server := lb.BaseLoadBalancer.ChooseServer(req); server != nil { - return server - } - - counter := atomic.AddUint64(&lb.counter, 1) - 1 - return lb.HealthyServers()[int(counter)%len(lb.HealthyServers())] -} - -// WeightedRandomLoadBalancer does load balancing in a weighted random manner. -type WeightedRandomLoadBalancer struct { - BaseLoadBalancer - totalWeight int -} - -func newWeightedRandomLoadBalancer(spec *LoadBalanceSpec, servers []*Server) *WeightedRandomLoadBalancer { - lb := &WeightedRandomLoadBalancer{} - lb.init(spec, servers) - for _, server := range lb.HealthyServers() { - lb.totalWeight += server.Weight - } - return lb -} - -// ChooseServer implements the LoadBalancer interface. -func (lb *WeightedRandomLoadBalancer) ChooseServer(req *httpprot.Request) *Server { - if len(lb.HealthyServers()) == 0 { - return nil - } - - if server := lb.BaseLoadBalancer.ChooseServer(req); server != nil { - return server - } - - randomWeight := rand.Intn(lb.totalWeight) - for _, server := range lb.HealthyServers() { - randomWeight -= server.Weight - if randomWeight < 0 { - return server - } - } - - panic(fmt.Errorf("BUG: should not run to here, total weight=%d", lb.totalWeight)) -} - -// ipHashLoadBalancer does load balancing based on IP hash. -type ipHashLoadBalancer struct { - BaseLoadBalancer -} - -func newIPHashLoadBalancer(spec *LoadBalanceSpec, servers []*Server) *ipHashLoadBalancer { - lb := &ipHashLoadBalancer{} - lb.init(spec, servers) - return lb -} - -// ChooseServer implements the LoadBalancer interface. -func (lb *ipHashLoadBalancer) ChooseServer(req *httpprot.Request) *Server { - if len(lb.HealthyServers()) == 0 { - return nil - } - - if server := lb.BaseLoadBalancer.ChooseServer(req); server != nil { - return server - } - - ip := req.RealIP() - hash := fnv.New32() - hash.Write([]byte(ip)) - return lb.HealthyServers()[hash.Sum32()%uint32(len(lb.HealthyServers()))] -} - -// headerHashLoadBalancer does load balancing based on header hash. -type headerHashLoadBalancer struct { - BaseLoadBalancer - key string -} - -func newHeaderHashLoadBalancer(spec *LoadBalanceSpec, servers []*Server) *headerHashLoadBalancer { - lb := &headerHashLoadBalancer{} - lb.init(spec, servers) - lb.key = spec.HeaderHashKey - return lb -} - -// ChooseServer implements the LoadBalancer interface. 
-func (lb *headerHashLoadBalancer) ChooseServer(req *httpprot.Request) *Server { - if len(lb.HealthyServers()) == 0 { - return nil - } - - if server := lb.BaseLoadBalancer.ChooseServer(req); server != nil { - return server - } - - v := req.HTTPHeader().Get(lb.key) - hash := fnv.New32() - hash.Write([]byte(v)) - return lb.HealthyServers()[hash.Sum32()%uint32(len(lb.HealthyServers()))] -} - -*/ diff --git a/pkg/filters/proxies/httpproxy/pool.go b/pkg/filters/proxies/httpproxy/pool.go index 7faea87a26..1bb2d43366 100644 --- a/pkg/filters/proxies/httpproxy/pool.go +++ b/pkg/filters/proxies/httpproxy/pool.go @@ -228,6 +228,7 @@ func NewServerPool(proxy *Proxy, spec *ServerPoolSpec, name string) *ServerPool return sp } +// CreateLoadBalancer creates a load balancer according to spec. func (sp *ServerPool) CreateLoadBalancer(spec *LoadBalanceSpec, servers []*Server) LoadBalancer { lb := proxies.NewGeneralLoadBalancer(spec, servers) lb.Init(proxies.NewHTTPSessionSticker, proxies.NewHTTPHealthChecker, nil) diff --git a/pkg/filters/proxies/httpproxy/wspool.go b/pkg/filters/proxies/httpproxy/wspool.go index a89121c203..7e280b2fe1 100644 --- a/pkg/filters/proxies/httpproxy/wspool.go +++ b/pkg/filters/proxies/httpproxy/wspool.go @@ -26,6 +26,7 @@ import ( "sync" "github.com/megaease/easegress/pkg/context" + "github.com/megaease/easegress/pkg/filters/proxies" "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/protocols/httpprot" "github.com/megaease/easegress/pkg/protocols/httpprot/httpstat" @@ -63,8 +64,11 @@ func NewWebSocketServerPool(proxy *WebSocketProxy, spec *WebSocketServerPoolSpec return sp } +// CreateLoadBalancer creates a load balancer according to spec. func (sp *WebSocketServerPool) CreateLoadBalancer(spec *LoadBalanceSpec, servers []*Server) LoadBalancer { - return nil + lb := proxies.NewGeneralLoadBalancer(spec, servers) + lb.Init(proxies.NewHTTPSessionSticker, proxies.NewHTTPHealthChecker, nil) + return lb } func (sp *WebSocketServerPool) buildFailureResponse(ctx *context.Context, statusCode int) { diff --git a/pkg/filters/proxies/loadbalance.go b/pkg/filters/proxies/loadbalance.go index ba94cf8a5e..ca2006cf9c 100644 --- a/pkg/filters/proxies/loadbalance.go +++ b/pkg/filters/proxies/loadbalance.go @@ -28,6 +28,19 @@ import ( "github.com/megaease/easegress/pkg/protocols" ) +const ( + // LoadBalancePolicyRoundRobin is the load balance policy of round robin. + LoadBalancePolicyRoundRobin = "roundRobin" + // LoadBalancePolicyRandom is the load balance policy of random. + LoadBalancePolicyRandom = "random" + // LoadBalancePolicyWeightedRandom is the load balance policy of weighted random. + LoadBalancePolicyWeightedRandom = "weightedRandom" + // LoadBalancePolicyIPHash is the load balance policy of IP hash. + LoadBalancePolicyIPHash = "ipHash" + // LoadBalancePolicyHeaderHash is the load balance policy of HTTP header hash. + LoadBalancePolicyHeaderHash = "headerHash" +) + // LoadBalancer is the interface of a load balancer. type LoadBalancer interface { ChooseServer(req protocols.Request) *Server @@ -37,8 +50,9 @@ type LoadBalancer interface { // LoadBalanceSpec is the spec to create a load balancer. 
type LoadBalanceSpec struct { - Policy string `json:"policy" jsonschema:"omitempty,enum=,enum=roundRobin,enum=random,enum=weightedRandom,enum=ipHash,enum=headerHash"` + Policy string `json:"policy" jsonschema:"omitempty"` HeaderHashKey string `json:"headerHashKey" jsonschema:"omitempty"` + ForwardKey string `json:"forwardKey" jsonschema:"omitempty"` StickySession *StickySessionSpec `json:"stickySession" jsonschema:"omitempty"` HealthCheck *HealthCheckSpec `json:"healthCheck" jsonschema:"omitempty"` } @@ -46,7 +60,6 @@ type LoadBalanceSpec struct { // LoadBalancePolicy is the interface of a load balance policy. type LoadBalancePolicy interface { ChooseServer(req protocols.Request, sg *ServerGroup) *Server - Close() } // GeneralLoadBalancer implements a general purpose load balancer. @@ -76,9 +89,26 @@ func NewGeneralLoadBalancer(spec *LoadBalanceSpec, servers []*Server) *GeneralLo func (glb *GeneralLoadBalancer) Init( fnNewSessionSticker func(*StickySessionSpec) SessionSticker, fnNewHealthChecker func(*HealthCheckSpec) HealthChecker, - aa any, + lbp LoadBalancePolicy, ) { - // glb.lbp = lbp + if lbp == nil { + switch glb.spec.Policy { + case LoadBalancePolicyRoundRobin, "": + lbp = &RoundRobinLoadBalancePolicy{} + case LoadBalancePolicyRandom: + lbp = &RandomLoadBalancePolicy{} + case LoadBalancePolicyWeightedRandom: + lbp = &WeightedRandomLoadBalancePolicy{} + case LoadBalancePolicyIPHash: + lbp = &IPHashLoadBalancePolicy{} + case LoadBalancePolicyHeaderHash: + lbp = &HeaderHashLoadBalancePolicy{} + default: + logger.Errorf("unsupported load balancing policy: %s", glb.spec.Policy) + lbp = &RoundRobinLoadBalancePolicy{} + } + } + glb.lbp = lbp if glb.spec.StickySession != nil { ss := fnNewSessionSticker(glb.spec.StickySession) @@ -194,7 +224,6 @@ func (glb *GeneralLoadBalancer) Close() { if glb.ss != nil { glb.ss.Close() } - glb.lbp.Close() } // RandomLoadBalancePolicy is a load balance policy that chooses a server randomly. diff --git a/pkg/filters/proxies/httpproxy/loadbalance_test.go b/pkg/filters/proxies/loadbalance_test.go similarity index 99% rename from pkg/filters/proxies/httpproxy/loadbalance_test.go rename to pkg/filters/proxies/loadbalance_test.go index 8feb0ab966..162461f7d3 100644 --- a/pkg/filters/proxies/httpproxy/loadbalance_test.go +++ b/pkg/filters/proxies/loadbalance_test.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package httpproxy +package proxies import ( "fmt" From d684f5ade48c6559efa964c922f09e76711199cb Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Thu, 2 Feb 2023 15:59:14 +0800 Subject: [PATCH 05/14] refactor load balance complete. 
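
Move the load balancer, health check, sticky session, and server pool tests
into pkg/filters/proxies so they exercise the protocol-agnostic implementation
directly, rename basepool.go to serverpool.go, and harden the refactored code:
GeneralLoadBalancer.ChooseServer now checks for a nil healthy-server snapshot,
and HTTPSessionSticker falls back to the default LB cookie name and skips
consistent-hash setup when the server list is empty.

For reference, below is a minimal sketch of how a protocol-specific pool is
expected to wire up the general load balancer after this change. It mirrors
the HTTP pool code in this series; the package name, function names, and the
request type mentioned in the comments are illustrative and not part of this
patch:

    package example

    import (
        "github.com/megaease/easegress/pkg/filters/proxies"
        "github.com/megaease/easegress/pkg/protocols"
    )

    // buildLoadBalancer creates a general load balancer and injects the
    // protocol-specific hooks. HTTP pools pass the HTTP session sticker and
    // health checker; gRPC pools pass nil, or a custom LoadBalancePolicy
    // such as the forward policy.
    func buildLoadBalancer(spec *proxies.LoadBalanceSpec, servers []*proxies.Server) proxies.LoadBalancer {
        lb := proxies.NewGeneralLoadBalancer(spec, servers)
        lb.Init(proxies.NewHTTPSessionSticker, proxies.NewHTTPHealthChecker, nil)
        return lb
    }

    // chooseServer picks a backend for a request; req is a protocols.Request,
    // for example an *httpprot.Request.
    func chooseServer(lb proxies.LoadBalancer, req protocols.Request) *proxies.Server {
        return lb.ChooseServer(req)
    }

Keeping the session sticker, health checker, and policy as constructor
arguments to Init is what lets the HTTP, WebSocket, and gRPC pools share one
balancer implementation while plugging in protocol-specific behavior.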
--- .../proxies/grpcproxy/loadbalance_test.go | 7 +- pkg/filters/proxies/healthcheck_test.go | 47 ++++ pkg/filters/proxies/httpproxy/pool_test.go | 43 --- pkg/filters/proxies/httpproxy/wsproxy_test.go | 9 - pkg/filters/proxies/loadbalance.go | 11 +- pkg/filters/proxies/loadbalance_test.go | 260 ++++++------------ .../proxies/{basepool.go => serverpool.go} | 5 +- pkg/filters/proxies/serverpool_test.go | 87 ++++++ pkg/filters/proxies/stickysession.go | 12 + pkg/filters/proxies/stickysession_test.go | 136 +++++++++ 10 files changed, 376 insertions(+), 241 deletions(-) create mode 100644 pkg/filters/proxies/healthcheck_test.go rename pkg/filters/proxies/{basepool.go => serverpool.go} (95%) create mode 100644 pkg/filters/proxies/serverpool_test.go create mode 100644 pkg/filters/proxies/stickysession_test.go diff --git a/pkg/filters/proxies/grpcproxy/loadbalance_test.go b/pkg/filters/proxies/grpcproxy/loadbalance_test.go index ec6af1d2ed..13992aa062 100644 --- a/pkg/filters/proxies/grpcproxy/loadbalance_test.go +++ b/pkg/filters/proxies/grpcproxy/loadbalance_test.go @@ -29,18 +29,15 @@ import ( func TestForwardLB(t *testing.T) { assertions := assert.New(t) key := "forward-target" - balancer := NewLoadBalancer(&LoadBalanceSpec{Policy: "forward", ForwardKey: key}, nil) + lb := newForwardLoadBalancer(&LoadBalanceSpec{Policy: "forward", ForwardKey: key}) sm := grpcprot.NewFakeServerStream(metadata.NewIncomingContext(context.Background(), metadata.MD{})) req := grpcprot.NewRequestWithServerStream(sm) target := "127.0.0.1:8849" - assertions.Nil(balancer.ChooseServer(req)) + assertions.Nil(lb.ChooseServer(req)) req.Header().Set(key, target) - lb, ok := balancer.(ReusableServerLB) - assertions.True(ok) assertions.Equal(target, lb.ChooseServer(req).URL) - } diff --git a/pkg/filters/proxies/healthcheck_test.go b/pkg/filters/proxies/healthcheck_test.go new file mode 100644 index 0000000000..5d92f89802 --- /dev/null +++ b/pkg/filters/proxies/healthcheck_test.go @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package proxies + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type MockHealthChecker struct { + result bool +} + +func (c *MockHealthChecker) Check(svr *Server) bool { + return c.result +} + +func (c *MockHealthChecker) Close() { +} + +func TestHTTPHealthChecker(t *testing.T) { + spec := &HealthCheckSpec{} + c := NewHTTPHealthChecker(spec) + assert.NotNil(t, c) + + spec = &HealthCheckSpec{Timeout: "100ms"} + c = NewHTTPHealthChecker(spec) + c.Check(&Server{URL: "https://www.megaease.com"}) + + c.Close() +} diff --git a/pkg/filters/proxies/httpproxy/pool_test.go b/pkg/filters/proxies/httpproxy/pool_test.go index 4e6921ea2c..1c5eac73b2 100644 --- a/pkg/filters/proxies/httpproxy/pool_test.go +++ b/pkg/filters/proxies/httpproxy/pool_test.go @@ -23,7 +23,6 @@ import ( "testing" "github.com/megaease/easegress/pkg/context" - "github.com/megaease/easegress/pkg/object/serviceregistry" "github.com/megaease/easegress/pkg/option" "github.com/megaease/easegress/pkg/protocols/httpprot" "github.com/megaease/easegress/pkg/resilience" @@ -198,48 +197,6 @@ func TestCopyCORSHeaders(t *testing.T) { assert.Equal("dst", dst.Values("X-Dst")[0]) } -func TestUseService(t *testing.T) { - assert := assert.New(t) - - yamlConfig := `spanName: test -serverTags: [a1, a2] -servers: -- url: http://192.168.1.1 -` - - spec := &ServerPoolSpec{} - err := codectool.Unmarshal([]byte(yamlConfig), spec) - assert.NoError(err) - assert.NoError(spec.Validate()) - - p := &Proxy{} - p.super = supervisor.NewMock(option.New(), nil, sync.Map{}, sync.Map{}, nil, - nil, false, nil, nil) - sp := NewServerPool(p, spec, "test") - svr := sp.LoadBalancer().ChooseServer(nil) - assert.Equal("http://192.168.1.1", svr.URL) - - sp.useService(&spec.BaseServerPoolSpec, nil) - assert.Equal("http://192.168.1.1", svr.URL) - - sp.useService(&spec.BaseServerPoolSpec, map[string]*serviceregistry.ServiceInstanceSpec{ - "2": { - Address: "192.168.1.2", - Tags: []string{"a2"}, - Port: 80, - }, - "3": { - Address: "192.168.1.3", - Tags: []string{"a3"}, - Port: 80, - }, - }) - svr = sp.LoadBalancer().ChooseServer(nil) - assert.Equal("http://192.168.1.2:80", svr.URL) - svr = sp.LoadBalancer().ChooseServer(nil) - assert.Equal("http://192.168.1.2:80", svr.URL) -} - func TestRemoveHopByHopHeader(t *testing.T) { assert := assert.New(t) diff --git a/pkg/filters/proxies/httpproxy/wsproxy_test.go b/pkg/filters/proxies/httpproxy/wsproxy_test.go index 448501fce2..645a091cc1 100644 --- a/pkg/filters/proxies/httpproxy/wsproxy_test.go +++ b/pkg/filters/proxies/httpproxy/wsproxy_test.go @@ -160,13 +160,4 @@ pools: ctx.SetData("HTTP_RESPONSE_WRITER", httptest.NewRecorder()) assert.Equal(resultClientError, proxy.Handle(ctx)) } - - // no server - proxy.mainPool.loadBalancer.Store(NewLoadBalancer(&LoadBalanceSpec{}, nil)) - { - stdr, _ := http.NewRequest(http.MethodGet, "wss://www.megaease.com", nil) - ctx := getCtx(stdr) - ctx.SetData("HTTP_RESPONSE_WRITER", httptest.NewRecorder()) - assert.Equal(resultInternalError, proxy.Handle(ctx)) - } } diff --git a/pkg/filters/proxies/loadbalance.go b/pkg/filters/proxies/loadbalance.go index ca2006cf9c..d2906523f1 100644 --- a/pkg/filters/proxies/loadbalance.go +++ b/pkg/filters/proxies/loadbalance.go @@ -49,6 +49,10 @@ type LoadBalancer interface { } // LoadBalanceSpec is the spec to create a load balancer. +// +// TODO: this spec currently include all options for all load balance policies, +// this is not good as new policies could be added in the future, we should +// convert it to a map later. 
type LoadBalanceSpec struct { Policy string `json:"policy" jsonschema:"omitempty"` HeaderHashKey string `json:"headerHashKey" jsonschema:"omitempty"` @@ -91,6 +95,7 @@ func (glb *GeneralLoadBalancer) Init( fnNewHealthChecker func(*HealthCheckSpec) HealthChecker, lbp LoadBalancePolicy, ) { + // load balance policy if lbp == nil { switch glb.spec.Policy { case LoadBalancePolicyRoundRobin, "": @@ -102,7 +107,7 @@ func (glb *GeneralLoadBalancer) Init( case LoadBalancePolicyIPHash: lbp = &IPHashLoadBalancePolicy{} case LoadBalancePolicyHeaderHash: - lbp = &HeaderHashLoadBalancePolicy{} + lbp = &HeaderHashLoadBalancePolicy{spec: glb.spec} default: logger.Errorf("unsupported load balancing policy: %s", glb.spec.Policy) lbp = &RoundRobinLoadBalancePolicy{} @@ -110,12 +115,14 @@ func (glb *GeneralLoadBalancer) Init( } glb.lbp = lbp + // sticky session if glb.spec.StickySession != nil { ss := fnNewSessionSticker(glb.spec.StickySession) ss.UpdateServers(glb.servers) glb.ss = ss } + // health check if glb.spec.HealthCheck == nil { return } @@ -196,7 +203,7 @@ func (glb *GeneralLoadBalancer) checkServers() { // ChooseServer chooses a server according to the load balancing spec. func (glb *GeneralLoadBalancer) ChooseServer(req protocols.Request) *Server { sg := glb.healthyServers.Load() - if len(sg.Servers) == 0 { + if sg == nil || len(sg.Servers) == 0 { return nil } diff --git a/pkg/filters/proxies/loadbalance_test.go b/pkg/filters/proxies/loadbalance_test.go index 162461f7d3..af355da670 100644 --- a/pkg/filters/proxies/loadbalance_test.go +++ b/pkg/filters/proxies/loadbalance_test.go @@ -21,13 +21,21 @@ import ( "fmt" "math/rand" "net/http" + "os" "testing" "time" + "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/protocols/httpprot" "github.com/stretchr/testify/assert" ) +func TestMain(m *testing.M) { + logger.InitNop() + code := m.Run() + os.Exit(code) +} + func prepareServers(count int) []*Server { svrs := make([]*Server, 0, count) for i := 0; i < count; i++ { @@ -36,51 +44,52 @@ func prepareServers(count int) []*Server { return svrs } -func readCookie(cookies []*http.Cookie, name string) *http.Cookie { - for _, c := range cookies { - if c.Name == name { - return c - } +func TestGeneralLoadBalancer(t *testing.T) { + servers := prepareServers(10) + spec := &LoadBalanceSpec{ + Policy: LoadBalancePolicyRoundRobin, + StickySession: &StickySessionSpec{ + Mode: StickySessionModeCookieConsistentHash, + AppCookieName: "app_cookie", + }, + HealthCheck: &HealthCheckSpec{ + Interval: "5ms", + }, } - return nil -} -func TestRoundRobinLoadBalancer(t *testing.T) { - assert := assert.New(t) + lb := NewGeneralLoadBalancer(spec, servers) - var svrs []*Server - lb := NewLoadBalancer(&LoadBalanceSpec{Policy: "roundRobin"}, svrs) - assert.Nil(lb.ChooseServer(nil)) + lb.Init(NewHTTPSessionSticker, func(hcs *HealthCheckSpec) HealthChecker { + return &MockHealthChecker{result: false} + }, nil) - svrs = prepareServers(10) - lb = NewLoadBalancer(&LoadBalanceSpec{Policy: "roundRobin"}, svrs) - for i := 0; i < 10; i++ { - svr := lb.ChooseServer(nil) - assert.Equal(svr.Weight, i+1) - } + time.Sleep(20 * time.Millisecond) + assert.Equal(t, len(lb.healthyServers.Load().Servers), 0) - lb = NewLoadBalancer(&LoadBalanceSpec{Policy: "unknow"}, svrs) - for i := 0; i < 10; i++ { - svr := lb.ChooseServer(nil) - assert.Equal(svr.Weight, i+1) - } + lb.Close() + + lb = NewGeneralLoadBalancer(spec, servers) + + lb.Init(NewHTTPSessionSticker, func(hcs *HealthCheckSpec) HealthChecker { + return 
&MockHealthChecker{result: true} + }, nil) + + time.Sleep(20 * time.Millisecond) + assert.Equal(t, len(lb.healthyServers.Load().Servers), 10) + lb.Close() } -func TestRandomLoadBalancer(t *testing.T) { - assert := assert.New(t) +func TestRandomLoadBalancePolicy(t *testing.T) { rand.Seed(0) - var svrs []*Server - lb := NewLoadBalancer(&LoadBalanceSpec{Policy: "random"}, svrs) - assert.Nil(lb.ChooseServer(nil)) - - svrs = prepareServers(10) counter := [10]int{} + servers := prepareServers(10) + + lb := NewGeneralLoadBalancer(&LoadBalanceSpec{Policy: LoadBalancePolicyRandom}, servers) + lb.Init(nil, nil, nil) - lb = NewLoadBalancer(&LoadBalanceSpec{Policy: "random"}, svrs) for i := 0; i < 10000; i++ { svr := lb.ChooseServer(nil) - assert.NotNil(svr) counter[svr.Weight-1]++ } @@ -91,21 +100,37 @@ func TestRandomLoadBalancer(t *testing.T) { } } -func TestWeightedRandomLoadBalancer(t *testing.T) { - assert := assert.New(t) - rand.Seed(0) +func TestRoundRobinLoadBalancePolicy(t *testing.T) { + servers := prepareServers(10) - var svrs []*Server - lb := NewLoadBalancer(&LoadBalanceSpec{Policy: "weightedRandom"}, svrs) - assert.Nil(lb.ChooseServer(nil)) + lb := NewGeneralLoadBalancer(&LoadBalanceSpec{Policy: LoadBalancePolicyRoundRobin}, servers) + lb.Init(nil, nil, nil) + + for i := 0; i < 10; i++ { + svr := lb.ChooseServer(nil) + assert.Equal(t, svr.Weight, i+1) + } + + lb = NewGeneralLoadBalancer(&LoadBalanceSpec{Policy: "UnknowPolicy"}, servers) + lb.Init(nil, nil, nil) + + for i := 0; i < 10; i++ { + svr := lb.ChooseServer(nil) + assert.Equal(t, svr.Weight, i+1) + } +} + +func TestWeightedRandomLoadBalancePolicy(t *testing.T) { + rand.Seed(0) - svrs = prepareServers(10) counter := [10]int{} + servers := prepareServers(10) - lb = NewLoadBalancer(&LoadBalanceSpec{Policy: "weightedRandom"}, svrs) - for i := 0; i < 10000; i++ { + lb := NewGeneralLoadBalancer(&LoadBalanceSpec{Policy: LoadBalancePolicyWeightedRandom}, servers) + lb.Init(nil, nil, nil) + + for i := 0; i < 1000; i++ { svr := lb.ChooseServer(nil) - assert.NotNil(svr) counter[svr.Weight-1]++ } @@ -118,17 +143,13 @@ func TestWeightedRandomLoadBalancer(t *testing.T) { } } -func TestIPHashLoadBalancer(t *testing.T) { - assert := assert.New(t) - - var svrs []*Server - lb := NewLoadBalancer(&LoadBalanceSpec{Policy: "ipHash"}, svrs) - assert.Nil(lb.ChooseServer(nil)) +func TestIPHashLoadBalancePolicy(t *testing.T) { + counter := [10]int{} + servers := prepareServers(10) - svrs = prepareServers(10) - lb = NewLoadBalancer(&LoadBalanceSpec{Policy: "ipHash"}, svrs) + lb := NewGeneralLoadBalancer(&LoadBalanceSpec{Policy: LoadBalancePolicyIPHash}, servers) + lb.Init(nil, nil, nil) - counter := [10]int{} for i := 0; i < 100; i++ { req := &http.Request{Header: http.Header{}} req.Header.Add("X-Real-Ip", fmt.Sprintf("192.168.1.%d", i+1)) @@ -138,27 +159,17 @@ func TestIPHashLoadBalancer(t *testing.T) { } for i := 0; i < 10; i++ { - assert.GreaterOrEqual(counter[i], 1) + assert.GreaterOrEqual(t, counter[i], 1) } } -func TestHeaderHashLoadBalancer(t *testing.T) { - assert := assert.New(t) - - var svrs []*Server - lb := NewLoadBalancer(&LoadBalanceSpec{ - Policy: "headerHash", - HeaderHashKey: "X-Header", - }, svrs) - assert.Nil(lb.ChooseServer(nil)) +func TestHeaderHashLoadBalancePolicy(t *testing.T) { + counter := [10]int{} + servers := prepareServers(10) - svrs = prepareServers(10) - lb = NewLoadBalancer(&LoadBalanceSpec{ - Policy: "headerHash", - HeaderHashKey: "X-Header", - }, svrs) + lb := NewGeneralLoadBalancer(&LoadBalanceSpec{Policy: 
LoadBalancePolicyHeaderHash, HeaderHashKey: "X-Header"}, servers) + lb.Init(nil, nil, nil) - counter := [10]int{} for i := 0; i < 100; i++ { req := &http.Request{Header: http.Header{}} req.Header.Add("X-Header", fmt.Sprintf("abcd-%d", i)) @@ -168,119 +179,6 @@ func TestHeaderHashLoadBalancer(t *testing.T) { } for i := 0; i < 10; i++ { - assert.GreaterOrEqual(counter[i], 1) + assert.GreaterOrEqual(t, counter[i], 1) } } - -func TestStickySession_ConsistentHash(t *testing.T) { - assert := assert.New(t) - - servers := prepareServers(10) - lb := NewLoadBalancer(&LoadBalanceSpec{ - Policy: LoadBalancePolicyRandom, - StickySession: &StickySessionSpec{ - Mode: "CookieConsistentHash", - AppCookieName: "AppCookie", - }, - }, servers) - - req := &http.Request{Header: http.Header{}} - req.AddCookie(&http.Cookie{Name: "AppCookie", Value: "abcd-1"}) - r, _ := httpprot.NewRequest(req) - svr1 := lb.ChooseServer(r) - - for i := 0; i < 100; i++ { - svr := lb.ChooseServer(r) - assert.Equal(svr1, svr) - } -} - -func TestStickySession_DurationBased(t *testing.T) { - assert := assert.New(t) - - servers := prepareServers(10) - lb := NewLoadBalancer(&LoadBalanceSpec{ - Policy: LoadBalancePolicyRandom, - StickySession: &StickySessionSpec{ - Mode: StickySessionModeDurationBased, - }, - }, servers) - - r, _ := httpprot.NewRequest(&http.Request{Header: http.Header{}}) - svr1 := lb.ChooseServer(r) - resp, _ := httpprot.NewResponse(&http.Response{Header: http.Header{}}) - lb.ReturnServer(svr1, r, resp) - c := readCookie(resp.Cookies(), StickySessionDefaultLBCookieName) - - for i := 0; i < 100; i++ { - req := &http.Request{Header: http.Header{}} - req.AddCookie(&http.Cookie{Name: StickySessionDefaultLBCookieName, Value: c.Value}) - r, _ = httpprot.NewRequest(req) - svr := lb.ChooseServer(r) - assert.Equal(svr1, svr) - - resp, _ = httpprot.NewResponse(&http.Response{Header: http.Header{}}) - lb.ReturnServer(svr, r, resp) - c = readCookie(resp.Cookies(), StickySessionDefaultLBCookieName) - } -} - -func TestStickySession_ApplicationBased(t *testing.T) { - assert := assert.New(t) - - servers := prepareServers(10) - appCookieName := "x-app-cookie" - lb := NewLoadBalancer(&LoadBalanceSpec{ - Policy: LoadBalancePolicyRandom, - StickySession: &StickySessionSpec{ - Mode: StickySessionModeApplicationBased, - AppCookieName: appCookieName, - }, - }, servers) - - r, _ := httpprot.NewRequest(&http.Request{Header: http.Header{}}) - svr1 := lb.ChooseServer(r) - resp, _ := httpprot.NewResponse(&http.Response{Header: http.Header{}}) - resp.SetCookie(&http.Cookie{Name: appCookieName, Value: ""}) - lb.ReturnServer(svr1, r, resp) - c := readCookie(resp.Cookies(), StickySessionDefaultLBCookieName) - - for i := 0; i < 100; i++ { - req := &http.Request{Header: http.Header{}} - req.AddCookie(&http.Cookie{Name: StickySessionDefaultLBCookieName, Value: c.Value}) - r, _ = httpprot.NewRequest(req) - svr := lb.ChooseServer(r) - assert.Equal(svr1, svr) - - resp, _ = httpprot.NewResponse(&http.Response{Header: http.Header{}}) - resp.SetCookie(&http.Cookie{Name: appCookieName, Value: ""}) - lb.ReturnServer(svr, r, resp) - c = readCookie(resp.Cookies(), StickySessionDefaultLBCookieName) - } -} - -func BenchmarkSign(b *testing.B) { - for i := 0; i < b.N; i++ { - sign([]byte("192.168.1.2")) - } -} - -func TestHealthCheck(t *testing.T) { - assert := assert.New(t) - servers := prepareServers(3) - lb := NewLoadBalancer(&LoadBalanceSpec{ - Policy: LoadBalancePolicyRandom, - HealthCheck: &HealthCheckSpec{ - Interval: "3s", - Fails: 2, - }, - }, servers) - - 
assert.Equal(len(servers), len(lb.HealthyServers())) - - time.Sleep(5 * time.Second) - assert.Equal(len(servers), len(lb.HealthyServers())) - - time.Sleep(5 * time.Second) - assert.Equal(0, len(lb.HealthyServers())) -} diff --git a/pkg/filters/proxies/basepool.go b/pkg/filters/proxies/serverpool.go similarity index 95% rename from pkg/filters/proxies/basepool.go rename to pkg/filters/proxies/serverpool.go index db5c3a8a9d..0e41cf1893 100644 --- a/pkg/filters/proxies/basepool.go +++ b/pkg/filters/proxies/serverpool.go @@ -68,7 +68,7 @@ func (sps *ServerPoolBaseSpec) Validate() error { return fmt.Errorf(msgFmt, serversGotWeight, len(sps.Servers)) } - if sps.ServiceName != "" && sps.LoadBalance.HealthCheck != nil { + if sps.ServiceName != "" && sps.LoadBalance != nil && sps.LoadBalance.HealthCheck != nil { return fmt.Errorf("can not open health check for service discovery") } @@ -77,6 +77,7 @@ func (sps *ServerPoolBaseSpec) Validate() error { // Init initialize the base server pool according to the spec. func (spb *ServerPoolBase) Init(spImpl ServerPoolImpl, super *supervisor.Supervisor, name string, spec *ServerPoolBaseSpec) { + spb.spImpl = spImpl spb.Name = name spb.done = make(chan struct{}) @@ -168,10 +169,12 @@ func (spb *ServerPoolBase) useService(spec *ServerPoolBaseSpec, instances map[st spb.createLoadBalancer(spec.LoadBalance, servers) } +// Done returns the done channel, which indicates the closing of the server pool. func (spb *ServerPoolBase) Done() <-chan struct{} { return spb.done } +// Close closes the server pool. func (spb *ServerPoolBase) Close() { close(spb.done) spb.wg.Wait() diff --git a/pkg/filters/proxies/serverpool_test.go b/pkg/filters/proxies/serverpool_test.go new file mode 100644 index 0000000000..3c5d897658 --- /dev/null +++ b/pkg/filters/proxies/serverpool_test.go @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package proxies + +import ( + "testing" + + "github.com/megaease/easegress/pkg/object/serviceregistry" + "github.com/stretchr/testify/assert" +) + +func TestServerPoolSpecValidate(t *testing.T) { + assert := assert.New(t) + + spec := &ServerPoolBaseSpec{} + assert.Error(spec.Validate()) + + spec.Servers = []*Server{ + {}, {}, + } + + assert.NoError(spec.Validate()) + + spec.Servers[0].Weight = 1 + assert.Error(spec.Validate()) + + spec.Servers[1].Weight = 1 + spec.ServiceName = "test_service" + spec.LoadBalance = &LoadBalanceSpec{ + HealthCheck: &HealthCheckSpec{}, + } + assert.Error(spec.Validate()) +} + +type MockServerPoolImpl struct { +} + +func (m *MockServerPoolImpl) CreateLoadBalancer(spec *LoadBalanceSpec, servers []*Server) LoadBalancer { + lb := NewGeneralLoadBalancer(spec, servers) + lb.Init(NewHTTPSessionSticker, NewHTTPHealthChecker, nil) + return lb +} + +func TestUseService(t *testing.T) { + assert := assert.New(t) + + spec := &ServerPoolBaseSpec{ + ServerTags: []string{"a2"}, + LoadBalance: &LoadBalanceSpec{}, + } + + sp := &ServerPoolBase{ + spImpl: &MockServerPoolImpl{}, + } + + sp.useService(spec, map[string]*serviceregistry.ServiceInstanceSpec{ + "2": { + Address: "192.168.1.2", + Tags: []string{"a2"}, + Port: 80, + }, + "3": { + Address: "192.168.1.3", + Tags: []string{"a3"}, + Port: 80, + }, + }) + svr := sp.LoadBalancer().ChooseServer(nil) + assert.Equal("http://192.168.1.2:80", svr.URL) + svr = sp.LoadBalancer().ChooseServer(nil) + assert.Equal("http://192.168.1.2:80", svr.URL) +} diff --git a/pkg/filters/proxies/stickysession.go b/pkg/filters/proxies/stickysession.go index 760fd3f903..0ebc372a6b 100644 --- a/pkg/filters/proxies/stickysession.go +++ b/pkg/filters/proxies/stickysession.go @@ -41,6 +41,9 @@ const ( // StickySessionModeApplicationBased uses a load balancer-generated cookie depends on app cookie for stickiness. StickySessionModeApplicationBased = "ApplicationBased" + // StickySessionDefaultLBCookieName is the default name of the load balancer-generated cookie. + StickySessionDefaultLBCookieName = "EG_SESSION" + // KeyLen is the key length used by HMAC. KeyLen = 8 ) @@ -91,6 +94,10 @@ type HTTPSessionSticker struct { // NewHTTPSessionSticker creates a new HTTPSessionSticker. func NewHTTPSessionSticker(spec *StickySessionSpec) SessionSticker { + if spec.LBCookieName == "" { + spec.LBCookieName = StickySessionDefaultLBCookieName + } + ss := &HTTPSessionSticker{spec: spec} ss.cookieExpire, _ = time.ParseDuration(spec.LBCookieExpire) @@ -107,6 +114,11 @@ func (ss *HTTPSessionSticker) UpdateServers(servers []*Server) { return } + if len(servers) == 0 { + // TODO: consistentHash panics in this case, we need to handle it. + return + } + members := make([]consistent.Member, len(servers)) for i, s := range servers { members[i] = hashMember{server: s} diff --git a/pkg/filters/proxies/stickysession_test.go b/pkg/filters/proxies/stickysession_test.go new file mode 100644 index 0000000000..5421317d31 --- /dev/null +++ b/pkg/filters/proxies/stickysession_test.go @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package proxies + +import ( + "net/http" + "testing" + + "github.com/megaease/easegress/pkg/protocols/httpprot" + "github.com/stretchr/testify/assert" +) + +func readCookie(cookies []*http.Cookie, name string) *http.Cookie { + for _, c := range cookies { + if c.Name == name { + return c + } + } + return nil +} + +func TestStickySession_ConsistentHash(t *testing.T) { + assert := assert.New(t) + + servers := prepareServers(10) + spec := &LoadBalanceSpec{ + Policy: LoadBalancePolicyRandom, + StickySession: &StickySessionSpec{ + Mode: "CookieConsistentHash", + AppCookieName: "AppCookie", + }, + } + + lb := NewGeneralLoadBalancer(spec, servers) + lb.Init(NewHTTPSessionSticker, NewHTTPHealthChecker, nil) + + req := &http.Request{Header: http.Header{}} + req.AddCookie(&http.Cookie{Name: "AppCookie", Value: "abcd-1"}) + r, _ := httpprot.NewRequest(req) + svr1 := lb.ChooseServer(r) + + for i := 0; i < 100; i++ { + svr := lb.ChooseServer(r) + assert.Equal(svr1, svr) + } +} + +func TestStickySession_DurationBased(t *testing.T) { + assert := assert.New(t) + + servers := prepareServers(10) + spec := &LoadBalanceSpec{ + Policy: LoadBalancePolicyRandom, + StickySession: &StickySessionSpec{ + Mode: StickySessionModeDurationBased, + }, + } + + lb := NewGeneralLoadBalancer(spec, servers) + lb.Init(NewHTTPSessionSticker, NewHTTPHealthChecker, nil) + + r, _ := httpprot.NewRequest(&http.Request{Header: http.Header{}}) + svr1 := lb.ChooseServer(r) + resp, _ := httpprot.NewResponse(&http.Response{Header: http.Header{}}) + lb.ReturnServer(svr1, r, resp) + c := readCookie(resp.Cookies(), StickySessionDefaultLBCookieName) + + for i := 0; i < 100; i++ { + req := &http.Request{Header: http.Header{}} + req.AddCookie(&http.Cookie{Name: StickySessionDefaultLBCookieName, Value: c.Value}) + r, _ = httpprot.NewRequest(req) + svr := lb.ChooseServer(r) + assert.Equal(svr1, svr) + + resp, _ = httpprot.NewResponse(&http.Response{Header: http.Header{}}) + lb.ReturnServer(svr, r, resp) + c = readCookie(resp.Cookies(), StickySessionDefaultLBCookieName) + } +} + +func TestStickySession_ApplicationBased(t *testing.T) { + assert := assert.New(t) + + servers := prepareServers(10) + appCookieName := "x-app-cookie" + spec := &LoadBalanceSpec{ + Policy: LoadBalancePolicyRandom, + StickySession: &StickySessionSpec{ + Mode: StickySessionModeApplicationBased, + AppCookieName: appCookieName, + }, + } + lb := NewGeneralLoadBalancer(spec, servers) + lb.Init(NewHTTPSessionSticker, NewHTTPHealthChecker, nil) + + r, _ := httpprot.NewRequest(&http.Request{Header: http.Header{}}) + svr1 := lb.ChooseServer(r) + resp, _ := httpprot.NewResponse(&http.Response{Header: http.Header{}}) + resp.SetCookie(&http.Cookie{Name: appCookieName, Value: ""}) + lb.ReturnServer(svr1, r, resp) + c := readCookie(resp.Cookies(), StickySessionDefaultLBCookieName) + + for i := 0; i < 100; i++ { + req := &http.Request{Header: http.Header{}} + req.AddCookie(&http.Cookie{Name: StickySessionDefaultLBCookieName, Value: c.Value}) + r, _ = httpprot.NewRequest(req) + svr := lb.ChooseServer(r) + assert.Equal(svr1, svr) + + resp, _ = 
httpprot.NewResponse(&http.Response{Header: http.Header{}}) + resp.SetCookie(&http.Cookie{Name: appCookieName, Value: ""}) + lb.ReturnServer(svr, r, resp) + c = readCookie(resp.Cookies(), StickySessionDefaultLBCookieName) + } +} + +func BenchmarkSign(b *testing.B) { + for i := 0; i < b.N; i++ { + sign([]byte("192.168.1.2")) + } +} From 01c9056dd9bded75bbc44cbd96461a20fe3cd48d Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Thu, 2 Feb 2023 17:28:22 +0800 Subject: [PATCH 06/14] fix tests --- pkg/filters/proxies/loadbalance_test.go | 1 + pkg/object/meshcontroller/spec/spec_test.go | 23 +++++++++++---------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/pkg/filters/proxies/loadbalance_test.go b/pkg/filters/proxies/loadbalance_test.go index af355da670..ba1ddb9b43 100644 --- a/pkg/filters/proxies/loadbalance_test.go +++ b/pkg/filters/proxies/loadbalance_test.go @@ -68,6 +68,7 @@ func TestGeneralLoadBalancer(t *testing.T) { lb.Close() + servers = prepareServers(10) lb = NewGeneralLoadBalancer(spec, servers) lb.Init(NewHTTPSessionSticker, func(hcs *HealthCheckSpec) HealthChecker { diff --git a/pkg/object/meshcontroller/spec/spec_test.go b/pkg/object/meshcontroller/spec/spec_test.go index ad449d1137..a5e19e4e72 100644 --- a/pkg/object/meshcontroller/spec/spec_test.go +++ b/pkg/object/meshcontroller/spec/spec_test.go @@ -23,6 +23,7 @@ import ( "testing" "github.com/megaease/easegress/pkg/filters/mock" + "github.com/megaease/easegress/pkg/filters/proxies" proxy "github.com/megaease/easegress/pkg/filters/proxies/httpproxy" "github.com/megaease/easegress/pkg/filters/ratelimiter" "github.com/megaease/easegress/pkg/logger" @@ -195,7 +196,7 @@ func TestSidecarEgressPipelineSpec(t *testing.T) { s := &Service{ Name: "delivery-mesh", LoadBalance: &LoadBalance{ - Policy: proxy.LoadBalancePolicyIPHash, + Policy: proxies.LoadBalancePolicyIPHash, }, Sidecar: &Sidecar{ Address: "127.0.0.1", @@ -258,7 +259,7 @@ func TestSidecarEgressPipelineWithCanarySpec(t *testing.T) { s := &Service{ Name: "order-002-canary", LoadBalance: &LoadBalance{ - Policy: proxy.LoadBalancePolicyIPHash, + Policy: proxies.LoadBalancePolicyIPHash, }, Sidecar: &Sidecar{ Address: "127.0.0.1", @@ -422,7 +423,7 @@ func TestSidecarEgressPipelineWithMultipleCanarySpec(t *testing.T) { s := &Service{ Name: "order-003-canary-array", LoadBalance: &LoadBalance{ - Policy: proxy.LoadBalancePolicyIPHash, + Policy: proxies.LoadBalancePolicyIPHash, }, Sidecar: &Sidecar{ Address: "127.0.0.1", @@ -471,7 +472,7 @@ func TestSidecarEgressPipelineWithCanaryNoInstanceSpec(t *testing.T) { s := &Service{ Name: "order-004-canary-no-instance", LoadBalance: &LoadBalance{ - Policy: proxy.LoadBalancePolicyIPHash, + Policy: proxies.LoadBalancePolicyIPHash, }, Sidecar: &Sidecar{ Address: "127.0.0.1", @@ -520,7 +521,7 @@ func TestSidecarEgressPipelineWithCanaryInstanceMultipleLabelSpec(t *testing.T) s := &Service{ Name: "order-005-canary-instance-multiple-label", LoadBalance: &LoadBalance{ - Policy: proxy.LoadBalancePolicyIPHash, + Policy: proxies.LoadBalancePolicyIPHash, }, Sidecar: &Sidecar{ Address: "127.0.0.1", @@ -589,7 +590,7 @@ func TestSidecarIngressWithResiliencePipelineSpec(t *testing.T) { s := &Service{ Name: "order-001", LoadBalance: &LoadBalance{ - Policy: proxy.LoadBalancePolicyRandom, + Policy: proxies.LoadBalancePolicyRandom, }, Sidecar: &Sidecar{ Address: "127.0.0.1", @@ -630,7 +631,7 @@ func TestSidecarEgressResiliencePipelineSpec(t *testing.T) { s := &Service{ Name: "order-001", LoadBalance: &LoadBalance{ - Policy: 
proxy.LoadBalancePolicyIPHash, + Policy: proxies.LoadBalancePolicyIPHash, }, Sidecar: &Sidecar{ Address: "127.0.0.1", @@ -738,7 +739,7 @@ func TestIngressPipelineSpec(t *testing.T) { s := &Service{ Name: "order-001", LoadBalance: &LoadBalance{ - Policy: proxy.LoadBalancePolicyRandom, + Policy: proxies.LoadBalancePolicyRandom, }, Sidecar: &Sidecar{ Address: "127.0.0.1", @@ -790,7 +791,7 @@ func TestSidecarIngressPipelineSpecCert(t *testing.T) { s := &Service{ Name: "order-001", LoadBalance: &LoadBalance{ - Policy: proxy.LoadBalancePolicyRandom, + Policy: proxies.LoadBalancePolicyRandom, }, Sidecar: &Sidecar{ Address: "127.0.0.1", @@ -835,7 +836,7 @@ func TestSidecarIngressPipelineSpec(t *testing.T) { s := &Service{ Name: "order-001", LoadBalance: &LoadBalance{ - Policy: proxy.LoadBalancePolicyRandom, + Policy: proxies.LoadBalancePolicyRandom, }, Sidecar: &Sidecar{ Address: "127.0.0.1", @@ -865,7 +866,7 @@ func TestEgressName(t *testing.T) { s := &Service{ Name: "order-001", LoadBalance: &LoadBalance{ - Policy: proxy.LoadBalancePolicyRandom, + Policy: proxies.LoadBalancePolicyRandom, }, Sidecar: &Sidecar{ Address: "127.0.0.1", From 3757e341720d957ae3ab913f56f74b7bd0e7cb14 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Fri, 3 Feb 2023 14:05:13 +0800 Subject: [PATCH 07/14] refactor request matcher --- pkg/filters/proxies/grpcproxy/proxy.go | 1 + pkg/filters/proxies/grpcproxy/requestmatch.go | 216 +++--------------- .../proxies/grpcproxy/requestmatch_test.go | 209 ++++------------- pkg/filters/proxies/httpproxy/proxy.go | 1 + pkg/filters/proxies/httpproxy/requestmatch.go | 186 ++------------- .../proxies/httpproxy/requestmatch_test.go | 192 ++++------------ pkg/filters/proxies/loadbalance.go | 8 +- pkg/filters/proxies/requestmatch.go | 196 ++++++++++++++++ pkg/filters/proxies/requestmatch_test.go | 149 ++++++++++++ pkg/object/meshcontroller/spec/builder.go | 17 +- pkg/object/meshcontroller/spec/spec.go | 11 +- pkg/object/meshcontroller/spec/spec_test.go | 7 +- 12 files changed, 507 insertions(+), 686 deletions(-) create mode 100644 pkg/filters/proxies/requestmatch.go create mode 100644 pkg/filters/proxies/requestmatch_test.go diff --git a/pkg/filters/proxies/grpcproxy/proxy.go b/pkg/filters/proxies/grpcproxy/proxy.go index d1a78854c3..0c7e419ede 100644 --- a/pkg/filters/proxies/grpcproxy/proxy.go +++ b/pkg/filters/proxies/grpcproxy/proxy.go @@ -85,6 +85,7 @@ type ( // Server is the backend server. Server = proxies.Server + RequestMatcher = proxies.RequestMatcher LoadBalancer = proxies.LoadBalancer LoadBalanceSpec = proxies.LoadBalanceSpec BaseServerPool = proxies.ServerPoolBase diff --git a/pkg/filters/proxies/grpcproxy/requestmatch.go b/pkg/filters/proxies/grpcproxy/requestmatch.go index 9f7ecfe517..2d46db4447 100644 --- a/pkg/filters/proxies/grpcproxy/requestmatch.go +++ b/pkg/filters/proxies/grpcproxy/requestmatch.go @@ -18,58 +18,29 @@ package grpcproxy import ( - "fmt" - "hash/fnv" - "math/rand" - "regexp" - "strings" - + "github.com/megaease/easegress/pkg/filters/proxies" + "github.com/megaease/easegress/pkg/protocols" "github.com/megaease/easegress/pkg/protocols/grpcprot" - - "github.com/megaease/easegress/pkg/logger" ) -// RequestMatcher is the interface to match requests. 
-type RequestMatcher interface { - Match(req *grpcprot.Request) bool -} - // RequestMatcherSpec describe RequestMatcher type RequestMatcherSpec struct { - Policy string `json:"policy" jsonschema:"omitempty,enum=,enum=general,enum=ipHash,enum=headerHash,enum=random"` - MatchAllHeaders bool `json:"matchAllHeaders" jsonschema:"omitempty"` - Headers map[string]*StringMatcher `json:"headers" jsonschema:"omitempty"` - URLs []*URLMatcher `json:"urls" jsonschema:"omitempty"` - Permil uint32 `json:"permil" jsonschema:"omitempty,minimum=0,maximum=1000"` - HeaderHashKey string `json:"headerHashKey" jsonschema:"omitempty"` + proxies.RequestMatcherBaseSpec `json:",inline"` + Methods []*proxies.StringMatcher `json:"methods" jsonschema:"omitempty"` } // Validate validtes the RequestMatcherSpec. func (s *RequestMatcherSpec) Validate() error { - if s.Policy == "general" || s.Policy == "" { - if len(s.Headers) == 0 { - return fmt.Errorf("headers is not specified") - } - } else if s.Permil == 0 { - return fmt.Errorf("permil is not specified") - } - - for _, v := range s.Headers { - if err := v.Validate(); err != nil { - return err - } + if err := s.RequestMatcherBaseSpec.Validate(); err != nil { + return err } - for _, r := range s.URLs { + for _, r := range s.Methods { if err := r.Validate(); err != nil { return err } } - if s.Policy == "headerHash" && s.HeaderHashKey == "" { - return fmt.Errorf("headerHash needs to specify headerHashKey") - } - return nil } @@ -80,90 +51,48 @@ func NewRequestMatcher(spec *RequestMatcherSpec) RequestMatcher { matcher := &generalMatcher{ matchAllHeaders: spec.MatchAllHeaders, headers: spec.Headers, - urls: spec.URLs, + methods: spec.Methods, } matcher.init() return matcher - case "ipHash": - return &ipHashMatcher{permill: spec.Permil} - case "headerHash": - return &headerHashMatcher{ - permill: spec.Permil, - headerHashKey: spec.HeaderHashKey, - } - case "random": - return &randomMatcher{permill: spec.Permil} + default: + return proxies.NewRequestMatcher(&spec.RequestMatcherBaseSpec) } - - logger.Errorf("BUG: unsupported probability policy: %s", spec.Policy) - return &ipHashMatcher{permill: spec.Permil} -} - -// randomMatcher implements random request matcher. -type randomMatcher struct { - permill uint32 -} - -// Match implements protocols.Matcher. -func (rm randomMatcher) Match(req *grpcprot.Request) bool { - return rand.Uint32()%1000 < rm.permill -} - -// headerHashMatcher implements header hash request matcher. -type headerHashMatcher struct { - permill uint32 - headerHashKey string -} - -// Match implements protocols.Matcher. -func (hhm headerHashMatcher) Match(req *grpcprot.Request) bool { - v := req.RawHeader().GetFirst(hhm.headerHashKey) - hash := fnv.New32() - hash.Write([]byte(v)) - return hash.Sum32()%1000 < hhm.permill -} - -// ipHashMatcher implements IP address hash matcher. -type ipHashMatcher struct { - permill uint32 -} - -// Match implements protocols.Matcher. -func (iphm ipHashMatcher) Match(req *grpcprot.Request) bool { - ip := req.RealIP() - hash := fnv.New32() - hash.Write([]byte(ip)) - return hash.Sum32()%1000 < iphm.permill } // generalMatcher implements general grpc matcher. 
type generalMatcher struct { matchAllHeaders bool - headers map[string]*StringMatcher - urls []*URLMatcher + headers map[string]*proxies.StringMatcher + methods []*proxies.StringMatcher } func (gm *generalMatcher) init() { for _, h := range gm.headers { - h.init() + h.Init() } - for _, url := range gm.urls { - url.init() + for _, m := range gm.methods { + m.Init() } } // Match implements protocols.Matcher. -func (gm *generalMatcher) Match(req *grpcprot.Request) bool { +func (gm *generalMatcher) Match(req protocols.Request) bool { + grpcreq, ok := req.(*grpcprot.Request) + if !ok { + panic("not a grpc request") + } + matched := false if gm.matchAllHeaders { - matched = gm.matchAllHeader(req) + matched = gm.matchAllHeader(grpcreq) } else { - matched = gm.matchOneHeader(req) + matched = gm.matchOneHeader(grpcreq) } - if matched && len(gm.urls) > 0 { - matched = gm.matchURL(req) + if matched && len(gm.methods) > 0 { + matched = gm.matchMethod(grpcreq) } return matched @@ -209,98 +138,9 @@ func (gm *generalMatcher) matchAllHeader(req *grpcprot.Request) bool { return true } -func (gm *generalMatcher) matchURL(req *grpcprot.Request) bool { - for _, url := range gm.urls { - if url.Match(req) { - return true - } - } - return false -} - -// URLMatcher defines the match rule of a grpc request -type URLMatcher struct { - URL *StringMatcher `json:"url" jsonschema:"required"` -} - -// Validate validates the MethodAndURLMatcher. -func (r *URLMatcher) Validate() error { - return r.URL.Validate() -} - -func (r *URLMatcher) init() { - r.URL.init() -} - -// Match matches a request. -func (r *URLMatcher) Match(req *grpcprot.Request) bool { - return r.URL.Match(req.FullMethod()) -} - -// StringMatcher defines the match rule of a string -type StringMatcher struct { - Exact string `json:"exact" jsonschema:"omitempty"` - Prefix string `json:"prefix" jsonschema:"omitempty"` - RegEx string `json:"regex" jsonschema:"omitempty,format=regexp"` - Empty bool `json:"empty" jsonschema:"omitempty"` - re *regexp.Regexp -} - -// Validate validates the StringMatcher. -func (sm *StringMatcher) Validate() error { - if sm.Empty { - if sm.Exact != "" || sm.Prefix != "" || sm.RegEx != "" { - return fmt.Errorf("empty is conflict with other patterns") - } - return nil - } - - if sm.Exact != "" { - return nil - } - - if sm.Prefix != "" { - return nil - } - - if sm.RegEx != "" { - return nil - } - - return fmt.Errorf("all patterns are empty") -} - -func (sm *StringMatcher) init() { - if sm.RegEx != "" { - sm.re = regexp.MustCompile(sm.RegEx) - } -} - -// Match matches a string. -func (sm *StringMatcher) Match(value string) bool { - if sm.Empty && value == "" { - return true - } - - if sm.Exact != "" && value == sm.Exact { - return true - } - - if sm.Prefix != "" && strings.HasPrefix(value, sm.Prefix) { - return true - } - - if sm.re == nil { - return false - } - - return sm.re.MatchString(value) -} - -// MatchAny return true if any of the values matches. 
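For orientation, a minimal sketch of how the refactored gRPC matcher is assembled: the protocol-agnostic fields now come from the embedded proxies.RequestMatcherBaseSpec, and only Methods stays gRPC-specific. This is not part of the patch; the header name, header value, and method prefix are hypothetical.

func exampleCanaryMatch(req *grpcprot.Request) bool {
    rm := NewRequestMatcher(&RequestMatcherSpec{
        RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{
            // shared fields: policy, headers, permil, headerHashKey
            Headers: map[string]*proxies.StringMatcher{
                "X-Canary": {Exact: "true"},
            },
        },
        // gRPC-specific part: full-method rules replace the old URLMatcher
        Methods: []*proxies.StringMatcher{{Prefix: "/order.OrderService/"}},
    })
    return rm.Match(req) // MatchAllHeaders is false, so one matching header is enough
}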
-func (sm *StringMatcher) MatchAny(values []string) bool { - for _, v := range values { - if sm.Match(v) { +func (gm *generalMatcher) matchMethod(req *grpcprot.Request) bool { + for _, m := range gm.methods { + if m.Match(req.FullMethod()) { return true } } diff --git a/pkg/filters/proxies/grpcproxy/requestmatch_test.go b/pkg/filters/proxies/grpcproxy/requestmatch_test.go index 1b9e6e6d08..8a7bb1f65d 100644 --- a/pkg/filters/proxies/grpcproxy/requestmatch_test.go +++ b/pkg/filters/proxies/grpcproxy/requestmatch_test.go @@ -19,11 +19,9 @@ package grpcproxy import ( "context" - "fmt" - "math/rand" - "strconv" "testing" + "github.com/megaease/easegress/pkg/filters/proxies" "github.com/megaease/easegress/pkg/protocols/grpcprot" "google.golang.org/grpc/metadata" @@ -40,116 +38,38 @@ func TestRequestMatcherSpecValidate(t *testing.T) { spec.Permil = 100 assert.Error(spec.Validate()) - spec.Headers = map[string]*StringMatcher{} - spec.Headers["X-Test"] = &StringMatcher{ + spec.Headers = map[string]*proxies.StringMatcher{} + spec.Headers["X-Test"] = &proxies.StringMatcher{ Empty: true, Exact: "abc", } assert.Error(spec.Validate()) - spec.Headers["X-Test"] = &StringMatcher{Exact: "abc"} - spec.URLs = append(spec.URLs, &URLMatcher{ - URL: &StringMatcher{ - Empty: true, - Exact: "abc", - }, + spec.Headers["X-Test"] = &proxies.StringMatcher{Exact: "abc"} + spec.Methods = append(spec.Methods, &proxies.StringMatcher{ + Empty: true, + Exact: "abc", }) assert.Error(spec.Validate()) - spec.URLs[0] = &URLMatcher{ - URL: &StringMatcher{Empty: true}, - } + spec.Methods[0] = &proxies.StringMatcher{Empty: true} assert.Error(spec.Validate()) spec.HeaderHashKey = "X-Test" assert.NoError(spec.Validate()) } -func TestRandomMatcher(t *testing.T) { - rand.Seed(0) - - rm := NewRequestMatcher(&RequestMatcherSpec{ - Policy: "random", - Permil: 100, - }) - - match := 0 - for i := 0; i < 10000; i++ { - if rm.Match(nil) { - match++ - } - } - - if match < 900 || match > 1100 { - t.Error("random matcher is not working as configured") - } -} - -func TestHeaderHashMatcher(t *testing.T) { - rm := NewRequestMatcher(&RequestMatcherSpec{ - Policy: "headerHash", - HeaderHashKey: "X-Test", - Permil: 100, - }) - - sm := grpcprot.NewFakeServerStream(metadata.NewIncomingContext(context.Background(), metadata.MD{})) - req := grpcprot.NewRequestWithServerStream(sm) - - match := 0 - for i := 0; i < 10000; i++ { - req.Header().Set("X-Test", strconv.Itoa(i)) - if rm.Match(req) { - match++ - } - } - - if match < 900 || match > 1100 { - t.Error("header hash matcher is not working as configured") - } -} - -func TestIPHashMatcher(t *testing.T) { - rm := NewRequestMatcher(&RequestMatcherSpec{ - Policy: "unknownPolicy", - Permil: 100, - }) - switch rm.(type) { - case *ipHashMatcher: - break - default: - t.Error("should create an ip hash matcher") - } - - rm = NewRequestMatcher(&RequestMatcherSpec{ - Policy: "ipHash", - Permil: 100, - }) - - match := 0 - for i := 0; i < 10000; i++ { - a, b := i/256, i%256 - sm := grpcprot.NewFakeServerStream(metadata.NewIncomingContext(context.Background(), metadata.MD{})) - req := grpcprot.NewRequestWithServerStream(sm) - req.SetRealIP(fmt.Sprintf("192.168.%d.%d", a, b)) - if rm.Match(req) { - match++ - } - } - - if match < 900 || match > 1100 { - t.Errorf("ip hash matcher is not working as configured") - } -} - func TestGeneralMatche(t *testing.T) { assert := assert.New(t) // match all headers rm := NewRequestMatcher(&RequestMatcherSpec{ - MatchAllHeaders: true, - Headers: map[string]*StringMatcher{ - "X-Test1": 
{Exact: "test1"}, - "X-Test2": {Exact: "test2"}, + RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ + MatchAllHeaders: true, + Headers: map[string]*proxies.StringMatcher{ + "X-Test1": {Exact: "test1"}, + "X-Test2": {Exact: "test2"}, + }, }, }) @@ -163,10 +83,12 @@ func TestGeneralMatche(t *testing.T) { assert.False(rm.Match(req)) rm = NewRequestMatcher(&RequestMatcherSpec{ - MatchAllHeaders: true, - Headers: map[string]*StringMatcher{ - "X-Test1": {Exact: "test1"}, - "X-Test2": {Empty: true, Exact: "test2"}, + RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ + MatchAllHeaders: true, + Headers: map[string]*proxies.StringMatcher{ + "X-Test1": {Exact: "test1"}, + "X-Test2": {Empty: true, Exact: "test2"}, + }, }, }) @@ -175,9 +97,11 @@ func TestGeneralMatche(t *testing.T) { // match one header rm = NewRequestMatcher(&RequestMatcherSpec{ - Headers: map[string]*StringMatcher{ - "X-Test1": {Exact: "test1"}, - "X-Test2": {Empty: true, Exact: "test2"}, + RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ + Headers: map[string]*proxies.StringMatcher{ + "X-Test1": {Exact: "test1"}, + "X-Test2": {Empty: true, Exact: "test2"}, + }, }, }) assert.True(rm.Match(req)) @@ -186,9 +110,11 @@ func TestGeneralMatche(t *testing.T) { assert.True(rm.Match(req)) rm = NewRequestMatcher(&RequestMatcherSpec{ - Headers: map[string]*StringMatcher{ - "X-Test1": {Exact: "test1"}, - "X-Test2": {Exact: "test2"}, + RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ + Headers: map[string]*proxies.StringMatcher{ + "X-Test1": {Exact: "test1"}, + "X-Test2": {Exact: "test2"}, + }, }, }) assert.False(rm.Match(req)) @@ -197,75 +123,28 @@ func TestGeneralMatche(t *testing.T) { req.Header().Set("X-Test1", "test1") req.SetFullMethod("/abc") rm = NewRequestMatcher(&RequestMatcherSpec{ - Headers: map[string]*StringMatcher{ - "X-Test1": {Exact: "test1"}, - "X-Test2": {Exact: "test2"}, - }, - URLs: []*URLMatcher{ - { - URL: &StringMatcher{ - Exact: "/abc", - }, + RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ + Headers: map[string]*proxies.StringMatcher{ + "X-Test1": {Exact: "test1"}, + "X-Test2": {Exact: "test2"}, }, }, + Methods: []*proxies.StringMatcher{ + {Exact: "/abc"}, + }, }) assert.True(rm.Match(req)) rm = NewRequestMatcher(&RequestMatcherSpec{ - Headers: map[string]*StringMatcher{ - "X-Test1": {Exact: "test1"}, - "X-Test2": {Exact: "test2"}, - }, - URLs: []*URLMatcher{ - { - URL: &StringMatcher{ - Exact: "/abcd", - }, + RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ + Headers: map[string]*proxies.StringMatcher{ + "X-Test1": {Exact: "test1"}, + "X-Test2": {Exact: "test2"}, }, }, + Methods: []*proxies.StringMatcher{ + {Exact: "/abcd"}, + }, }) assert.False(rm.Match(req)) } - -func TestStringMatcher(t *testing.T) { - assert := assert.New(t) - - // validation - sm := &StringMatcher{Empty: true} - assert.NoError(sm.Validate()) - sm.init() - - sm = &StringMatcher{Empty: true, Exact: "abc"} - assert.Error(sm.Validate()) - - sm = &StringMatcher{} - assert.Error(sm.Validate()) - - sm = &StringMatcher{RegEx: "^abc[0-9]+$"} - assert.NoError(sm.Validate()) - sm.init() - - sm.Prefix = "/xyz" - assert.NoError(sm.Validate()) - - sm.Exact = "/abc" - assert.NoError(sm.Validate()) - - // match - sm = &StringMatcher{Empty: true} - assert.True(sm.Match("")) - assert.False(sm.Match("abc")) - - sm = &StringMatcher{RegEx: "^abc[0-9]+$"} - sm.init() - assert.True(sm.Match("abc123")) - assert.False(sm.Match("abc123d")) - - sm.Prefix = "/xyz" - assert.True(sm.Match("/xyz123")) - assert.False(sm.Match("/Xyz123")) 
- - sm.Exact = "/hello" - assert.True(sm.Match("/hello")) - assert.False(sm.Match("/Hello")) -} diff --git a/pkg/filters/proxies/httpproxy/proxy.go b/pkg/filters/proxies/httpproxy/proxy.go index db684b2ecd..fb662fb361 100644 --- a/pkg/filters/proxies/httpproxy/proxy.go +++ b/pkg/filters/proxies/httpproxy/proxy.go @@ -130,6 +130,7 @@ type ( // Server is the backend server. Server = proxies.Server + RequestMatcher = proxies.RequestMatcher LoadBalancer = proxies.LoadBalancer LoadBalanceSpec = proxies.LoadBalanceSpec BaseServerPool = proxies.ServerPoolBase diff --git a/pkg/filters/proxies/httpproxy/requestmatch.go b/pkg/filters/proxies/httpproxy/requestmatch.go index 7637f08662..37350bb5d9 100644 --- a/pkg/filters/proxies/httpproxy/requestmatch.go +++ b/pkg/filters/proxies/httpproxy/requestmatch.go @@ -18,46 +18,22 @@ package httpproxy import ( - "fmt" - "hash/fnv" - "math/rand" - "regexp" - "strings" - - "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/filters/proxies" + "github.com/megaease/easegress/pkg/protocols" "github.com/megaease/easegress/pkg/protocols/httpprot" "github.com/megaease/easegress/pkg/util/stringtool" ) -// RequestMatcher is the interface to match requests. -type RequestMatcher interface { - Match(req *httpprot.Request) bool -} - // RequestMatcherSpec describe RequestMatcher type RequestMatcherSpec struct { - Policy string `json:"policy" jsonschema:"omitempty,enum=,enum=general,enum=ipHash,enum=headerHash,enum=random"` - MatchAllHeaders bool `json:"matchAllHeaders" jsonschema:"omitempty"` - Headers map[string]*StringMatcher `json:"headers" jsonschema:"omitempty"` - URLs []*MethodAndURLMatcher `json:"urls" jsonschema:"omitempty"` - Permil uint32 `json:"permil" jsonschema:"omitempty,minimum=0,maximum=1000"` - HeaderHashKey string `json:"headerHashKey" jsonschema:"omitempty"` + proxies.RequestMatcherBaseSpec `json:",inline"` + URLs []*MethodAndURLMatcher `json:"urls" jsonschema:"omitempty"` } // Validate validtes the RequestMatcherSpec. func (s *RequestMatcherSpec) Validate() error { - if s.Policy == "general" || s.Policy == "" { - if len(s.Headers) == 0 { - return fmt.Errorf("headers is not specified") - } - } else if s.Permil == 0 { - return fmt.Errorf("permil is not specified") - } - - for _, v := range s.Headers { - if err := v.Validate(); err != nil { - return err - } + if err := s.RequestMatcherBaseSpec.Validate(); err != nil { + return err } for _, r := range s.URLs { @@ -66,15 +42,11 @@ func (s *RequestMatcherSpec) Validate() error { } } - if s.Policy == "headerHash" && s.HeaderHashKey == "" { - return fmt.Errorf("headerHash needs to specify headerHashKey") - } - return nil } // NewRequestMatcher creates a new traffic matcher according to spec. 
-func NewRequestMatcher(spec *RequestMatcherSpec) RequestMatcher { +func NewRequestMatcher(spec *RequestMatcherSpec) proxies.RequestMatcher { switch spec.Policy { case "", "general": matcher := &generalMatcher{ @@ -84,68 +56,21 @@ func NewRequestMatcher(spec *RequestMatcherSpec) RequestMatcher { } matcher.init() return matcher - case "ipHash": - return &ipHashMatcher{permill: spec.Permil} - case "headerHash": - return &headerHashMatcher{ - permill: spec.Permil, - headerHashKey: spec.HeaderHashKey, - } - case "random": - return &randomMatcher{permill: spec.Permil} + default: + return proxies.NewRequestMatcher(&spec.RequestMatcherBaseSpec) } - - logger.Errorf("BUG: unsupported probability policy: %s", spec.Policy) - return &ipHashMatcher{permill: spec.Permil} -} - -// randomMatcher implements random request matcher. -type randomMatcher struct { - permill uint32 -} - -// Match implements protocols.Matcher. -func (rm randomMatcher) Match(req *httpprot.Request) bool { - return rand.Uint32()%1000 < rm.permill -} - -// headerHashMatcher implements header hash request matcher. -type headerHashMatcher struct { - permill uint32 - headerHashKey string -} - -// Match implements protocols.Matcher. -func (hhm headerHashMatcher) Match(req *httpprot.Request) bool { - v := req.HTTPHeader().Get(hhm.headerHashKey) - hash := fnv.New32() - hash.Write([]byte(v)) - return hash.Sum32()%1000 < hhm.permill -} - -// ipHashMatcher implements IP address hash matcher. -type ipHashMatcher struct { - permill uint32 -} - -// Match implements protocols.Matcher. -func (iphm ipHashMatcher) Match(req *httpprot.Request) bool { - ip := req.RealIP() - hash := fnv.New32() - hash.Write([]byte(ip)) - return hash.Sum32()%1000 < iphm.permill } // generalMatcher implements general HTTP matcher. type generalMatcher struct { matchAllHeaders bool - headers map[string]*StringMatcher + headers map[string]*proxies.StringMatcher urls []*MethodAndURLMatcher } func (gm *generalMatcher) init() { for _, h := range gm.headers { - h.init() + h.Init() } for _, url := range gm.urls { @@ -154,16 +79,21 @@ func (gm *generalMatcher) init() { } // Match implements protocols.Matcher. -func (gm *generalMatcher) Match(req *httpprot.Request) bool { +func (gm *generalMatcher) Match(req protocols.Request) bool { + httpreq, ok := req.(*httpprot.Request) + if !ok { + panic("BUG: not a http request") + } + matched := false if gm.matchAllHeaders { - matched = gm.matchAllHeader(req) + matched = gm.matchAllHeader(httpreq) } else { - matched = gm.matchOneHeader(req) + matched = gm.matchOneHeader(httpreq) } if matched && len(gm.urls) > 0 { - matched = gm.matchURL(req) + matched = gm.matchURL(httpreq) } return matched @@ -220,8 +150,8 @@ func (gm *generalMatcher) matchURL(req *httpprot.Request) bool { // MethodAndURLMatcher defines the match rule of a http request type MethodAndURLMatcher struct { - Methods []string `json:"methods" jsonschema:"omitempty,uniqueItems=true,format=httpmethod-array"` - URL *StringMatcher `json:"url" jsonschema:"required"` + Methods []string `json:"methods" jsonschema:"omitempty,uniqueItems=true,format=httpmethod-array"` + URL *proxies.StringMatcher `json:"url" jsonschema:"required"` } // Validate validates the MethodAndURLMatcher. @@ -230,7 +160,7 @@ func (r *MethodAndURLMatcher) Validate() error { } func (r *MethodAndURLMatcher) init() { - r.URL.init() + r.URL.Init() } // Match matches a request. 
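The constructor above is effectively a two-way dispatch: an empty or "general" policy builds the HTTP-aware generalMatcher defined in this file, while every other policy is delegated to the shared proxies.NewRequestMatcher, so ipHash, headerHash, and random are implemented once for all protocols. A sketch with hypothetical specs, not part of the patch:

func exampleDispatch() {
    // "general" (or empty) policy: header/URL matching stays HTTP-specific.
    byHeader := NewRequestMatcher(&RequestMatcherSpec{
        RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{
            Headers: map[string]*proxies.StringMatcher{"X-Version": {Exact: "v2"}},
        },
    })
    // Any other policy: handed off to the shared matcher in package proxies.
    byIP := NewRequestMatcher(&RequestMatcherSpec{
        RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{Policy: "ipHash", Permil: 100},
    })
    _, _ = byHeader, byIP
}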
@@ -243,73 +173,3 @@ func (r *MethodAndURLMatcher) Match(req *httpprot.Request) bool { return r.URL.Match(req.URL().Path) } - -// StringMatcher defines the match rule of a string -type StringMatcher struct { - Exact string `json:"exact" jsonschema:"omitempty"` - Prefix string `json:"prefix" jsonschema:"omitempty"` - RegEx string `json:"regex" jsonschema:"omitempty,format=regexp"` - Empty bool `json:"empty" jsonschema:"omitempty"` - re *regexp.Regexp -} - -// Validate validates the StringMatcher. -func (sm *StringMatcher) Validate() error { - if sm.Empty { - if sm.Exact != "" || sm.Prefix != "" || sm.RegEx != "" { - return fmt.Errorf("empty is conflict with other patterns") - } - return nil - } - - if sm.Exact != "" { - return nil - } - - if sm.Prefix != "" { - return nil - } - - if sm.RegEx != "" { - return nil - } - - return fmt.Errorf("all patterns are empty") -} - -func (sm *StringMatcher) init() { - if sm.RegEx != "" { - sm.re = regexp.MustCompile(sm.RegEx) - } -} - -// Match matches a string. -func (sm *StringMatcher) Match(value string) bool { - if sm.Empty && value == "" { - return true - } - - if sm.Exact != "" && value == sm.Exact { - return true - } - - if sm.Prefix != "" && strings.HasPrefix(value, sm.Prefix) { - return true - } - - if sm.re == nil { - return false - } - - return sm.re.MatchString(value) -} - -// MatchAny return true if any of the values matches. -func (sm *StringMatcher) MatchAny(values []string) bool { - for _, v := range values { - if sm.Match(v) { - return true - } - } - return false -} diff --git a/pkg/filters/proxies/httpproxy/requestmatch_test.go b/pkg/filters/proxies/httpproxy/requestmatch_test.go index e65bc4471c..f9a76b8acf 100644 --- a/pkg/filters/proxies/httpproxy/requestmatch_test.go +++ b/pkg/filters/proxies/httpproxy/requestmatch_test.go @@ -18,12 +18,10 @@ package httpproxy import ( - "fmt" - "math/rand" "net/http" - "strconv" "testing" + "github.com/megaease/easegress/pkg/filters/proxies" "github.com/megaease/easegress/pkg/protocols/httpprot" "github.com/stretchr/testify/assert" ) @@ -38,16 +36,16 @@ func TestRequestMatcherSpecValidate(t *testing.T) { spec.Permil = 100 assert.Error(spec.Validate()) - spec.Headers = map[string]*StringMatcher{} - spec.Headers["X-Test"] = &StringMatcher{ + spec.Headers = map[string]*proxies.StringMatcher{} + spec.Headers["X-Test"] = &proxies.StringMatcher{ Empty: true, Exact: "abc", } assert.Error(spec.Validate()) - spec.Headers["X-Test"] = &StringMatcher{Exact: "abc"} + spec.Headers["X-Test"] = &proxies.StringMatcher{Exact: "abc"} spec.URLs = append(spec.URLs, &MethodAndURLMatcher{ - URL: &StringMatcher{ + URL: &proxies.StringMatcher{ Empty: true, Exact: "abc", }, @@ -55,7 +53,7 @@ func TestRequestMatcherSpecValidate(t *testing.T) { assert.Error(spec.Validate()) spec.URLs[0] = &MethodAndURLMatcher{ - URL: &StringMatcher{Empty: true}, + URL: &proxies.StringMatcher{Empty: true}, } assert.Error(spec.Validate()) @@ -63,92 +61,17 @@ func TestRequestMatcherSpecValidate(t *testing.T) { assert.NoError(spec.Validate()) } -func TestRandomMatcher(t *testing.T) { - rand.Seed(0) - - rm := NewRequestMatcher(&RequestMatcherSpec{ - Policy: "random", - Permil: 100, - }) - - match := 0 - for i := 0; i < 10000; i++ { - if rm.Match(nil) { - match++ - } - } - - if match < 900 || match > 1100 { - t.Error("random matcher is not working as configured") - } -} - -func TestHeaderHashMatcher(t *testing.T) { - rm := NewRequestMatcher(&RequestMatcherSpec{ - Policy: "headerHash", - HeaderHashKey: "X-Test", - Permil: 100, - }) - - stdr, _ := 
http.NewRequest(http.MethodGet, "http://megaease.com/abc", nil) - req, _ := httpprot.NewRequest(stdr) - - match := 0 - for i := 0; i < 10000; i++ { - stdr.Header.Set("X-Test", strconv.Itoa(i)) - if rm.Match(req) { - match++ - } - } - - if match < 900 || match > 1100 { - t.Error("header hash matcher is not working as configured") - } -} - -func TestIPHashMatcher(t *testing.T) { - rm := NewRequestMatcher(&RequestMatcherSpec{ - Policy: "unknownPolicy", - Permil: 100, - }) - switch rm.(type) { - case *ipHashMatcher: - break - default: - t.Error("should create an ip hash matcher") - } - - rm = NewRequestMatcher(&RequestMatcherSpec{ - Policy: "ipHash", - Permil: 100, - }) - - stdr := &http.Request{Header: http.Header{}} - - match := 0 - for i := 0; i < 10000; i++ { - a, b := i/256, i%256 - stdr.Header.Set("X-Real-Ip", fmt.Sprintf("192.168.%d.%d", a, b)) - req, _ := httpprot.NewRequest(stdr) - if rm.Match(req) { - match++ - } - } - - if match < 900 || match > 1100 { - t.Errorf("ip hash matcher is not working as configured") - } -} - func TestGeneralMatche(t *testing.T) { assert := assert.New(t) // match all headers rm := NewRequestMatcher(&RequestMatcherSpec{ - MatchAllHeaders: true, - Headers: map[string]*StringMatcher{ - "X-Test1": {Exact: "test1"}, - "X-Test2": {Exact: "test2"}, + RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ + MatchAllHeaders: true, + Headers: map[string]*proxies.StringMatcher{ + "X-Test1": {Exact: "test1"}, + "X-Test2": {Exact: "test2"}, + }, }, }) @@ -162,10 +85,12 @@ func TestGeneralMatche(t *testing.T) { assert.False(rm.Match(req)) rm = NewRequestMatcher(&RequestMatcherSpec{ - MatchAllHeaders: true, - Headers: map[string]*StringMatcher{ - "X-Test1": {Exact: "test1"}, - "X-Test2": {Empty: true, Exact: "test2"}, + RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ + MatchAllHeaders: true, + Headers: map[string]*proxies.StringMatcher{ + "X-Test1": {Exact: "test1"}, + "X-Test2": {Empty: true, Exact: "test2"}, + }, }, }) @@ -174,9 +99,11 @@ func TestGeneralMatche(t *testing.T) { // match one header rm = NewRequestMatcher(&RequestMatcherSpec{ - Headers: map[string]*StringMatcher{ - "X-Test1": {Exact: "test1"}, - "X-Test2": {Empty: true, Exact: "test2"}, + RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ + Headers: map[string]*proxies.StringMatcher{ + "X-Test1": {Exact: "test1"}, + "X-Test2": {Empty: true, Exact: "test2"}, + }, }, }) assert.True(rm.Match(req)) @@ -185,9 +112,11 @@ func TestGeneralMatche(t *testing.T) { assert.True(rm.Match(req)) rm = NewRequestMatcher(&RequestMatcherSpec{ - Headers: map[string]*StringMatcher{ - "X-Test1": {Exact: "test1"}, - "X-Test2": {Exact: "test2"}, + RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ + Headers: map[string]*proxies.StringMatcher{ + "X-Test1": {Exact: "test1"}, + "X-Test2": {Exact: "test2"}, + }, }, }) assert.False(rm.Match(req)) @@ -196,14 +125,16 @@ func TestGeneralMatche(t *testing.T) { stdr.Header.Set("X-Test1", "test1") rm = NewRequestMatcher(&RequestMatcherSpec{ - Headers: map[string]*StringMatcher{ - "X-Test1": {Exact: "test1"}, - "X-Test2": {Exact: "test2"}, + RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ + Headers: map[string]*proxies.StringMatcher{ + "X-Test1": {Exact: "test1"}, + "X-Test2": {Exact: "test2"}, + }, }, URLs: []*MethodAndURLMatcher{ { Methods: []string{http.MethodGet}, - URL: &StringMatcher{ + URL: &proxies.StringMatcher{ Exact: "/abc", }, }, @@ -212,14 +143,16 @@ func TestGeneralMatche(t *testing.T) { assert.True(rm.Match(req)) rm = 
NewRequestMatcher(&RequestMatcherSpec{ - Headers: map[string]*StringMatcher{ - "X-Test1": {Exact: "test1"}, - "X-Test2": {Exact: "test2"}, + RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ + Headers: map[string]*proxies.StringMatcher{ + "X-Test1": {Exact: "test1"}, + "X-Test2": {Exact: "test2"}, + }, }, URLs: []*MethodAndURLMatcher{ { Methods: []string{http.MethodGet}, - URL: &StringMatcher{ + URL: &proxies.StringMatcher{ Exact: "/abcd", }, }, @@ -232,7 +165,7 @@ func TestMethodAndURLMatcher(t *testing.T) { assert := assert.New(t) m := &MethodAndURLMatcher{ - URL: &StringMatcher{ + URL: &proxies.StringMatcher{ Exact: "/abc", }, } @@ -251,46 +184,3 @@ func TestMethodAndURLMatcher(t *testing.T) { m.Methods = []string{http.MethodPost} assert.False(m.Match(req)) } - -func TestStringMatcher(t *testing.T) { - assert := assert.New(t) - - // validation - sm := &StringMatcher{Empty: true} - assert.NoError(sm.Validate()) - sm.init() - - sm = &StringMatcher{Empty: true, Exact: "abc"} - assert.Error(sm.Validate()) - - sm = &StringMatcher{} - assert.Error(sm.Validate()) - - sm = &StringMatcher{RegEx: "^abc[0-9]+$"} - assert.NoError(sm.Validate()) - sm.init() - - sm.Prefix = "/xyz" - assert.NoError(sm.Validate()) - - sm.Exact = "/abc" - assert.NoError(sm.Validate()) - - // match - sm = &StringMatcher{Empty: true} - assert.True(sm.Match("")) - assert.False(sm.Match("abc")) - - sm = &StringMatcher{RegEx: "^abc[0-9]+$"} - sm.init() - assert.True(sm.Match("abc123")) - assert.False(sm.Match("abc123d")) - - sm.Prefix = "/xyz" - assert.True(sm.Match("/xyz123")) - assert.False(sm.Match("/Xyz123")) - - sm.Exact = "/hello" - assert.True(sm.Match("/hello")) - assert.False(sm.Match("/Hello")) -} diff --git a/pkg/filters/proxies/loadbalance.go b/pkg/filters/proxies/loadbalance.go index d2906523f1..1a3b6c13ff 100644 --- a/pkg/filters/proxies/loadbalance.go +++ b/pkg/filters/proxies/loadbalance.go @@ -274,12 +274,12 @@ func (lbp *WeightedRandomLoadBalancePolicy) ChooseServer(req protocols.Request, type IPHashLoadBalancePolicy struct { } +type realIPer interface { + RealIP() string +} + // ChooseServer chooses a server by ip hash. func (lbp *IPHashLoadBalancePolicy) ChooseServer(req protocols.Request, sg *ServerGroup) *Server { - type realIPer interface { - RealIP() string - } - ri, ok := req.(realIPer) if !ok { panic("IPHashLoadBalancePolicy only support request with RealIP()") diff --git a/pkg/filters/proxies/requestmatch.go b/pkg/filters/proxies/requestmatch.go new file mode 100644 index 0000000000..1fba5ab175 --- /dev/null +++ b/pkg/filters/proxies/requestmatch.go @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package proxies + +import ( + "fmt" + "hash/fnv" + "math/rand" + "regexp" + "strings" + + "github.com/megaease/easegress/pkg/logger" + "github.com/megaease/easegress/pkg/protocols" +) + +// RequestMatcher is the interface to match requests. 
+type RequestMatcher interface { + Match(req protocols.Request) bool +} + +// RequestMatcherBaseSpec describe RequestMatcher +type RequestMatcherBaseSpec struct { + Policy string `json:"policy" jsonschema:"omitempty,enum=,enum=general,enum=ipHash,enum=headerHash,enum=random"` + MatchAllHeaders bool `json:"matchAllHeaders" jsonschema:"omitempty"` + Headers map[string]*StringMatcher `json:"headers" jsonschema:"omitempty"` + Permil uint32 `json:"permil" jsonschema:"omitempty,minimum=0,maximum=1000"` + HeaderHashKey string `json:"headerHashKey" jsonschema:"omitempty"` +} + +// Validate validtes the RequestMatcherBaseSpec. +func (s *RequestMatcherBaseSpec) Validate() error { + if s.Policy == "general" || s.Policy == "" { + if len(s.Headers) == 0 { + return fmt.Errorf("headers is not specified") + } + } else if s.Permil == 0 { + return fmt.Errorf("permil is not specified") + } + + for _, v := range s.Headers { + if err := v.Validate(); err != nil { + return err + } + } + + if s.Policy == "headerHash" && s.HeaderHashKey == "" { + return fmt.Errorf("headerHash needs to specify headerHashKey") + } + + return nil +} + +// NewRequestMatcher creates a new traffic matcher according to spec. +func NewRequestMatcher(spec *RequestMatcherBaseSpec) RequestMatcher { + switch spec.Policy { + case "ipHash": + return &ipHashMatcher{permill: spec.Permil} + case "headerHash": + return &headerHashMatcher{ + permill: spec.Permil, + headerHashKey: spec.HeaderHashKey, + } + case "random": + return &randomMatcher{permill: spec.Permil} + } + + logger.Errorf("BUG: unsupported probability policy: %s", spec.Policy) + return &ipHashMatcher{permill: spec.Permil} +} + +// randomMatcher implements random request matcher. +type randomMatcher struct { + permill uint32 +} + +// Match implements protocols.Matcher. +func (rm randomMatcher) Match(req protocols.Request) bool { + return rand.Uint32()%1000 < rm.permill +} + +// headerHashMatcher implements header hash request matcher. +type headerHashMatcher struct { + permill uint32 + headerHashKey string +} + +// Match implements protocols.Matcher. +func (hhm headerHashMatcher) Match(req protocols.Request) bool { + v := req.Header().Get(hhm.headerHashKey).(string) + hash := fnv.New32() + hash.Write([]byte(v)) + return hash.Sum32()%1000 < hhm.permill +} + +// ipHashMatcher implements IP address hash matcher. +type ipHashMatcher struct { + permill uint32 +} + +// Match implements protocols.Matcher. +func (iphm ipHashMatcher) Match(req protocols.Request) bool { + ri, ok := req.(realIPer) + if !ok { + panic("IPHashLoadBalancePolicy only support request with RealIP()") + } + + ip := ri.RealIP() + hash := fnv.New32() + hash.Write([]byte(ip)) + return hash.Sum32()%1000 < iphm.permill +} + +// StringMatcher defines the match rule of a string +type StringMatcher struct { + Exact string `json:"exact" jsonschema:"omitempty"` + Prefix string `json:"prefix" jsonschema:"omitempty"` + RegEx string `json:"regex" jsonschema:"omitempty,format=regexp"` + Empty bool `json:"empty" jsonschema:"omitempty"` + re *regexp.Regexp +} + +// Validate validates the StringMatcher. 
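Permil is per-mille of traffic: Permil of 100 admits roughly 100 out of every 1000 requests. For ipHash and headerHash the hash makes the decision deterministic, so a given client IP or header value is consistently in or out of the matched slice. A small in-package sketch, not part of the patch (external callers would qualify the names with proxies.):

func exampleTenPercentByIP() RequestMatcher {
    return NewRequestMatcher(&RequestMatcherBaseSpec{
        Policy: "ipHash",
        Permil: 100, // matches when fnv32(realIP) % 1000 < 100
    })
}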
+func (sm *StringMatcher) Validate() error { + if sm.Empty { + if sm.Exact != "" || sm.Prefix != "" || sm.RegEx != "" { + return fmt.Errorf("empty is conflict with other patterns") + } + return nil + } + + if sm.Exact != "" { + return nil + } + + if sm.Prefix != "" { + return nil + } + + if sm.RegEx != "" { + return nil + } + + return fmt.Errorf("all patterns are empty") +} + +func (sm *StringMatcher) Init() { + if sm.RegEx != "" { + sm.re = regexp.MustCompile(sm.RegEx) + } +} + +// Match matches a string. +func (sm *StringMatcher) Match(value string) bool { + if sm.Empty && value == "" { + return true + } + + if sm.Exact != "" && value == sm.Exact { + return true + } + + if sm.Prefix != "" && strings.HasPrefix(value, sm.Prefix) { + return true + } + + if sm.re == nil { + return false + } + + return sm.re.MatchString(value) +} + +// MatchAny return true if any of the values matches. +func (sm *StringMatcher) MatchAny(values []string) bool { + for _, v := range values { + if sm.Match(v) { + return true + } + } + return false +} diff --git a/pkg/filters/proxies/requestmatch_test.go b/pkg/filters/proxies/requestmatch_test.go new file mode 100644 index 0000000000..8121584a06 --- /dev/null +++ b/pkg/filters/proxies/requestmatch_test.go @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package proxies + +import ( + "fmt" + "math/rand" + "net/http" + "strconv" + "testing" + + "github.com/megaease/easegress/pkg/protocols/httpprot" + "github.com/stretchr/testify/assert" +) + +func TestRandomMatcher(t *testing.T) { + rand.Seed(0) + + rm := NewRequestMatcher(&RequestMatcherBaseSpec{ + Policy: "random", + Permil: 100, + }) + + match := 0 + for i := 0; i < 10000; i++ { + if rm.Match(nil) { + match++ + } + } + + if match < 900 || match > 1100 { + t.Error("random matcher is not working as configured") + } +} + +func TestHeaderHashMatcher(t *testing.T) { + rm := NewRequestMatcher(&RequestMatcherBaseSpec{ + Policy: "headerHash", + HeaderHashKey: "X-Test", + Permil: 100, + }) + + stdr, _ := http.NewRequest(http.MethodGet, "http://megaease.com/abc", nil) + req, _ := httpprot.NewRequest(stdr) + + match := 0 + for i := 0; i < 10000; i++ { + stdr.Header.Set("X-Test", strconv.Itoa(i)) + if rm.Match(req) { + match++ + } + } + + if match < 900 || match > 1100 { + t.Error("header hash matcher is not working as configured") + } +} + +func TestIPHashMatcher(t *testing.T) { + rm := NewRequestMatcher(&RequestMatcherBaseSpec{ + Policy: "unknownPolicy", + Permil: 100, + }) + switch rm.(type) { + case *ipHashMatcher: + break + default: + t.Error("should create an ip hash matcher") + } + + rm = NewRequestMatcher(&RequestMatcherBaseSpec{ + Policy: "ipHash", + Permil: 100, + }) + + stdr := &http.Request{Header: http.Header{}} + + match := 0 + for i := 0; i < 10000; i++ { + a, b := i/256, i%256 + stdr.Header.Set("X-Real-Ip", fmt.Sprintf("192.168.%d.%d", a, b)) + req, _ := httpprot.NewRequest(stdr) + if rm.Match(req) { + match++ + } + } + + if match < 900 || match > 1100 { + t.Errorf("ip hash matcher is not working as configured") + } +} + +func TestStringMatcher(t *testing.T) { + assert := assert.New(t) + + // validation + sm := &StringMatcher{Empty: true} + assert.NoError(sm.Validate()) + sm.Init() + + sm = &StringMatcher{Empty: true, Exact: "abc"} + assert.Error(sm.Validate()) + + sm = &StringMatcher{} + assert.Error(sm.Validate()) + + sm = &StringMatcher{RegEx: "^abc[0-9]+$"} + assert.NoError(sm.Validate()) + sm.Init() + + sm.Prefix = "/xyz" + assert.NoError(sm.Validate()) + + sm.Exact = "/abc" + assert.NoError(sm.Validate()) + + // match + sm = &StringMatcher{Empty: true} + assert.True(sm.Match("")) + assert.False(sm.Match("abc")) + + sm = &StringMatcher{RegEx: "^abc[0-9]+$"} + sm.Init() + assert.True(sm.Match("abc123")) + assert.False(sm.Match("abc123d")) + + sm.Prefix = "/xyz" + assert.True(sm.Match("/xyz123")) + assert.False(sm.Match("/Xyz123")) + + sm.Exact = "/hello" + assert.True(sm.Match("/hello")) + assert.False(sm.Match("/Hello")) +} diff --git a/pkg/object/meshcontroller/spec/builder.go b/pkg/object/meshcontroller/spec/builder.go index 7473f297e4..999822c882 100644 --- a/pkg/object/meshcontroller/spec/builder.go +++ b/pkg/object/meshcontroller/spec/builder.go @@ -23,6 +23,7 @@ import ( "github.com/megaease/easegress/pkg/filters" "github.com/megaease/easegress/pkg/filters/meshadaptor" "github.com/megaease/easegress/pkg/filters/mock" + "github.com/megaease/easegress/pkg/filters/proxies" proxy "github.com/megaease/easegress/pkg/filters/proxies/httpproxy" "github.com/megaease/easegress/pkg/filters/ratelimiter" "github.com/megaease/easegress/pkg/logger" @@ -257,7 +258,7 @@ func (b *pipelineSpecBuilder) appendProxyWithCanary(param *proxyParam) *pipeline if candidatePools[i] == nil { headers := canary.TrafficRules.Clone().Headers - headers[ServiceCanaryHeaderKey] = &proxy.StringMatcher{ 
+ headers[ServiceCanaryHeaderKey] = &proxies.StringMatcher{ Exact: canary.Name, } candidatePools[i] = &proxy.ServerPoolSpec{ @@ -265,8 +266,10 @@ func (b *pipelineSpecBuilder) appendProxyWithCanary(param *proxyParam) *pipeline LoadBalance: param.lb, }, Filter: &proxy.RequestMatcherSpec{ - MatchAllHeaders: true, - Headers: headers, + RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ + MatchAllHeaders: true, + Headers: headers, + }, }, Timeout: param.timeout, RetryPolicy: param.retryPolicy, @@ -326,13 +329,15 @@ func (b *pipelineSpecBuilder) appendMeshAdaptor(canaries []*ServiceCanary) *pipe // NOTE: It means that setting `X-Mesh-Service-Canary: canaryName` // if `X-Mesh-Service-Canary` does not exist and other headers are matching. headers := canary.TrafficRules.Clone().Headers - headers[ServiceCanaryHeaderKey] = &proxy.StringMatcher{ + headers[ServiceCanaryHeaderKey] = &proxies.StringMatcher{ Empty: true, } adaptors[i] = &meshadaptor.ServiceCanaryAdaptor{ Filter: &proxy.RequestMatcherSpec{ - MatchAllHeaders: true, - Headers: headers, + RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ + MatchAllHeaders: true, + Headers: headers, + }, }, Header: &httpheader.AdaptSpec{ Set: map[string]string{ diff --git a/pkg/object/meshcontroller/spec/spec.go b/pkg/object/meshcontroller/spec/spec.go index c66558c8aa..8167f560d0 100644 --- a/pkg/object/meshcontroller/spec/spec.go +++ b/pkg/object/meshcontroller/spec/spec.go @@ -23,6 +23,7 @@ import ( "github.com/megaease/easegress/pkg/cluster/customdata" "github.com/megaease/easegress/pkg/filters/mock" + "github.com/megaease/easegress/pkg/filters/proxies" proxy "github.com/megaease/easegress/pkg/filters/proxies/httpproxy" "github.com/megaease/easegress/pkg/filters/ratelimiter" "github.com/megaease/easegress/pkg/resilience" @@ -229,9 +230,9 @@ type ( // CanaryRule is one matching rule for canary. CanaryRule struct { - ServiceInstanceLabels map[string]string `json:"serviceInstanceLabels" jsonschema:"required"` - Headers map[string]*proxy.StringMatcher `json:"headers" jsonschema:"required"` - URLs []*urlrule.URLRule `json:"urls" jsonschema:"required"` + ServiceInstanceLabels map[string]string `json:"serviceInstanceLabels" jsonschema:"required"` + Headers map[string]*proxies.StringMatcher `json:"headers" jsonschema:"required"` + URLs []*urlrule.URLRule `json:"urls" jsonschema:"required"` } // ServiceCanary is the service canary entry. @@ -247,7 +248,7 @@ type ( // TrafficRules is the rules of traffic. TrafficRules struct { - Headers map[string]*proxy.StringMatcher `json:"headers" jsonschema:"required"` + Headers map[string]*proxies.StringMatcher `json:"headers" jsonschema:"required"` } // LoadBalance is the spec of service load balance. @@ -480,7 +481,7 @@ func (sc ServiceCanary) Validate() error { // Clone clones TrafficRules. 
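For mesh specs the visible change is only the type of the header matchers; a canary traffic rule keeps the same shape as the existing test fixtures, for example:

func exampleCanaryRules() *TrafficRules {
    return &TrafficRules{
        Headers: map[string]*proxies.StringMatcher{
            "X-Location": {Exact: "Beijing"},
        },
    }
}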
func (tr *TrafficRules) Clone() *TrafficRules { - headers := map[string]*proxy.StringMatcher{} + headers := map[string]*proxies.StringMatcher{} for k, v := range tr.Headers { stringMatch := *v headers[k] = &stringMatch diff --git a/pkg/object/meshcontroller/spec/spec_test.go b/pkg/object/meshcontroller/spec/spec_test.go index a5e19e4e72..725445f89d 100644 --- a/pkg/object/meshcontroller/spec/spec_test.go +++ b/pkg/object/meshcontroller/spec/spec_test.go @@ -24,7 +24,6 @@ import ( "github.com/megaease/easegress/pkg/filters/mock" "github.com/megaease/easegress/pkg/filters/proxies" - proxy "github.com/megaease/easegress/pkg/filters/proxies/httpproxy" "github.com/megaease/easegress/pkg/filters/ratelimiter" "github.com/megaease/easegress/pkg/logger" _ "github.com/megaease/easegress/pkg/object/httpserver" @@ -239,7 +238,7 @@ func TestSidecarEgressPipelineSpec(t *testing.T) { }, }, TrafficRules: &TrafficRules{ - Headers: map[string]*proxy.StringMatcher{ + Headers: map[string]*proxies.StringMatcher{ "X-Location": { Exact: "Beijing", }, @@ -947,7 +946,7 @@ func TestAppendProxyWithCanary(t *testing.T) { }, }, TrafficRules: &TrafficRules{ - Headers: map[string]*proxy.StringMatcher{ + Headers: map[string]*proxies.StringMatcher{ "X-Location": { Exact: "Beijing", }, @@ -977,7 +976,7 @@ func TestAppendMeshAdaptor(t *testing.T) { }, }, TrafficRules: &TrafficRules{ - Headers: map[string]*proxy.StringMatcher{ + Headers: map[string]*proxies.StringMatcher{ "X-Location": { Exact: "Beijing", }, From 5b83b5cb7ea9a186c4e3ede47b80808ffaf803b2 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Sun, 5 Feb 2023 15:56:49 +0800 Subject: [PATCH 08/14] fix bugs --- pkg/filters/proxies/loadbalance.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/filters/proxies/loadbalance.go b/pkg/filters/proxies/loadbalance.go index 1a3b6c13ff..4dd3329f40 100644 --- a/pkg/filters/proxies/loadbalance.go +++ b/pkg/filters/proxies/loadbalance.go @@ -179,7 +179,7 @@ func (glb *GeneralLoadBalancer) checkServers() { } svr.HealthCounter-- if svr.Healthy() && svr.HealthCounter <= -glb.spec.HealthCheck.Fails { - logger.Warnf("server:%v becomes healthy.", svr.ID()) + logger.Warnf("server:%v becomes unhealthy.", svr.ID()) svr.Unhealth = true changed = true } @@ -226,6 +226,7 @@ func (glb *GeneralLoadBalancer) ReturnServer(server *Server, req protocols.Reque // Close closes the load balancer func (glb *GeneralLoadBalancer) Close() { if glb.hc != nil { + close(glb.done) glb.hc.Close() } if glb.ss != nil { From 7cdca74c062dc5c8a4cbb0827eb7af6ac3a8a8d2 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Mon, 6 Feb 2023 10:41:15 +0800 Subject: [PATCH 09/14] add RealIP to Request interface --- pkg/filters/proxies/loadbalance.go | 11 +---------- pkg/filters/proxies/requestmatch.go | 7 +------ pkg/protocols/mqttprot/request.go | 5 +++++ pkg/protocols/protocols.go | 3 +++ 4 files changed, 10 insertions(+), 16 deletions(-) diff --git a/pkg/filters/proxies/loadbalance.go b/pkg/filters/proxies/loadbalance.go index 4dd3329f40..cc797d5c66 100644 --- a/pkg/filters/proxies/loadbalance.go +++ b/pkg/filters/proxies/loadbalance.go @@ -275,18 +275,9 @@ func (lbp *WeightedRandomLoadBalancePolicy) ChooseServer(req protocols.Request, type IPHashLoadBalancePolicy struct { } -type realIPer interface { - RealIP() string -} - // ChooseServer chooses a server by ip hash. 
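With RealIP now part of the generic request interface, the policy no longer needs a type assertion on the concrete request. The selection itself is plain modular hashing; a standalone sketch of the same arithmetic, not part of the patch (uses hash/fnv, helper name is hypothetical):

func pickByIPHash(ip string, n uint32) uint32 {
    h := fnv.New32()
    h.Write([]byte(ip))
    return h.Sum32() % n // the same client IP keeps mapping to the same backend index
}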
func (lbp *IPHashLoadBalancePolicy) ChooseServer(req protocols.Request, sg *ServerGroup) *Server { - ri, ok := req.(realIPer) - if !ok { - panic("IPHashLoadBalancePolicy only support request with RealIP()") - } - - ip := ri.RealIP() + ip := req.RealIP() hash := fnv.New32() hash.Write([]byte(ip)) return sg.Servers[hash.Sum32()%uint32(len(sg.Servers))] diff --git a/pkg/filters/proxies/requestmatch.go b/pkg/filters/proxies/requestmatch.go index 1fba5ab175..6046af1bee 100644 --- a/pkg/filters/proxies/requestmatch.go +++ b/pkg/filters/proxies/requestmatch.go @@ -114,12 +114,7 @@ type ipHashMatcher struct { // Match implements protocols.Matcher. func (iphm ipHashMatcher) Match(req protocols.Request) bool { - ri, ok := req.(realIPer) - if !ok { - panic("IPHashLoadBalancePolicy only support request with RealIP()") - } - - ip := ri.RealIP() + ip := req.RealIP() hash := fnv.New32() hash.Write([]byte(ip)) return hash.Sum32()%1000 < iphm.permill diff --git a/pkg/protocols/mqttprot/request.go b/pkg/protocols/mqttprot/request.go index 198bf51c6d..11113ca100 100644 --- a/pkg/protocols/mqttprot/request.go +++ b/pkg/protocols/mqttprot/request.go @@ -140,6 +140,11 @@ func (r *Request) Header() protocols.Header { return nil } +// RealIP returns the real IP of the request. +func (r *Request) RealIP() string { + panic("not implemented") +} + // SetPayload set the payload of the request to payload. func (r *Request) SetPayload(payload interface{}) { p, ok := payload.([]byte) diff --git a/pkg/protocols/protocols.go b/pkg/protocols/protocols.go index bb4bb9b0e5..642120920e 100644 --- a/pkg/protocols/protocols.go +++ b/pkg/protocols/protocols.go @@ -39,6 +39,9 @@ type Request interface { // Header returns the header of the request. Header() Header + // RealIP returns the real IP of the request. + RealIP() string + // IsStream returns whether the payload is a stream, which cannot be // read for more than once. 
IsStream() bool From faae5f0159638e3bb635e83faddf0bcf3f9485b8 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Mon, 6 Feb 2023 17:14:04 +0800 Subject: [PATCH 10/14] move StringMatcher to package stringtool --- doc/reference/filters.md | 25 +++--- pkg/filters/mock/mock.go | 12 +-- pkg/filters/proxies/grpcproxy/requestmatch.go | 7 +- .../proxies/grpcproxy/requestmatch_test.go | 27 +++--- pkg/filters/proxies/httpproxy/requestmatch.go | 6 +- .../proxies/httpproxy/requestmatch_test.go | 29 +++---- pkg/filters/proxies/requestmatch.go | 83 ++----------------- pkg/filters/proxies/requestmatch_test.go | 44 ---------- pkg/object/meshcontroller/spec/builder.go | 5 +- pkg/object/meshcontroller/spec/spec.go | 12 +-- pkg/object/meshcontroller/spec/spec_test.go | 11 +-- pkg/util/stringtool/stringtool.go | 72 ++++++++++++++++ pkg/util/stringtool/stringtool_test.go | 43 ++++++++++ pkg/util/urlrule/urlrule.go | 74 +---------------- pkg/util/urlrule/urlrule_test.go | 32 +++---- 15 files changed, 213 insertions(+), 269 deletions(-) diff --git a/doc/reference/filters.md b/doc/reference/filters.md index 7a8f749c8f..a618dd9ef3 100644 --- a/doc/reference/filters.md +++ b/doc/reference/filters.md @@ -58,15 +58,15 @@ - [DataBuilder](#databuilder) - [Configuration](#configuration-18) - [Results](#results-18) - - [OIDCAdaptor](#OIDCAdaptor) + - [OIDCAdaptor](#oidcadaptor) - [Configuration](#configuration-19) - [Results](#results-19) - - [OPAFilter](#OPAFilter) + - [OPAFilter](#opafilter) - [Configuration](#configuration-20) - - [Results](#results-20) - - [Redirector](#Redirector) + - [Results](#results-20) + - [Redirector](#redirector) - [Configuration](#configuration-21) - - [Results](#results-21) + - [Results](#results-21) - [Common Types](#common-types) - [pathadaptor.Spec](#pathadaptorspec) - [pathadaptor.RegexpReplace](#pathadaptorregexpreplace) @@ -75,9 +75,10 @@ - [proxy.Server](#proxyserver) - [proxy.LoadBalanceSpec](#proxyloadbalancespec) - [proxy.StickySessionSpec](#proxystickysessionspec) + - [proxy.HealthCheckSpec](#proxyhealthcheckspec) - [proxy.MemoryCacheSpec](#proxymemorycachespec) - [proxy.RequestMatcherSpec](#proxyrequestmatcherspec) - - [proxy.StringMatcher](#proxystringmatcher) + - [StringMatcher](#stringmatcher) - [proxy.MethodAndURLMatcher](#proxymethodandurlmatcher) - [urlrule.URLRule](#urlruleurlrule) - [proxy.Compression](#proxycompression) @@ -88,6 +89,8 @@ - [ratelimiter.Policy](#ratelimiterpolicy) - [httpheader.ValueValidator](#httpheadervaluevalidator) - [validator.JWTValidatorSpec](#validatorjwtvalidatorspec) + - [validator.BasicAuthValidatorSpec](#validatorbasicauthvalidatorspec) + - [basicAuth.LDAPSpec](#basicauthldapspec) - [signer.Spec](#signerspec) - [signer.HeaderHoisting](#signerheaderhoisting) - [signer.Literal](#signerliteral) @@ -1394,13 +1397,13 @@ Polices: | Name | Type | Description | Required | | ---- | ---- | ----------- | -------- | | policy | string | Policy used to match requests, support `general`, `ipHash`, `headerHash`, `random` | No | -| headers | map[string][proxy.StringMatcher](#proxystringmatcher) | Request header filter options. The key of this map is header name, and the value of this map is header value match criteria | No | +| headers | map[string][StringMatcher](#stringmatcher) | Request header filter options. 
The key of this map is header name, and the value of this map is header value match criteria | No | | urls | [][proxy.MethodAndURLMatcher](#proxyMethodAndURLMatcher) | Request URL match criteria | No | | permil | uint32 | the probability of requests been matched. Value between 0 to 1000 | No | | matchAllHeaders | bool | All rules in headers should be match | No | | headerHashKey | string | Used by policy `headerHash`. | No | -### proxy.StringMatcher +### StringMatcher The relationship between `exact`, `prefix`, and `regex` is `OR`. @@ -1418,7 +1421,7 @@ The relationship between `methods` and `url` is `AND`. | Name | Type | Description | Required | | ------- | ------------------------------------------ | ---------------------------------------------------------------- | -------- | | methods | []string | HTTP method criteria, Default is an empty list means all methods | No | -| url | [proxy.StringMatcher](#proxystringmatcher) | Criteria to match a URL | Yes | +| url | [StringMatcher](#stringmatcher) | Criteria to match a URL | Yes | ### urlrule.URLRule @@ -1427,7 +1430,7 @@ The relationship between `methods` and `url` is `AND`. | Name | Type | Description | Required | | --------- | ------------------------------------------ | ---------------------------------------------------------------- | -------- | | methods | []string | HTTP method criteria, Default is an empty list means all methods | No | -| url | [urlrule.StringMatch](#urlruleStringMatch) | Criteria to match a URL | Yes | +| url | [StringMatcher](#StringMatcher) | Criteria to match a URL | Yes | | policyRef | string | Name of resilience policy for matched requests | No | @@ -1472,7 +1475,7 @@ The relationship between `methods` and `url` is `AND`. | path | string | Path match criteria, if request path is the value of this option, then the response of the request is mocked according to this rule | No | | pathPrefix | string | Path prefix match criteria, if request path begins with the value of this option, then the response of the request is mocked according to this rule | No | | matchAllHeaders | bool | Whether to match all headers | No | -| headers | map[string][url.StringMatch](#urlrulestringmatch) | Headers to match, key is a header name, value is the rule to match the header value | No | +| headers | map[string][StringMatcher](#stringmatcher) | Headers to match, key is a header name, value is the rule to match the header value | No | ### ratelimiter.Policy diff --git a/pkg/filters/mock/mock.go b/pkg/filters/mock/mock.go index 88024e4548..cda7177a73 100644 --- a/pkg/filters/mock/mock.go +++ b/pkg/filters/mock/mock.go @@ -25,7 +25,7 @@ import ( "github.com/megaease/easegress/pkg/filters" "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/protocols/httpprot" - "github.com/megaease/easegress/pkg/util/urlrule" + "github.com/megaease/easegress/pkg/util/stringtool" ) const ( @@ -77,10 +77,10 @@ type ( // MatchRule is the rule to match a request MatchRule struct { - Path string `json:"path,omitempty" jsonschema:"omitempty,pattern=^/"` - PathPrefix string `json:"pathPrefix,omitempty" jsonschema:"omitempty,pattern=^/"` - Headers map[string]*urlrule.StringMatch `json:"headers" jsonschema:"omitempty"` - MatchAllHeaders bool `json:"matchAllHeaders" jsonschema:"omitempty"` + Path string `json:"path,omitempty" jsonschema:"omitempty,pattern=^/"` + PathPrefix string `json:"pathPrefix,omitempty" jsonschema:"omitempty,pattern=^/"` + Headers map[string]*stringtool.StringMatcher `json:"headers" jsonschema:"omitempty"` + 
MatchAllHeaders bool `json:"matchAllHeaders" jsonschema:"omitempty"` } ) @@ -149,7 +149,7 @@ func (m *Mock) match(ctx *context.Context) *Rule { return strings.HasPrefix(path, rule.Match.PathPrefix) } - matchOneHeader := func(key string, rule *urlrule.StringMatch) bool { + matchOneHeader := func(key string, rule *stringtool.StringMatcher) bool { values := header.Values(key) if len(values) == 0 { return rule.Empty diff --git a/pkg/filters/proxies/grpcproxy/requestmatch.go b/pkg/filters/proxies/grpcproxy/requestmatch.go index 2d46db4447..c0f728fbc7 100644 --- a/pkg/filters/proxies/grpcproxy/requestmatch.go +++ b/pkg/filters/proxies/grpcproxy/requestmatch.go @@ -21,12 +21,13 @@ import ( "github.com/megaease/easegress/pkg/filters/proxies" "github.com/megaease/easegress/pkg/protocols" "github.com/megaease/easegress/pkg/protocols/grpcprot" + "github.com/megaease/easegress/pkg/util/stringtool" ) // RequestMatcherSpec describe RequestMatcher type RequestMatcherSpec struct { proxies.RequestMatcherBaseSpec `json:",inline"` - Methods []*proxies.StringMatcher `json:"methods" jsonschema:"omitempty"` + Methods []*stringtool.StringMatcher `json:"methods" jsonschema:"omitempty"` } // Validate validtes the RequestMatcherSpec. @@ -63,8 +64,8 @@ func NewRequestMatcher(spec *RequestMatcherSpec) RequestMatcher { // generalMatcher implements general grpc matcher. type generalMatcher struct { matchAllHeaders bool - headers map[string]*proxies.StringMatcher - methods []*proxies.StringMatcher + headers map[string]*stringtool.StringMatcher + methods []*stringtool.StringMatcher } func (gm *generalMatcher) init() { diff --git a/pkg/filters/proxies/grpcproxy/requestmatch_test.go b/pkg/filters/proxies/grpcproxy/requestmatch_test.go index 8a7bb1f65d..23269a62bc 100644 --- a/pkg/filters/proxies/grpcproxy/requestmatch_test.go +++ b/pkg/filters/proxies/grpcproxy/requestmatch_test.go @@ -23,6 +23,7 @@ import ( "github.com/megaease/easegress/pkg/filters/proxies" "github.com/megaease/easegress/pkg/protocols/grpcprot" + "github.com/megaease/easegress/pkg/util/stringtool" "google.golang.org/grpc/metadata" "github.com/stretchr/testify/assert" @@ -38,21 +39,21 @@ func TestRequestMatcherSpecValidate(t *testing.T) { spec.Permil = 100 assert.Error(spec.Validate()) - spec.Headers = map[string]*proxies.StringMatcher{} - spec.Headers["X-Test"] = &proxies.StringMatcher{ + spec.Headers = map[string]*stringtool.StringMatcher{} + spec.Headers["X-Test"] = &stringtool.StringMatcher{ Empty: true, Exact: "abc", } assert.Error(spec.Validate()) - spec.Headers["X-Test"] = &proxies.StringMatcher{Exact: "abc"} - spec.Methods = append(spec.Methods, &proxies.StringMatcher{ + spec.Headers["X-Test"] = &stringtool.StringMatcher{Exact: "abc"} + spec.Methods = append(spec.Methods, &stringtool.StringMatcher{ Empty: true, Exact: "abc", }) assert.Error(spec.Validate()) - spec.Methods[0] = &proxies.StringMatcher{Empty: true} + spec.Methods[0] = &stringtool.StringMatcher{Empty: true} assert.Error(spec.Validate()) spec.HeaderHashKey = "X-Test" @@ -66,7 +67,7 @@ func TestGeneralMatche(t *testing.T) { rm := NewRequestMatcher(&RequestMatcherSpec{ RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ MatchAllHeaders: true, - Headers: map[string]*proxies.StringMatcher{ + Headers: map[string]*stringtool.StringMatcher{ "X-Test1": {Exact: "test1"}, "X-Test2": {Exact: "test2"}, }, @@ -85,7 +86,7 @@ func TestGeneralMatche(t *testing.T) { rm = NewRequestMatcher(&RequestMatcherSpec{ RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ MatchAllHeaders: true, - Headers: 
map[string]*proxies.StringMatcher{ + Headers: map[string]*stringtool.StringMatcher{ "X-Test1": {Exact: "test1"}, "X-Test2": {Empty: true, Exact: "test2"}, }, @@ -98,7 +99,7 @@ func TestGeneralMatche(t *testing.T) { // match one header rm = NewRequestMatcher(&RequestMatcherSpec{ RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ - Headers: map[string]*proxies.StringMatcher{ + Headers: map[string]*stringtool.StringMatcher{ "X-Test1": {Exact: "test1"}, "X-Test2": {Empty: true, Exact: "test2"}, }, @@ -111,7 +112,7 @@ func TestGeneralMatche(t *testing.T) { rm = NewRequestMatcher(&RequestMatcherSpec{ RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ - Headers: map[string]*proxies.StringMatcher{ + Headers: map[string]*stringtool.StringMatcher{ "X-Test1": {Exact: "test1"}, "X-Test2": {Exact: "test2"}, }, @@ -124,12 +125,12 @@ func TestGeneralMatche(t *testing.T) { req.SetFullMethod("/abc") rm = NewRequestMatcher(&RequestMatcherSpec{ RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ - Headers: map[string]*proxies.StringMatcher{ + Headers: map[string]*stringtool.StringMatcher{ "X-Test1": {Exact: "test1"}, "X-Test2": {Exact: "test2"}, }, }, - Methods: []*proxies.StringMatcher{ + Methods: []*stringtool.StringMatcher{ {Exact: "/abc"}, }, }) @@ -137,12 +138,12 @@ func TestGeneralMatche(t *testing.T) { rm = NewRequestMatcher(&RequestMatcherSpec{ RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ - Headers: map[string]*proxies.StringMatcher{ + Headers: map[string]*stringtool.StringMatcher{ "X-Test1": {Exact: "test1"}, "X-Test2": {Exact: "test2"}, }, }, - Methods: []*proxies.StringMatcher{ + Methods: []*stringtool.StringMatcher{ {Exact: "/abcd"}, }, }) diff --git a/pkg/filters/proxies/httpproxy/requestmatch.go b/pkg/filters/proxies/httpproxy/requestmatch.go index 37350bb5d9..904a846e02 100644 --- a/pkg/filters/proxies/httpproxy/requestmatch.go +++ b/pkg/filters/proxies/httpproxy/requestmatch.go @@ -64,7 +64,7 @@ func NewRequestMatcher(spec *RequestMatcherSpec) proxies.RequestMatcher { // generalMatcher implements general HTTP matcher. type generalMatcher struct { matchAllHeaders bool - headers map[string]*proxies.StringMatcher + headers map[string]*stringtool.StringMatcher urls []*MethodAndURLMatcher } @@ -150,8 +150,8 @@ func (gm *generalMatcher) matchURL(req *httpprot.Request) bool { // MethodAndURLMatcher defines the match rule of a http request type MethodAndURLMatcher struct { - Methods []string `json:"methods" jsonschema:"omitempty,uniqueItems=true,format=httpmethod-array"` - URL *proxies.StringMatcher `json:"url" jsonschema:"required"` + Methods []string `json:"methods" jsonschema:"omitempty,uniqueItems=true,format=httpmethod-array"` + URL *stringtool.StringMatcher `json:"url" jsonschema:"required"` } // Validate validates the MethodAndURLMatcher. 
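// A minimal usage sketch of the consolidated stringtool.StringMatcher that these hunks
// switch to (illustrative only; it relies on the Validate/Init/Match/MatchAny methods
// that later hunks in this series move into pkg/util/stringtool, and is not part of the patch).

package main

import (
	"fmt"

	"github.com/megaease/easegress/pkg/util/stringtool"
)

func main() {
	// At least one pattern must be set; Empty cannot be combined with the other patterns.
	sm := &stringtool.StringMatcher{RegEx: "^abc[0-9]+$"}
	if err := sm.Validate(); err != nil {
		panic(err)
	}
	sm.Init() // compile the regular expression before matching

	fmt.Println(sm.Match("abc123"))                 // true
	fmt.Println(sm.Match("abc123d"))                // false
	fmt.Println(sm.MatchAny([]string{"x", "abc9"})) // true
}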
diff --git a/pkg/filters/proxies/httpproxy/requestmatch_test.go b/pkg/filters/proxies/httpproxy/requestmatch_test.go index f9a76b8acf..9b38ceba13 100644 --- a/pkg/filters/proxies/httpproxy/requestmatch_test.go +++ b/pkg/filters/proxies/httpproxy/requestmatch_test.go @@ -23,6 +23,7 @@ import ( "github.com/megaease/easegress/pkg/filters/proxies" "github.com/megaease/easegress/pkg/protocols/httpprot" + "github.com/megaease/easegress/pkg/util/stringtool" "github.com/stretchr/testify/assert" ) @@ -36,16 +37,16 @@ func TestRequestMatcherSpecValidate(t *testing.T) { spec.Permil = 100 assert.Error(spec.Validate()) - spec.Headers = map[string]*proxies.StringMatcher{} - spec.Headers["X-Test"] = &proxies.StringMatcher{ + spec.Headers = map[string]*stringtool.StringMatcher{} + spec.Headers["X-Test"] = &stringtool.StringMatcher{ Empty: true, Exact: "abc", } assert.Error(spec.Validate()) - spec.Headers["X-Test"] = &proxies.StringMatcher{Exact: "abc"} + spec.Headers["X-Test"] = &stringtool.StringMatcher{Exact: "abc"} spec.URLs = append(spec.URLs, &MethodAndURLMatcher{ - URL: &proxies.StringMatcher{ + URL: &stringtool.StringMatcher{ Empty: true, Exact: "abc", }, @@ -53,7 +54,7 @@ func TestRequestMatcherSpecValidate(t *testing.T) { assert.Error(spec.Validate()) spec.URLs[0] = &MethodAndURLMatcher{ - URL: &proxies.StringMatcher{Empty: true}, + URL: &stringtool.StringMatcher{Empty: true}, } assert.Error(spec.Validate()) @@ -68,7 +69,7 @@ func TestGeneralMatche(t *testing.T) { rm := NewRequestMatcher(&RequestMatcherSpec{ RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ MatchAllHeaders: true, - Headers: map[string]*proxies.StringMatcher{ + Headers: map[string]*stringtool.StringMatcher{ "X-Test1": {Exact: "test1"}, "X-Test2": {Exact: "test2"}, }, @@ -87,7 +88,7 @@ func TestGeneralMatche(t *testing.T) { rm = NewRequestMatcher(&RequestMatcherSpec{ RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ MatchAllHeaders: true, - Headers: map[string]*proxies.StringMatcher{ + Headers: map[string]*stringtool.StringMatcher{ "X-Test1": {Exact: "test1"}, "X-Test2": {Empty: true, Exact: "test2"}, }, @@ -100,7 +101,7 @@ func TestGeneralMatche(t *testing.T) { // match one header rm = NewRequestMatcher(&RequestMatcherSpec{ RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ - Headers: map[string]*proxies.StringMatcher{ + Headers: map[string]*stringtool.StringMatcher{ "X-Test1": {Exact: "test1"}, "X-Test2": {Empty: true, Exact: "test2"}, }, @@ -113,7 +114,7 @@ func TestGeneralMatche(t *testing.T) { rm = NewRequestMatcher(&RequestMatcherSpec{ RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ - Headers: map[string]*proxies.StringMatcher{ + Headers: map[string]*stringtool.StringMatcher{ "X-Test1": {Exact: "test1"}, "X-Test2": {Exact: "test2"}, }, @@ -126,7 +127,7 @@ func TestGeneralMatche(t *testing.T) { rm = NewRequestMatcher(&RequestMatcherSpec{ RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ - Headers: map[string]*proxies.StringMatcher{ + Headers: map[string]*stringtool.StringMatcher{ "X-Test1": {Exact: "test1"}, "X-Test2": {Exact: "test2"}, }, @@ -134,7 +135,7 @@ func TestGeneralMatche(t *testing.T) { URLs: []*MethodAndURLMatcher{ { Methods: []string{http.MethodGet}, - URL: &proxies.StringMatcher{ + URL: &stringtool.StringMatcher{ Exact: "/abc", }, }, @@ -144,7 +145,7 @@ func TestGeneralMatche(t *testing.T) { rm = NewRequestMatcher(&RequestMatcherSpec{ RequestMatcherBaseSpec: proxies.RequestMatcherBaseSpec{ - Headers: map[string]*proxies.StringMatcher{ + Headers: map[string]*stringtool.StringMatcher{ 
"X-Test1": {Exact: "test1"}, "X-Test2": {Exact: "test2"}, }, @@ -152,7 +153,7 @@ func TestGeneralMatche(t *testing.T) { URLs: []*MethodAndURLMatcher{ { Methods: []string{http.MethodGet}, - URL: &proxies.StringMatcher{ + URL: &stringtool.StringMatcher{ Exact: "/abcd", }, }, @@ -165,7 +166,7 @@ func TestMethodAndURLMatcher(t *testing.T) { assert := assert.New(t) m := &MethodAndURLMatcher{ - URL: &proxies.StringMatcher{ + URL: &stringtool.StringMatcher{ Exact: "/abc", }, } diff --git a/pkg/filters/proxies/requestmatch.go b/pkg/filters/proxies/requestmatch.go index 6046af1bee..3f9d2db931 100644 --- a/pkg/filters/proxies/requestmatch.go +++ b/pkg/filters/proxies/requestmatch.go @@ -21,11 +21,10 @@ import ( "fmt" "hash/fnv" "math/rand" - "regexp" - "strings" "github.com/megaease/easegress/pkg/logger" "github.com/megaease/easegress/pkg/protocols" + "github.com/megaease/easegress/pkg/util/stringtool" ) // RequestMatcher is the interface to match requests. @@ -35,11 +34,11 @@ type RequestMatcher interface { // RequestMatcherBaseSpec describe RequestMatcher type RequestMatcherBaseSpec struct { - Policy string `json:"policy" jsonschema:"omitempty,enum=,enum=general,enum=ipHash,enum=headerHash,enum=random"` - MatchAllHeaders bool `json:"matchAllHeaders" jsonschema:"omitempty"` - Headers map[string]*StringMatcher `json:"headers" jsonschema:"omitempty"` - Permil uint32 `json:"permil" jsonschema:"omitempty,minimum=0,maximum=1000"` - HeaderHashKey string `json:"headerHashKey" jsonschema:"omitempty"` + Policy string `json:"policy" jsonschema:"omitempty,enum=,enum=general,enum=ipHash,enum=headerHash,enum=random"` + MatchAllHeaders bool `json:"matchAllHeaders" jsonschema:"omitempty"` + Headers map[string]*stringtool.StringMatcher `json:"headers" jsonschema:"omitempty"` + Permil uint32 `json:"permil" jsonschema:"omitempty,minimum=0,maximum=1000"` + HeaderHashKey string `json:"headerHashKey" jsonschema:"omitempty"` } // Validate validtes the RequestMatcherBaseSpec. @@ -119,73 +118,3 @@ func (iphm ipHashMatcher) Match(req protocols.Request) bool { hash.Write([]byte(ip)) return hash.Sum32()%1000 < iphm.permill } - -// StringMatcher defines the match rule of a string -type StringMatcher struct { - Exact string `json:"exact" jsonschema:"omitempty"` - Prefix string `json:"prefix" jsonschema:"omitempty"` - RegEx string `json:"regex" jsonschema:"omitempty,format=regexp"` - Empty bool `json:"empty" jsonschema:"omitempty"` - re *regexp.Regexp -} - -// Validate validates the StringMatcher. -func (sm *StringMatcher) Validate() error { - if sm.Empty { - if sm.Exact != "" || sm.Prefix != "" || sm.RegEx != "" { - return fmt.Errorf("empty is conflict with other patterns") - } - return nil - } - - if sm.Exact != "" { - return nil - } - - if sm.Prefix != "" { - return nil - } - - if sm.RegEx != "" { - return nil - } - - return fmt.Errorf("all patterns are empty") -} - -func (sm *StringMatcher) Init() { - if sm.RegEx != "" { - sm.re = regexp.MustCompile(sm.RegEx) - } -} - -// Match matches a string. -func (sm *StringMatcher) Match(value string) bool { - if sm.Empty && value == "" { - return true - } - - if sm.Exact != "" && value == sm.Exact { - return true - } - - if sm.Prefix != "" && strings.HasPrefix(value, sm.Prefix) { - return true - } - - if sm.re == nil { - return false - } - - return sm.re.MatchString(value) -} - -// MatchAny return true if any of the values matches. 
-func (sm *StringMatcher) MatchAny(values []string) bool { - for _, v := range values { - if sm.Match(v) { - return true - } - } - return false -} diff --git a/pkg/filters/proxies/requestmatch_test.go b/pkg/filters/proxies/requestmatch_test.go index 8121584a06..3eb855f592 100644 --- a/pkg/filters/proxies/requestmatch_test.go +++ b/pkg/filters/proxies/requestmatch_test.go @@ -25,7 +25,6 @@ import ( "testing" "github.com/megaease/easegress/pkg/protocols/httpprot" - "github.com/stretchr/testify/assert" ) func TestRandomMatcher(t *testing.T) { @@ -104,46 +103,3 @@ func TestIPHashMatcher(t *testing.T) { t.Errorf("ip hash matcher is not working as configured") } } - -func TestStringMatcher(t *testing.T) { - assert := assert.New(t) - - // validation - sm := &StringMatcher{Empty: true} - assert.NoError(sm.Validate()) - sm.Init() - - sm = &StringMatcher{Empty: true, Exact: "abc"} - assert.Error(sm.Validate()) - - sm = &StringMatcher{} - assert.Error(sm.Validate()) - - sm = &StringMatcher{RegEx: "^abc[0-9]+$"} - assert.NoError(sm.Validate()) - sm.Init() - - sm.Prefix = "/xyz" - assert.NoError(sm.Validate()) - - sm.Exact = "/abc" - assert.NoError(sm.Validate()) - - // match - sm = &StringMatcher{Empty: true} - assert.True(sm.Match("")) - assert.False(sm.Match("abc")) - - sm = &StringMatcher{RegEx: "^abc[0-9]+$"} - sm.Init() - assert.True(sm.Match("abc123")) - assert.False(sm.Match("abc123d")) - - sm.Prefix = "/xyz" - assert.True(sm.Match("/xyz123")) - assert.False(sm.Match("/Xyz123")) - - sm.Exact = "/hello" - assert.True(sm.Match("/hello")) - assert.False(sm.Match("/Hello")) -} diff --git a/pkg/object/meshcontroller/spec/builder.go b/pkg/object/meshcontroller/spec/builder.go index 999822c882..e72019e2f0 100644 --- a/pkg/object/meshcontroller/spec/builder.go +++ b/pkg/object/meshcontroller/spec/builder.go @@ -32,6 +32,7 @@ import ( "github.com/megaease/easegress/pkg/resilience" "github.com/megaease/easegress/pkg/supervisor" "github.com/megaease/easegress/pkg/util/codectool" + "github.com/megaease/easegress/pkg/util/stringtool" ) type ( @@ -258,7 +259,7 @@ func (b *pipelineSpecBuilder) appendProxyWithCanary(param *proxyParam) *pipeline if candidatePools[i] == nil { headers := canary.TrafficRules.Clone().Headers - headers[ServiceCanaryHeaderKey] = &proxies.StringMatcher{ + headers[ServiceCanaryHeaderKey] = &stringtool.StringMatcher{ Exact: canary.Name, } candidatePools[i] = &proxy.ServerPoolSpec{ @@ -329,7 +330,7 @@ func (b *pipelineSpecBuilder) appendMeshAdaptor(canaries []*ServiceCanary) *pipe // NOTE: It means that setting `X-Mesh-Service-Canary: canaryName` // if `X-Mesh-Service-Canary` does not exist and other headers are matching. 
headers := canary.TrafficRules.Clone().Headers - headers[ServiceCanaryHeaderKey] = &proxies.StringMatcher{ + headers[ServiceCanaryHeaderKey] = &stringtool.StringMatcher{ Empty: true, } adaptors[i] = &meshadaptor.ServiceCanaryAdaptor{ diff --git a/pkg/object/meshcontroller/spec/spec.go b/pkg/object/meshcontroller/spec/spec.go index 8167f560d0..ff8c4c93ef 100644 --- a/pkg/object/meshcontroller/spec/spec.go +++ b/pkg/object/meshcontroller/spec/spec.go @@ -23,10 +23,10 @@ import ( "github.com/megaease/easegress/pkg/cluster/customdata" "github.com/megaease/easegress/pkg/filters/mock" - "github.com/megaease/easegress/pkg/filters/proxies" proxy "github.com/megaease/easegress/pkg/filters/proxies/httpproxy" "github.com/megaease/easegress/pkg/filters/ratelimiter" "github.com/megaease/easegress/pkg/resilience" + "github.com/megaease/easegress/pkg/util/stringtool" "github.com/megaease/easegress/pkg/util/urlrule" v1 "k8s.io/api/core/v1" @@ -230,9 +230,9 @@ type ( // CanaryRule is one matching rule for canary. CanaryRule struct { - ServiceInstanceLabels map[string]string `json:"serviceInstanceLabels" jsonschema:"required"` - Headers map[string]*proxies.StringMatcher `json:"headers" jsonschema:"required"` - URLs []*urlrule.URLRule `json:"urls" jsonschema:"required"` + ServiceInstanceLabels map[string]string `json:"serviceInstanceLabels" jsonschema:"required"` + Headers map[string]*stringtool.StringMatcher `json:"headers" jsonschema:"required"` + URLs []*urlrule.URLRule `json:"urls" jsonschema:"required"` } // ServiceCanary is the service canary entry. @@ -248,7 +248,7 @@ type ( // TrafficRules is the rules of traffic. TrafficRules struct { - Headers map[string]*proxies.StringMatcher `json:"headers" jsonschema:"required"` + Headers map[string]*stringtool.StringMatcher `json:"headers" jsonschema:"required"` } // LoadBalance is the spec of service load balance. @@ -481,7 +481,7 @@ func (sc ServiceCanary) Validate() error { // Clone clones TrafficRules. 
func (tr *TrafficRules) Clone() *TrafficRules { - headers := map[string]*proxies.StringMatcher{} + headers := map[string]*stringtool.StringMatcher{} for k, v := range tr.Headers { stringMatch := *v headers[k] = &stringMatch diff --git a/pkg/object/meshcontroller/spec/spec_test.go b/pkg/object/meshcontroller/spec/spec_test.go index 725445f89d..5c4d1ff514 100644 --- a/pkg/object/meshcontroller/spec/spec_test.go +++ b/pkg/object/meshcontroller/spec/spec_test.go @@ -29,6 +29,7 @@ import ( _ "github.com/megaease/easegress/pkg/object/httpserver" "github.com/megaease/easegress/pkg/resilience" "github.com/megaease/easegress/pkg/util/codectool" + "github.com/megaease/easegress/pkg/util/stringtool" "github.com/megaease/easegress/pkg/util/urlrule" v2alpha1 "github.com/megaease/easemesh-api/v2alpha1" ) @@ -238,7 +239,7 @@ func TestSidecarEgressPipelineSpec(t *testing.T) { }, }, TrafficRules: &TrafficRules{ - Headers: map[string]*proxies.StringMatcher{ + Headers: map[string]*stringtool.StringMatcher{ "X-Location": { Exact: "Beijing", }, @@ -610,7 +611,7 @@ func TestSidecarIngressWithResiliencePipelineSpec(t *testing.T) { URLs: []*ratelimiter.URLRule{{ URLRule: urlrule.URLRule{ Methods: []string{"GET"}, - URL: urlrule.StringMatch{ + URL: stringtool.StringMatcher{ Exact: "/path1", Prefix: "/path2/", RegEx: "^/path3/[0-9]+$", @@ -716,7 +717,7 @@ func TestPipelineBuilder(t *testing.T) { URLs: []*ratelimiter.URLRule{{ URLRule: urlrule.URLRule{ Methods: []string{"GET"}, - URL: urlrule.StringMatch{ + URL: stringtool.StringMatcher{ Exact: "/path1", Prefix: "/path2/", RegEx: "^/path3/[0-9]+$", @@ -946,7 +947,7 @@ func TestAppendProxyWithCanary(t *testing.T) { }, }, TrafficRules: &TrafficRules{ - Headers: map[string]*proxies.StringMatcher{ + Headers: map[string]*stringtool.StringMatcher{ "X-Location": { Exact: "Beijing", }, @@ -976,7 +977,7 @@ func TestAppendMeshAdaptor(t *testing.T) { }, }, TrafficRules: &TrafficRules{ - Headers: map[string]*proxies.StringMatcher{ + Headers: map[string]*stringtool.StringMatcher{ "X-Location": { Exact: "Beijing", }, diff --git a/pkg/util/stringtool/stringtool.go b/pkg/util/stringtool/stringtool.go index cab03c7570..5ea60d506e 100644 --- a/pkg/util/stringtool/stringtool.go +++ b/pkg/util/stringtool/stringtool.go @@ -18,6 +18,8 @@ package stringtool import ( + "fmt" + "regexp" "strings" ) @@ -91,3 +93,73 @@ func IsAnyEmpty(strs ...string) bool { return false } + +// StringMatcher defines the match rule of a string +type StringMatcher struct { + Exact string `json:"exact" jsonschema:"omitempty"` + Prefix string `json:"prefix" jsonschema:"omitempty"` + RegEx string `json:"regex" jsonschema:"omitempty,format=regexp"` + Empty bool `json:"empty" jsonschema:"omitempty"` + re *regexp.Regexp +} + +// Validate validates the StringMatcher. +func (sm *StringMatcher) Validate() error { + if sm.Empty { + if sm.Exact != "" || sm.Prefix != "" || sm.RegEx != "" { + return fmt.Errorf("empty is conflict with other patterns") + } + return nil + } + + if sm.Exact != "" { + return nil + } + + if sm.Prefix != "" { + return nil + } + + if sm.RegEx != "" { + return nil + } + + return fmt.Errorf("all patterns are empty") +} + +func (sm *StringMatcher) Init() { + if sm.RegEx != "" { + sm.re = regexp.MustCompile(sm.RegEx) + } +} + +// Match matches a string. 
+func (sm *StringMatcher) Match(value string) bool { + if sm.Empty && value == "" { + return true + } + + if sm.Exact != "" && value == sm.Exact { + return true + } + + if sm.Prefix != "" && strings.HasPrefix(value, sm.Prefix) { + return true + } + + if sm.re == nil { + return false + } + + return sm.re.MatchString(value) +} + +// MatchAny return true if any of the values matches. +func (sm *StringMatcher) MatchAny(values []string) bool { + for _, v := range values { + if sm.Match(v) { + return true + } + } + return false +} diff --git a/pkg/util/stringtool/stringtool_test.go b/pkg/util/stringtool/stringtool_test.go index 610805f906..ea117c65b8 100644 --- a/pkg/util/stringtool/stringtool_test.go +++ b/pkg/util/stringtool/stringtool_test.go @@ -60,3 +60,46 @@ func TestIsAnyEmpty(t *testing.T) { assert.False(IsAnyEmpty("a", "a", "a")) assert.False(IsAnyEmpty([]string{"a", "a", "a"}...)) } + +func TestStringMatcher(t *testing.T) { + assert := assert.New(t) + + // validation + sm := &StringMatcher{Empty: true} + assert.NoError(sm.Validate()) + sm.Init() + + sm = &StringMatcher{Empty: true, Exact: "abc"} + assert.Error(sm.Validate()) + + sm = &StringMatcher{} + assert.Error(sm.Validate()) + + sm = &StringMatcher{RegEx: "^abc[0-9]+$"} + assert.NoError(sm.Validate()) + sm.Init() + + sm.Prefix = "/xyz" + assert.NoError(sm.Validate()) + + sm.Exact = "/abc" + assert.NoError(sm.Validate()) + + // match + sm = &StringMatcher{Empty: true} + assert.True(sm.Match("")) + assert.False(sm.Match("abc")) + + sm = &StringMatcher{RegEx: "^abc[0-9]+$"} + sm.Init() + assert.True(sm.Match("abc123")) + assert.False(sm.Match("abc123d")) + + sm.Prefix = "/xyz" + assert.True(sm.Match("/xyz123")) + assert.False(sm.Match("/Xyz123")) + + sm.Exact = "/hello" + assert.True(sm.Match("/hello")) + assert.False(sm.Match("/Hello")) +} diff --git a/pkg/util/urlrule/urlrule.go b/pkg/util/urlrule/urlrule.go index 084ee04808..310c3880e5 100644 --- a/pkg/util/urlrule/urlrule.go +++ b/pkg/util/urlrule/urlrule.go @@ -18,85 +18,21 @@ package urlrule import ( - "fmt" "net/http" - "regexp" - "strings" "github.com/megaease/easegress/pkg/util/stringtool" ) type ( - // StringMatch defines the match rule of a string - StringMatch struct { - Exact string `json:"exact" jsonschema:"omitempty"` - Prefix string `json:"prefix" jsonschema:"omitempty"` - RegEx string `json:"regex" jsonschema:"omitempty,format=regexp"` - Empty bool `json:"empty" jsonschema:"omitempty"` - re *regexp.Regexp - } - // URLRule defines the match rule of a http request URLRule struct { id string - Methods []string `json:"methods" jsonschema:"omitempty,uniqueItems=true,format=httpmethod-array"` - URL StringMatch `json:"url" jsonschema:"required"` - PolicyRef string `json:"policyRef" jsonschema:"omitempty"` + Methods []string `json:"methods" jsonschema:"omitempty,uniqueItems=true,format=httpmethod-array"` + URL stringtool.StringMatcher `json:"url" jsonschema:"required"` + PolicyRef string `json:"policyRef" jsonschema:"omitempty"` } ) -// Validate validates the StringMatch object -func (sm StringMatch) Validate() error { - if sm.Empty { - if sm.Exact != "" || sm.Prefix != "" || sm.RegEx != "" { - return fmt.Errorf("empty is conflict with other patterns") - } - return nil - } - - if sm.Exact != "" { - return nil - } - - if sm.Prefix != "" { - return nil - } - - if sm.RegEx != "" { - return nil - } - - return fmt.Errorf("all patterns is empty") -} - -// Init initializes an StringMatch -func (sm *StringMatch) Init() { - if sm.RegEx != "" { - sm.re = regexp.MustCompile(sm.RegEx) - 
} -} - -// Match matches a string to the pattern -func (sm *StringMatch) Match(value string) bool { - if sm.Empty && value == "" { - return true - } - - if sm.Exact != "" && value == sm.Exact { - return true - } - - if sm.Prefix != "" && strings.HasPrefix(value, sm.Prefix) { - return true - } - - if sm.re == nil { - return false - } - - return sm.re.MatchString(value) -} - // ID returns the ID of the URLRule. // ID is the first valid one of Exact, Prefix, RegEx. func (r *URLRule) ID() string { @@ -112,9 +48,7 @@ func (r *URLRule) Init() { } else { r.id = r.URL.RegEx } - if r.URL.RegEx != "" { - r.URL.re = regexp.MustCompile(r.URL.RegEx) - } + r.URL.Init() } // Match matches a URL to the rule diff --git a/pkg/util/urlrule/urlrule_test.go b/pkg/util/urlrule/urlrule_test.go index 18b7fbb2e4..aad81c3f2c 100644 --- a/pkg/util/urlrule/urlrule_test.go +++ b/pkg/util/urlrule/urlrule_test.go @@ -20,6 +20,8 @@ package urlrule import ( "net/http" "testing" + + "github.com/megaease/easegress/pkg/util/stringtool" ) func TestURLRULEMatch(t *testing.T) { @@ -29,7 +31,7 @@ func TestURLRULEMatch(t *testing.T) { "GET", "POST", }, - URL: StringMatch{ + URL: stringtool.StringMatcher{ Prefix: "/", }, } @@ -56,7 +58,7 @@ func TestURLRegxMatch(t *testing.T) { "GET", "POST", }, - URL: StringMatch{ + URL: stringtool.StringMatcher{ RegEx: "^\\/app\\/.+$", }, } @@ -80,7 +82,7 @@ func TestURLExactMatch(t *testing.T) { "GET", "POST", }, - URL: StringMatch{ + URL: stringtool.StringMatcher{ Exact: "/app/v2/user", }, } @@ -103,7 +105,7 @@ func TestURLExactNotMatch(t *testing.T) { "GET", "POST", }, - URL: StringMatch{ + URL: stringtool.StringMatcher{ Exact: "/app/v2/user", }, } @@ -126,7 +128,7 @@ func TestURLPrefixNotMatch(t *testing.T) { "GET", "POST", }, - URL: StringMatch{ + URL: stringtool.StringMatcher{ Prefix: "/app/v3", }, } @@ -148,7 +150,7 @@ func TestURLRULENoMatchMethod(t *testing.T) { Methods: []string{ "DELETE", }, - URL: StringMatch{ + URL: stringtool.StringMatcher{ Prefix: "/", }, } @@ -168,7 +170,7 @@ func TestURLRULENoMatchURL(t *testing.T) { Methods: []string{ "POST", }, - URL: StringMatch{ + URL: stringtool.StringMatcher{ Exact: "/user", }, } @@ -184,7 +186,7 @@ func TestURLRULENoMatchURL(t *testing.T) { } func TestFailStringMatch(t *testing.T) { - sm := StringMatch{} + sm := stringtool.StringMatcher{} err := sm.Validate() if err == nil { @@ -199,7 +201,7 @@ func TestDeepEqual(t *testing.T) { "GET", "POST", }, - URL: StringMatch{ + URL: stringtool.StringMatcher{ Exact: "/app/v2/user", }, } @@ -210,7 +212,7 @@ func TestDeepEqual(t *testing.T) { "GET", "POST", }, - URL: StringMatch{ + URL: stringtool.StringMatcher{ Exact: "/app/v2/user", }, } @@ -222,7 +224,7 @@ func TestDeepEqual(t *testing.T) { "POST", "DELETE", }, - URL: StringMatch{ + URL: stringtool.StringMatcher{ RegEx: "^/app/v2", }, } @@ -233,7 +235,7 @@ func TestDeepEqual(t *testing.T) { "POST", "PATCH", }, - URL: StringMatch{ + URL: stringtool.StringMatcher{ Prefix: "/app/v3/user", }, } @@ -244,7 +246,7 @@ func TestDeepEqual(t *testing.T) { "GET", "POST", }, - URL: StringMatch{ + URL: stringtool.StringMatcher{ Exact: "/app/v3/user", }, } @@ -255,7 +257,7 @@ func TestDeepEqual(t *testing.T) { "GET", "POST", }, - URL: StringMatch{ + URL: stringtool.StringMatcher{ Exact: "/app/v2/user", Prefix: "/app", }, @@ -267,7 +269,7 @@ func TestDeepEqual(t *testing.T) { "GET", "POST", }, - URL: StringMatch{ + URL: stringtool.StringMatcher{ Exact: "/app/v2/user", Prefix: "/app", RegEx: "^.*$", From 7bf022c82c06bba59d79c91fd8ec24721fb39631 Mon Sep 17 00:00:00 
2001 From: Bomin Zhang Date: Wed, 8 Feb 2023 15:31:19 +0800 Subject: [PATCH 11/14] remove some redundant code --- pkg/object/grpcserver/grpcserver.go | 26 ++-- pkg/object/grpcserver/mux.go | 184 +++++++++++--------------- pkg/object/grpcserver/mux_test.go | 13 +- pkg/object/grpcserver/runtime.go | 192 ++++++++++++++-------------- pkg/object/grpcserver/spec.go | 19 +-- pkg/object/httpserver/mux.go | 2 +- pkg/object/httpserver/runtime.go | 18 +-- pkg/object/httpserver/spec.go | 4 +- 8 files changed, 214 insertions(+), 244 deletions(-) diff --git a/pkg/object/grpcserver/grpcserver.go b/pkg/object/grpcserver/grpcserver.go index 1eeb974648..5f0a0f3dbb 100644 --- a/pkg/object/grpcserver/grpcserver.go +++ b/pkg/object/grpcserver/grpcserver.go @@ -59,21 +59,10 @@ func (g *GRPCServer) DefaultSpec() interface{} { } } -// Status returns the status of GrpcServer. -func (g *GRPCServer) Status() *supervisor.Status { - return &supervisor.Status{ - ObjectStatus: g.runtime.Status(), - } -} - -// Close close GrpcServer -func (g *GRPCServer) Close() { - g.runtime.Close() -} - // Init first create GrpcServer by Spec.name func (g *GRPCServer) Init(superSpec *supervisor.Spec, muxMapper context.MuxMapper) { g.runtime = newRuntime(superSpec, muxMapper) + g.runtime.eventChan <- &eventReload{ nextSuperSpec: superSpec, muxMapper: muxMapper, @@ -83,8 +72,21 @@ func (g *GRPCServer) Init(superSpec *supervisor.Spec, muxMapper context.MuxMappe // Inherit inherits previous generation of GrpcServer. func (g *GRPCServer) Inherit(superSpec *supervisor.Spec, previousGeneration supervisor.Object, muxMapper context.MuxMapper) { g.runtime = previousGeneration.(*GRPCServer).runtime + g.runtime.eventChan <- &eventReload{ nextSuperSpec: superSpec, muxMapper: muxMapper, } } + +// Status returns the status of GrpcServer. +func (g *GRPCServer) Status() *supervisor.Status { + return &supervisor.Status{ + ObjectStatus: g.runtime.Status(), + } +} + +// Close close GrpcServer +func (g *GRPCServer) Close() { + g.runtime.Close() +} diff --git a/pkg/object/grpcserver/mux.go b/pkg/object/grpcserver/mux.go index da95d88fef..509c624eba 100644 --- a/pkg/object/grpcserver/mux.go +++ b/pkg/object/grpcserver/mux.go @@ -19,6 +19,7 @@ package grpcserver import ( "fmt" + "github.com/megaease/easegress/pkg/protocols/grpcprot" "github.com/megaease/easegress/pkg/util/fasttime" "google.golang.org/grpc" @@ -66,18 +67,18 @@ type ( host string hostRegexp string hostRE *regexp.Regexp - paths []*MuxPath + methods []*MuxMethod } - // MuxPath describes httpserver's path - MuxPath struct { + // MuxMethod describes gRPCserver's method + MuxMethod struct { ipFilter *ipfilter.IPFilter ipFilterChain *ipfilter.IPFilters - path string - pathPrefix string - pathRegexp string - pathRE *regexp.Regexp + method string + methodPrefix string + methodRegexp string + methodRE *regexp.Regexp backend string headers []*Header matchAllHeader bool @@ -86,49 +87,13 @@ type ( route struct { code codes.Code message string - path *MuxPath + method *MuxMethod } ) -// newIPFilterChain returns nil if the number of final filters is zero. -func newIPFilterChain(parentIPFilters *ipfilter.IPFilters, childSpec *ipfilter.Spec) *ipfilter.IPFilters { - var ipFilters *ipfilter.IPFilters - if parentIPFilters != nil { - ipFilters = ipfilter.NewIPFilters(parentIPFilters.Filters()...) 
- } else { - ipFilters = ipfilter.NewIPFilters() - } - - if childSpec != nil { - ipFilters.Append(ipfilter.New(childSpec)) - } - - if len(ipFilters.Filters()) == 0 { - return nil - } - - return ipFilters -} - -func newIPFilter(spec *ipfilter.Spec) *ipfilter.IPFilter { - if spec == nil { - return nil - } - - return ipfilter.New(spec) -} - -func allowIP(ipFilter *ipfilter.IPFilter, ip string) bool { - if ipFilter == nil { - return true - } - - return ipFilter.Allow(ip) -} - -func (mi *muxInstance) getRouteFromCache(host, path string) *route { +func (mi *muxInstance) getRouteFromCache(host, method string) *route { if mi.cache != nil { - key := stringtool.Cat(host, path) + key := stringtool.Cat(host, method) if value, ok := mi.cache.Get(key); ok { return value.(*route) } @@ -136,14 +101,14 @@ func (mi *muxInstance) getRouteFromCache(host, path string) *route { return nil } -func (mi *muxInstance) putRouteToCache(host, path string, r *route) { - if mi.cache != nil && host != "" && path != "" { - key := stringtool.Cat(host, path) +func (mi *muxInstance) putRouteToCache(host, method string, r *route) { + if mi.cache != nil && host != "" && method != "" { + key := stringtool.Cat(host, method) mi.cache.Add(key, r) } } -func newMuxRule(parentIPFilters *ipfilter.IPFilters, rule *Rule, paths []*MuxPath) *muxRule { +func newMuxRule(parentIPFilters *ipfilter.IPFilters, rule *Rule, methods []*MuxMethod) *muxRule { var hostRE *regexp.Regexp if rule.HostRegexp != "" { @@ -156,13 +121,13 @@ func newMuxRule(parentIPFilters *ipfilter.IPFilters, rule *Rule, paths []*MuxPat } return &muxRule{ - ipFilter: newIPFilter(rule.IPFilter), - ipFilterChain: newIPFilterChain(parentIPFilters, rule.IPFilter), + ipFilter: ipfilter.New(rule.IPFilter), + ipFilterChain: ipfilter.NewIPFilterChain(parentIPFilters, rule.IPFilter), host: rule.Host, hostRegexp: rule.HostRegexp, hostRE: hostRE, - paths: paths, + methods: methods, } } @@ -181,48 +146,48 @@ func (mr *muxRule) match(host string) bool { return false } -func newMuxPath(parentIPFilters *ipfilter.IPFilters, path *Path) *MuxPath { - var pathRE *regexp.Regexp - if path.PathRegexp != "" { +func newMuxMethod(parentIPFilters *ipfilter.IPFilters, method *Method) *MuxMethod { + var methodRE *regexp.Regexp + if method.MethodRegexp != "" { var err error - pathRE, err = regexp.Compile(path.PathRegexp) + methodRE, err = regexp.Compile(method.MethodRegexp) // defensive programming if err != nil { - logger.Errorf("BUG: compile %s failed: %v", path.PathRegexp, err) + logger.Errorf("BUG: compile %s failed: %v", method.MethodRegexp, err) } } - for _, p := range path.Headers { + for _, p := range method.Headers { p.initHeaderRoute() } - return &MuxPath{ - ipFilter: newIPFilter(path.IPFilter), - ipFilterChain: newIPFilterChain(parentIPFilters, path.IPFilter), + return &MuxMethod{ + ipFilter: ipfilter.New(method.IPFilter), + ipFilterChain: ipfilter.NewIPFilterChain(parentIPFilters, method.IPFilter), - path: path.Path, - pathPrefix: path.PathPrefix, - pathRegexp: path.PathRegexp, - pathRE: pathRE, - backend: path.Backend, - headers: path.Headers, - matchAllHeader: path.MatchAllHeader, + method: method.Method, + methodPrefix: method.MethodPrefix, + methodRegexp: method.MethodRegexp, + methodRE: methodRE, + backend: method.Backend, + headers: method.Headers, + matchAllHeader: method.MatchAllHeader, } } -func (mp *MuxPath) matchPath(path string) bool { - if mp.path == "" && mp.pathPrefix == "" && mp.pathRE == nil { +func (mm *MuxMethod) matchMethod(method string) bool { + if mm.method == "" && 
mm.methodPrefix == "" && mm.methodRE == nil { return true } - if mp.path != "" && mp.path == path { + if mm.method != "" && mm.method == method { return true } - if mp.pathPrefix != "" && strings.HasPrefix(path, mp.pathPrefix) { + if mm.methodPrefix != "" && strings.HasPrefix(method, mm.methodPrefix) { return true } - if mp.pathRE != nil { - return mp.pathRE.MatchString(path) + if mm.methodRE != nil { + return mm.methodRE.MatchString(method) } return false @@ -239,9 +204,9 @@ func matchHeader(header string, h *Header) bool { return false } -func (mp *MuxPath) matchHeaders(r *grpcprot.Request) bool { - if mp.matchAllHeader { - for _, h := range mp.headers { +func (mm *MuxMethod) matchHeaders(r *grpcprot.Request) bool { + if mm.matchAllHeader { + for _, h := range mm.headers { v := r.RawHeader().RawGet(h.Key) if len(v) == 0 { if !matchHeader("", h) { @@ -256,7 +221,7 @@ func (mp *MuxPath) matchHeaders(r *grpcprot.Request) bool { } } } else { - for _, h := range mp.headers { + for _, h := range mm.headers { v := r.RawHeader().RawGet(h.Key) if len(v) == 0 { if matchHeader("", h) { @@ -272,7 +237,7 @@ func (mp *MuxPath) matchHeaders(r *grpcprot.Request) bool { } } - return mp.matchAllHeader + return mm.matchAllHeader } func newMux(mapper context.MuxMapper) *mux { @@ -293,8 +258,8 @@ func (m *mux) reload(superSpec *supervisor.Spec, muxMapper context.MuxMapper) { superSpec: superSpec, spec: spec, muxMapper: muxMapper, - ipFilter: newIPFilter(spec.IPFilter), - ipFilterChan: newIPFilterChain(nil, spec.IPFilter), + ipFilter: ipfilter.New(spec.IPFilter), + ipFilterChan: ipfilter.NewIPFilterChain(nil, spec.IPFilter), rules: make([]*muxRule, len(spec.Rules)), } @@ -309,15 +274,15 @@ func (m *mux) reload(superSpec *supervisor.Spec, muxMapper context.MuxMapper) { for i := 0; i < len(inst.rules); i++ { specRule := spec.Rules[i] - ruleIPFilterChain := newIPFilterChain(inst.ipFilterChan, specRule.IPFilter) + ruleIPFilterChain := ipfilter.NewIPFilterChain(inst.ipFilterChan, specRule.IPFilter) - paths := make([]*MuxPath, len(specRule.Paths)) - for j := 0; j < len(paths); j++ { - paths[j] = newMuxPath(ruleIPFilterChain, specRule.Paths[j]) + methods := make([]*MuxMethod, len(specRule.Methods)) + for j := 0; j < len(methods); j++ { + methods[j] = newMuxMethod(ruleIPFilterChain, specRule.Methods[j]) } // NOTE: Given the parent ipFilters not its own. 
- inst.rules[i] = newMuxRule(inst.ipFilterChan, specRule, paths) + inst.rules[i] = newMuxRule(inst.ipFilterChan, specRule, methods) } m.inst.Store(inst) @@ -369,7 +334,7 @@ func (mi *muxInstance) handler(c chan<- error, request *grpcprot.Request) { // log format: // // [$startTime] - // [$clientAddr $path $statusCode] + // [$clientAddr $method $statusCode] // [$tags] const logFmt = "[grpc][%s] [%s %s %d] [%s]" return fmt.Sprintf(logFmt, @@ -385,10 +350,10 @@ func (mi *muxInstance) handler(c chan<- error, request *grpcprot.Request) { return } - handler, ok := mi.muxMapper.GetHandler(rt.path.backend) + handler, ok := mi.muxMapper.GetHandler(rt.method.backend) if !ok { - logger.Debugf("%s: backend %q not found", mi.superSpec.Name(), rt.path.backend) - buildFailureResponse(ctx, status.Newf(codes.NotFound, "%s: backend %q not found", mi.superSpec.Name(), rt.path.backend)) + logger.Debugf("%s: backend %q not found", mi.superSpec.Name(), rt.method.backend) + buildFailureResponse(ctx, status.Newf(codes.NotFound, "%s: backend %q not found", mi.superSpec.Name(), rt.method.backend)) return } @@ -419,8 +384,8 @@ func (mi *muxInstance) search(request *grpcprot.Request) *route { } // grpc's method equals request.path in standard lib - method := request.FullMethod() - if method == "" { + fullMethod := request.FullMethod() + if fullMethod == "" { logger.Debugf("invalid grpc stream: can not get called method info") return &route{ code: codes.NotFound, @@ -429,18 +394,17 @@ func (mi *muxInstance) search(request *grpcprot.Request) *route { } // The key of the cache is grpc server address + called method - // in grpc, called method means url path - // and if a path is cached, we are sure it does not contain any + // and if a method is cached, we are sure it does not contain any // headers. - r := mi.getRouteFromCache(request.Host(), method) + r := mi.getRouteFromCache(request.Host(), fullMethod) if r != nil { if r.code != 0 { return r } - if r.path.ipFilterChain == nil { + if r.method.ipFilterChain == nil { return r } - if r.path.ipFilterChain.Allow(ip) { + if r.method.ipFilterChain.Allow(ip) { return r } return &route{ @@ -449,7 +413,7 @@ func (mi *muxInstance) search(request *grpcprot.Request) *route { } } - if !allowIP(mi.ipFilter, ip) { + if !mi.ipFilter.Allow(ip) { return &route{ code: codes.PermissionDenied, message: "request isn't allowed", @@ -461,35 +425,35 @@ func (mi *muxInstance) search(request *grpcprot.Request) *route { continue } - if !allowIP(rs.ipFilter, ip) { + if !rs.ipFilter.Allow(ip) { return &route{ code: codes.PermissionDenied, message: "request isn't allowed", } } - for _, path := range rs.paths { - if !path.matchPath(method) { + for _, method := range rs.methods { + if !method.matchMethod(fullMethod) { continue } - // The path can be put into the cache if it has no headers. - if len(path.headers) == 0 { - r = &route{code: 0, path: path} - mi.putRouteToCache(request.Host(), method, r) - } else if !path.matchHeaders(request) { + // The method can be put into the cache if it has no headers. 
+ if len(method.headers) == 0 { + r = &route{code: 0, method: method} + mi.putRouteToCache(request.Host(), fullMethod, r) + } else if !method.matchHeaders(request) { headerMismatch = true continue } - if !allowIP(path.ipFilter, ip) { + if !method.ipFilter.Allow(ip) { return &route{ code: codes.PermissionDenied, message: "request isn't allowed", } } - return &route{code: 0, path: path} + return &route{code: 0, method: method} } } @@ -504,7 +468,7 @@ func (mi *muxInstance) search(request *grpcprot.Request) *route { code: codes.NotFound, message: "grpc stream miss match any conditions", } - mi.putRouteToCache(request.Host(), method, notFound) + mi.putRouteToCache(request.Host(), fullMethod, notFound) return notFound } diff --git a/pkg/object/grpcserver/mux_test.go b/pkg/object/grpcserver/mux_test.go index 5b62afa0c7..18805ff210 100644 --- a/pkg/object/grpcserver/mux_test.go +++ b/pkg/object/grpcserver/mux_test.go @@ -19,13 +19,14 @@ package grpcserver import ( stdcontext "context" + "testing" + "github.com/megaease/easegress/pkg/context/contexttest" "github.com/megaease/easegress/pkg/protocols/grpcprot" "github.com/megaease/easegress/pkg/supervisor" "github.com/stretchr/testify/assert" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" - "testing" ) func newTestMux(yamlSpec string, at *assert.Assertions) (*mux, *muxInstance) { @@ -105,7 +106,7 @@ ipFilter: assert.Equal(codes.NotFound, ins.search(req).code) } -func TestSearchPath(t *testing.T) { +func TestSearchMethod(t *testing.T) { assertions := assert.New(t) yamlSpec := ` @@ -116,8 +117,8 @@ port: 8850 name: server-grpc rules: - host: 127.0.0.1 - paths: - - path: "/abd" + methods: + - method: "/abd" backend: "test-demo" ` _, ins := newTestMux(yamlSpec, assertions) @@ -140,7 +141,7 @@ rules: request.SetFullMethod("/abd") search = ins.search(request) assertions.Equal(codes.OK, search.code) - assertions.Equal("test-demo", search.path.backend) + assertions.Equal("test-demo", search.method.backend) } func TestHeader(t *testing.T) { @@ -153,7 +154,7 @@ maxConnectionIdle: 60s port: 8850 name: server-grpc rules: - - paths: + - methods: - backend: "test-demo" headers: - key: "array" diff --git a/pkg/object/grpcserver/runtime.go b/pkg/object/grpcserver/runtime.go index 67bb79e0fa..9067d26cd3 100644 --- a/pkg/object/grpcserver/runtime.go +++ b/pkg/object/grpcserver/runtime.go @@ -50,29 +50,30 @@ var ( type ( stateType string - eventReload struct { - nextSuperSpec *supervisor.Spec - muxMapper context.MuxMapper - } eventCheckFailed struct{} eventServeFailed struct { roundNum uint64 err error } + eventReload struct { + nextSuperSpec *supervisor.Spec + muxMapper context.MuxMapper + } eventClose struct{ done chan struct{} } runtime struct { superSpec *supervisor.Spec - s *grpc.Server spec *Spec + s *grpc.Server mux *mux + roundNum uint64 + eventChan chan interface{} + // status - state atomic.Value // stateType - err atomic.Value - limitListener *limitlistener.LimitListener - roundNum uint64 + state atomic.Value // stateType + err atomic.Value // error - eventChan chan interface{} + limitListener *limitlistener.LimitListener } // Status contains all status generated by runtime, for displaying to users. Status struct { @@ -95,6 +96,13 @@ func newRuntime(superSpec *supervisor.Spec, muxMapper context.MuxMapper) *runtim return r } +// Close closes runtime. +func (r *runtime) Close() { + done := make(chan struct{}) + r.eventChan <- &eventClose{done: done} + <-done +} + // Status is the wrapper of runtime's Status. 
func (r *runtime) Status() *Status { err := r.getError() @@ -105,6 +113,28 @@ func (r *runtime) Status() *Status { } } +// FSM is the finite-state-machine for the runtime. +func (r *runtime) fsm() { + for e := range r.eventChan { + switch e := e.(type) { + case *eventCheckFailed: + r.handleEventCheckFailed(e) + case *eventServeFailed: + r.handleEventServeFailed(e) + case *eventReload: + r.handleEventReload(e) + case *eventClose: + r.handleEventClose(e) + // NOTE: We don't close hs.eventChan, + // in case of panic of any other goroutines + // to send event to it later. + return + default: + logger.Errorf("BUG: unknown event: %T\n", e) + } + } +} + func (r *runtime) reload(nextSuperSpec *supervisor.Spec, muxMapper context.MuxMapper) { r.superSpec = nextSuperSpec r.mux.reload(nextSuperSpec, muxMapper) @@ -113,7 +143,7 @@ func (r *runtime) reload(nextSuperSpec *supervisor.Spec, muxMapper context.MuxMa // r.limitListener is not created just after the process started and the config load for the first time. if nextSpec != nil && r.limitListener != nil { - r.limitListener.SetMaxConnection(uint32(nextSpec.MaxConnections)) + r.limitListener.SetMaxConnection(nextSpec.MaxConnections) } // NOTE: Due to the mechanism of supervisor, @@ -140,69 +170,6 @@ func (r *runtime) reload(nextSuperSpec *supervisor.Spec, muxMapper context.MuxMa } } -func (r *runtime) checkFailed(timeout time.Duration) { - ticker := time.NewTicker(timeout) - for range ticker.C { - state := r.getState() - if state == stateFailed { - r.eventChan <- &eventCheckFailed{} - } else if state == stateClosed { - ticker.Stop() - return - } - } -} - -func (r *runtime) needRestartServer(nextSpec *Spec) bool { - x := *r.spec - y := *nextSpec - - // The change of options below need not restart the HTTP server. - x.MaxConnections, y.MaxConnections = 0, 0 - x.CacheSize, y.CacheSize = 0, 0 - x.XForwardedFor, y.XForwardedFor = false, false - x.IPFilter, y.IPFilter = nil, nil - x.Rules, y.Rules = nil, nil - - return !reflect.DeepEqual(x, y) -} - -// FSM is the finite-state-machine for the runtime. -func (r *runtime) fsm() { - for e := range r.eventChan { - switch e := e.(type) { - case *eventCheckFailed: - r.handleEventCheckFailed(e) - case *eventServeFailed: - r.handleEventServeFailed(e) - case *eventReload: - r.handleEventReload(e) - case *eventClose: - r.handleEventClose(e) - // NOTE: We don't close hs.eventChan, - // in case of panic of any other goroutines - // to send event to it later. - return - default: - logger.Errorf("BUG: unknown event: %T\n", e) - } - } -} - -func (r *runtime) handleEventCheckFailed(e *eventCheckFailed) { - if r.getState() == stateFailed { - r.startServer() - } -} - -func (r *runtime) handleEventServeFailed(e *eventServeFailed) { - if atomic.LoadUint64(&r.roundNum) > e.roundNum { - return - } - r.setState(stateFailed) - r.setError(e.err) -} - func (r *runtime) setState(state stateType) { r.state.Store(state) } @@ -211,8 +178,13 @@ func (r *runtime) getState() stateType { return r.state.Load().(stateType) } -func (r *runtime) handleEventReload(e *eventReload) { - r.reload(e.nextSuperSpec, e.muxMapper) +func (r *runtime) setError(err error) { + if err == nil { + r.err.Store(errNil) + } else { + // NOTE: For type safe. + r.err.Store(fmt.Errorf("%v", err)) + } } func (r *runtime) getError() error { @@ -223,13 +195,18 @@ func (r *runtime) getError() error { return err.(error) } -func (r *runtime) setError(err error) { - if err == nil { - r.err.Store(errNil) - } else { - // NOTE: For type safe. 
- r.err.Store(fmt.Errorf("%v", err)) - } +func (r *runtime) needRestartServer(nextSpec *Spec) bool { + x := *r.spec + y := *nextSpec + + // The change of options below need not restart the HTTP server. + x.MaxConnections, y.MaxConnections = 0, 0 + x.CacheSize, y.CacheSize = 0, 0 + x.XForwardedFor, y.XForwardedFor = false, false + x.IPFilter, y.IPFilter = nil, nil + x.Rules, y.Rules = nil, nil + + return !reflect.DeepEqual(x, y) } func (r *runtime) startServer() { @@ -302,6 +279,43 @@ func (r *runtime) buildServerKeepaliveOpt() []grpc.ServerOption { return opts } +func (r *runtime) closeServer() { + if r.s != nil { + r.s.GracefulStop() + } +} + +func (r *runtime) checkFailed(timeout time.Duration) { + ticker := time.NewTicker(timeout) + for range ticker.C { + state := r.getState() + if state == stateFailed { + r.eventChan <- &eventCheckFailed{} + } else if state == stateClosed { + ticker.Stop() + return + } + } +} + +func (r *runtime) handleEventCheckFailed(e *eventCheckFailed) { + if r.getState() == stateFailed { + r.startServer() + } +} + +func (r *runtime) handleEventServeFailed(e *eventServeFailed) { + if atomic.LoadUint64(&r.roundNum) > e.roundNum { + return + } + r.setState(stateFailed) + r.setError(e.err) +} + +func (r *runtime) handleEventReload(e *eventReload) { + r.reload(e.nextSuperSpec, e.muxMapper) +} + func (r *runtime) handleEventClose(e *eventClose) { r.setState(stateClosed) r.setError(errNil) @@ -309,15 +323,3 @@ func (r *runtime) handleEventClose(e *eventClose) { r.mux.close() close(e.done) } - -func (r *runtime) Close() { - done := make(chan struct{}) - r.eventChan <- &eventClose{done: done} - <-done -} - -func (r *runtime) closeServer() { - if r.s != nil { - r.s.GracefulStop() - } -} diff --git a/pkg/object/grpcserver/spec.go b/pkg/object/grpcserver/spec.go index 4b996ec1e2..60d6717e96 100644 --- a/pkg/object/grpcserver/spec.go +++ b/pkg/object/grpcserver/spec.go @@ -19,15 +19,16 @@ package grpcserver import ( "fmt" - "github.com/megaease/easegress/pkg/util/ipfilter" "regexp" + + "github.com/megaease/easegress/pkg/util/ipfilter" ) type ( // Spec describe gRPC server Spec struct { Port uint16 `json:"port" jsonschema:"required,minimum=1025"` - MaxConnections uint16 `json:"maxConnections" jsonschema:"omitempty,minimum=1"` + MaxConnections uint32 `json:"maxConnections" jsonschema:"omitempty,minimum=1"` // MinTime see keepalive.EnforcementPolicy MinTime string `json:"minTimeClientSendPing" jsonschema:"omitempty,format=duration"` @@ -63,15 +64,15 @@ type ( IPFilter *ipfilter.Spec `json:"ipFilter,omitempty" jsonschema:"omitempty"` Host string `json:"host" jsonschema:"omitempty"` HostRegexp string `json:"hostRegexp" jsonschema:"omitempty,format=regexp"` - Paths []*Path `json:"paths" jsonschema:"omitempty"` + Methods []*Method `json:"methods" jsonschema:"omitempty"` } - // Path is second level entry of router. - Path struct { + // Method is second level entry of router. 
+ Method struct { IPFilter *ipfilter.Spec `json:"ipFilter,omitempty" jsonschema:"omitempty"` - Path string `json:"path,omitempty" jsonschema:"omitempty,pattern=^/"` - PathPrefix string `json:"pathPrefix,omitempty" jsonschema:"omitempty,pattern=^/"` - PathRegexp string `json:"pathRegexp,omitempty" jsonschema:"omitempty,format=regexp"` + Method string `json:"method,omitempty" jsonschema:"omitempty,pattern=^/"` + MethodPrefix string `json:"methodPrefix,omitempty" jsonschema:"omitempty,pattern=^/"` + MethodRegexp string `json:"methodRegexp,omitempty" jsonschema:"omitempty,format=regexp"` Backend string `json:"backend" jsonschema:"required"` Headers []*Header `json:"headers" jsonschema:"omitempty"` MatchAllHeader bool `json:"matchAllHeader" jsonschema:"omitempty"` @@ -94,7 +95,7 @@ func (h *Header) initHeaderRoute() { } // Validate validates Path. -func (p *Path) Validate() error { +func (m *Method) Validate() error { return nil } diff --git a/pkg/object/httpserver/mux.go b/pkg/object/httpserver/mux.go index f8b61065b6..3585278550 100644 --- a/pkg/object/httpserver/mux.go +++ b/pkg/object/httpserver/mux.go @@ -181,7 +181,7 @@ func (m *mux) reload(superSpec *supervisor.Spec, muxMapper context.MuxMapper) { httpStat: m.httpStat, topN: m.topN, metrics: oldInst.metrics, - ipFilter: ipfilter.New(spec.IPFilterSpec), + ipFilter: ipfilter.New(spec.IPFilter), tracer: tracer, accessLogFormatter: newAccessLogFormatter(spec.AccessLogFormat), } diff --git a/pkg/object/httpserver/runtime.go b/pkg/object/httpserver/runtime.go index a17840c82f..487866f4d3 100644 --- a/pkg/object/httpserver/runtime.go +++ b/pkg/object/httpserver/runtime.go @@ -66,7 +66,7 @@ type ( eventCheckFailed struct{} eventServeFailed struct { - startNum uint64 + roundNum uint64 err error } eventReload struct { @@ -81,7 +81,7 @@ type ( server *http.Server server3 *http3.Server mux *mux - startNum uint64 + roundNum uint64 eventChan chan interface{} // status @@ -239,7 +239,7 @@ func (r *runtime) needRestartServer(nextSpec *Spec) bool { x.CacheSize, y.CacheSize = 0, 0 x.XForwardedFor, y.XForwardedFor = false, false x.Tracing, y.Tracing = nil, nil - x.IPFilterSpec, y.IPFilterSpec = nil, nil + x.IPFilter, y.IPFilter = nil, nil x.Rules, y.Rules = nil, nil // The update of rules need not to shutdown server. 
@@ -247,7 +247,7 @@ func (r *runtime) needRestartServer(nextSpec *Spec) bool { } func (r *runtime) startServer() { - r.startNum++ + r.roundNum++ r.setState(stateRunning) r.setError(nil) @@ -279,14 +279,14 @@ func (r *runtime) startHTTP3Server() { } // to avoid data race - startNum := r.startNum + roundNum := r.roundNum srv := r.server3 go func() { if err := srv.ListenAndServe(); err != http.ErrServerClosed { r.eventChan <- &eventServeFailed{ err: err, - startNum: startNum, + roundNum: roundNum, } } }() @@ -320,7 +320,7 @@ func (r *runtime) startHTTP1And2Server() { // to avoid data race spec := r.spec - startNum := r.startNum + roundNum := r.roundNum srv := r.server go func() { @@ -335,7 +335,7 @@ func (r *runtime) startHTTP1And2Server() { if err != http.ErrServerClosed { r.eventChan <- &eventServeFailed{ err: err, - startNum: startNum, + roundNum: roundNum, } } }() @@ -382,7 +382,7 @@ func (r *runtime) handleEventCheckFailed(e *eventCheckFailed) { } func (r *runtime) handleEventServeFailed(e *eventServeFailed) { - if r.startNum > e.startNum { + if r.roundNum > e.roundNum { return } r.setState(stateFailed) diff --git a/pkg/object/httpserver/spec.go b/pkg/object/httpserver/spec.go index ada6967eec..4e7b8d4b88 100644 --- a/pkg/object/httpserver/spec.go +++ b/pkg/object/httpserver/spec.go @@ -57,8 +57,8 @@ type ( RouterKind string `json:"routerKind,omitempty" jsonschema:"omitempty,enum=,enum=Ordered,enum=RadixTree"` - IPFilterSpec *ipfilter.Spec `json:"ipFilter,omitempty" jsonschema:"omitempty"` - Rules routers.Rules `json:"rules" jsonschema:"omitempty"` + IPFilter *ipfilter.Spec `json:"ipFilter,omitempty" jsonschema:"omitempty"` + Rules routers.Rules `json:"rules" jsonschema:"omitempty"` GlobalFilter string `json:"globalFilter,omitempty" jsonschema:"omitempty"` From 0bee4bbae9da98bc1fc68277d01064a3b49f1f11 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Thu, 9 Feb 2023 10:22:17 +0800 Subject: [PATCH 12/14] Apply suggestions from code review Co-authored-by: Groot --- pkg/filters/proxies/grpcproxy/requestmatch.go | 2 +- pkg/filters/proxies/httpproxy/requestmatch.go | 2 +- pkg/filters/proxies/requestmatch.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/filters/proxies/grpcproxy/requestmatch.go b/pkg/filters/proxies/grpcproxy/requestmatch.go index c0f728fbc7..cdd1626435 100644 --- a/pkg/filters/proxies/grpcproxy/requestmatch.go +++ b/pkg/filters/proxies/grpcproxy/requestmatch.go @@ -30,7 +30,7 @@ type RequestMatcherSpec struct { Methods []*stringtool.StringMatcher `json:"methods" jsonschema:"omitempty"` } -// Validate validtes the RequestMatcherSpec. +// Validate validates the RequestMatcherSpec. func (s *RequestMatcherSpec) Validate() error { if err := s.RequestMatcherBaseSpec.Validate(); err != nil { return err diff --git a/pkg/filters/proxies/httpproxy/requestmatch.go b/pkg/filters/proxies/httpproxy/requestmatch.go index 904a846e02..1f14a0a51a 100644 --- a/pkg/filters/proxies/httpproxy/requestmatch.go +++ b/pkg/filters/proxies/httpproxy/requestmatch.go @@ -30,7 +30,7 @@ type RequestMatcherSpec struct { URLs []*MethodAndURLMatcher `json:"urls" jsonschema:"omitempty"` } -// Validate validtes the RequestMatcherSpec. +// Validate validates the RequestMatcherSpec. 
func (s *RequestMatcherSpec) Validate() error { if err := s.RequestMatcherBaseSpec.Validate(); err != nil { return err diff --git a/pkg/filters/proxies/requestmatch.go b/pkg/filters/proxies/requestmatch.go index 3f9d2db931..07e7a670b1 100644 --- a/pkg/filters/proxies/requestmatch.go +++ b/pkg/filters/proxies/requestmatch.go @@ -41,7 +41,7 @@ type RequestMatcherBaseSpec struct { HeaderHashKey string `json:"headerHashKey" jsonschema:"omitempty"` } -// Validate validtes the RequestMatcherBaseSpec. +// Validate validates the RequestMatcherBaseSpec. func (s *RequestMatcherBaseSpec) Validate() error { if s.Policy == "general" || s.Policy == "" { if len(s.Headers) == 0 { From 8cae22c0261716c9de029fa997fc133b879ec7e3 Mon Sep 17 00:00:00 2001 From: Bomin Zhang Date: Thu, 9 Feb 2023 17:08:41 +0800 Subject: [PATCH 13/14] fix github action warnings --- build/test/server.go | 1 + cmd/client/command/common.go | 1 + cmd/client/main.go | 1 + cmd/server/main.go | 1 + example/backend-service/echo/echo.go | 1 + example/backend-service/mirror/mirror.go | 1 + example/backend-service/remote/remote.go | 1 + pkg/api/api.go | 1 + pkg/cluster/cluster.go | 1 + pkg/cluster/clustertest/cluster.go | 2 + pkg/cluster/customdata/customdata.go | 1 + pkg/common/utils.go | 1 + pkg/context/context.go | 1 + pkg/context/contexttest/contexttest.go | 1 + pkg/env/env.go | 1 + pkg/filters/builder/builder.go | 1 + pkg/filters/certextractor/certextractor.go | 1 + pkg/filters/connectcontrol/connectcontrol.go | 1 + pkg/filters/corsadaptor/corsadaptor.go | 1 + pkg/filters/fallback/fallback.go | 1 + pkg/filters/filters.go | 1 + pkg/filters/headerlookup/headerlookup.go | 2 + pkg/filters/headertojson/headertojson.go | 1 + pkg/filters/kafka/kafka.go | 5 +- pkg/filters/kafka/kafka_test.go | 4 +- pkg/filters/kafkabackend/kafka.go | 5 +- pkg/filters/kafkabackend/kafka_test.go | 4 +- pkg/filters/meshadaptor/meshadaptor.go | 1 + pkg/filters/mock/mock.go | 1 + pkg/filters/mqttclientauth/mqttauth.go | 1 + pkg/filters/oidcadaptor/oidcadaptor.go | 69 +++++++++++-------- pkg/filters/opafilter/opafilter.go | 11 +++ pkg/filters/proxies/grpcproxy/loadbalance.go | 2 +- pkg/filters/proxies/grpcproxy/proxy.go | 16 +++-- pkg/filters/proxies/httpproxy/proxy.go | 16 +++-- pkg/filters/proxies/server.go | 1 + pkg/filters/ratelimiter/ratelimiter.go | 1 + pkg/filters/redirector/redirector.go | 2 + pkg/filters/remotefilter/remotefilter.go | 1 + pkg/filters/requestadaptor/requestadaptor.go | 1 + .../responseadaptor/responseadaptor.go | 1 + pkg/filters/topicmapper/topicmapper.go | 1 + pkg/filters/validator/validator.go | 1 + pkg/filters/wasmhost/doc.go | 1 + pkg/graceupdate/graceupdate.go | 1 + pkg/logger/logger.go | 6 +- pkg/object/autocertmanager/autocertmanager.go | 1 + .../consulserviceregistry.go | 1 + .../easemonitormetrics/easemonitormetrics.go | 1 + .../etcdserviceregistry.go | 1 + .../eurekaserviceregistry.go | 1 + pkg/object/function/faascontroller.go | 1 + pkg/object/function/provider/provider.go | 1 + pkg/object/function/spec/fsm.go | 1 + pkg/object/function/storage/storage.go | 1 + pkg/object/function/worker/worker.go | 1 + pkg/object/globalfilter/globalfilter.go | 1 + pkg/object/grpcserver/grpcserver.go | 1 + pkg/object/httpserver/httpserver.go | 1 + .../httpserver/routers/ordered/router.go | 1 + .../httpserver/routers/radixtree/router.go | 1 + pkg/object/httpserver/routers/routers.go | 2 + .../ingresscontroller/ingresscontroller.go | 1 + pkg/object/meshcontroller/api/api.go | 1 + .../meshcontroller/certmanager/certmanager.go | 1 + 
.../meshcontroller/informer/informer.go | 1 + .../ingresscontroller/ingresscontroller.go | 1 + pkg/object/meshcontroller/label/label.go | 1 + pkg/object/meshcontroller/layout/layout.go | 1 + pkg/object/meshcontroller/master/master.go | 1 + pkg/object/meshcontroller/meshcontroller.go | 1 + .../registrycenter/registrycenter.go | 1 + pkg/object/meshcontroller/service/service.go | 1 + pkg/object/meshcontroller/spec/spec.go | 1 + pkg/object/meshcontroller/storage/storage.go | 1 + pkg/object/meshcontroller/worker/worker.go | 1 + pkg/object/mqttproxy/mqttproxy.go | 1 + pkg/object/mqttproxy/topicmgr.go | 1 + .../nacosserviceregistry.go | 1 + pkg/object/pipeline/pipeline.go | 1 + .../rawconfigtrafficcontroller.go | 1 + pkg/object/serviceregistry/serviceregistry.go | 1 + .../statussynccontroller.go | 1 + .../trafficcontroller/trafficcontroller.go | 1 + .../zookeeperserviceregistry.go | 1 + pkg/option/option.go | 1 + pkg/pidfile/pidfile.go | 1 + pkg/profile/profile.go | 1 + pkg/protocols/grpcprot/fake.go | 4 ++ pkg/protocols/grpcprot/grpc.go | 14 +++- pkg/protocols/grpcprot/header.go | 11 +++ pkg/protocols/grpcprot/request.go | 6 ++ pkg/protocols/grpcprot/response.go | 21 +++++- pkg/protocols/httpprot/http.go | 1 + .../httpprot/httpheader/httpheader.go | 1 + pkg/protocols/httpprot/httpstat/httpstat.go | 1 + pkg/protocols/httpprot/response.go | 1 + pkg/protocols/mqttprot/mqtt.go | 1 + pkg/protocols/mqttprot/response.go | 1 + pkg/protocols/protocols.go | 2 + pkg/registry/registry.go | 2 + pkg/resilience/resilience.go | 1 + pkg/supervisor/supervisor.go | 1 + pkg/tracing/tracing.go | 1 + pkg/util/circuitbreaker/circuitbreaker.go | 1 + pkg/util/codecounter/codecounter.go | 1 + pkg/util/codectool/codectool.go | 1 + pkg/util/dynamicobject/dynamicobject.go | 1 + pkg/util/easemonitor/easemonitor.go | 1 + pkg/util/fasttime/fasttime.go | 1 + pkg/util/filterwriter/filterwriter.go | 1 + pkg/util/ipfilter/ipfilter.go | 1 + pkg/util/jmxtool/common.go | 1 + pkg/util/k8s/k8s.go | 1 + pkg/util/limitlistener/limitlistener.go | 1 + pkg/util/pathadaptor/pathadaptor.go | 1 + pkg/util/prometheushelper/helper.go | 1 + pkg/util/ratelimiter/ratelimiter.go | 1 + pkg/util/readers/readerat.go | 1 + pkg/util/sampler/sampler.go | 1 + pkg/util/sem/semaphore.go | 1 + pkg/util/signer/signer.go | 1 + pkg/util/stringtool/stringtool.go | 2 + pkg/util/timetool/distributedtimer.go | 1 + .../urlclusteranalyzer/urlclusteranalyzer.go | 1 + pkg/util/urlrule/urlrule.go | 1 + pkg/v/v.go | 1 + pkg/version/version.go | 1 + 128 files changed, 259 insertions(+), 55 deletions(-) diff --git a/build/test/server.go b/build/test/server.go index 125f4532ae..ffb40349d5 100644 --- a/build/test/server.go +++ b/build/test/server.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package test is for integration testing. package test import ( diff --git a/cmd/client/command/common.go b/cmd/client/command/common.go index c4d9e29e4b..e334848e80 100644 --- a/cmd/client/command/common.go +++ b/cmd/client/command/common.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package command implements commands of Easegress client. package command import ( diff --git a/cmd/client/main.go b/cmd/client/main.go index 404ceeb312..cc7d512a55 100644 --- a/cmd/client/main.go +++ b/cmd/client/main.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package main is the entry point of Easegress client. 
package main import ( diff --git a/cmd/server/main.go b/cmd/server/main.go index 57a215b5a0..019c433a59 100644 --- a/cmd/server/main.go +++ b/cmd/server/main.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package main is the entry point of Easegress server. package main import ( diff --git a/example/backend-service/echo/echo.go b/example/backend-service/echo/echo.go index e7ccaecd2a..12f837b5a8 100644 --- a/example/backend-service/echo/echo.go +++ b/example/backend-service/echo/echo.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package main is the entry point of the simple echo server. package main import ( diff --git a/example/backend-service/mirror/mirror.go b/example/backend-service/mirror/mirror.go index ed4263c4ea..366c5aadba 100644 --- a/example/backend-service/mirror/mirror.go +++ b/example/backend-service/mirror/mirror.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package main is the entry point of the simple mirror server. package main import ( diff --git a/example/backend-service/remote/remote.go b/example/backend-service/remote/remote.go index 987b2889c5..0fa89b71c6 100644 --- a/example/backend-service/remote/remote.go +++ b/example/backend-service/remote/remote.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package main is the entry point of the simple Easegress remote server. package main import ( diff --git a/pkg/api/api.go b/pkg/api/api.go index 479d75ea69..dfd0f22269 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package api implements the HTTP API of Easegress. package api import ( diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index c70507744d..2321386445 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package cluster provides the cluster management. package cluster import ( diff --git a/pkg/cluster/clustertest/cluster.go b/pkg/cluster/clustertest/cluster.go index 3707042b27..441906b9d3 100644 --- a/pkg/cluster/clustertest/cluster.go +++ b/pkg/cluster/clustertest/cluster.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package clustertest provides a mocked cluster for testing. package clustertest import ( @@ -124,6 +125,7 @@ func (mc *MockedCluster) Put(key, value string) error { return nil } +// PutUnderTimeout implements interface function PutUnderTimeout func (mc *MockedCluster) PutUnderTimeout(key, value string, timeout time.Duration) error { if mc.MockedPutUnderTimeout != nil { return mc.MockedPutUnderTimeout(key, value, timeout) diff --git a/pkg/cluster/customdata/customdata.go b/pkg/cluster/customdata/customdata.go index c053988439..8217f6f2b4 100644 --- a/pkg/cluster/customdata/customdata.go +++ b/pkg/cluster/customdata/customdata.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package customdata provides a way to store custom data in Easegress cluster. package customdata import ( diff --git a/pkg/common/utils.go b/pkg/common/utils.go index a8d78b121a..5d0cb24edc 100644 --- a/pkg/common/utils.go +++ b/pkg/common/utils.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package common provides several common utilities for other packages. package common import ( diff --git a/pkg/context/context.go b/pkg/context/context.go index b689c84559..16c56fe16e 100644 --- a/pkg/context/context.go +++ b/pkg/context/context.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package context provides the context for traffic handlers. 
package context import ( diff --git a/pkg/context/contexttest/contexttest.go b/pkg/context/contexttest/contexttest.go index 6c00f6a1a5..81157e6122 100644 --- a/pkg/context/contexttest/contexttest.go +++ b/pkg/context/contexttest/contexttest.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package contexttest provides utilities for testing context. package contexttest import "github.com/megaease/easegress/pkg/context" diff --git a/pkg/env/env.go b/pkg/env/env.go index 6209fcb24e..f19ef3590b 100644 --- a/pkg/env/env.go +++ b/pkg/env/env.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package env provides functions for environment variables. package env import ( diff --git a/pkg/filters/builder/builder.go b/pkg/filters/builder/builder.go index 393babccc2..f2b8a8191b 100644 --- a/pkg/filters/builder/builder.go +++ b/pkg/filters/builder/builder.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package builder implements builder filters. package builder import ( diff --git a/pkg/filters/certextractor/certextractor.go b/pkg/filters/certextractor/certextractor.go index aca1f19677..914b7eecaf 100644 --- a/pkg/filters/certextractor/certextractor.go +++ b/pkg/filters/certextractor/certextractor.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package certextractor implements a filter to extract given field from TLS. package certextractor import ( diff --git a/pkg/filters/connectcontrol/connectcontrol.go b/pkg/filters/connectcontrol/connectcontrol.go index b36f7e2cd7..5619c87916 100644 --- a/pkg/filters/connectcontrol/connectcontrol.go +++ b/pkg/filters/connectcontrol/connectcontrol.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package connectcontrol implements filter for controlling connections of MQTT clients package connectcontrol import ( diff --git a/pkg/filters/corsadaptor/corsadaptor.go b/pkg/filters/corsadaptor/corsadaptor.go index 1c0e40f661..9ee3300bbb 100644 --- a/pkg/filters/corsadaptor/corsadaptor.go +++ b/pkg/filters/corsadaptor/corsadaptor.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package corsadaptor implements a filter that adapts CORS stuff. package corsadaptor import ( diff --git a/pkg/filters/fallback/fallback.go b/pkg/filters/fallback/fallback.go index 19801b2e66..d455ea2341 100644 --- a/pkg/filters/fallback/fallback.go +++ b/pkg/filters/fallback/fallback.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package fallback implements the fallback filter. package fallback import ( diff --git a/pkg/filters/filters.go b/pkg/filters/filters.go index cb7f7281e6..ed8bb42261 100644 --- a/pkg/filters/filters.go +++ b/pkg/filters/filters.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package filters implements common functionality of filters. package filters import ( diff --git a/pkg/filters/headerlookup/headerlookup.go b/pkg/filters/headerlookup/headerlookup.go index 45ef853c20..0cf78d1108 100644 --- a/pkg/filters/headerlookup/headerlookup.go +++ b/pkg/filters/headerlookup/headerlookup.go @@ -15,6 +15,8 @@ * limitations under the License. */ +// Package headerlookup implements a filter that enriches request headers +// per request, looking up values from etcd. package headerlookup import ( diff --git a/pkg/filters/headertojson/headertojson.go b/pkg/filters/headertojson/headertojson.go index 03362bdf9b..d1fc76e497 100644 --- a/pkg/filters/headertojson/headertojson.go +++ b/pkg/filters/headertojson/headertojson.go @@ -15,6 +15,7 @@ * limitations under the License. 
*/ +// Package headertojson implements a filter to convert HTTP request header to json. package headertojson import ( diff --git a/pkg/filters/kafka/kafka.go b/pkg/filters/kafka/kafka.go index 1d0665d174..7ad23f1b18 100644 --- a/pkg/filters/kafka/kafka.go +++ b/pkg/filters/kafka/kafka.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package kafka implements a kafka proxy for MQTT requests. package kafka import ( @@ -36,7 +37,7 @@ const ( var kind = &filters.Kind{ Name: Kind, - Description: "Kafka is a backend of MQTTProxy", + Description: "Kafka is a kafka proxy for MQTT requests", Results: []string{resultGetDataFailed}, DefaultSpec: func() filters.Spec { return &Spec{} @@ -51,7 +52,7 @@ func init() { } type ( - // Kafka is kafka backend for MQTT proxy + // Kafka is a kafka proxy for MQTT requests. Kafka struct { spec *Spec producer sarama.AsyncProducer diff --git a/pkg/filters/kafka/kafka_test.go b/pkg/filters/kafka/kafka_test.go index 9fbfa32407..4dd2fbf821 100644 --- a/pkg/filters/kafka/kafka_test.go +++ b/pkg/filters/kafka/kafka_test.go @@ -63,11 +63,11 @@ func (m *mockAsyncProducer) AbortTxn() error { return nil } -func (m *mockAsyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupId string) error { +func (m *mockAsyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupID string) error { return nil } -func (m *mockAsyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupId string, metadata *string) error { +func (m *mockAsyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupID string, metadata *string) error { return nil } diff --git a/pkg/filters/kafkabackend/kafka.go b/pkg/filters/kafkabackend/kafka.go index 1de1c4bcb0..ec0080df84 100644 --- a/pkg/filters/kafkabackend/kafka.go +++ b/pkg/filters/kafkabackend/kafka.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package kafka implements a kafka proxy for HTTP requests. package kafka import ( @@ -38,7 +39,7 @@ const ( var kind = &filters.Kind{ Name: Kind, - Description: "Kafka is a backend of MQTTProxy", + Description: "Kafka is a kafka proxy for HTTP requests", Results: []string{resultParseErr}, DefaultSpec: func() filters.Spec { return &Spec{} @@ -53,7 +54,7 @@ func init() { } type ( - // Kafka is kafka backend for MQTT proxy + // Kafka is a kafka proxy for HTTP requests. Kafka struct { spec *Spec producer sarama.AsyncProducer diff --git a/pkg/filters/kafkabackend/kafka_test.go b/pkg/filters/kafkabackend/kafka_test.go index d793b66096..9faa5a6779 100644 --- a/pkg/filters/kafkabackend/kafka_test.go +++ b/pkg/filters/kafkabackend/kafka_test.go @@ -59,11 +59,11 @@ func (m *mockAsyncProducer) AbortTxn() error { return nil } -func (m *mockAsyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupId string) error { +func (m *mockAsyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupID string) error { return nil } -func (m *mockAsyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupId string, metadata *string) error { +func (m *mockAsyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupID string, metadata *string) error { return nil } diff --git a/pkg/filters/meshadaptor/meshadaptor.go b/pkg/filters/meshadaptor/meshadaptor.go index 528f278ca8..28509b4517 100644 --- a/pkg/filters/meshadaptor/meshadaptor.go +++ b/pkg/filters/meshadaptor/meshadaptor.go @@ -15,6 +15,7 @@ * limitations under the License. 
*/ +// Package meshadaptor provides MeshAdaptor filter. package meshadaptor import ( diff --git a/pkg/filters/mock/mock.go b/pkg/filters/mock/mock.go index cda7177a73..76c4b328c4 100644 --- a/pkg/filters/mock/mock.go +++ b/pkg/filters/mock/mock.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package mock provides Mock filter. package mock import ( diff --git a/pkg/filters/mqttclientauth/mqttauth.go b/pkg/filters/mqttclientauth/mqttauth.go index da1d4b56ca..2a0515f5b9 100644 --- a/pkg/filters/mqttclientauth/mqttauth.go +++ b/pkg/filters/mqttclientauth/mqttauth.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package mqttclientauth implements authentication for MQTT clients. package mqttclientauth import ( diff --git a/pkg/filters/oidcadaptor/oidcadaptor.go b/pkg/filters/oidcadaptor/oidcadaptor.go index 3025631479..40d5c48e77 100644 --- a/pkg/filters/oidcadaptor/oidcadaptor.go +++ b/pkg/filters/oidcadaptor/oidcadaptor.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package oidcadaptor implements OpenID Connect authorization. package oidcadaptor import ( @@ -67,6 +68,7 @@ type store interface { get(key string) string } +// OIDCAdaptor is the filter for OpenID Connect authorization. type OIDCAdaptor struct { spec *Spec store @@ -82,12 +84,13 @@ type OIDCAdaptor struct { jwks *keyfunc.JWKS } +// Spec defines the spec of OIDCAdaptor. type Spec struct { filters.BaseSpec `yaml:",inline"` CookieName string `json:"cookieName"` - ClientId string `json:"clientId" jsonschema:"required"` + ClientID string `json:"clientId" jsonschema:"required"` ClientSecret string `json:"clientSecret" jsonschema:"required"` Discovery string `json:"discovery"` @@ -107,10 +110,10 @@ type oidcConfig struct { TokenEndpoint string `json:"token_endpoint"` UserInfoEndpoint string `json:"userinfo_endpoint"` RevocationEndpoint string `json:"revocation_endpoint"` - JwksUri string `json:"jwks_uri"` + JwksURI string `json:"jwks_uri"` ResponseTypesSupported []string `json:"response_types_supported"` SubjectTypesSupported []string `json:"subject_types_supported"` - IdTokenSigningAlgValuesSupported []string `json:"id_token_signing_alg_values_supported"` + IDTokenSigningAlgValuesSupported []string `json:"id_token_signing_alg_values_supported"` ScopesSupported []string `json:"scopes_supported"` TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported"` ClaimsSupported []string `json:"claims_supported"` @@ -123,25 +126,29 @@ type oidcIDToken struct { TokenType string `json:"token_type"` RefreshToken string `json:"refresh_token"` ExpiresIn int `json:"expires_in"` - IdToken string `json:"id_token"` + IDToken string `json:"id_token"` } func init() { filters.Register(kind) } +// Name returns the name of the OIDCAdaptor filter instance. func (o *OIDCAdaptor) Name() string { return o.spec.Name() } +// Spec returns the spec used by the OIDCAdaptor instance. func (o *OIDCAdaptor) Spec() filters.Spec { return o.spec } +// Kind returns the kind of filter. func (o *OIDCAdaptor) Kind() *filters.Kind { return kind } +// Init initializes the filter. func (o *OIDCAdaptor) Init() { // delegate store interface operation to itself for testing o.store = o @@ -164,11 +171,13 @@ func (o *OIDCAdaptor) Init() { o.redirectPath = parsed.Path } +// Inherit inherits previous generation of the filter instance. func (o *OIDCAdaptor) Inherit(previousGeneration filters.Filter) { o.Init() previousGeneration.Close() } +// Handle handles the request. 
func (o *OIDCAdaptor) Handle(ctx *context.Context) (result string) { req := ctx.GetInputRequest().(*httpprot.Request) var rw *httpprot.Response @@ -192,16 +201,18 @@ func (o *OIDCAdaptor) Handle(ctx *context.Context) (result string) { if req.Path() == o.redirectPath { return o.handleOIDCCallback(ctx) } - authorizeUrl := o.buildAuthorizeURL(req) + authorizeURL := o.buildAuthorizeURL(req) rw.SetStatusCode(http.StatusFound) - rw.Header().Set("Location", authorizeUrl) + rw.Header().Set("Location", authorizeURL) return resultFiltered } +// Status returns the status of the filter instance. func (o *OIDCAdaptor) Status() interface{} { return nil } +// Close closes the filter instance. func (o *OIDCAdaptor) Close() { } @@ -222,7 +233,7 @@ func (o *OIDCAdaptor) initDiscoveryOIDCConf() { logger.Errorf("parse jwksRefreshInterval[%s] duration error: %s", o.jwksRefreshInterval, err) } } - jwks, err := keyfunc.Get(oidcConf.JwksUri, keyfunc.Options{ + jwks, err := keyfunc.Get(oidcConf.JwksURI, keyfunc.Options{ Client: httpCli, RefreshInterval: interval, }) @@ -251,17 +262,17 @@ func (o *OIDCAdaptor) handleOIDCCallback(ctx *context.Context) string { req.Header().Set("X-Access-Token", oidcToken.AccessToken) } } - reqUrl := o.store.get(clusterCacheKey("request_url", state)) - req.Header().Set("X-Origin-Request-URL", reqUrl) + reqURL := o.store.get(clusterCacheKey("request_url", state)) + req.Header().Set("X-Origin-Request-URL", reqURL) userInfo := map[string]any{} - if len(oidcToken.IdToken) > 0 { - parseToken, err := o.validateIDToken(oidcToken.IdToken) + if len(oidcToken.IDToken) > 0 { + parseToken, err := o.validateIDToken(oidcToken.IDToken) if err != nil { return filterResp(rw, http.StatusUnauthorized, "invalid oidc id token") } if o.setIDTokenHeader { - req.Header().Set("X-ID-Token", oidcToken.IdToken) + req.Header().Set("X-ID-Token", oidcToken.IDToken) } if claims, ok := parseToken.Claims.(jwt.MapClaims); ok { userInfo = claims @@ -285,7 +296,7 @@ func (o *OIDCAdaptor) handleOIDCCallback(ctx *context.Context) string { func (o *OIDCAdaptor) fetchOIDCToken(authCode string, state string, spec *Spec, err error, rw *httpprot.Response, req *httpprot.Request) (*oidcIDToken, error) { // client_secret_post || client_secret_basic tokenFormData := url.Values{ - "client_id": {o.spec.ClientId}, + "client_id": {o.spec.ClientID}, "client_secret": {o.spec.ClientSecret}, "code": {authCode}, "grant_type": {"authorization_code"}, @@ -295,7 +306,7 @@ func (o *OIDCAdaptor) fetchOIDCToken(authCode string, state string, spec *Spec, // https://openid.net/specs/openid-connect-core-1_0.html#TokenRequest tokenReq, _ := http.NewRequest(http.MethodPost, o.oidcConfig.TokenEndpoint, strings.NewReader(tokenFormData.Encode())) tokenReq.Header.Set("Content-Type", "application/x-www-form-urlencoded") - authBasic := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", spec.ClientId, spec.ClientSecret))) + authBasic := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", spec.ClientID, spec.ClientSecret))) tokenReq.Header.Set("Authorization", "Basic "+authBasic) tokenReq.Header.Set("Accept", "application/json") @@ -360,40 +371,40 @@ func (o *OIDCAdaptor) validateCodeAndState(code, state string) error { func (o *OIDCAdaptor) buildAuthorizeURL(req *httpprot.Request) string { // https://openid.net/specs/openid-connect-core-1_0.html#AuthorizationEndpoint - var authUrlBuilder strings.Builder - authUrlBuilder.WriteString(o.oidcConfig.AuthorizationEndpoint) - authUrlBuilder.WriteString("?client_id=" + o.spec.ClientId) + var 
authURLBuilder strings.Builder + authURLBuilder.WriteString(o.oidcConfig.AuthorizationEndpoint) + authURLBuilder.WriteString("?client_id=" + o.spec.ClientID) state := strings.ReplaceAll(uuid.New().String(), "-", "") // state is recommended - authUrlBuilder.WriteString("&state=" + state) + authURLBuilder.WriteString("&state=" + state) // End-user may spend some time doing login stuff, so we use a 10-minute timeout err := o.store.put(clusterCacheKey("state", state), "1", 10*time.Minute) if err != nil { logger.Errorf("put oidc state error: %s", err) } - var reqUrl string + var reqURL string args := req.URL().RawQuery if args != "" { - reqUrl = fmt.Sprintf("%s://%s%s?%s", req.Scheme(), req.Host(), req.Path(), args) + reqURL = fmt.Sprintf("%s://%s%s?%s", req.Scheme(), req.Host(), req.Path(), args) } else { - reqUrl = fmt.Sprintf("%s://%s%s", req.Scheme(), req.Host(), req.Path()) + reqURL = fmt.Sprintf("%s://%s%s", req.Scheme(), req.Host(), req.Path()) } - err = o.store.put(clusterCacheKey("request_url", state), reqUrl, 10*time.Minute) + err = o.store.put(clusterCacheKey("request_url", state), reqURL, 10*time.Minute) if err != nil { logger.Errorf("put origin request url error: %s", err) } // nonce is optional nonce := strings.ReplaceAll(uuid.New().String(), "-", "") - authUrlBuilder.WriteString("&nonce=" + nonce) - authUrlBuilder.WriteString("&response_type=code") - authUrlBuilder.WriteString("&scope=") + authURLBuilder.WriteString("&nonce=" + nonce) + authURLBuilder.WriteString("&response_type=code") + authURLBuilder.WriteString("&scope=") if len(o.oidcConfig.ScopesSupported) > 0 { - authUrlBuilder.WriteString(strings.Join(o.oidcConfig.ScopesSupported, "+")) + authURLBuilder.WriteString(strings.Join(o.oidcConfig.ScopesSupported, "+")) } else { - authUrlBuilder.WriteString("user") + authURLBuilder.WriteString("user") } - authUrlBuilder.WriteString("&redirect_uri=" + url.QueryEscape(o.spec.RedirectURI)) - return authUrlBuilder.String() + authURLBuilder.WriteString("&redirect_uri=" + url.QueryEscape(o.spec.RedirectURI)) + return authURLBuilder.String() } func clusterCacheKey(tag string, val string) string { diff --git a/pkg/filters/opafilter/opafilter.go b/pkg/filters/opafilter/opafilter.go index 64f6d51ae9..34cf551a96 100644 --- a/pkg/filters/opafilter/opafilter.go +++ b/pkg/filters/opafilter/opafilter.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package opafilter implements OpenPolicyAgent function. package opafilter import ( @@ -60,12 +61,14 @@ var kind = &filters.Kind{ }, } +// OPAFilter is the filter for OpenPolicyAgent. type OPAFilter struct { spec *Spec includedHeadersParsed []string `yaml:"includedHeadersParsed"` regoQuery *rego.PreparedEvalQuery } +// Spec is the spec of the OPAFilter. type Spec struct { filters.BaseSpec `yaml:",inline"` DefaultStatus int `yaml:"defaultStatus"` @@ -78,18 +81,22 @@ func init() { filters.Register(kind) } +// Name returns the name of the OPAFilter filter instance. func (o *OPAFilter) Name() string { return o.spec.Name() } +// Spec returns the spec of the OPAFilter filter instance. func (o *OPAFilter) Spec() filters.Spec { return o.spec } +// Kind returns the kind of the OPAFilter filter instance. func (o *OPAFilter) Kind() *filters.Kind { return kind } +// Init initialize the filter instance. func (o *OPAFilter) Init() { o.includedHeadersParsed = strings.Split(o.spec.IncludedHeaders, ",") if o.spec.DefaultStatus == 0 { @@ -117,11 +124,13 @@ func (o *OPAFilter) Init() { } } +// Inherit inherits previous generation of filter instance. 
func (o *OPAFilter) Inherit(previousGeneration filters.Filter) { o.Init() previousGeneration.Close() } +// Handle handles the request. func (o *OPAFilter) Handle(ctx *context.Context) (result string) { req := ctx.GetInputRequest().(*httpprot.Request) var rw *httpprot.Response @@ -132,10 +141,12 @@ func (o *OPAFilter) Handle(ctx *context.Context) (result string) { return o.evalRequest(req, rw) } +// Status returns the status of the filter instance. func (o *OPAFilter) Status() interface{} { return nil } +// Close closes the filter instance. func (o *OPAFilter) Close() { } diff --git a/pkg/filters/proxies/grpcproxy/loadbalance.go b/pkg/filters/proxies/grpcproxy/loadbalance.go index b8915c8b67..a5656a978c 100644 --- a/pkg/filters/proxies/grpcproxy/loadbalance.go +++ b/pkg/filters/proxies/grpcproxy/loadbalance.go @@ -79,5 +79,5 @@ func (f *forwardLoadBalancer) ReturnServer(s *Server, req protocols.Request, res } // Close closes the load balancer. -func (lb *forwardLoadBalancer) Close() { +func (f *forwardLoadBalancer) Close() { } diff --git a/pkg/filters/proxies/grpcproxy/proxy.go b/pkg/filters/proxies/grpcproxy/proxy.go index 0c7e419ede..a247a046b2 100644 --- a/pkg/filters/proxies/grpcproxy/proxy.go +++ b/pkg/filters/proxies/grpcproxy/proxy.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package grpcproxy implements a gRPC proxy. package grpcproxy import ( @@ -84,11 +85,16 @@ type ( } // Server is the backend server. - Server = proxies.Server - RequestMatcher = proxies.RequestMatcher - LoadBalancer = proxies.LoadBalancer - LoadBalanceSpec = proxies.LoadBalanceSpec - BaseServerPool = proxies.ServerPoolBase + Server = proxies.Server + // RequestMatcher is the interface of a request matcher + RequestMatcher = proxies.RequestMatcher + // LoadBalancer is the interface of a load balancer. + LoadBalancer = proxies.LoadBalancer + // LoadBalanceSpec is the spec of a load balancer. + LoadBalanceSpec = proxies.LoadBalanceSpec + // BaseServerPool is the base of a server pool. + BaseServerPool = proxies.ServerPoolBase + // BaseServerPoolSpec is the spec of BaseServerPool. BaseServerPoolSpec = proxies.ServerPoolBaseSpec ) diff --git a/pkg/filters/proxies/httpproxy/proxy.go b/pkg/filters/proxies/httpproxy/proxy.go index fb662fb361..adb3d3912a 100644 --- a/pkg/filters/proxies/httpproxy/proxy.go +++ b/pkg/filters/proxies/httpproxy/proxy.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package httpproxy implements an HTTP proxy. package httpproxy import ( @@ -129,11 +130,16 @@ type ( } // Server is the backend server. - Server = proxies.Server - RequestMatcher = proxies.RequestMatcher - LoadBalancer = proxies.LoadBalancer - LoadBalanceSpec = proxies.LoadBalanceSpec - BaseServerPool = proxies.ServerPoolBase + Server = proxies.Server + // RequestMatcher is the interface of a request matcher + RequestMatcher = proxies.RequestMatcher + // LoadBalancer is the interface of a load balancer. + LoadBalancer = proxies.LoadBalancer + // LoadBalanceSpec is the spec of a load balancer. + LoadBalanceSpec = proxies.LoadBalanceSpec + // BaseServerPool is the base of a server pool. + BaseServerPool = proxies.ServerPoolBase + // BaseServerPoolSpec is the spec of BaseServerPool. BaseServerPoolSpec = proxies.ServerPoolBaseSpec ) diff --git a/pkg/filters/proxies/server.go b/pkg/filters/proxies/server.go index 6ea5ff2a28..51335455ca 100644 --- a/pkg/filters/proxies/server.go +++ b/pkg/filters/proxies/server.go @@ -15,6 +15,7 @@ * limitations under the License. 
*/ +// Package proxies implements basic functions for backend proxies. package proxies import ( diff --git a/pkg/filters/ratelimiter/ratelimiter.go b/pkg/filters/ratelimiter/ratelimiter.go index 5381d6f730..92b3893a48 100644 --- a/pkg/filters/ratelimiter/ratelimiter.go +++ b/pkg/filters/ratelimiter/ratelimiter.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package ratelimiter implements a rate limiter. package ratelimiter import ( diff --git a/pkg/filters/redirector/redirector.go b/pkg/filters/redirector/redirector.go index ce4a1de355..6e0692687f 100644 --- a/pkg/filters/redirector/redirector.go +++ b/pkg/filters/redirector/redirector.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package redirector implements a filter to handle HTTP redirects. package redirector import ( @@ -87,6 +88,7 @@ type ( } ) +// Validate validates the spec. func (s *Spec) Validate() error { if _, ok := statusCodeMap[s.StatusCode]; !ok { return errors.New("invalid status code of Redirector, support 300, 301, 302, 303, 304, 307, 308") diff --git a/pkg/filters/remotefilter/remotefilter.go b/pkg/filters/remotefilter/remotefilter.go index 43135a780b..328136a7e3 100644 --- a/pkg/filters/remotefilter/remotefilter.go +++ b/pkg/filters/remotefilter/remotefilter.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package remotefilter implements the RemoteFilter filter to invokes remote apis. package remotefilter import ( diff --git a/pkg/filters/requestadaptor/requestadaptor.go b/pkg/filters/requestadaptor/requestadaptor.go index d2ea30d34f..dedd241d5f 100644 --- a/pkg/filters/requestadaptor/requestadaptor.go +++ b/pkg/filters/requestadaptor/requestadaptor.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package requestadaptor implements a filter to adapts requests. package requestadaptor import ( diff --git a/pkg/filters/responseadaptor/responseadaptor.go b/pkg/filters/responseadaptor/responseadaptor.go index 9492946c17..d534e88129 100644 --- a/pkg/filters/responseadaptor/responseadaptor.go +++ b/pkg/filters/responseadaptor/responseadaptor.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package responseadaptor implements a filter to adapts responses. package responseadaptor import ( diff --git a/pkg/filters/topicmapper/topicmapper.go b/pkg/filters/topicmapper/topicmapper.go index 6e0400a055..572dbc4e96 100644 --- a/pkg/filters/topicmapper/topicmapper.go +++ b/pkg/filters/topicmapper/topicmapper.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package topicmapper maps MQTT topic to Kafka topics and key-value headers package topicmapper import ( diff --git a/pkg/filters/validator/validator.go b/pkg/filters/validator/validator.go index 7d10a40914..ab142a98e1 100644 --- a/pkg/filters/validator/validator.go +++ b/pkg/filters/validator/validator.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package validator provides Validator filter to validates HTTP requests. package validator import ( diff --git a/pkg/filters/wasmhost/doc.go b/pkg/filters/wasmhost/doc.go index 58f492ae1a..07122cfcf0 100644 --- a/pkg/filters/wasmhost/doc.go +++ b/pkg/filters/wasmhost/doc.go @@ -15,4 +15,5 @@ * limitations under the License. */ +// Package wasmhost implements a host environment for WebAssembly. package wasmhost diff --git a/pkg/graceupdate/graceupdate.go b/pkg/graceupdate/graceupdate.go index 09048235e1..a54fe9b0a3 100644 --- a/pkg/graceupdate/graceupdate.go +++ b/pkg/graceupdate/graceupdate.go @@ -15,6 +15,7 @@ * limitations under the License. 
*/ +// Package graceupdate provides graceful update for easegress. package graceupdate import ( diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 51cdd1c467..a73284a872 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -15,16 +15,18 @@ * limitations under the License. */ +// Package logger provides logger for Easegress. package logger import ( - "github.com/go-logr/zapr" - "go.opentelemetry.io/otel" "io" "os" "path/filepath" "time" + "github.com/go-logr/zapr" + "go.opentelemetry.io/otel" + "go.uber.org/zap" "go.uber.org/zap/zapcore" diff --git a/pkg/object/autocertmanager/autocertmanager.go b/pkg/object/autocertmanager/autocertmanager.go index ba6151535b..bf6bfea009 100644 --- a/pkg/object/autocertmanager/autocertmanager.go +++ b/pkg/object/autocertmanager/autocertmanager.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package autocertmanager provides AutoCertManager to manage certificates automatically. package autocertmanager import ( diff --git a/pkg/object/consulserviceregistry/consulserviceregistry.go b/pkg/object/consulserviceregistry/consulserviceregistry.go index 44b2a83825..1bd8d1493a 100644 --- a/pkg/object/consulserviceregistry/consulserviceregistry.go +++ b/pkg/object/consulserviceregistry/consulserviceregistry.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package consulserviceregistry provides ConsulServiceRegistry. package consulserviceregistry import ( diff --git a/pkg/object/easemonitormetrics/easemonitormetrics.go b/pkg/object/easemonitormetrics/easemonitormetrics.go index 45b8d2136f..2f5663e44f 100644 --- a/pkg/object/easemonitormetrics/easemonitormetrics.go +++ b/pkg/object/easemonitormetrics/easemonitormetrics.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package easemonitormetrics provides EaseMonitorMetrics. package easemonitormetrics import ( diff --git a/pkg/object/etcdserviceregistry/etcdserviceregistry.go b/pkg/object/etcdserviceregistry/etcdserviceregistry.go index 06b419b56f..dff9504511 100644 --- a/pkg/object/etcdserviceregistry/etcdserviceregistry.go +++ b/pkg/object/etcdserviceregistry/etcdserviceregistry.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package eserviceregistry provides EtcdServiceRegistry. package eserviceregistry import ( diff --git a/pkg/object/eurekaserviceregistry/eurekaserviceregistry.go b/pkg/object/eurekaserviceregistry/eurekaserviceregistry.go index de2c3dbf5e..e8afcd1ef1 100644 --- a/pkg/object/eurekaserviceregistry/eurekaserviceregistry.go +++ b/pkg/object/eurekaserviceregistry/eurekaserviceregistry.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package eurekaserviceregistry provides EurekaServiceRegistry. package eurekaserviceregistry import ( diff --git a/pkg/object/function/faascontroller.go b/pkg/object/function/faascontroller.go index 21aa94dbf3..bf97d484c0 100644 --- a/pkg/object/function/faascontroller.go +++ b/pkg/object/function/faascontroller.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package function provides FaasController. package function import ( diff --git a/pkg/object/function/provider/provider.go b/pkg/object/function/provider/provider.go index cba06c018a..1cdee3c171 100644 --- a/pkg/object/function/provider/provider.go +++ b/pkg/object/function/provider/provider.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package provider defines and implements FaasProvider interface. 
package provider import ( diff --git a/pkg/object/function/spec/fsm.go b/pkg/object/function/spec/fsm.go index 715e3a6163..d563df5837 100644 --- a/pkg/object/function/spec/fsm.go +++ b/pkg/object/function/spec/fsm.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package spec provides the spec for FaaS. package spec import ( diff --git a/pkg/object/function/storage/storage.go b/pkg/object/function/storage/storage.go index dc667fd2db..02591b73ae 100644 --- a/pkg/object/function/storage/storage.go +++ b/pkg/object/function/storage/storage.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package storage provides the storage for FaaS. package storage import ( diff --git a/pkg/object/function/worker/worker.go b/pkg/object/function/worker/worker.go index 98a79b002f..547cbea15d 100644 --- a/pkg/object/function/worker/worker.go +++ b/pkg/object/function/worker/worker.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package worker provides the worker for FaaSController. package worker import ( diff --git a/pkg/object/globalfilter/globalfilter.go b/pkg/object/globalfilter/globalfilter.go index d7d4b6a99c..aee0effd58 100644 --- a/pkg/object/globalfilter/globalfilter.go +++ b/pkg/object/globalfilter/globalfilter.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package globalfilter provides GlobalFilter. package globalfilter import ( diff --git a/pkg/object/grpcserver/grpcserver.go b/pkg/object/grpcserver/grpcserver.go index 5f0a0f3dbb..5562d7e2fd 100644 --- a/pkg/object/grpcserver/grpcserver.go +++ b/pkg/object/grpcserver/grpcserver.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package grpcserver implements the GRPCServer. package grpcserver import ( diff --git a/pkg/object/httpserver/httpserver.go b/pkg/object/httpserver/httpserver.go index 0bf087b4f8..1651e42aec 100644 --- a/pkg/object/httpserver/httpserver.go +++ b/pkg/object/httpserver/httpserver.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package httpserver implements the HTTPServer. package httpserver import ( diff --git a/pkg/object/httpserver/routers/ordered/router.go b/pkg/object/httpserver/routers/ordered/router.go index 51ef70b628..04f5e349bf 100644 --- a/pkg/object/httpserver/routers/ordered/router.go +++ b/pkg/object/httpserver/routers/ordered/router.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package ordered provides the router implementation of ordered routing policy. package ordered import ( diff --git a/pkg/object/httpserver/routers/radixtree/router.go b/pkg/object/httpserver/routers/radixtree/router.go index fe317e9de9..c882bbc55b 100644 --- a/pkg/object/httpserver/routers/radixtree/router.go +++ b/pkg/object/httpserver/routers/radixtree/router.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package radixtree provides the router implementation of radix tree routing policy. package radixtree import ( diff --git a/pkg/object/httpserver/routers/routers.go b/pkg/object/httpserver/routers/routers.go index 1fb7c529be..d91376e5c9 100644 --- a/pkg/object/httpserver/routers/routers.go +++ b/pkg/object/httpserver/routers/routers.go @@ -15,6 +15,8 @@ * limitations under the License. */ +// Package routers provides the router interface and the implementation of +// different routing policies. 
package routers import ( diff --git a/pkg/object/ingresscontroller/ingresscontroller.go b/pkg/object/ingresscontroller/ingresscontroller.go index 1d416c7988..34def5cb4a 100644 --- a/pkg/object/ingresscontroller/ingresscontroller.go +++ b/pkg/object/ingresscontroller/ingresscontroller.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package ingresscontroller implements a K8s ingress controller. package ingresscontroller import ( diff --git a/pkg/object/meshcontroller/api/api.go b/pkg/object/meshcontroller/api/api.go index 27788fcc57..32f6b8e255 100644 --- a/pkg/object/meshcontroller/api/api.go +++ b/pkg/object/meshcontroller/api/api.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package api provides the API for mesh controller. package api import ( diff --git a/pkg/object/meshcontroller/certmanager/certmanager.go b/pkg/object/meshcontroller/certmanager/certmanager.go index 235d4c1b88..cfd7564bb2 100644 --- a/pkg/object/meshcontroller/certmanager/certmanager.go +++ b/pkg/object/meshcontroller/certmanager/certmanager.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package certmanager provides the cert manager for mesh controller. package certmanager import ( diff --git a/pkg/object/meshcontroller/informer/informer.go b/pkg/object/meshcontroller/informer/informer.go index c08c9b166d..ae8d624b56 100644 --- a/pkg/object/meshcontroller/informer/informer.go +++ b/pkg/object/meshcontroller/informer/informer.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package informer provides the informer for mesh controller. package informer import ( diff --git a/pkg/object/meshcontroller/ingresscontroller/ingresscontroller.go b/pkg/object/meshcontroller/ingresscontroller/ingresscontroller.go index b025655441..b70f1b8e0b 100644 --- a/pkg/object/meshcontroller/ingresscontroller/ingresscontroller.go +++ b/pkg/object/meshcontroller/ingresscontroller/ingresscontroller.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package ingresscontroller implements the ingress controller for service mesh. package ingresscontroller import ( diff --git a/pkg/object/meshcontroller/label/label.go b/pkg/object/meshcontroller/label/label.go index 693076ba0d..11da5615a4 100644 --- a/pkg/object/meshcontroller/label/label.go +++ b/pkg/object/meshcontroller/label/label.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package label defines labels. package label const ( diff --git a/pkg/object/meshcontroller/layout/layout.go b/pkg/object/meshcontroller/layout/layout.go index c7f159bc3b..02ee08fd2b 100644 --- a/pkg/object/meshcontroller/layout/layout.go +++ b/pkg/object/meshcontroller/layout/layout.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package layout defines the layout of the data in etcd. package layout import ( diff --git a/pkg/object/meshcontroller/master/master.go b/pkg/object/meshcontroller/master/master.go index dff6ee22f9..c7af28ed7f 100644 --- a/pkg/object/meshcontroller/master/master.go +++ b/pkg/object/meshcontroller/master/master.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package master provides master role of Easegress for mesh control plane. package master import ( diff --git a/pkg/object/meshcontroller/meshcontroller.go b/pkg/object/meshcontroller/meshcontroller.go index 3df1e56068..1df7ed3db0 100644 --- a/pkg/object/meshcontroller/meshcontroller.go +++ b/pkg/object/meshcontroller/meshcontroller.go @@ -15,6 +15,7 @@ * limitations under the License. 
*/ +// Package meshcontroller provides the service mesh controller. package meshcontroller import ( diff --git a/pkg/object/meshcontroller/registrycenter/registrycenter.go b/pkg/object/meshcontroller/registrycenter/registrycenter.go index c90c960af1..e5f4c17ee9 100644 --- a/pkg/object/meshcontroller/registrycenter/registrycenter.go +++ b/pkg/object/meshcontroller/registrycenter/registrycenter.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package registrycenter provides registry center server. package registrycenter import ( diff --git a/pkg/object/meshcontroller/service/service.go b/pkg/object/meshcontroller/service/service.go index 9d91cf33eb..ab9031f7cb 100644 --- a/pkg/object/meshcontroller/service/service.go +++ b/pkg/object/meshcontroller/service/service.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package service provides business layer between mesh and store. package service import ( diff --git a/pkg/object/meshcontroller/spec/spec.go b/pkg/object/meshcontroller/spec/spec.go index ff8c4c93ef..1656456d28 100644 --- a/pkg/object/meshcontroller/spec/spec.go +++ b/pkg/object/meshcontroller/spec/spec.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package spec defines the spec for various objects in mesh. package spec import ( diff --git a/pkg/object/meshcontroller/storage/storage.go b/pkg/object/meshcontroller/storage/storage.go index 66e19072d9..dba46d9158 100644 --- a/pkg/object/meshcontroller/storage/storage.go +++ b/pkg/object/meshcontroller/storage/storage.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package storage provides the storage APIs. package storage import ( diff --git a/pkg/object/meshcontroller/worker/worker.go b/pkg/object/meshcontroller/worker/worker.go index 6e3ffe9673..5a7b824ed0 100644 --- a/pkg/object/meshcontroller/worker/worker.go +++ b/pkg/object/meshcontroller/worker/worker.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package worker provides the worker for mesh controller. package worker import ( diff --git a/pkg/object/mqttproxy/mqttproxy.go b/pkg/object/mqttproxy/mqttproxy.go index 22a323712f..67ea50d3a4 100644 --- a/pkg/object/mqttproxy/mqttproxy.go +++ b/pkg/object/mqttproxy/mqttproxy.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package mqttproxy implements the MQTTProxy. package mqttproxy import ( diff --git a/pkg/object/mqttproxy/topicmgr.go b/pkg/object/mqttproxy/topicmgr.go index ddf6868f19..f73b44cccb 100644 --- a/pkg/object/mqttproxy/topicmgr.go +++ b/pkg/object/mqttproxy/topicmgr.go @@ -25,6 +25,7 @@ import ( lru "github.com/hashicorp/golang-lru" ) +// TopicManager is the interface of a topic manager. type TopicManager interface { subscribe(topics []string, qoss []byte, clientID string) error unsubscribe(topics []string, clientID string) error diff --git a/pkg/object/nacosserviceregistry/nacosserviceregistry.go b/pkg/object/nacosserviceregistry/nacosserviceregistry.go index bcbda9e1a5..1b6e3db34f 100644 --- a/pkg/object/nacosserviceregistry/nacosserviceregistry.go +++ b/pkg/object/nacosserviceregistry/nacosserviceregistry.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package nacosserviceregistry provides the NacosServiceRegistry. package nacosserviceregistry import ( diff --git a/pkg/object/pipeline/pipeline.go b/pkg/object/pipeline/pipeline.go index e96e19493b..f5e65c9936 100644 --- a/pkg/object/pipeline/pipeline.go +++ b/pkg/object/pipeline/pipeline.go @@ -15,6 +15,7 @@ * limitations under the License. 
*/ +// Package pipeline provides the pipeline of Easegress. package pipeline import ( diff --git a/pkg/object/rawconfigtrafficcontroller/rawconfigtrafficcontroller.go b/pkg/object/rawconfigtrafficcontroller/rawconfigtrafficcontroller.go index 8e1037dfad..e013370393 100644 --- a/pkg/object/rawconfigtrafficcontroller/rawconfigtrafficcontroller.go +++ b/pkg/object/rawconfigtrafficcontroller/rawconfigtrafficcontroller.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package rawconfigtrafficcontroller implements the RawConfigTrafficController. package rawconfigtrafficcontroller import ( diff --git a/pkg/object/serviceregistry/serviceregistry.go b/pkg/object/serviceregistry/serviceregistry.go index 94d56ea634..8e617ed46b 100644 --- a/pkg/object/serviceregistry/serviceregistry.go +++ b/pkg/object/serviceregistry/serviceregistry.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package serviceregistry provides the service registry. package serviceregistry import ( diff --git a/pkg/object/statussynccontroller/statussynccontroller.go b/pkg/object/statussynccontroller/statussynccontroller.go index 7202672d24..b9b0431863 100644 --- a/pkg/object/statussynccontroller/statussynccontroller.go +++ b/pkg/object/statussynccontroller/statussynccontroller.go @@ -14,6 +14,7 @@ * limitations under the License. */ +// Package statussynccontroller implements the StatusSyncController. package statussynccontroller import ( diff --git a/pkg/object/trafficcontroller/trafficcontroller.go b/pkg/object/trafficcontroller/trafficcontroller.go index 6c1ed6720b..685b605571 100644 --- a/pkg/object/trafficcontroller/trafficcontroller.go +++ b/pkg/object/trafficcontroller/trafficcontroller.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package trafficcontroller implements the TrafficController. package trafficcontroller import ( diff --git a/pkg/object/zookeeperserviceregistry/zookeeperserviceregistry.go b/pkg/object/zookeeperserviceregistry/zookeeperserviceregistry.go index 7332facbaa..9c765e1888 100644 --- a/pkg/object/zookeeperserviceregistry/zookeeperserviceregistry.go +++ b/pkg/object/zookeeperserviceregistry/zookeeperserviceregistry.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package zookeeperserviceregistry implements the ZookeeperServiceRegistry. package zookeeperserviceregistry import ( diff --git a/pkg/option/option.go b/pkg/option/option.go index 562356009c..f63b207e12 100644 --- a/pkg/option/option.go +++ b/pkg/option/option.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package option implements the start-up options. package option import ( diff --git a/pkg/pidfile/pidfile.go b/pkg/pidfile/pidfile.go index c9432b7f4a..f042702a1e 100644 --- a/pkg/pidfile/pidfile.go +++ b/pkg/pidfile/pidfile.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package pidfile provides pidfile related functions. package pidfile import ( diff --git a/pkg/profile/profile.go b/pkg/profile/profile.go index 88274502a8..61a606e80d 100644 --- a/pkg/profile/profile.go +++ b/pkg/profile/profile.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package profile provides profile related functions. package profile import ( diff --git a/pkg/protocols/grpcprot/fake.go b/pkg/protocols/grpcprot/fake.go index 644186a176..0978a25f73 100644 --- a/pkg/protocols/grpcprot/fake.go +++ b/pkg/protocols/grpcprot/fake.go @@ -19,18 +19,22 @@ package grpcprot import ( "context" + "google.golang.org/grpc" ) +// FakeServerStream is a fake grpc.ServerStream for testing. 
type FakeServerStream struct { grpc.ServerStream ctx context.Context } +// NewFakeServerStream returns a new FakeServerStream. func NewFakeServerStream(ctx context.Context) *FakeServerStream { return &FakeServerStream{ctx: ctx} } +// Context returns the context of the stream. func (f *FakeServerStream) Context() context.Context { return f.ctx } diff --git a/pkg/protocols/grpcprot/grpc.go b/pkg/protocols/grpcprot/grpc.go index bfe9280b42..658f1e1075 100644 --- a/pkg/protocols/grpcprot/grpc.go +++ b/pkg/protocols/grpcprot/grpc.go @@ -15,10 +15,12 @@ * limitations under the License. */ +// Package grpcprot implements the grpc protocol. package grpcprot import ( "fmt" + "github.com/megaease/easegress/pkg/protocols" "google.golang.org/grpc" ) @@ -29,30 +31,36 @@ type Protocol struct { var _ protocols.Protocol = (*Protocol)(nil) +// CreateRequest creates a Request. func (p *Protocol) CreateRequest(req interface{}) (protocols.Request, error) { - if r, ok := req.(grpc.ServerStream); ok { + r, ok := req.(grpc.ServerStream) + if ok { return NewRequestWithServerStream(r), nil - } else { - return nil, fmt.Errorf("input param's type should be grpc.ServerStream") } + return nil, fmt.Errorf("input param's type should be grpc.ServerStream") } +// CreateResponse creates a Response. func (p *Protocol) CreateResponse(resp interface{}) (protocols.Response, error) { return NewResponse(), nil } +// NewRequestInfo creates a RequestInfo. func (p *Protocol) NewRequestInfo() interface{} { panic("implement me") } +// BuildRequest builds a Request from request info. func (p *Protocol) BuildRequest(reqInfo interface{}) (protocols.Request, error) { panic("implement me") } +// NewResponseInfo creates a ResponseInfo. func (p *Protocol) NewResponseInfo() interface{} { panic("implement me") } +// BuildResponse builds a Response from response info. func (p *Protocol) BuildResponse(respInfo interface{}) (protocols.Response, error) { panic("implement me") } diff --git a/pkg/protocols/grpcprot/header.go b/pkg/protocols/grpcprot/header.go index 9ca6a65426..ca3739c522 100644 --- a/pkg/protocols/grpcprot/header.go +++ b/pkg/protocols/grpcprot/header.go @@ -33,26 +33,31 @@ type ( Trailer = Header ) +// NewHeader returns a new Header func NewHeader(md metadata.MD) *Header { return &Header{ md: md.Copy(), } } +// NewTrailer returns a new Trailer func NewTrailer(md metadata.MD) *Trailer { return &Trailer{ md: md.Copy(), } } +// GetMD returns the metadata.MD func (h *Header) GetMD() metadata.MD { return h.md.Copy() } +// RawAdd adds the key, value pair to the header. func (h *Header) RawAdd(key string, values ...string) { h.md.Append(key, values...) } +// Add adds the key, value pair to the header. func (h *Header) Add(key string, value interface{}) { switch value.(type) { case string: @@ -64,10 +69,12 @@ func (h *Header) Add(key string, value interface{}) { } } +// RawSet sets the header entries associated with key to the given value. func (h *Header) RawSet(key string, values ...string) { h.md.Set(key, values...) } +// Set sets the header entries associated with key to the given value. func (h *Header) Set(key string, value interface{}) { switch value.(type) { case string: @@ -79,14 +86,17 @@ func (h *Header) Set(key string, value interface{}) { } } +// RawGet gets the values associated with the given key. func (h *Header) RawGet(key string) []string { return h.md.Get(key) } +// Get gets the values associated with the given key. 
func (h *Header) Get(key string) interface{} { return h.md.Get(key) } +// Del deletes the values associated with key. func (h *Header) Del(key string) { h.md.Delete(key) } @@ -118,6 +128,7 @@ func (h *Header) Walk(fn func(key string, value interface{}) bool) { } } +// Clone returns a copy of the header. func (h *Header) Clone() protocols.Header { return &Header{ md: h.md.Copy(), diff --git a/pkg/protocols/grpcprot/request.go b/pkg/protocols/grpcprot/request.go index af5746299b..5c3b4b88a0 100644 --- a/pkg/protocols/grpcprot/request.go +++ b/pkg/protocols/grpcprot/request.go @@ -60,6 +60,7 @@ type ( ) const ( + // Authority is the key of authority in grpc metadata Authority = ":authority" ) @@ -109,6 +110,7 @@ func NewRequestWithContext(ctx context.Context) *Request { return r } +// Network returns the network type of the address. func (a *Addr) Network() string { if a != nil { // Per the documentation on net/http.Request.RemoteAddr, if this is @@ -125,6 +127,7 @@ func (a *Addr) Network() string { return "" } +// String implements the Stringer interface. func (a *Addr) String() string { return a.addr } func (a *Addr) setAddr(addr string) { @@ -247,6 +250,7 @@ func (r *Request) RealIP() string { return ip } +// SetRealIP set the client ip of the request. func (r *Request) SetRealIP(ip string) { r.realIP = "" if strings.ContainsRune(r.peer.Addr.String(), ':') { @@ -262,6 +266,7 @@ func (r *Request) SourceHost() string { return r.peer.Addr.(*Addr).String() } +// SetSourceHost set the source host of the request. func (r *Request) SetSourceHost(sourceHost string) { r.peer.Addr.(*Addr).setAddr(sourceHost) } @@ -272,6 +277,7 @@ func (r *Request) Host() string { return r.header.GetFirst(Authority) } +// SetHost set the host of the request. func (r *Request) SetHost(host string) { r.header.Set(Authority, host) } diff --git a/pkg/protocols/grpcprot/response.go b/pkg/protocols/grpcprot/response.go index 38bd59e42e..4c53efc9ed 100644 --- a/pkg/protocols/grpcprot/response.go +++ b/pkg/protocols/grpcprot/response.go @@ -18,11 +18,12 @@ package grpcprot import ( + "io" + "github.com/megaease/easegress/pkg/protocols" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - "io" ) // Response wrapper status.Status @@ -38,6 +39,7 @@ var ( _ protocols.Response = (*Response)(nil) ) +// NewResponse returns a new Response. func NewResponse() *Response { return &Response{ Status: nil, @@ -46,22 +48,27 @@ func NewResponse() *Response { } } +// SetTrailer sets the trailer of the response. func (r *Response) SetTrailer(trailer *Trailer) { r.trailer.md = trailer.md } +// Trailer returns the trailer of the response. func (r *Response) Trailer() protocols.Trailer { return r.trailer } +// RawTrailer returns the trailer of the response. func (r *Response) RawTrailer() *Trailer { return r.trailer } +// GetStatus returns the status of the response. func (r *Response) GetStatus() *status.Status { return r.Status } +// SetStatus sets the status of the response. func (r *Response) SetStatus(s *status.Status) { if s == nil { r.Status = status.New(codes.OK, "OK") @@ -70,6 +77,7 @@ func (r *Response) SetStatus(s *status.Status) { r.Status = s } +// StatusCode returns the status code of the response. func (r *Response) StatusCode() int { if r.Status == nil { return int(codes.OK) @@ -77,10 +85,12 @@ func (r *Response) StatusCode() int { return int(r.Status.Code()) } +// SetHeader sets the header of the response. 
func (r *Response) SetHeader(header *Header) { r.header.md = header.md } +// Header returns the header of the response. func (r *Response) Header() protocols.Header { return r.header } @@ -90,30 +100,37 @@ func (r *Response) RawHeader() *Header { return r.header } +// IsStream returns true if the response is a stream. func (r *Response) IsStream() bool { return true } +// SetPayload sets the payload of the response. func (r *Response) SetPayload(payload interface{}) { panic("implement me") } +// GetPayload returns the payload of the response. func (r *Response) GetPayload() io.Reader { panic("implement me") } +// RawPayload returns the payload of the response. func (r *Response) RawPayload() []byte { panic("implement me") } +// PayloadSize returns the size of the payload. func (r *Response) PayloadSize() int64 { panic("implement me") } +// ToBuilderResponse wraps the response and returns the wrapper, the +// return value can be used in the template of the Builder filters. func (r *Response) ToBuilderResponse(name string) interface{} { panic("implement me") } +// Close closes the response. func (r *Response) Close() { - } diff --git a/pkg/protocols/httpprot/http.go b/pkg/protocols/httpprot/http.go index 4c3868b1c5..21be73e534 100644 --- a/pkg/protocols/httpprot/http.go +++ b/pkg/protocols/httpprot/http.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package httpprot implements the HTTP protocol. package httpprot import ( diff --git a/pkg/protocols/httpprot/httpheader/httpheader.go b/pkg/protocols/httpprot/httpheader/httpheader.go index fb03688818..140d7d0613 100644 --- a/pkg/protocols/httpprot/httpheader/httpheader.go +++ b/pkg/protocols/httpprot/httpheader/httpheader.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package httpheader provides HTTP Header related functions. package httpheader import ( diff --git a/pkg/protocols/httpprot/httpstat/httpstat.go b/pkg/protocols/httpprot/httpstat/httpstat.go index 126d8f36a7..066e1e6417 100644 --- a/pkg/protocols/httpprot/httpstat/httpstat.go +++ b/pkg/protocols/httpprot/httpstat/httpstat.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package httpstat implements the statistics tool for HTTP traffic. package httpstat import ( diff --git a/pkg/protocols/httpprot/response.go b/pkg/protocols/httpprot/response.go index 99b66e12d3..7be79b53b0 100644 --- a/pkg/protocols/httpprot/response.go +++ b/pkg/protocols/httpprot/response.go @@ -71,6 +71,7 @@ func (r *Response) IsStream() bool { return r.stream != nil } +// Trailer returns the trailer of the response in type protocols.Trailer. func (r *Response) Trailer() protocols.Trailer { return newHeader(r.Std().Trailer) } diff --git a/pkg/protocols/mqttprot/mqtt.go b/pkg/protocols/mqttprot/mqtt.go index e647fd7610..4001c1cf1f 100644 --- a/pkg/protocols/mqttprot/mqtt.go +++ b/pkg/protocols/mqttprot/mqtt.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package mqttprot implements the MQTT protocol. package mqttprot import "github.com/megaease/easegress/pkg/protocols" diff --git a/pkg/protocols/mqttprot/response.go b/pkg/protocols/mqttprot/response.go index bd4cff56e9..c73494a8ac 100644 --- a/pkg/protocols/mqttprot/response.go +++ b/pkg/protocols/mqttprot/response.go @@ -45,6 +45,7 @@ func (r *Response) IsStream() bool { return false } +// Trailer returns the trailer of the response. 
func (r *Response) Trailer() protocols.Trailer { panic("implement me") } diff --git a/pkg/protocols/protocols.go b/pkg/protocols/protocols.go index 642120920e..3b9560ab30 100644 --- a/pkg/protocols/protocols.go +++ b/pkg/protocols/protocols.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package protocols defines the common interface of protocols used in Easegress. package protocols import ( @@ -135,6 +136,7 @@ type Header interface { Clone() Header } +// Trailer is the trailers of a request or response. type Trailer = Header // Protocol is the interface of a protocol. diff --git a/pkg/registry/registry.go b/pkg/registry/registry.go index ab236c8c9c..25208081db 100644 --- a/pkg/registry/registry.go +++ b/pkg/registry/registry.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package registry is the registry of filters and objects in Easegress. package registry import ( @@ -63,6 +64,7 @@ import ( _ "github.com/megaease/easegress/pkg/object/trafficcontroller" _ "github.com/megaease/easegress/pkg/object/zookeeperserviceregistry" + // Routers _ "github.com/megaease/easegress/pkg/object/httpserver/routers/ordered" _ "github.com/megaease/easegress/pkg/object/httpserver/routers/radixtree" ) diff --git a/pkg/resilience/resilience.go b/pkg/resilience/resilience.go index 991a8977f6..a742e6814a 100644 --- a/pkg/resilience/resilience.go +++ b/pkg/resilience/resilience.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package resilience implements the resilience policies. package resilience import ( diff --git a/pkg/supervisor/supervisor.go b/pkg/supervisor/supervisor.go index 56ab1b7898..1ebc890a3e 100644 --- a/pkg/supervisor/supervisor.go +++ b/pkg/supervisor/supervisor.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package supervisor implements the supervisor of all objects. package supervisor import ( diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go index 15c7c89874..4e3417b244 100644 --- a/pkg/tracing/tracing.go +++ b/pkg/tracing/tracing.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package tracing implements the tracing. package tracing import ( diff --git a/pkg/util/circuitbreaker/circuitbreaker.go b/pkg/util/circuitbreaker/circuitbreaker.go index cde849391e..d04965033b 100644 --- a/pkg/util/circuitbreaker/circuitbreaker.go +++ b/pkg/util/circuitbreaker/circuitbreaker.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package circuitbreaker implements the circuit breaker logic. package circuitbreaker import ( diff --git a/pkg/util/codecounter/codecounter.go b/pkg/util/codecounter/codecounter.go index c73fb35f80..bdd5d7012b 100644 --- a/pkg/util/codecounter/codecounter.go +++ b/pkg/util/codecounter/codecounter.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package codecounter provides a goroutine unsafe HTTP status code counter. package codecounter import "sync/atomic" diff --git a/pkg/util/codectool/codectool.go b/pkg/util/codectool/codectool.go index 39c147f2cf..4d6bedbfe6 100644 --- a/pkg/util/codectool/codectool.go +++ b/pkg/util/codectool/codectool.go @@ -15,6 +15,7 @@ * limitations under the License. */ +// Package codectool provides some codec tools for JSON and YAML marshaling. package codectool import ( diff --git a/pkg/util/dynamicobject/dynamicobject.go b/pkg/util/dynamicobject/dynamicobject.go index c3e5727e40..16deb82dbd 100644 --- a/pkg/util/dynamicobject/dynamicobject.go +++ b/pkg/util/dynamicobject/dynamicobject.go @@ -15,6 +15,7 @@ * limitations under the License. 
  */
 
+// Package dynamicobject provides a dynamic object.
 package dynamicobject
 
 // DynamicObject defines a dynamic object which is a map of string to
diff --git a/pkg/util/easemonitor/easemonitor.go b/pkg/util/easemonitor/easemonitor.go
index 512230a30a..a17cb700c1 100644
--- a/pkg/util/easemonitor/easemonitor.go
+++ b/pkg/util/easemonitor/easemonitor.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package easemonitor provides the common fields and interfaces for EaseMonitor metrics.
 package easemonitor
 
 import (
diff --git a/pkg/util/fasttime/fasttime.go b/pkg/util/fasttime/fasttime.go
index 043719f64f..f46536434d 100644
--- a/pkg/util/fasttime/fasttime.go
+++ b/pkg/util/fasttime/fasttime.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package fasttime provides fast time.Now() and time.Since() and time.Format().
 package fasttime
 
 import (
diff --git a/pkg/util/filterwriter/filterwriter.go b/pkg/util/filterwriter/filterwriter.go
index ec05b87b8b..91352e8e61 100644
--- a/pkg/util/filterwriter/filterwriter.go
+++ b/pkg/util/filterwriter/filterwriter.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package filterwriter provides a filter writer.
 package filterwriter
 
 import (
diff --git a/pkg/util/ipfilter/ipfilter.go b/pkg/util/ipfilter/ipfilter.go
index 4cb18b52dd..d35644ec12 100644
--- a/pkg/util/ipfilter/ipfilter.go
+++ b/pkg/util/ipfilter/ipfilter.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package ipfilter provides IPFilter.
 package ipfilter
 
 import (
diff --git a/pkg/util/jmxtool/common.go b/pkg/util/jmxtool/common.go
index 068a7d7d3f..18d81400eb 100644
--- a/pkg/util/jmxtool/common.go
+++ b/pkg/util/jmxtool/common.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package jmxtool provides some tools for JMX.
 package jmxtool
 
 import (
diff --git a/pkg/util/k8s/k8s.go b/pkg/util/k8s/k8s.go
index 4500bab9fc..8c7dccef8f 100644
--- a/pkg/util/k8s/k8s.go
+++ b/pkg/util/k8s/k8s.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package k8s provides kubernetes utilities.
 package k8s
 
 import (
diff --git a/pkg/util/limitlistener/limitlistener.go b/pkg/util/limitlistener/limitlistener.go
index b91aad1c6e..d003239759 100644
--- a/pkg/util/limitlistener/limitlistener.go
+++ b/pkg/util/limitlistener/limitlistener.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package limitlistener provides a Listener that accepts at most n simultaneous connections.
 package limitlistener
 
 import (
diff --git a/pkg/util/pathadaptor/pathadaptor.go b/pkg/util/pathadaptor/pathadaptor.go
index 1a55950550..66c11e4b51 100644
--- a/pkg/util/pathadaptor/pathadaptor.go
+++ b/pkg/util/pathadaptor/pathadaptor.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package pathadaptor provides a path adaptor.
 package pathadaptor
 
 import (
diff --git a/pkg/util/prometheushelper/helper.go b/pkg/util/prometheushelper/helper.go
index d96cc1b9cb..1a22b16ec1 100644
--- a/pkg/util/prometheushelper/helper.go
+++ b/pkg/util/prometheushelper/helper.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package prometheushelper provides helper functions for prometheus.
 package prometheushelper
 
 import (
diff --git a/pkg/util/ratelimiter/ratelimiter.go b/pkg/util/ratelimiter/ratelimiter.go
index 2650c72da0..71d7d488a6 100644
--- a/pkg/util/ratelimiter/ratelimiter.go
+++ b/pkg/util/ratelimiter/ratelimiter.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package ratelimiter provides a rate limiter.
 package ratelimiter
 
 import (
diff --git a/pkg/util/readers/readerat.go b/pkg/util/readers/readerat.go
index 798d95d1db..b6af28b63b 100644
--- a/pkg/util/readers/readerat.go
+++ b/pkg/util/readers/readerat.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package readers provides several readers.
 package readers
 
 import (
diff --git a/pkg/util/sampler/sampler.go b/pkg/util/sampler/sampler.go
index 47289d39b6..d80fde2c42 100644
--- a/pkg/util/sampler/sampler.go
+++ b/pkg/util/sampler/sampler.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package sampler provides utilities for sampling.
 package sampler
 
 import (
diff --git a/pkg/util/sem/semaphore.go b/pkg/util/sem/semaphore.go
index 5e1a9f989f..a70eca4c12 100644
--- a/pkg/util/sem/semaphore.go
+++ b/pkg/util/sem/semaphore.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package sem provides a semaphore with a max capacity.
 package sem
 
 import (
diff --git a/pkg/util/signer/signer.go b/pkg/util/signer/signer.go
index 96261c6e87..cb502806f6 100644
--- a/pkg/util/signer/signer.go
+++ b/pkg/util/signer/signer.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package signer provides a signer for HTTP requests.
 package signer
 
 import (
diff --git a/pkg/util/stringtool/stringtool.go b/pkg/util/stringtool/stringtool.go
index 5ea60d506e..e0ec90b332 100644
--- a/pkg/util/stringtool/stringtool.go
+++ b/pkg/util/stringtool/stringtool.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package stringtool provides string utilities.
 package stringtool
 
 import (
@@ -127,6 +128,7 @@ func (sm *StringMatcher) Validate() error {
 	return fmt.Errorf("all patterns are empty")
 }
 
+// Init initializes the StringMatcher.
 func (sm *StringMatcher) Init() {
 	if sm.RegEx != "" {
 		sm.re = regexp.MustCompile(sm.RegEx)
diff --git a/pkg/util/timetool/distributedtimer.go b/pkg/util/timetool/distributedtimer.go
index 753959d897..f77d5e0b99 100644
--- a/pkg/util/timetool/distributedtimer.go
+++ b/pkg/util/timetool/distributedtimer.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package timetool provides time utilities.
 package timetool
 
 import (
diff --git a/pkg/util/urlclusteranalyzer/urlclusteranalyzer.go b/pkg/util/urlclusteranalyzer/urlclusteranalyzer.go
index 5186dd9a1e..e81975fd0c 100644
--- a/pkg/util/urlclusteranalyzer/urlclusteranalyzer.go
+++ b/pkg/util/urlclusteranalyzer/urlclusteranalyzer.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package urlclusteranalyzer provides url cluster analyzer.
 package urlclusteranalyzer
 
 import (
diff --git a/pkg/util/urlrule/urlrule.go b/pkg/util/urlrule/urlrule.go
index 310c3880e5..8aee11dc5a 100644
--- a/pkg/util/urlrule/urlrule.go
+++ b/pkg/util/urlrule/urlrule.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package urlrule implements the match rule for HTTP requests.
 package urlrule
 
 import (
diff --git a/pkg/v/v.go b/pkg/v/v.go
index 47c991dfd4..60a5aa87c3 100644
--- a/pkg/v/v.go
+++ b/pkg/v/v.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package v implements the common validation logic of Easegress.
 package v
 
 import (
diff --git a/pkg/version/version.go b/pkg/version/version.go
index b7b4b2ac7c..87e4680016 100644
--- a/pkg/version/version.go
+++ b/pkg/version/version.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package version defines the version of Easegress.
 package version
 
 import "fmt"

From 23f395934a5c707b7dee7cb35c25f642fa82a756 Mon Sep 17 00:00:00 2001
From: Bomin Zhang
Date: Fri, 17 Feb 2023 09:14:34 +0800
Subject: [PATCH 14/14] more fixes

---
 pkg/filters/proxies/grpcproxy/proxy.go | 1 +
 pkg/filters/proxies/httpproxy/proxy.go | 1 +
 pkg/filters/proxies/serverpool.go      | 1 +
 3 files changed, 3 insertions(+)

diff --git a/pkg/filters/proxies/grpcproxy/proxy.go b/pkg/filters/proxies/grpcproxy/proxy.go
index 2dbeed4cf9..b2d72c94e1 100644
--- a/pkg/filters/proxies/grpcproxy/proxy.go
+++ b/pkg/filters/proxies/grpcproxy/proxy.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package grpcproxy provides the proxy filter of gRPC.
 package grpcproxy
 
 import (
diff --git a/pkg/filters/proxies/httpproxy/proxy.go b/pkg/filters/proxies/httpproxy/proxy.go
index 5ca875375b..19add5812e 100644
--- a/pkg/filters/proxies/httpproxy/proxy.go
+++ b/pkg/filters/proxies/httpproxy/proxy.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package httpproxy provides the Proxy of HTTP.
 package httpproxy
 
 import (
diff --git a/pkg/filters/proxies/serverpool.go b/pkg/filters/proxies/serverpool.go
index 0e41cf1893..05abed5234 100644
--- a/pkg/filters/proxies/serverpool.go
+++ b/pkg/filters/proxies/serverpool.go
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+// Package proxies provides the common interface and implementation of proxies.
 package proxies
 
 import (