diff --git a/cmd/client/commandv2/convert.go b/cmd/client/commandv2/convert.go new file mode 100644 index 0000000000..1dc6a2bc21 --- /dev/null +++ b/cmd/client/commandv2/convert.go @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package commandv2 + +import ( + "github.com/spf13/cobra" + + "github.com/megaease/easegress/v2/cmd/client/commandv2/convert/nginx" +) + +// ConvertCmd returns convert command. +func ConvertCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "convert", + Short: "Convert other kinds of config to Easegress yaml file", + } + cmd.AddCommand(nginx.Cmd()) + return cmd +} diff --git a/cmd/client/commandv2/convert/nginx/cmd.go b/cmd/client/commandv2/convert/nginx/cmd.go new file mode 100644 index 0000000000..ece278396b --- /dev/null +++ b/cmd/client/commandv2/convert/nginx/cmd.go @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package nginx + +import ( + "fmt" + "math/rand" + "os" + "path/filepath" + + "github.com/megaease/easegress/v2/cmd/client/commandv2/specs" + "github.com/megaease/easegress/v2/cmd/client/general" + "github.com/megaease/easegress/v2/pkg/util/codectool" + crossplane "github.com/nginxinc/nginx-go-crossplane" + "github.com/spf13/cobra" +) + +// Options contains the options for convert nginx.conf. +type Options struct { + NginxConf string + Output string + ResourcePrefix string + usedNames map[string]struct{} +} + +// Cmd returns convert nginx.conf command. 
+func Cmd() *cobra.Command {
+	flags := &Options{}
+	flags.init()
+	examples := []general.Example{
+		{
+			Desc:    "Convert nginx config to easegress yamls",
+			Command: "egctl convert nginx -f <nginx.conf> -o <output.yaml> --resource-prefix <prefix>",
+		},
+	}
+	cmd := &cobra.Command{
+		Use:     "nginx",
+		Short:   "Convert nginx.conf to easegress yaml file",
+		Example: general.CreateMultiExample(examples),
+		Args: func(cmd *cobra.Command, args []string) error {
+			if flags.NginxConf == "" {
+				return fmt.Errorf("nginx.conf file path is required")
+			}
+			if flags.Output == "" {
+				return fmt.Errorf("output yaml file path is required")
+			}
+			if flags.ResourcePrefix == "" {
+				return fmt.Errorf("resource prefix is required")
+			}
+			return nil
+		},
+		Run: func(cmd *cobra.Command, args []string) {
+			payload, err := crossplane.Parse(flags.NginxConf, &crossplane.ParseOptions{})
+			if err != nil {
+				general.ExitWithErrorf("parse nginx.conf failed: %v", err)
+			}
+			for _, e := range payload.Errors {
+				general.Warnf("parse nginx.conf error: %v in %s of %s", e.Error, e.Line, e.File)
+			}
+			config, err := parsePayload(payload)
+			if err != nil {
+				general.ExitWithError(err)
+			}
+			hs, pls, err := convertConfig(flags, config)
+			if err != nil {
+				general.ExitWithError(err)
+			}
+			if err := writeYaml(flags.Output, hs, pls); err != nil {
+				general.ExitWithError(err)
+			}
+		},
+	}
+	cmd.Flags().StringVarP(&flags.NginxConf, "file", "f", "", "nginx.conf file path")
+	cmd.Flags().StringVarP(&flags.Output, "output", "o", "", "output yaml file path")
+	cmd.Flags().StringVar(&flags.ResourcePrefix, "resource-prefix", "nginx", "prefix of output yaml resources")
+	return cmd
+}
+
+func (opt *Options) init() {
+	opt.usedNames = make(map[string]struct{})
+	opt.usedNames[""] = struct{}{}
+}
+
+// GetPipelineName creates a globally unique name for the pipeline based on the path.
+func (opt *Options) GetPipelineName(path string) string {
+	letters := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+	nameRunes := make([]rune, 0)
+	for _, r := range path {
+		if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') {
+			nameRunes = append(nameRunes, r)
+		}
+	}
+	name := string(nameRunes)
+	if _, ok := opt.usedNames[name]; !ok {
+		opt.usedNames[name] = struct{}{}
+		return fmt.Sprintf("%s-%s", opt.ResourcePrefix, name)
+	}
+	for i := 0; i < 8; i++ {
+		nameRunes = append(nameRunes, letters[rand.Intn(len(letters))])
+	}
+	name = string(nameRunes)
+	if _, ok := opt.usedNames[name]; !ok {
+		opt.usedNames[name] = struct{}{}
+		return fmt.Sprintf("%s-%s", opt.ResourcePrefix, name)
+	}
+	return opt.GetPipelineName(path)
+}
+
+func writeYaml(filename string, servers []*specs.HTTPServerSpec, pipelines []*specs.PipelineSpec) error {
+	absPath, err := filepath.Abs(filename)
+	if err != nil {
+		return err
+	}
+	file, err := os.Create(absPath)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	for _, s := range servers {
+		data, err := codectool.MarshalYAML(s)
+		if err != nil {
+			return err
+		}
+		file.WriteString(string(data))
+		file.WriteString("\n---\n")
+	}
+	for _, p := range pipelines {
+		data, err := codectool.MarshalYAML(p)
+		if err != nil {
+			return err
+		}
+		file.WriteString(string(data))
+		file.WriteString("\n---\n")
+	}
+	file.Sync()
+	return nil
+}
diff --git a/cmd/client/commandv2/convert/nginx/cmd_test.go b/cmd/client/commandv2/convert/nginx/cmd_test.go
new file mode 100644
index 0000000000..dc6c93f1ab
--- /dev/null
+++ b/cmd/client/commandv2/convert/nginx/cmd_test.go
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2017, MegaEase
+ * All rights reserved.
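For reference, a minimal standalone sketch of the naming rule `GetPipelineName` applies above, assuming a resource prefix of `test` as in the tests that follow; the `sanitize` helper and `main` wrapper are illustrative only and not part of this diff:

```go
package main

import "fmt"

// sanitize mirrors the character filter in Options.GetPipelineName: only ASCII
// letters and digits survive, everything else in the location path is dropped.
// The real method then prepends the resource prefix and, if the sanitized name
// was already used, appends eight random letters before trying again.
func sanitize(path string) string {
	out := make([]rune, 0, len(path))
	for _, r := range path {
		if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') {
			out = append(out, r)
		}
	}
	return string(out)
}

func main() {
	fmt.Println(sanitize("/user"))     // "user"   -> pipeline "test-user"
	fmt.Println(sanitize("/apis/v1"))  // "apisv1" -> pipeline "test-apisv1"
	fmt.Println(sanitize("/apis/v1/")) // "apisv1" again -> collides, gets a random suffix
}
```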
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package nginx + +import ( + "io" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCmd(t *testing.T) { + cmd := Cmd() + assert.NotNil(t, cmd) + cmd.ParseFlags([]string{""}) + assert.NotNil(t, cmd.Args(cmd, []string{})) + + cmd.ParseFlags([]string{"-f", "test.conf"}) + assert.NotNil(t, cmd.Args(cmd, []string{})) + + cmd.ParseFlags([]string{"-o", "test.yaml"}) + assert.Nil(t, cmd.Args(cmd, []string{})) + + cmd.ParseFlags([]string{"--resource-prefix", "test"}) + assert.Nil(t, cmd.Args(cmd, []string{})) + + tempDir := newTempTestDir(t) + defer tempDir.Clean() + + nginxConf := ` + events {} + http { + server { + listen 127.0.0.1:8080; + + location = /user { + proxy_pass http://localhost:9999; + } + } + } + ` + nginxFile := tempDir.Create("nginx.conf", []byte(nginxConf)) + outputFile := tempDir.Create("test.yaml", []byte("")) + cmd.ParseFlags([]string{"-f", nginxFile, "-o", outputFile, "--prefix", "test"}) + cmd.Run(cmd, []string{}) + file, err := os.Open(outputFile) + assert.Nil(t, err) + defer file.Close() + data, err := io.ReadAll(file) + assert.Nil(t, err) + assert.Contains(t, string(data), "test-8080") + assert.Contains(t, string(data), "test-user") +} + +func TestOption(t *testing.T) { + option := &Options{ + NginxConf: "test.conf", + Output: "test.yaml", + ResourcePrefix: "test", + } + option.init() + path := option.GetPipelineName("/user") + assert.Equal(t, "test-user", path) + path = option.GetPipelineName("/apis/v1") + assert.Equal(t, "test-apisv1", path) + + path = option.GetPipelineName("/apis/v1/") + assert.Contains(t, path, "test-apisv1") + assert.NotEqual(t, "test-apisv1", path) +} diff --git a/cmd/client/commandv2/convert/nginx/convert.go b/cmd/client/commandv2/convert/nginx/convert.go new file mode 100644 index 0000000000..7eec300c16 --- /dev/null +++ b/cmd/client/commandv2/convert/nginx/convert.go @@ -0,0 +1,256 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package nginx + +import ( + "fmt" + "regexp" + "sort" + "strings" + + "github.com/megaease/easegress/v2/cmd/client/commandv2/specs" + "github.com/megaease/easegress/v2/cmd/client/general" + "github.com/megaease/easegress/v2/pkg/filters" + "github.com/megaease/easegress/v2/pkg/filters/builder" + "github.com/megaease/easegress/v2/pkg/filters/proxies" + "github.com/megaease/easegress/v2/pkg/filters/proxies/httpproxy" + "github.com/megaease/easegress/v2/pkg/object/httpserver/routers" + "github.com/megaease/easegress/v2/pkg/protocols/httpprot/httpheader" + "github.com/megaease/easegress/v2/pkg/util/codectool" +) + +func convertConfig(options *Options, config *Config) ([]*specs.HTTPServerSpec, []*specs.PipelineSpec, error) { + httpServers := make([]*specs.HTTPServerSpec, 0) + pipelines := make([]*specs.PipelineSpec, 0) + for _, server := range config.Servers { + s, p, err := convertServer(options, server) + if err != nil { + return nil, nil, err + } + httpServers = append(httpServers, s) + pipelines = append(pipelines, p...) + } + return httpServers, pipelines, nil +} + +func convertServer(options *Options, server *Server) (*specs.HTTPServerSpec, []*specs.PipelineSpec, error) { + pipelines := make([]*specs.PipelineSpec, 0) + httpServer := specs.NewHTTPServerSpec(fmt.Sprintf("%s-%d", options.ResourcePrefix, server.Port)) + httpServer = convertServerBase(httpServer, server.ServerBase) + + httpServer.Rules = make([]*routers.Rule, 0) + for _, rule := range server.Rules { + routerRule, pls := convertRule(options, rule) + httpServer.Rules = append(httpServer.Rules, routerRule) + pipelines = append(pipelines, pls...) + } + return httpServer, pipelines, nil +} + +func convertServerBase(spec *specs.HTTPServerSpec, base ServerBase) *specs.HTTPServerSpec { + spec.Port = uint16(base.Port) + spec.Address = base.Address + spec.HTTPS = base.HTTPS + spec.CaCertBase64 = base.CaCert + for k, v := range base.Certs { + spec.Certs[k] = v + } + for k, v := range base.Keys { + spec.Keys[k] = v + } + return spec +} + +// convertRule converts nginx conf to easegress rule. +// exact path > prefix path > regexp path. +// prefix path should be sorted by path length. +func convertRule(options *Options, rule *Rule) (*routers.Rule, []*specs.PipelineSpec) { + router := &routers.Rule{ + Hosts: make([]routers.Host, 0), + Paths: make([]*routers.Path, 0), + } + for _, h := range rule.Hosts { + router.Hosts = append(router.Hosts, routers.Host{ + Value: h.Value, + IsRegexp: h.IsRegexp, + }) + } + + pipelines := make([]*specs.PipelineSpec, 0) + exactPaths := make([]*routers.Path, 0) + prefixPaths := make([]*routers.Path, 0) + rePaths := make([]*routers.Path, 0) + for _, p := range rule.Paths { + name := options.GetPipelineName(p.Path) + path := &routers.Path{ + Backend: name, + } + // path for websocket should not limit body size. 
+ if isWebsocket(p.Backend) { + path.ClientMaxBodySize = -1 + } + switch p.Type { + case PathTypeExact: + path.Path = p.Path + exactPaths = append(exactPaths, path) + case PathTypePrefix: + path.PathPrefix = p.Path + prefixPaths = append(prefixPaths, path) + case PathTypeRe: + path.PathRegexp = p.Path + rePaths = append(rePaths, path) + case PathTypeReInsensitive: + path.PathRegexp = fmt.Sprintf("(?i)%s", p.Path) + rePaths = append(rePaths, path) + default: + general.Warnf("unknown path type: %s", p.Type) + } + pipelineSpec := convertProxy(name, p.Backend) + pipelines = append(pipelines, pipelineSpec) + } + sort.Slice(prefixPaths, func(i, j int) bool { + return len(prefixPaths[i].PathPrefix) > len(prefixPaths[j].PathPrefix) + }) + router.Paths = append(router.Paths, exactPaths...) + router.Paths = append(router.Paths, prefixPaths...) + router.Paths = append(router.Paths, rePaths...) + return router, pipelines +} + +func convertProxy(name string, info *ProxyInfo) *specs.PipelineSpec { + pipeline := specs.NewPipelineSpec(name) + + flow := make([]filters.Spec, 0) + if len(info.SetHeaders) != 0 { + adaptor := getRequestAdaptor(info) + if adaptor != nil { + flow = append(flow, adaptor) + } + } + + var proxy filters.Spec + if isWebsocket(info) { + proxy = getWebsocketFilter(info) + } else { + proxy = getProxyFilter(info) + } + flow = append(flow, proxy) + pipeline.SetFilters(flow) + return pipeline +} + +var nginxEmbeddedVarRe *regexp.Regexp +var nginxToTemplateMap = map[string]string{ + "$host": ".req.Host", + "$hostname": ".req.Host", + "$content_length": `header .req.Header "Content-Length"`, + "$content_type": `header .req.Header "Content-Type"`, + "$remote_addr": ".req.RemoteAddr", + "$remote_user": "username .req", + "$request_body": ".req.Body", + "$request_method": ".req.Method", + "$request_uri": ".req.RequestURI", + "$scheme": ".req.URL.Scheme", +} + +func translateNginxEmbeddedVar(s string) string { + if nginxEmbeddedVarRe == nil { + nginxEmbeddedVarRe = regexp.MustCompile(`\$[a-zA-Z0-9_]+`) + } + return nginxEmbeddedVarRe.ReplaceAllStringFunc(s, func(s string) string { + newValue := nginxToTemplateMap[s] + if newValue == "" { + newValue = s + msg := "nginx embedded value %s is not supported now, " + msg += "please check easegress RequestAdaptor filter for more information about template." 
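To make the template translation above concrete, here is a self-contained sketch of the same substitution using a subset of `nginxToTemplateMap`; the `mapping` variable and `main` wrapper are illustrative, not part of this diff:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Subset of nginxToTemplateMap: nginx $variables that translate into
	// Go-template expressions consumed by the RequestAdaptor filter.
	mapping := map[string]string{
		"$host":        ".req.Host",
		"$request_uri": ".req.RequestURI",
		"$scheme":      ".req.URL.Scheme",
	}
	re := regexp.MustCompile(`\$[a-zA-Z0-9_]+`)

	in := "$scheme://$host$request_uri"
	out := re.ReplaceAllStringFunc(in, func(v string) string {
		if t, ok := mapping[v]; ok {
			return fmt.Sprintf("{{ %s }}", t)
		}
		// Unknown variables are wrapped unchanged (the converter also warns).
		return fmt.Sprintf("{{ %s }}", v)
	})
	fmt.Println(out)
	// Output: {{ .req.URL.Scheme }}://{{ .req.Host }}{{ .req.RequestURI }}
}
```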
+ general.Warnf(msg, s) + } + return fmt.Sprintf("{{ %s }}", newValue) + }) +} + +func getRequestAdaptor(info *ProxyInfo) *builder.RequestAdaptorSpec { + spec := specs.NewRequestAdaptorFilterSpec("request-adaptor") + template := &builder.RequestAdaptorTemplate{ + Header: &httpheader.AdaptSpec{ + Set: make(map[string]string), + }, + } + for k, v := range info.SetHeaders { + if k == "Upgrade" || k == "Connection" { + continue + } + template.Header.Set[k] = translateNginxEmbeddedVar(v) + } + if len(template.Header.Set) == 0 { + return nil + } + data := codectool.MustMarshalYAML(template) + spec.Template = string(data) + return spec +} + +func isWebsocket(info *ProxyInfo) bool { + return info.SetHeaders["Upgrade"] != "" && info.SetHeaders["Connection"] != "" +} + +func getWebsocketFilter(info *ProxyInfo) *httpproxy.WebSocketProxySpec { + for i, s := range info.Servers { + s.Server = strings.Replace(s.Server, "http://", "ws://", 1) + s.Server = strings.Replace(s.Server, "https://", "wss://", 1) + info.Servers[i] = s + } + spec := specs.NewWebsocketFilterSpec("websocket") + spec.Pools = []*httpproxy.WebSocketServerPoolSpec{{ + BaseServerPoolSpec: getBaseServerPool(info), + }} + return spec +} + +func getProxyFilter(info *ProxyInfo) *httpproxy.Spec { + spec := specs.NewProxyFilterSpec("proxy") + if info.GzipMinLength != 0 { + spec.Compression = &httpproxy.CompressionSpec{ + MinLength: uint32(info.GzipMinLength), + } + } + spec.Pools = []*httpproxy.ServerPoolSpec{{ + BaseServerPoolSpec: getBaseServerPool(info), + }} + return spec +} + +func getBaseServerPool(info *ProxyInfo) httpproxy.BaseServerPoolSpec { + servers := make([]*proxies.Server, len(info.Servers)) + policy := proxies.LoadBalancePolicyRoundRobin + for i, s := range info.Servers { + servers[i] = &proxies.Server{ + URL: s.Server, + Weight: s.Weight, + } + if s.Weight != 1 { + policy = proxies.LoadBalancePolicyWeightedRandom + } + } + return httpproxy.BaseServerPoolSpec{ + Servers: servers, + LoadBalance: &proxies.LoadBalanceSpec{ + Policy: policy, + }, + } +} diff --git a/cmd/client/commandv2/convert/nginx/convert_test.go b/cmd/client/commandv2/convert/nginx/convert_test.go new file mode 100644 index 0000000000..4063e92b1b --- /dev/null +++ b/cmd/client/commandv2/convert/nginx/convert_test.go @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
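The load-balance policy selection in `getBaseServerPool` is easy to miss; this standalone sketch restates it (the `backend` type and `pickPolicy` helper are illustrative, not part of this diff):

```go
package main

import "fmt"

type backend struct {
	URL    string
	Weight int
}

// pickPolicy restates the rule in getBaseServerPool: roundRobin by default,
// but any server whose weight is not 1 switches the pool to weightedRandom.
func pickPolicy(servers []backend) string {
	policy := "roundRobin"
	for _, s := range servers {
		if s.Weight != 1 {
			policy = "weightedRandom"
		}
	}
	return policy
}

func main() {
	fmt.Println(pickPolicy([]backend{{"http://localhost:9999", 1}}))
	// roundRobin
	fmt.Println(pickPolicy([]backend{{"http://localhost:8880", 1}, {"http://localhost:8881", 2}}))
	// weightedRandom
}
```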
+ */ + +package nginx + +import ( + "fmt" + "net/http" + "strings" + "testing" + + "github.com/megaease/easegress/v2/cmd/client/commandv2/specs" + "github.com/megaease/easegress/v2/pkg/filters" + "github.com/megaease/easegress/v2/pkg/filters/builder" + "github.com/megaease/easegress/v2/pkg/protocols/httpprot" + "github.com/megaease/easegress/v2/pkg/util/codectool" + "github.com/stretchr/testify/assert" +) + +func TestGetRequestAdaptor(t *testing.T) { + assert := assert.New(t) + req, err := http.NewRequest("GET", "http://example.com", strings.NewReader("test")) + req.Header.Set("Content-Length", "4") + req.Header.Set("Content-Type", "text/plain") + req.SetBasicAuth("user", "pass") + req.RemoteAddr = "localhost:8080" + req.RequestURI = "/apis/v1" + assert.Nil(err) + ctx := newContext(t, req) + info := &ProxyInfo{ + SetHeaders: map[string]string{ + "X-Host": "$host", + "X-Hostname": "$hostname", + "X-Content": "$content_length", + "X-Content-Type": "$content_type", + "X-Remote-Addr": "$remote_addr", + "X-Remote-User": "$remote_user", + "X-Request-Body": "$request_body", + "X-Method": "$request_method", + "X-Request-URI": "$request_uri", + "X-Scheme": "$scheme", + }, + } + spec := getRequestAdaptor(info) + ra := filters.GetKind(builder.RequestAdaptorKind).CreateInstance(spec) + ra.Init() + ra.Handle(ctx) + h := ctx.GetInputRequest().(*httpprot.Request).Header() + expected := map[string]string{ + "X-Host": "example.com", + "X-Hostname": "example.com", + "X-Content": "4", + "X-Content-Type": "text/plain", + "X-Remote-Addr": "localhost:8080", + "X-Remote-User": "user", + "X-Request-Body": "test", + "X-Method": "GET", + "X-Request-URI": "/apis/v1", + "X-Scheme": "http", + } + for k, v := range expected { + assert.Equal(v, h.Get(k), fmt.Sprintf("header %s", k)) + } +} + +func TestConvertConfig(t *testing.T) { + options := &Options{ + ResourcePrefix: "test-convert", + } + options.init() + conf := ` +servers: +- port: 8080 + address: localhost + https: true + caCert: caCertBase64Str + certs: + cert1: cert1Base64Str + keys: + cert1: key1Base64Str + rules: + - hosts: + - value: www.example.com + isRegexp: false + - isRegexp: true + value: '.*\.example\.com' + paths: + - path: /apis + type: prefix + backend: + servers: + - server: http://localhost:8880 + weight: 1 + - server: http://localhost:8881 + weight: 2 + setHeaders: + X-Path: apis + gzipMinLength: 1000 + - path: /exact + type: exact + backend: + servers: + - server: http://localhost:9999 + weight: 1 + - path: /regexp + type: regexp + backend: + servers: + - server: http://localhost:7777 + weight: 1 + - path: /case-insensitive-regexp + type: caseInsensitiveRegexp + backend: + servers: + - server: http://localhost:6666 + weight: 1 + - path: /websocket + type: prefix + backend: + servers: + - server: https://localhost:9090 + weight: 1 + setHeaders: + Connection: $connection_upgrade + Upgrade: $http_upgrade +` + config := &Config{} + err := codectool.Unmarshal([]byte(conf), config) + assert.Nil(t, err) + httpServers, pipelines, err := convertConfig(options, config) + assert.Nil(t, err) + assert.Equal(t, 1, len(httpServers)) + assert.Equal(t, 5, len(pipelines)) + serverYaml := ` +name: test-convert-8080 +kind: HTTPServer +https: true +caCertBase64: caCertBase64Str +certs: + cert1: cert1Base64Str +keys: + cert1: key1Base64Str +port: 8080 +address: localhost +rules: +- hosts: + - value: www.example.com + isRegexp: false + - isRegexp: true + value: '.*\.example\.com' + paths: + - path: /exact + backend: test-convert-exact + - pathPrefix: /websocket + 
backend: test-convert-websocket + clientMaxBodySize: -1 + - pathPrefix: /apis + backend: test-convert-apis + - pathRegexp: /regexp + backend: test-convert-regexp + - pathRegexp: (?i)/case-insensitive-regexp + backend: test-convert-caseinsensitiveregexp +` + expected := specs.NewHTTPServerSpec("test-convert-8080") + err = codectool.UnmarshalYAML([]byte(serverYaml), expected) + assert.Nil(t, err) + assert.Equal(t, expected, httpServers[0]) + + pipelineApis := ` +name: test-convert-apis +kind: Pipeline +filters: + - kind: RequestAdaptor + name: request-adaptor + template: | + header: + set: + X-Path: apis + - compression: + minLength: 1000 + kind: Proxy + name: proxy + pools: + - loadBalance: + policy: weightedRandom + servers: + - url: http://localhost:8880 + weight: 1 + - url: http://localhost:8881 + weight: 2 +` + pipelineExact := ` +name: test-convert-exact +kind: Pipeline +filters: + - kind: Proxy + name: proxy + pools: + - loadBalance: + policy: roundRobin + servers: + - url: http://localhost:9999 + weight: 1 +` + pipelineRegexp := ` +name: test-convert-regexp +kind: Pipeline +filters: + - kind: Proxy + name: proxy + pools: + - loadBalance: + policy: roundRobin + servers: + - url: http://localhost:7777 + weight: 1 +` + pipelineCIReg := ` +name: test-convert-caseinsensitiveregexp +kind: Pipeline +filters: + - kind: Proxy + name: proxy + pools: + - loadBalance: + policy: roundRobin + servers: + - url: http://localhost:6666 + weight: 1 +` + pipelineWebsocket := ` +name: test-convert-websocket +kind: Pipeline +filters: + - kind: WebSocketProxy + name: websocket + pools: + - loadBalance: + policy: roundRobin + servers: + - url: wss://localhost:9090 + weight: 1 +` + for i, yamlStr := range []string{pipelineApis, pipelineExact, pipelineRegexp, pipelineCIReg, pipelineWebsocket} { + spec := specs.NewPipelineSpec("") + err = codectool.UnmarshalYAML([]byte(yamlStr), spec) + assert.Nil(t, err, i) + for j, f := range spec.Filters { + compareFilter(t, f, pipelines[i].Filters[j], fmt.Sprintf("%d filter in %d pipeline", j, i)) + } + } +} + +func compareFilter(t *testing.T, f1 map[string]interface{}, f2 map[string]interface{}, msg string) { + d1, err := codectool.MarshalYAML(f1) + assert.Nil(t, err, msg) + d2, err := codectool.MarshalYAML(f2) + assert.Nil(t, err, msg) + + var specFn func() interface{} + switch f1["kind"] { + case "Proxy": + specFn = func() interface{} { + return specs.NewProxyFilterSpec("") + } + case "RequestAdaptor": + specFn = func() interface{} { + return specs.NewRequestAdaptorFilterSpec("") + } + case "WebSocketProxy": + specFn = func() interface{} { + return specs.NewWebsocketFilterSpec("") + } + default: + t.Errorf("filter kind %s is not compared", f1["kind"]) + return + } + s1 := specFn() + err = codectool.UnmarshalYAML(d1, s1) + assert.Nil(t, err, msg) + s2 := specFn() + err = codectool.Unmarshal(d2, s2) + assert.Nil(t, err, msg) + assert.Equal(t, s1, s2) +} diff --git a/cmd/client/commandv2/convert/nginx/env.go b/cmd/client/commandv2/convert/nginx/env.go new file mode 100644 index 0000000000..344763a2e4 --- /dev/null +++ b/cmd/client/commandv2/convert/nginx/env.go @@ -0,0 +1,448 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package nginx + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "regexp" + "strconv" + "strings" + + "github.com/megaease/easegress/v2/cmd/client/general" +) + +var nginxBackends = map[string]struct{}{ + "root": {}, + "return": {}, + "rewrite": {}, + "try_files": {}, + "error_page": {}, + "proxy_pass": {}, + "fastcgi_pass": {}, + "uwsgi_pass": {}, + "scgi_pass": {}, + "memcached_pass": {}, + "grpc_pass": {}, +} + +// Env is the environment for converting. +// Server is the server environment. Used to create HTTPServer or in future GRPCServer. +// Proxy is the proxy environment. Used to create Pipeline. +type Env struct { + Server *ServerEnv `json:"server"` + Proxy *ProxyEnv `json:"proxy"` + Upstream []*Directive `json:"upstream"` + + updateFn map[string]func(*Directive) `json:"-"` +} + +// ServerEnv is the environment for creating server. +// Listen contains the listen address, port and protocol. +// ServerName contains the server name (hosts in easegress). +type ServerEnv struct { + Listen *Directive `json:"listen"` + ServerName *Directive `json:"server_name"` + SSLClientCertificate *Directive `json:"ssl_client_certificate"` + SSLCertificate []*Directive `json:"ssl_certificate"` + SSLCertificateKey []*Directive `json:"ssl_certificate_key"` +} + +// ProxyEnv is the environment for creating proxy. +type ProxyEnv struct { + Pass *Directive `json:"pass"` + ProxySetHeader []*Directive `json:"proxy_set_header"` + Gzip *GzipEnv `json:"gzip"` +} + +// GzipEnv is the environment for creating gzip. +type GzipEnv struct { + Gzip *Directive `json:"gzip"` + GzipMinLength *Directive `json:"gzip_min_length"` +} + +func (env *Env) init() { + env.updateFn = map[string]func(*Directive){ + "listen": func(d *Directive) { env.Server.Listen = d }, + "server_name": func(d *Directive) { env.Server.ServerName = d }, + "ssl_client_certificate": func(d *Directive) { env.Server.SSLClientCertificate = d }, + "ssl_certificate": func(d *Directive) { env.Server.SSLCertificate = append(env.Server.SSLCertificate, d) }, + "ssl_certificate_key": func(d *Directive) { env.Server.SSLCertificateKey = append(env.Server.SSLCertificateKey, d) }, + "proxy_set_header": func(d *Directive) { env.Proxy.ProxySetHeader = append(env.Proxy.ProxySetHeader, d) }, + "upstream": func(d *Directive) { env.Upstream = append(env.Upstream, d) }, + "gzip": func(d *Directive) { env.Proxy.Gzip.Gzip = d }, + "gzip_min_length": func(d *Directive) { env.Proxy.Gzip.GzipMinLength = d }, + } +} + +func newEnv() *Env { + env := &Env{ + Server: &ServerEnv{ + SSLCertificate: make([]*Directive, 0), + SSLCertificateKey: make([]*Directive, 0), + }, + Proxy: &ProxyEnv{ + ProxySetHeader: make([]*Directive, 0), + Gzip: &GzipEnv{}, + }, + Upstream: make([]*Directive, 0), + } + env.init() + return env +} + +// Clone clones the environment. 
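The `updateFn` table above drives how directives flow into the conversion environment; the following standalone sketch mirrors `Env.Update`'s dispatch (the `directive` type and the `handlers`/`backends` maps here are simplified stand-ins, not the real types from this diff):

```go
package main

import "fmt"

type directive struct {
	name string
	args []string
}

func main() {
	var listen, pass *directive

	// Directives with a registered handler update the server/proxy environment.
	handlers := map[string]func(*directive){
		"listen": func(d *directive) { listen = d },
	}
	// Directives listed in nginxBackends (proxy_pass, return, try_files, ...)
	// become the candidate backend for the enclosing location.
	backends := map[string]bool{"proxy_pass": true, "return": true, "try_files": true}

	update := func(d *directive) {
		if fn, ok := handlers[d.name]; ok {
			fn(d)
			return
		}
		if backends[d.name] {
			pass = d
		}
		// Everything else is ignored.
	}

	update(&directive{"listen", []string{"127.0.0.1:8080"}})
	update(&directive{"proxy_pass", []string{"http://localhost:9999"}})
	update(&directive{"worker_connections", []string{"1024"}})

	fmt.Println(listen.args, pass.args) // [127.0.0.1:8080] [http://localhost:9999]
}
```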
+func (env *Env) Clone() (*Env, error) { + data, err := json.Marshal(env) + if err != nil { + return nil, err + } + var newEnv Env + err = json.Unmarshal(data, &newEnv) + if err != nil { + return nil, err + } + newEnv.init() + return &newEnv, nil +} + +// MustClone clones the environment. +func (env *Env) MustClone() *Env { + newEnv, err := env.Clone() + if err != nil { + panic(err) + } + return newEnv +} + +// Update updates the environment. +func (env *Env) Update(d *Directive) { + fn, ok := env.updateFn[d.Directive] + if ok { + fn(d) + return + } + _, ok = nginxBackends[d.Directive] + if ok { + env.Proxy.Pass = d + } +} + +// GetServerInfo gets the server info from environment. +func (env *Env) GetServerInfo() (*ServerInfo, error) { + info := &ServerInfo{} + info.Port = 80 + + s := env.Server + if s.Listen != nil { + address, port, https, err := processListen(s.Listen) + if err != nil { + return nil, err + } + info.Address = address + info.Port = port + info.HTTPS = https + } + + if s.ServerName != nil { + serverName := s.ServerName + hosts, err := processServerName(serverName) + if err != nil { + return nil, err + } + info.Hosts = hosts + } + + if s.SSLClientCertificate != nil { + cert := s.SSLClientCertificate + mustContainArgs(cert, 1) + caCert, err := loadCert(cert.Args[0]) + if err != nil { + return nil, fmt.Errorf("%s: %v", directiveInfo(cert), err) + } + info.CaCert = caCert + } + + if len(s.SSLCertificate) > 0 { + certs, keys, err := processSSLCertificates(s.SSLCertificate, s.SSLCertificateKey) + if err != nil { + return nil, err + } + info.Certs = certs + info.Keys = keys + } + return info, nil +} + +// GetProxyInfo gets the proxy info from environment. +func (env *Env) GetProxyInfo() (*ProxyInfo, error) { + p := env.Proxy + if p.Pass == nil || p.Pass.Directive != "proxy_pass" { + return nil, errors.New("no proxy_pass found") + } + servers, err := processProxyPass(p.Pass, env) + if err != nil { + return nil, err + } + + var setHeaders map[string]string + if len(p.ProxySetHeader) > 0 { + setHeaders, err = processProxySetHeader(p.ProxySetHeader) + if err != nil { + return nil, err + } + } + + gzipMinLength := processGzip(p.Gzip) + + return &ProxyInfo{ + Servers: servers, + SetHeaders: setHeaders, + GzipMinLength: gzipMinLength, + }, nil +} + +func processGzip(gzip *GzipEnv) int { + if gzip.Gzip == nil { + return 0 + } + mustContainArgs(gzip.Gzip, 1) + if gzip.Gzip.Args[0] != "on" { + return 0 + } + if gzip.GzipMinLength == nil { + // nginx default value + return 20 + } + mustContainArgs(gzip.GzipMinLength, 1) + minLength, err := strconv.Atoi(gzip.GzipMinLength.Args[0]) + if err != nil { + general.Warnf("%s: invalid number %v, use default value of 20 instead", directiveInfo(gzip.GzipMinLength), err) + return 20 + } + if minLength < 0 { + general.Warnf("%s: negative number, use default value of 20 instead", directiveInfo(gzip.GzipMinLength)) + return 20 + } + return minLength +} + +func processProxySetHeader(ds []*Directive) (map[string]string, error) { + res := make(map[string]string) + for _, d := range ds { + mustContainArgs(d, 2) + res[d.Args[0]] = d.Args[1] + } + return res, nil +} + +func processProxyPass(d *Directive, env *Env) ([]*BackendInfo, error) { + mustContainArgs(d, 1) + url := d.Args[0] + if !strings.HasPrefix(url, "http://") && !strings.HasPrefix(url, "https://") { + return nil, fmt.Errorf("%s: proxy_pass %s is not http or https", directiveInfo(d), url) + } + prefix := "http://" + if strings.HasPrefix(url, "https://") { + prefix = "https://" + } + address := 
url[len(prefix):] + + var upstream *Directive + for _, u := range env.Upstream { + mustContainArgs(u, 1) + if address == u.Args[0] { + upstream = u + break + } + } + if upstream == nil { + return []*BackendInfo{{Server: url, Weight: 1}}, nil + } + res := make([]*BackendInfo, 0) + for _, block := range upstream.Block { + if block.Directive != "server" { + continue + } + mustContainArgs(block, 1) + server := block.Args[0] + weight := 1 + for _, arg := range block.Args[1:] { + if strings.HasPrefix(arg, "weight=") { + w, err := strconv.Atoi(arg[7:]) + if err != nil { + general.Warnf("%s: invalid weight %v, use default value of 1 instead", directiveInfo(block), err) + } else { + weight = w + } + } + } + res = append(res, &BackendInfo{Server: prefix + server, Weight: weight}) + } + return res, nil +} + +func processSSLCertificates(certs []*Directive, keys []*Directive) (map[string]string, map[string]string, error) { + if len(certs) != len(keys) { + var missMatch []*Directive + if len(certs) > len(keys) { + missMatch = certs[len(keys):] + } else { + missMatch = keys[len(certs):] + } + msg := "" + for _, d := range missMatch { + msg += directiveInfo(d) + "\n" + } + return nil, nil, fmt.Errorf("%s has miss certs or keys", msg) + } + + certMap := make(map[string]string) + keyMap := make(map[string]string) + for i := 0; i < len(certs); i++ { + cert := certs[i] + key := keys[i] + mustContainArgs(cert, 1) + mustContainArgs(key, 1) + certName := cert.Args[0] + keyName := key.Args[0] + certData, err := loadCert(certName) + if err != nil { + return nil, nil, fmt.Errorf("%s: %v", directiveInfo(cert), err) + } + keyData, err := loadCert(keyName) + if err != nil { + return nil, nil, fmt.Errorf("%s: %v", directiveInfo(key), err) + } + // cert and keys should have the same key to match. + certMap[certName] = certData + keyMap[certName] = keyData + } + return certMap, keyMap, nil +} + +func loadCert(filePath string) (string, error) { + data, err := os.ReadFile(filePath) + if err != nil { + return "", err + } + res := base64.StdEncoding.EncodeToString(data) + return res, nil +} + +func processServerName(d *Directive) ([]*HostInfo, error) { + hosts := make([]*HostInfo, 0) + for _, arg := range d.Args { + if strings.HasPrefix(arg, "~") { + _, err := regexp.Compile(arg[1:]) + if err != nil { + return nil, fmt.Errorf("%s: %v", directiveInfo(d), err) + } + hosts = append(hosts, &HostInfo{ + Value: arg[1:], + IsRegexp: true, + }) + } else { + count := strings.Count(arg, "*") + if count > 1 { + return nil, fmt.Errorf("%s: host %s contains more than one wildcard", directiveInfo(d), arg) + } + if count == 1 { + if arg[0] != '*' && arg[len(arg)-1] != '*' { + return nil, fmt.Errorf("%s: host %s contains wildcard in the middle", directiveInfo(d), arg) + } + } + hosts = append(hosts, &HostInfo{ + Value: arg, + IsRegexp: false, + }) + } + } + return hosts, nil +} + +func processListen(d *Directive) (address string, port int, https bool, err error) { + mustContainArgs(d, 1) + address, port, err = splitAddressPort(d.Args[0]) + if err != nil { + return "", 0, false, fmt.Errorf("%s: %v", directiveInfo(d), err) + } + for _, arg := range d.Args[1:] { + if arg == "ssl" { + return address, port, true, nil + } + } + return address, port, false, nil +} + +// splitAddressPort splits the listen directive into address and port. 
+// nginx examples: +// listen 127.0.0.1:8000; +// listen 127.0.0.1; +// listen 8000; +// listen *:8000; +// listen localhost:8000; +// listen [::]:8000; +// listen [::1]; +func splitAddressPort(listen string) (string, int, error) { + if listen == "" { + return "", 0, fmt.Errorf("listen is empty") + } + if listen[0] == '[' { + end := strings.Index(listen, "]") + if end < 0 { + return "", 0, fmt.Errorf("invalid listen: %s", listen) + } + host := listen[0 : end+1] + portPart := listen[end+1:] + if portPart == "" { + return host, 80, nil + } + if portPart[0] != ':' { + return "", 0, fmt.Errorf("invalid listen: %s", listen) + } + portPart = portPart[1:] + port, err := strconv.Atoi(portPart) + if err != nil { + return "", 0, fmt.Errorf("invalid listen: %s", listen) + } + return host, port, nil + } + + index := strings.Index(listen, ":") + if index < 0 { + port, err := strconv.Atoi(listen) + if err != nil { + return listen, 80, nil + } + return "", port, nil + } + host := listen[0:index] + portPart := listen[index+1:] + if portPart == "" { + return host, 80, nil + } + port, err := strconv.Atoi(portPart) + if err != nil { + return "", 0, fmt.Errorf("invalid listen: %s", listen) + } + return host, port, nil +} diff --git a/cmd/client/commandv2/convert/nginx/env_test.go b/cmd/client/commandv2/convert/nginx/env_test.go new file mode 100644 index 0000000000..85223cdce4 --- /dev/null +++ b/cmd/client/commandv2/convert/nginx/env_test.go @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
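For the upstream resolution in `processProxyPass` above, this self-contained sketch shows how a `server ... weight=N;` line inside an `upstream` block turns into a backend weight (the `parseWeight` helper is illustrative, not part of this diff):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseWeight mirrors the weight handling in processProxyPass: an upstream
// "server" directive may carry a "weight=N" argument; missing or unparsable
// weights fall back to the default of 1.
func parseWeight(args []string) int {
	weight := 1
	for _, arg := range args[1:] { // args[0] is the server address
		if strings.HasPrefix(arg, "weight=") {
			if w, err := strconv.Atoi(strings.TrimPrefix(arg, "weight=")); err == nil {
				weight = w
			}
		}
	}
	return weight
}

func main() {
	fmt.Println(parseWeight([]string{"localhost:1234"}))              // 1
	fmt.Println(parseWeight([]string{"localhost:2345", "weight=10"})) // 10
}
```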
+ */ + +package nginx + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSplitAddressPort(t *testing.T) { + testCases := []struct { + listen string + address string + port int + err bool + }{ + {listen: "127.0.0.1:8000", address: "127.0.0.1", port: 8000, err: false}, + {listen: "127.0.0.1", address: "127.0.0.1", port: 80, err: false}, + {listen: "8000", address: "", port: 8000, err: false}, + {listen: "*:8000", address: "*", port: 8000, err: false}, + {listen: "localhost:8000", address: "localhost", port: 8000, err: false}, + {listen: "[::]:8000", address: "[::]", port: 8000, err: false}, + {listen: "[::1]", address: "[::1]", port: 80, err: false}, + {listen: "", address: "", port: 0, err: true}, + {listen: "[::", address: "", port: 0, err: true}, + {listen: "[::]:", address: "", port: 0, err: true}, + {listen: "[::]:8888", address: "[::]", port: 8888, err: false}, + } + for _, tc := range testCases { + msg := fmt.Sprintf("%v", tc) + address, port, err := splitAddressPort(tc.listen) + assert.Equal(t, tc.address, address, msg) + assert.Equal(t, tc.port, port, msg) + if tc.err { + assert.NotNil(t, err, msg) + } else { + assert.Nil(t, err, msg) + } + } +} + +func newDirective(d string, args ...string) *Directive { + return &Directive{ + Directive: d, + Args: args, + } +} + +func TestEnvProcessErrors(t *testing.T) { + // gzip + { + testCases := []struct { + gzip *Directive + len *Directive + res int + }{ + {gzip: newDirective("gzip", "invalid"), len: nil, res: 0}, + {gzip: newDirective("gzip", "on"), len: nil, res: 20}, + {gzip: newDirective("gzip", "on"), len: newDirective("gzip_min_length", "200"), res: 200}, + {gzip: newDirective("gzip", "on"), len: newDirective("gzip_min_length", "invalid"), res: 20}, + {gzip: newDirective("gzip", "on"), len: newDirective("gzip_min_length", "-1"), res: 20}, + } + for i, tc := range testCases { + gzip := &GzipEnv{ + Gzip: tc.gzip, + GzipMinLength: tc.len, + } + got := processGzip(gzip) + assert.Equal(t, tc.res, got, "case", i) + } + } + + // ssl + { + certs := []*Directive{newDirective("ssl_certificate", "cert1"), newDirective("ssl_certificate", "cert2")} + keys := []*Directive{newDirective("ssl_certificate_key", "key1")} + _, _, err := processSSLCertificates(certs, keys) + assert.NotNil(t, err) + + certs = []*Directive{newDirective("ssl_certificate", "cert1")} + keys = []*Directive{newDirective("ssl_certificate_key", "key1"), newDirective("ssl_certificate_key", "key2")} + _, _, err = processSSLCertificates(certs, keys) + assert.NotNil(t, err) + + certs = []*Directive{newDirective("ssl_certificate", "cert1")} + keys = []*Directive{newDirective("ssl_certificate_key", "key1")} + _, _, err = processSSLCertificates(certs, keys) + assert.NotNil(t, err) + } + + // server name + { + testCases := []struct { + server *Directive + hostValues []string + isRegexp []bool + err bool + }{ + {server: newDirective("server_name", "~www.example.com$"), hostValues: []string{"www.example.com$"}, isRegexp: []bool{true}, err: false}, + {server: newDirective("server_name", "~["), hostValues: []string{}, isRegexp: []bool{}, err: true}, + {server: newDirective("server_name", "*.example.*"), hostValues: []string{}, isRegexp: []bool{}, err: true}, + } + for i, tc := range testCases { + serverNames, err := processServerName(tc.server) + assert.Equal(t, len(tc.hostValues), len(serverNames), "case", i) + for i := 0; i < len(tc.hostValues); i++ { + assert.Equal(t, tc.hostValues[i], serverNames[i].Value, "case", i) + assert.Equal(t, tc.isRegexp[i], 
serverNames[i].IsRegexp, "case", i) + } + if tc.err { + assert.NotNil(t, err, "case", i) + } else { + assert.Nil(t, err, "case", i) + } + } + } +} diff --git a/cmd/client/commandv2/convert/nginx/parse.go b/cmd/client/commandv2/convert/nginx/parse.go new file mode 100644 index 0000000000..0a8a46b678 --- /dev/null +++ b/cmd/client/commandv2/convert/nginx/parse.go @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package nginx + +import ( + "errors" + "fmt" + "path/filepath" + "strings" + + "github.com/megaease/easegress/v2/cmd/client/general" + crossplane "github.com/nginxinc/nginx-go-crossplane" +) + +const ( + // DirectiveInclude is the include directive. + DirectiveInclude = "include" + // DirectiveHTTP is the http directive. + DirectiveHTTP = "http" + // DirectiveServer is the server directive. + DirectiveServer = "server" + // DirectiveLocation is the location directive. + DirectiveLocation = "location" +) + +// Directive is the nginx directive. +type Directive = crossplane.Directive + +// Directives is the nginx directives. +type Directives = crossplane.Directives + +// Payload is the nginx payload. +type Payload = crossplane.Payload + +func parsePayload(payload *Payload) (*Config, error) { + addFilenameToPayload(payload) + directives := payload.Config[0].Parsed + directives = loadIncludes(directives, payload) + + config := &Config{ + Servers: make([]*Server, 0), + } + for _, d := range directives { + if d.Directive == DirectiveHTTP { + servers, err := parseHTTPDirective(payload, d) + if err != nil { + return nil, err + } + for _, s := range servers { + config.Servers, err = updateServers(config.Servers, s) + if err != nil { + return nil, err + } + } + } + } + return config, nil +} + +func parseHTTPDirective(payload *Payload, directive *Directive) ([]*Server, error) { + env := newEnv() + directives := loadIncludes(directive.Block, payload) + for _, d := range directives { + if d.Directive != DirectiveServer { + env.Update(d) + } + } + servers := make([]*Server, 0) + for _, d := range directives { + if d.Directive == DirectiveServer { + server, err := parseServerDirective(env.MustClone(), payload, d) + if err != nil { + return nil, err + } + servers, err = updateServers(servers, server) + if err != nil { + return nil, err + } + } + } + return servers, nil +} + +func updateServers(servers []*Server, s *Server) ([]*Server, error) { + for i, server := range servers { + if server.Port != s.Port { + continue + } + // same port, try to merge. + if server.Address != s.Address { + return nil, fmt.Errorf("two server in port %d have different address %s vs %s", server.Port, server.Address, s.Address) + } + if s.HTTPS { + servers[i].HTTPS = true + } + if s.CaCert != "" { + servers[i].CaCert = s.CaCert + } + for k, v := range s.Certs { + servers[i].Certs[k] = v + } + for k, v := range s.Keys { + servers[i].Keys[k] = v + } + servers[i].Rules = append(servers[i].Rules, s.Rules...) 
+ return servers, nil + } + return append(servers, s), nil +} + +func parseServerDirective(env *Env, payload *Payload, directive *Directive) (*Server, error) { + directives := loadIncludes(directive.Block, payload) + for _, d := range directives { + if d.Directive != DirectiveLocation { + env.Update(d) + } + } + info, err := env.GetServerInfo() + if err != nil { + return nil, err + } + res := &Server{ + ServerBase: info.ServerBase, + Rules: []*Rule{{ + Hosts: info.Hosts, + Paths: make([]*Path, 0), + }}, + } + + for _, d := range directives { + if d.Directive == DirectiveLocation { + paths, err := parseLocationDirective(env.MustClone(), payload, d) + if err != nil { + return nil, err + } + res.Rules[0].Paths = append(res.Rules[0].Paths, paths...) + } + } + return res, nil +} + +func parseLocationDirective(env *Env, payload *Payload, directive *Directive) ([]*Path, error) { + directives := loadIncludes(directive.Block, payload) + for _, d := range directives { + env.Update(d) + } + res := make([]*Path, 0) + proxyInfo, err := env.GetProxyInfo() + // for various nginx backends, we only support proxy_pass now. + // so we only warn when we can't get proxy info. + if err != nil { + general.Warnf("failed to get proxy for %s, %v", directiveInfo(directive), err) + } else { + path, pathType, err := parseLocationArgs(directive) + if err != nil { + general.Warnf("%s, %v", directiveInfo(directive), err) + } else { + res = append(res, &Path{ + Path: path, + Type: pathType, + Backend: proxyInfo, + }) + } + } + + // location can be nested. + for _, d := range directives { + if d.Directive == DirectiveLocation { + paths, err := parseLocationDirective(env.MustClone(), payload, d) + if err != nil { + return nil, err + } + res = append(res, paths...) + } + } + return res, nil +} + +func parseLocationArgs(d *Directive) (string, PathType, error) { + mustContainArgs(d, 1) + arg0 := d.Args[0] + if strings.HasPrefix(arg0, "/") { + return arg0, PathTypePrefix, nil + } + mustContainArgs(d, 2) + switch arg0 { + case "=": + return d.Args[1], PathTypeExact, nil + case "~": + return d.Args[1], PathTypeRe, nil + case "~*": + return d.Args[1], PathTypeReInsensitive, nil + case "^~": + return d.Args[1], PathTypePrefix, nil + default: + return "", PathTypePrefix, errors.New("invalid location args, only support =, ~, ~*, ^~") + } +} + +// addFilenameToPayload adds filename to payload recursively for all nested directives. +func addFilenameToPayload(payload *crossplane.Payload) { + for _, config := range payload.Config { + filename := filepath.Base(config.File) + directives := config.Parsed + for len(directives) > 0 { + d := directives[0] + d.File = filename + directives = append(directives[1:], d.Block...) + } + } +} + +// loadIncludes loads all include files for current directives recursively but not nested. +func loadIncludes(directives crossplane.Directives, payload *crossplane.Payload) crossplane.Directives { + res := crossplane.Directives{} + for _, d := range directives { + if d.Directive != DirectiveInclude { + res = append(res, d) + continue + } + mustContainArgs(d, 1) + name := d.Args[0] + var include crossplane.Directives + for _, config := range payload.Config { + if config.File == name { + include = config.Parsed + break + } + } + if include == nil { + general.Warnf("can't find include file %s for %s", name, directiveInfo(d)) + continue + } + include = loadIncludes(include, payload) + res = append(res, include...) 
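The modifier handling in `parseLocationArgs` determines which HTTPServer path field each nginx `location` becomes; here is a standalone restatement (the `locationType` helper is illustrative, not part of this diff):

```go
package main

import (
	"fmt"
	"strings"
)

// locationType restates parseLocationArgs: a bare "/path" is a prefix match,
// otherwise the nginx modifier selects the Easegress path type.
func locationType(args []string) string {
	if strings.HasPrefix(args[0], "/") {
		return "prefix"
	}
	switch args[0] {
	case "=":
		return "exact"
	case "^~":
		return "prefix"
	case "~":
		return "regexp"
	case "~*":
		return "caseInsensitiveRegexp"
	default:
		return "unsupported" // the converter reports an error here
	}
}

func main() {
	fmt.Println(locationType([]string{"/apis"}))          // prefix
	fmt.Println(locationType([]string{"=", "/user"}))     // exact
	fmt.Println(locationType([]string{"~*", "/user/.*"})) // caseInsensitiveRegexp
}
```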
+ } + return res +} + +func mustContainArgs(d *Directive, argNum int) { + if len(d.Args) < argNum { + general.ExitWithErrorf( + "%s must have at least %d args, please update it or remote it.", + directiveInfo(d), argNum, + ) + } +} diff --git a/cmd/client/commandv2/convert/nginx/parse_test.go b/cmd/client/commandv2/convert/nginx/parse_test.go new file mode 100644 index 0000000000..12d3d61f5b --- /dev/null +++ b/cmd/client/commandv2/convert/nginx/parse_test.go @@ -0,0 +1,399 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package nginx + +import ( + "bytes" + "fmt" + "html/template" + "testing" + + "github.com/megaease/easegress/v2/pkg/util/codectool" + crossplane "github.com/nginxinc/nginx-go-crossplane" + "github.com/stretchr/testify/assert" +) + +func TestAddFilenameToPayload(t *testing.T) { + tempDir := newTempTestDir(t) + defer tempDir.Clean() + + var checkFilename func(ds crossplane.Directives, filename string) + checkFilename = func(ds crossplane.Directives, filename string) { + for _, d := range ds { + assert.Equal(t, filename, d.File, "line %d, directive %v", d.Line, d) + checkFilename(d.Block, filename) + } + } + + { + nginxConf := ` + events {} + http { + include %s; + server { + listen 80; + location / { + proxy_pass http://localhost:8888; + } + } + } + ` + proxyConf := `proxy_set_header Conf-One One;` + + proxyFile := tempDir.Create("proxy.conf", []byte(proxyConf)) + nginxFile := tempDir.Create("nginx.conf", []byte(fmt.Sprintf(nginxConf, proxyFile))) + + payload, err := crossplane.Parse(nginxFile, &crossplane.ParseOptions{}) + addFilenameToPayload(payload) + assert.Nil(t, err) + assert.Equal(t, 2, len(payload.Config)) + checkFilename(payload.Config[0].Parsed, "nginx.conf") + checkFilename(payload.Config[1].Parsed, "proxy.conf") + } +} + +func TestLoadIncludes(t *testing.T) { + tempDir := newTempTestDir(t) + defer tempDir.Clean() + + { + nginxConf := ` + events {} + http { + include %s; + server { + listen 80; + location / { + proxy_pass http://localhost:8888; + } + } + } + ` + proxyConf1 := `include %s; proxy_set_header Conf-One One;` + proxyConf2 := `proxy_set_header Conf-Two Two;` + + proxyFile2 := tempDir.Create("proxy2.conf", []byte(proxyConf2)) + proxyFile1 := tempDir.Create("proxy1.conf", []byte(fmt.Sprintf(proxyConf1, proxyFile2))) + nginxFile := tempDir.Create("nginx.conf", []byte(fmt.Sprintf(nginxConf, proxyFile1))) + + payload, err := crossplane.Parse(nginxFile, &crossplane.ParseOptions{}) + addFilenameToPayload(payload) + assert.Nil(t, err) + httpDirectives := payload.Config[0].Parsed[1].Block + httpDirectives = loadIncludes(httpDirectives, payload) + assert.Equal(t, 3, len(httpDirectives)) + + // first directive from conf2 + d2 := httpDirectives[0] + assert.Equal(t, "proxy_set_header", d2.Directive) + assert.Equal(t, []string{"Conf-Two", "Two"}, d2.Args) + assert.Equal(t, "proxy2.conf", d2.File) + // second directive from conf1 + d1 := httpDirectives[1] + assert.Equal(t, 
"proxy_set_header", d1.Directive) + assert.Equal(t, []string{"Conf-One", "One"}, d1.Args) + assert.Equal(t, "proxy1.conf", d1.File) + } + + { + // test invalid includes + nginxConf := ` + events {} + http { + include not-exist.conf; + include %s invalid-args.conf; + include; + server { + listen 80; + location / { + proxy_pass http://localhost:8888; + } + } + } + ` + proxyConf1 := `include %s; proxy_set_header Conf-One One;` + proxyConf2 := `proxy_set_header Conf-Two Two;` + + proxyFile2 := tempDir.Create("proxy2.conf", []byte(proxyConf2)) + proxyFile1 := tempDir.Create("proxy1.conf", []byte(fmt.Sprintf(proxyConf1, proxyFile2))) + nginxFile := tempDir.Create("nginx.conf", []byte(fmt.Sprintf(nginxConf, proxyFile1))) + + payload, err := crossplane.Parse(nginxFile, &crossplane.ParseOptions{}) + addFilenameToPayload(payload) + assert.Nil(t, err) + httpDirectives := payload.Config[0].Parsed[1].Block + httpDirectives = loadIncludes(httpDirectives, payload) + assert.Equal(t, 1, len(httpDirectives)) + } +} + +func TestParsePayload(t *testing.T) { + tempDir := newTempTestDir(t) + defer tempDir.Clean() + { + nginxConf := ` + events {} + http { + upstream backend { + server localhost:1234; + server localhost:2345 weight=10; + } + + server { + listen 80; + + server_name www.example.com *.example.com; + + location /apis { + proxy_set_header X-Path "apis"; + proxy_pass http://localhost:8880; + + location /apis/v1 { + proxy_set_header X-Path "apis/v1"; + proxy_pass http://localhost:8888; + } + } + + location /upstream { + gzip on; + gzip_min_length 1000; + proxy_pass http://backend; + } + + location /websocket { + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + proxy_pass http://localhost:9090; + } + } + + server { + listen 127.0.0.1:443 ssl; + ssl_client_certificate {{ .CaFile }}; + ssl_certificate {{ .CertFile }}; + ssl_certificate_key {{ .KeyFile }}; + + location = /user { + proxy_pass http://localhost:9999; + } + + location ^~ /user/admin { + proxy_pass http://localhost:9991; + } + + location ~* /user/.* { + proxy_pass http://localhost:9992; + } + + location ~ /user/.* { + proxy_pass http://localhost:9993; + } + } + } + ` + tmplValue := map[string]string{ + "CaFile": tempDir.Create("ca.crt", []byte("ca")), + "CertFile": tempDir.Create("cert.crt", []byte("cert")), + "KeyFile": tempDir.Create("key.crt", []byte("key")), + } + + tmpl, err := template.New("nginx").Parse(nginxConf) + assert.Nil(t, err) + var buffer bytes.Buffer + tmpl.Execute(&buffer, tmplValue) + + file := tempDir.Create("nginx.conf", buffer.Bytes()) + payload, err := crossplane.Parse(file, &crossplane.ParseOptions{}) + assert.Nil(t, err) + config, err := parsePayload(payload) + assert.Nil(t, err) + + proxyInfo := ` +servers: + - port: 80 + rules: + - hosts: + - value: www.example.com + isRegexp: false + - value: '*.example.com' + isRegexp: false + paths: + - path: /apis + type: prefix + backend: + servers: + - server: http://localhost:8880 + weight: 1 + setHeaders: + X-Path: apis + - path: /apis/v1 + type: prefix + backend: + servers: + - server: http://localhost:8888 + weight: 1 + setHeaders: + X-Path: apis/v1 + - path: /upstream + type: prefix + backend: + servers: + - server: http://localhost:1234 + weight: 1 + - server: http://localhost:2345 + weight: 10 + gzipMinLength: 1000 + - path: /websocket + type: prefix + backend: + servers: + - server: http://localhost:9090 + weight: 1 + setHeaders: + Connection: $connection_upgrade + Upgrade: $http_upgrade + - port: 443 + 
address: 127.0.0.1 + https: true + caCert: Y2E= + certs: + {{ .CertFile }}: Y2VydA== + keys: + {{ .CertFile }}: a2V5 + rules: + - paths: + - path: /user + type: exact + backend: + servers: + - server: http://localhost:9999 + weight: 1 + - path: /user/admin + type: prefix + backend: + servers: + - server: http://localhost:9991 + weight: 1 + - path: /user/.* + type: caseInsensitiveRegexp + backend: + servers: + - server: http://localhost:9992 + weight: 1 + - path: /user/.* + type: regexp + backend: + servers: + - server: http://localhost:9993 + weight: 1 +` + tmp, err := template.New("proxyInfo").Parse(proxyInfo) + assert.Nil(t, err) + var proxyBuffer bytes.Buffer + tmp.Execute(&proxyBuffer, tmplValue) + expected := &Config{} + err = codectool.UnmarshalYAML(proxyBuffer.Bytes(), expected) + assert.Nil(t, err) + assert.Equal(t, expected, config) + } +} + +func TestUpdateServer(t *testing.T) { + tempDir := newTempTestDir(t) + defer tempDir.Clean() + { + nginxConf := ` + events {} + http { + server { + listen 80; + + location /apis { + proxy_pass http://localhost:8880; + } + } + + server { + listen 80; + + location /user { + proxy_pass http://localhost:9999; + } + } + } + ` + file := tempDir.Create("nginx.conf", []byte(nginxConf)) + payload, err := crossplane.Parse(file, &crossplane.ParseOptions{}) + assert.Nil(t, err) + config, err := parsePayload(payload) + assert.Nil(t, err) + + proxyInfo := ` +servers: + - port: 80 + rules: + - paths: + - path: /apis + type: prefix + backend: + servers: + - server: http://localhost:8880 + weight: 1 + - paths: + - path: /user + type: prefix + backend: + servers: + - server: http://localhost:9999 + weight: 1 +` + expected := &Config{} + err = codectool.UnmarshalYAML([]byte(proxyInfo), expected) + assert.Nil(t, err) + assert.Equal(t, expected, config) + } + { + servers := []*Server{ + { + ServerBase: ServerBase{ + Port: 80, + Certs: map[string]string{}, + Keys: map[string]string{}, + }, + }, + } + server := &Server{ + ServerBase: ServerBase{ + Port: 80, + HTTPS: true, + CaCert: "ca", + Certs: map[string]string{"cert": "cert"}, + Keys: map[string]string{"cert": "key"}, + }, + } + servers, err := updateServers(servers, server) + assert.Nil(t, err) + assert.Equal(t, 1, len(servers)) + got := servers[0] + assert.True(t, got.HTTPS) + assert.Equal(t, "ca", got.CaCert) + assert.Equal(t, map[string]string{"cert": "cert"}, got.Certs) + assert.Equal(t, map[string]string{"cert": "key"}, got.Keys) + } +} diff --git a/cmd/client/commandv2/convert/nginx/test_test.go b/cmd/client/commandv2/convert/nginx/test_test.go new file mode 100644 index 0000000000..1e8aba4552 --- /dev/null +++ b/cmd/client/commandv2/convert/nginx/test_test.go @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package nginx + +import ( + "net/http" + "os" + "path/filepath" + "testing" + + "github.com/megaease/easegress/v2/pkg/context" + "github.com/megaease/easegress/v2/pkg/logger" + "github.com/megaease/easegress/v2/pkg/protocols/httpprot" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func init() { + logger.InitMock() +} + +type tempTestDir struct { + dir string + t *testing.T + files []string +} + +func newTempTestDir(t *testing.T) *tempTestDir { + dir, err := os.MkdirTemp("", "test") + require.Nil(t, err) + return &tempTestDir{dir: dir, t: t} +} + +func (dir *tempTestDir) Create(filename string, content []byte) string { + file, err := os.Create(filepath.Join(dir.dir, filename)) + require.Nil(dir.t, err) + defer file.Close() + + _, err = file.Write(content) + require.Nil(dir.t, err) + dir.files = append(dir.files, file.Name()) + return file.Name() +} + +func (dir *tempTestDir) Clean() { + for _, file := range dir.files { + os.Remove(file) + } + os.Remove(dir.dir) +} + +func newContext(t *testing.T, req *http.Request) *context.Context { + ctx := context.New(nil) + r, err := httpprot.NewRequest(req) + assert.Nil(t, err) + r.FetchPayload(0) + ctx.SetRequest(context.DefaultNamespace, r) + return ctx +} diff --git a/cmd/client/commandv2/convert/nginx/types.go b/cmd/client/commandv2/convert/nginx/types.go new file mode 100644 index 0000000000..1fc4acc85b --- /dev/null +++ b/cmd/client/commandv2/convert/nginx/types.go @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package nginx + +// Config is config for convert. +type Config struct { + Servers []*Server `json:"servers"` +} + +// ServerBase is the base config for server. +type ServerBase struct { + Port int `json:"port"` + Address string `json:"address"` + HTTPS bool `json:"https"` + + CaCert string `json:"caCert"` + Certs map[string]string `json:"certs"` + Keys map[string]string `json:"keys"` +} + +// Server is the config for server. +type Server struct { + ServerBase `json:",inline"` + Rules []*Rule `json:"rules"` +} + +// ServerInfo is the info config for server. +type ServerInfo struct { + ServerBase `json:",inline"` + Hosts []*HostInfo `json:"hosts"` +} + +// HostInfo is the info config for host. +type HostInfo struct { + Value string `json:"value"` + IsRegexp bool `json:"isRegexp"` +} + +// Rule is the config for rule. +type Rule struct { + Hosts []*HostInfo `json:"hosts"` + Paths []*Path `json:"paths"` +} + +// PathType is the type of path. +type PathType string + +const ( + // PathTypePrefix is the prefix type of path. + PathTypePrefix PathType = "prefix" + // PathTypeExact is the exact type of path. + PathTypeExact PathType = "exact" + // PathTypeRe is the regexp type of path. + PathTypeRe PathType = "regexp" + // PathTypeReInsensitive is the case insensitive regexp type of path. + PathTypeReInsensitive PathType = "caseInsensitiveRegexp" +) + +// Path is the config for path. 
+type Path struct { + Path string `json:"path"` + Type PathType `json:"type"` + Backend *ProxyInfo `json:"backend"` +} + +// ProxyInfo is the config for proxy. +type ProxyInfo struct { + Servers []*BackendInfo `json:"servers"` + SetHeaders map[string]string `json:"setHeaders"` + GzipMinLength int `json:"gzipMinLength"` +} + +// BackendInfo is the config for backend. +type BackendInfo struct { + Server string `json:"server"` + Weight int `json:"weight"` +} diff --git a/cmd/client/commandv2/convert/nginx/utils.go b/cmd/client/commandv2/convert/nginx/utils.go new file mode 100644 index 0000000000..5aad3ae5e2 --- /dev/null +++ b/cmd/client/commandv2/convert/nginx/utils.go @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package nginx + +import ( + "fmt" + "strings" +) + +func directiveInfo(d *Directive) string { + return fmt.Sprintf("directive <%s %s> of %s:%d", d.Directive, strings.Join(d.Args, " "), d.File, d.Line) +} diff --git a/cmd/client/commandv2/convert/nginx/utils_test.go b/cmd/client/commandv2/convert/nginx/utils_test.go new file mode 100644 index 0000000000..31f06c02b5 --- /dev/null +++ b/cmd/client/commandv2/convert/nginx/utils_test.go @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package nginx + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDirectiveInfo(t *testing.T) { + d := &Directive{ + Directive: "test", + Args: []string{"a", "b"}, + File: "test.conf", + Line: 100, + } + assert.Equal(t, "directive <test a b> of test.conf:100", directiveInfo(d)) +} diff --git a/cmd/client/commandv2/create/create_test.go b/cmd/client/commandv2/create/create_test.go index 5720904f77..0136637de4 100644 --- a/cmd/client/commandv2/create/create_test.go +++ b/cmd/client/commandv2/create/create_test.go @@ -27,4 +27,5 @@ import ( func TestCmd(t *testing.T) { cmd := Cmd() assert.NotNil(t, cmd) + assert.Error(t, cmd.Args(cmd, nil)) } diff --git a/cmd/client/commandv2/create/createhttpproxy.go b/cmd/client/commandv2/create/createhttpproxy.go index 8f859b0af4..5f273ce6fe 100644 --- a/cmd/client/commandv2/create/createhttpproxy.go +++ b/cmd/client/commandv2/create/createhttpproxy.go @@ -24,14 +24,13 @@ import ( "path/filepath" "strings" + "github.com/megaease/easegress/v2/cmd/client/commandv2/specs" "github.com/megaease/easegress/v2/cmd/client/general" "github.com/megaease/easegress/v2/cmd/client/resources" "github.com/megaease/easegress/v2/pkg/filters" "github.com/megaease/easegress/v2/pkg/filters/proxies" "github.com/megaease/easegress/v2/pkg/filters/proxies/httpproxy" - "github.com/megaease/easegress/v2/pkg/object/httpserver" "github.com/megaease/easegress/v2/pkg/object/httpserver/routers" - "github.com/megaease/easegress/v2/pkg/object/pipeline" "github.com/megaease/easegress/v2/pkg/util/codectool" "github.com/spf13/cobra" ) @@ -147,22 +146,6 @@ func httpProxyRun(cmd *cobra.Command, args []string) error { return nil } -// HTTPServerSpec is the spec of HTTPServer. -type HTTPServerSpec struct { - Name string `json:"name"` - Kind string `json:"kind"` - - httpserver.Spec `json:",inline"` -} - -// PipelineSpec is the spec of Pipeline. -type PipelineSpec struct { - Name string `json:"name"` - Kind string `json:"kind"` - - pipeline.Spec `json:",inline"` -} - // Complete completes all the required options. func (o *HTTPProxyOptions) Complete(args []string) { o.Name = args[0] @@ -223,12 +206,8 @@ func (o *HTTPProxyOptions) getPipelineName(id int) string { } // Translate translates HTTPProxyOptions to HTTPServerSpec and PipelineSpec.
-func (o *HTTPProxyOptions) Translate() (*HTTPServerSpec, []*PipelineSpec) { - hs := &HTTPServerSpec{ - Name: o.getServerName(), - Kind: httpserver.Kind, - Spec: *getDefaultHTTPServerSpec(), - } +func (o *HTTPProxyOptions) Translate() (*specs.HTTPServerSpec, []*specs.PipelineSpec) { + hs := specs.NewHTTPServerSpec(o.getServerName()) hs.Port = uint16(o.Port) if o.TLS { hs.HTTPS = true @@ -247,9 +226,9 @@ func (o *HTTPProxyOptions) Translate() (*HTTPServerSpec, []*PipelineSpec) { return hs, pipelines } -func (o *HTTPProxyOptions) translateRules() (routers.Rules, []*PipelineSpec) { +func (o *HTTPProxyOptions) translateRules() (routers.Rules, []*specs.PipelineSpec) { var rules routers.Rules - var pipelines []*PipelineSpec + var pipelines []*specs.PipelineSpec pipelineID := 0 for _, rule := range o.rules { @@ -261,11 +240,10 @@ func (o *HTTPProxyOptions) translateRules() (routers.Rules, []*PipelineSpec) { PathPrefix: rule.PathPrefix, Backend: pipelineName, } - pipelines = append(pipelines, &PipelineSpec{ - Name: pipelineName, - Kind: pipeline.Kind, - Spec: *translateToPipeline(rule.Endpoints), - }) + + pipelineSpec := specs.NewPipelineSpec(pipelineName) + translateToPipeline(rule.Endpoints, pipelineSpec) + pipelines = append(pipelines, pipelineSpec) l := len(rules) if l != 0 && rules[l-1].Host == rule.Host { @@ -306,20 +284,13 @@ func loadCertFile(filePath string) (string, error) { return base64.StdEncoding.EncodeToString(data), nil } -func translateToPipeline(endpoints []string) *pipeline.Spec { +func translateToPipeline(endpoints []string, spec *specs.PipelineSpec) { proxy := translateToProxyFilter(endpoints) - data := codectool.MustMarshalYAML(proxy) - maps, _ := general.UnmarshalMapInterface(data, false) - - spec := getDefaultPipelineSpec() - spec.Filters = maps - return spec + spec.SetFilters([]filters.Spec{proxy}) } func translateToProxyFilter(endpoints []string) *httpproxy.Spec { - spec := getDefaultProxyFilterSpec() - spec.BaseSpec.MetaSpec.Name = "proxy" - spec.BaseSpec.MetaSpec.Kind = httpproxy.Kind + spec := specs.NewProxyFilterSpec("proxy") servers := make([]*proxies.Server, len(endpoints)) for i, endpoint := range endpoints { @@ -381,15 +352,3 @@ type HTTPProxyRule struct { PathPrefix string Endpoints []string } - -func getDefaultHTTPServerSpec() *httpserver.Spec { - return (&httpserver.HTTPServer{}).DefaultSpec().(*httpserver.Spec) -} - -func getDefaultPipelineSpec() *pipeline.Spec { - return (&pipeline.Pipeline{}).DefaultSpec().(*pipeline.Spec) -} - -func getDefaultProxyFilterSpec() *httpproxy.Spec { - return filters.GetKind(httpproxy.Kind).DefaultSpec().(*httpproxy.Spec) -} diff --git a/cmd/client/commandv2/create/createhttpproxy_test.go b/cmd/client/commandv2/create/createhttpproxy_test.go index c05f194282..a8aec49c6a 100644 --- a/cmd/client/commandv2/create/createhttpproxy_test.go +++ b/cmd/client/commandv2/create/createhttpproxy_test.go @@ -24,7 +24,7 @@ import ( "path/filepath" "testing" - "github.com/megaease/easegress/v2/pkg/filters" + "github.com/megaease/easegress/v2/cmd/client/commandv2/specs" "github.com/megaease/easegress/v2/pkg/filters/proxies/httpproxy" "github.com/megaease/easegress/v2/pkg/object/httpserver/routers" "github.com/megaease/easegress/v2/pkg/util/codectool" @@ -44,7 +44,7 @@ pools: loadBalance: policy: roundRobin ` - expected := getDefaultProxyFilterSpec() + expected := specs.NewProxyFilterSpec("proxy") err := codectool.UnmarshalYAML([]byte(yamlStr), expected) assert.Nil(err) @@ -71,12 +71,13 @@ filters: policy: roundRobin ` // compare expected and got 
pipeline - expected := getDefaultPipelineSpec() + expected := specs.NewPipelineSpec("pipeline") err := codectool.UnmarshalYAML([]byte(yamlStr), expected) assert.Nil(err) endpoints := []string{"http://127.0.0.1:9095", "http://127.0.0.1:9096"} - got := translateToPipeline(endpoints) + got := specs.NewPipelineSpec("pipeline") + translateToPipeline(endpoints, got) // filters part is not compare here, because the filter part is map[string]interface{}, // the expected map[string]interface{} is unmarshal from yaml, @@ -92,7 +93,7 @@ filters: // if marshal it once, some part of expectedFilter will be nil. // but gotFilter will be empty. for example []string{} vs nil. // []string{} and nil are actually same in this case. - expectedFilter := getDefaultProxyFilterSpec() + expectedFilter := specs.NewProxyFilterSpec("proxy") filterYaml := codectool.MustMarshalYAML(expected.Filters[0]) err = codectool.UnmarshalYAML(filterYaml, expectedFilter) assert.Nil(err) @@ -100,7 +101,7 @@ filters: err = codectool.UnmarshalYAML(filterYaml, expectedFilter) assert.Nil(err) - gotFilter := filters.GetKind(httpproxy.Kind).DefaultSpec().(*httpproxy.Spec) + gotFilter := specs.NewProxyFilterSpec("proxy") filterYaml = codectool.MustMarshalYAML(got.Filters[0]) err = codectool.UnmarshalYAML(filterYaml, gotFilter) assert.Nil(err) @@ -311,11 +312,11 @@ filters: policy: roundRobin ` expectedFilter := func() *httpproxy.Spec { - expected := getDefaultPipelineSpec() + expected := specs.NewPipelineSpec("pipeline") err = codectool.UnmarshalYAML([]byte(yamlStr), expected) assert.Nil(err) - expectedFilter := getDefaultProxyFilterSpec() + expectedFilter := specs.NewProxyFilterSpec("proxy") filterYaml := codectool.MustMarshalYAML(expected.Filters[0]) err = codectool.UnmarshalYAML(filterYaml, expectedFilter) assert.Nil(err) @@ -326,7 +327,7 @@ filters: }() for i, p := range pls { - gotFilter := getDefaultProxyFilterSpec() + gotFilter := specs.NewProxyFilterSpec("proxy") filterYaml := codectool.MustMarshalYAML(p.Filters[0]) err = codectool.UnmarshalYAML(filterYaml, gotFilter) assert.Nil(err) diff --git a/cmd/client/commandv2/specs/spec.go b/cmd/client/commandv2/specs/spec.go new file mode 100644 index 0000000000..f4d91ab32c --- /dev/null +++ b/cmd/client/commandv2/specs/spec.go @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2017, MegaEase + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package specs + +import ( + "github.com/megaease/easegress/v2/cmd/client/general" + "github.com/megaease/easegress/v2/pkg/filters" + "github.com/megaease/easegress/v2/pkg/filters/builder" + "github.com/megaease/easegress/v2/pkg/filters/proxies/httpproxy" + "github.com/megaease/easegress/v2/pkg/object/httpserver" + "github.com/megaease/easegress/v2/pkg/object/pipeline" + "github.com/megaease/easegress/v2/pkg/util/codectool" +) + +// HTTPServerSpec is the spec of HTTPServer. 
+type HTTPServerSpec struct { + Name string `json:"name"` + Kind string `json:"kind"` + + httpserver.Spec `json:",inline"` +} + +// PipelineSpec is the spec of Pipeline. +type PipelineSpec struct { + Name string `json:"name"` + Kind string `json:"kind"` + + pipeline.Spec `json:",inline"` +} + +// SetFilters sets the filters of PipelineSpec. +func (p *PipelineSpec) SetFilters(filters []filters.Spec) { + data := codectool.MustMarshalYAML(filters) + maps, _ := general.UnmarshalMapInterface(data, true) + p.Filters = maps +} + +// NewHTTPServerSpec returns a new HTTPServerSpec. +func NewHTTPServerSpec(name string) *HTTPServerSpec { + spec := &HTTPServerSpec{ + Name: name, + Kind: httpserver.Kind, + Spec: *getDefaultHTTPServerSpec(), + } + spec.Spec.Certs = map[string]string{} + spec.Spec.Keys = map[string]string{} + return spec +} + +// NewPipelineSpec returns a new PipelineSpec. +func NewPipelineSpec(name string) *PipelineSpec { + return &PipelineSpec{ + Name: name, + Kind: pipeline.Kind, + Spec: *getDefaultPipelineSpec(), + } +} + +func getDefaultHTTPServerSpec() *httpserver.Spec { + return (&httpserver.HTTPServer{}).DefaultSpec().(*httpserver.Spec) +} + +func getDefaultPipelineSpec() *pipeline.Spec { + return (&pipeline.Pipeline{}).DefaultSpec().(*pipeline.Spec) +} + +// NewProxyFilterSpec returns a new ProxyFilterSpec. +func NewProxyFilterSpec(name string) *httpproxy.Spec { + spec := GetDefaultFilterSpec(httpproxy.Kind).(*httpproxy.Spec) + spec.BaseSpec.MetaSpec.Name = name + spec.BaseSpec.MetaSpec.Kind = httpproxy.Kind + return spec +} + +// NewWebsocketFilterSpec returns a new WebsocketFilterSpec. +func NewWebsocketFilterSpec(name string) *httpproxy.WebSocketProxySpec { + spec := GetDefaultFilterSpec(httpproxy.WebSocketProxyKind).(*httpproxy.WebSocketProxySpec) + spec.BaseSpec.MetaSpec.Name = name + spec.BaseSpec.MetaSpec.Kind = httpproxy.WebSocketProxyKind + return spec +} + +// NewRequestAdaptorFilterSpec returns a new RequestAdaptorFilterSpec. +func NewRequestAdaptorFilterSpec(name string) *builder.RequestAdaptorSpec { + spec := GetDefaultFilterSpec(builder.RequestAdaptorKind).(*builder.RequestAdaptorSpec) + spec.BaseSpec.MetaSpec.Name = name + spec.BaseSpec.MetaSpec.Kind = builder.RequestAdaptorKind + return spec +} + +// GetDefaultFilterSpec returns the default filter spec of the kind. +func GetDefaultFilterSpec(kind string) filters.Spec { + return filters.GetKind(kind).DefaultSpec() +} diff --git a/cmd/client/general/message.go b/cmd/client/general/message.go index ebcb540082..c87e5df9d2 100644 --- a/cmd/client/general/message.go +++ b/cmd/client/general/message.go @@ -32,3 +32,13 @@ func SuccessMsg(action CmdType, values ...string) string { func ErrorMsg(action CmdType, err error, values ...string) error { return fmt.Errorf("%s %s failed, %v", action, strings.Join(values, " "), err) } + +// Warnf prints the warning message. +func Warnf(format string, args ...interface{}) { + fmt.Printf("WARNING: "+format+"\n", args...) +} + +// Infof prints the info message. +func Infof(format string, args ...interface{}) { + fmt.Printf("INFO: "+format+"\n", args...) 
+} diff --git a/cmd/client/main.go b/cmd/client/main.go index 886ac51cf4..1115fef885 100644 --- a/cmd/client/main.go +++ b/cmd/client/main.go @@ -40,6 +40,11 @@ var basicGroup = &cobra.Group{ Title: `Basic Commands`, } +var advancedGroup = &cobra.Group{ + ID: "advanced", + Title: `Advanced Commands`, +} + var otherGroup = &cobra.Group{ ID: "other", Title: `Other Commands`, } @@ -77,6 +82,11 @@ func main() { commandv2.EditCmd(), ) + addCommandWithGroup( + advancedGroup, + commandv2.ConvertCmd(), + ) + addCommandWithGroup( otherGroup, commandv2.APIsCmd(), @@ -98,7 +108,7 @@ func main() { command.CustomDataCmd(), ) - rootCmd.AddGroup(basicGroup, otherGroup, deprecatedGroup) + rootCmd.AddGroup(basicGroup, advancedGroup, otherGroup, deprecatedGroup) for _, c := range rootCmd.Commands() { general.GenerateExampleFromChild(c) diff --git a/docs/02.Tutorials/2.2.HTTP-Proxy-Usage.md b/docs/02.Tutorials/2.2.HTTP-Proxy-Usage.md index ee216c260f..71a8cf3c44 100644 --- a/docs/02.Tutorials/2.2.HTTP-Proxy-Usage.md +++ b/docs/02.Tutorials/2.2.HTTP-Proxy-Usage.md @@ -98,9 +98,14 @@ port: 10080 rules: # Rules for host matching. # If not match, HTTPServer will check next rule. +# A wildcard is supported at the front or end of a hostname. - host: - hosts: [, ] hostRegexp: + hosts: + - value: "*.example.com" + isRegexp: false + - value: www.example.* + isRegexp: false # IP-based filtering. ipFilter: {} diff --git a/docs/03.Advanced-Cookbook/3.13.Nginx.md b/docs/03.Advanced-Cookbook/3.13.Nginx.md new file mode 100644 index 0000000000..7baacc88b9 --- /dev/null +++ b/docs/03.Advanced-Cookbook/3.13.Nginx.md @@ -0,0 +1,342 @@ +# Nginx Configuration Conversion to Easegress YAML + +- [Nginx Configuration Conversion to Easegress YAML](#nginx-configuration-conversion-to-easegress-yaml) + - [Nginx Configurations Supported for Conversion](#nginx-configurations-supported-for-conversion) + - [listen](#listen) + - [server\_name](#server_name) + - [location](#location) + - [proxy\_pass and upstream](#proxy_pass-and-upstream) + - [websocket](#websocket) + - [HTTPS](#https) + - [proxy\_set\_header](#proxy_set_header) + - [gzip](#gzip) + - [Example](#example) + +This document is a guide for converting an Nginx configuration to Easegress YAML format with the `egctl convert nginx` command. + +```bash +egctl convert nginx -f <nginx.conf> -o <output.yaml> --resource-prefix <prefix> +``` + +## Nginx Configurations Supported for Conversion + +### listen +The `listen` directive supports `port`, `address:port`, and `ssl` configurations. It corresponds to the `port` and `address` properties of an Easegress `HTTPServer`. + +``` +listen 80; +listen 127.0.0.1:80; +listen 443 ssl; +``` + +### server_name +Supports exact hostnames, prefixed hostnames, suffixed hostnames, and regular expressions. Translates to the `hosts` attribute in Easegress `HTTPServer`. + +``` +server_name www.example.com *.example.com; +server_name www.example.* ~^(?<user>.+)\.example\.net$; +``` + +### location +Handles `location` with prefix paths, exact paths, and regular expression paths (case-insensitive included). The ordering in Easegress follows Nginx's precedence: exact paths first, prefix paths next (sorted by length), and regex paths last. Nested locations are also supported. + +This translates to `rules` and `paths` in an Easegress `HTTPServer`. + +``` +location /apis { + location /apis/v1 { + ... + } + ... +} + +location = /user { + ... +} + +location ~* \.(gif|jpg|jpeg)$ { + ... +} + +location ^~ /admin/ { + ...
+} +``` + +The resulting path order in Easegress would be: + +``` +- path: /user +- pathPrefix: /apis/v1 +- pathPrefix: /admin/ +- pathPrefix: /apis +- pathRegexp: \.(gif|jpg|jpeg)$ +``` + +### proxy_pass and upstream +Currently, only reverse-proxy functionality is supported. For `upstream`, the default load-balancing strategy is `roundRobin`. If a server weight is specified, `weightedRandom` is used for load balancing. + +This maps to an Easegress `Pipeline` with a `Proxy` filter. + +``` +proxy_pass http://user.example.com; + +proxy_pass http://backend; + +upstream backend { + server 127.0.0.1:8080; + server 127.0.0.2:9090 weight=10; +} +``` + +### websocket +Settings recognized as `WebSocket` configurations correspond to an Easegress `Pipeline` with a `WebSocketProxy` filter. + +``` +location /websocket { + proxy_pass http://wsbackend; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; +} +``` + +### HTTPS +HTTPS configurations are converted to the corresponding Easegress `HTTPServer` settings. + +``` +listen 443 ssl; + +ssl_certificate your-cert.crt; +ssl_certificate_key your-cert-key.key; + +ssl_certificate your-cert2.crt; +ssl_certificate_key your-cert-key2.key; + +ssl_client_certificate your-client-cert.crt; +``` + +### proxy_set_header +Translates to an Easegress `Pipeline` with a `RequestAdaptor` filter. + +The following Nginx embedded variables are supported: + +- `$host` +- `$hostname` +- `$content_length` +- `$content_type` +- `$remote_addr` +- `$remote_user` +- `$request_body` +- `$request_method` +- `$request_uri` +- `$scheme` + +For example: +``` +proxy_set_header Host $host; +``` + +### gzip +Corresponds to an Easegress `Pipeline` with a `Proxy` filter, including `compression` settings. + +``` +gzip on; +gzip_min_length 1000; +``` + +## Example + +Given the following nginx configuration: +``` +events {} +http { + upstream backend { + server localhost:1234; + server localhost:2345 weight=10; + } + + server { + listen 80; + + server_name www.example.com *.example.com; + + location /apis { + proxy_set_header X-Path "apis"; + proxy_pass http://localhost:8880; + + location /apis/v1 { + proxy_set_header X-Path "apis/v1"; + proxy_pass http://localhost:8888; + } + } + + location /upstream { + gzip on; + gzip_min_length 1000; + proxy_pass http://backend; + } + + location /websocket { + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + proxy_pass http://localhost:9090; + } + } + + server { + listen 127.0.0.1:443 ssl; + ssl_client_certificate <ca-file>; + ssl_certificate <cert-file>; + ssl_certificate_key <key-file>; + + location = /user { + proxy_pass http://localhost:9999; + } + } +} +``` + +then run + +```bash +egctl convert nginx -f nginx.conf -o nginx.yaml --resource-prefix convert-nginx +``` + +will generate the following Easegress YAML: + +```yaml +name: convert-nginx-80 +kind: HTTPServer +address: "" +port: 80 +keepAliveTimeout: 60s +maxConnections: 10240 +rules: + - hosts: + - isRegexp: false + value: www.example.com + - isRegexp: false + value: '*.example.com' + paths: + - pathPrefix: /websocket + backend: convert-nginx-websocket + clientMaxBodySize: -1 + - pathPrefix: /upstream + backend: convert-nginx-upstream + - pathPrefix: /apis/v1 + backend: convert-nginx-apisv1 + - pathPrefix: /apis + backend: convert-nginx-apis + +--- +name: convert-nginx-443 +kind: HTTPServer +https: true +address: 127.0.0.1 +port: 443 +caCertBase64: <base64 of ca-file> +certs: + <cert-file>: <base64 of cert-file> +keys: + <cert-file>: <base64 of key-file> +rules: + - paths: + - path: /user + backend: convert-nginx-user +--- +name:
convert-nginx-apis +kind: Pipeline +flow: [] +filters: + - kind: RequestAdaptor + name: request-adaptor + template: | + header: + set: + X-Path: apis + - kind: Proxy + maxIdleConns: 10240 + maxIdleConnsPerHost: 1024 + name: proxy + pools: + - loadBalance: + policy: roundRobin + servers: + - url: http://localhost:8880 + weight: 1 + +--- +name: convert-nginx-apisv1 +kind: Pipeline +flow: [] +filters: + - kind: RequestAdaptor + name: request-adaptor + template: | + header: + set: + X-Path: apis/v1 + version: "" + - kind: Proxy + maxIdleConns: 10240 + maxIdleConnsPerHost: 1024 + name: proxy + pools: + - loadBalance: + policy: roundRobin + servers: + - url: http://localhost:8888 + weight: 1 + +--- +name: convert-nginx-upstream +kind: Pipeline +flow: [] +filters: + - compression: + minLength: 1000 + kind: Proxy + maxIdleConns: 10240 + maxIdleConnsPerHost: 1024 + name: proxy + pools: + - loadBalance: + policy: weightedRandom + servers: + - url: http://localhost:1234 + weight: 1 + - url: http://localhost:2345 + weight: 10 + +--- +name: convert-nginx-websocket +kind: Pipeline +flow: [] +filters: + - kind: WebSocketProxy + name: websocket + pools: + - loadBalance: + policy: roundRobin + servers: + - url: ws://localhost:9090 + weight: 1 + +--- +name: convert-nginx-user +kind: Pipeline +flow: [] +filters: + - kind: Proxy + maxIdleConns: 10240 + maxIdleConnsPerHost: 1024 + name: proxy + pools: + - loadBalance: + policy: roundRobin + servers: + - url: http://localhost:9999 + weight: 1 +``` diff --git a/docs/03.Advanced-Cookbook/README.md b/docs/03.Advanced-Cookbook/README.md index 7e75ed84de..74d2f9de9e 100644 --- a/docs/03.Advanced-Cookbook/README.md +++ b/docs/03.Advanced-Cookbook/README.md @@ -12,3 +12,4 @@ ### [Workflow](3.10.Workflow.md) ### [Performance](3.11.Performance.md) ### [Migrate v1.x Filter To v2.x](3.12.Migrate.md) +### [Nginx Configuration Conversion to Easegress YAML](3.13.Nginx.md) diff --git a/docs/07.Reference/7.01.Controllers.md b/docs/07.Reference/7.01.Controllers.md index b19622ba4a..1a14f962bc 100644 --- a/docs/07.Reference/7.01.Controllers.md +++ b/docs/07.Reference/7.01.Controllers.md @@ -678,7 +678,7 @@ domains: | Name | Type | Description | Required | | ---------- | ----------------------------------- | ------------------------------------------------------------- | -------- | | ipFilter | [ipfilter.Spec](#ipfilterSpec) | IP Filter for all traffic under the rule | No | -| host | string | Exact host to match | No | +| host | string | Exact host or wildcard. For example "*.example.com" or "www.example.com". | No | | hostRegexp | string | Host in regular expression to match | No | | hosts | [][httpserver.Host](#httpserverhost) | Hosts to match | No | | paths | [][httpserver.Path](#httpserverPath) | Path matching rules, empty means to match nothing. Note that multiple paths are matched in the order of their appearance in the spec, this is different from Nginx. | No | @@ -691,7 +691,7 @@ domains: | Name | Type | Description | Required | | ------------- | ------------------------ | ---------------------------------------------------------------------- | -------- | | isRegexp | bool | Whether `value` is regular expression or exact value, default is false | No | -| value | string | Host value to match | Yes | +| value | string | Host value to match. Wildcard is supported. 
| Yes | ### httpserver.Path diff --git a/docs/07.Reference/7.02.Filters.md b/docs/07.Reference/7.02.Filters.md index 254093cdf6..b0ebed1733 100644 --- a/docs/07.Reference/7.02.Filters.md +++ b/docs/07.Reference/7.02.Filters.md @@ -449,7 +449,7 @@ path: addPrefix: /v3 ``` -The example configuration below removes header `X-Version` from all `GET` requests. +The example configuration below removes header `X-Version` from all `GET` requests and sets the `Host` header to the current request's host. See [here](#template-of-builder-filters) for more details about the template. ```yaml kind: RequestAdaptor @@ -457,7 +457,13 @@ name: request-adaptor-example method: GET header: del: ["X-Version"] +template: | + method: '{{ .req.Method }}' + header: + set: + Host: '{{ .req.Host }}' ``` +The structure of the template follows the `Configuration` structure below. The example configuration below modifies the request path using regular expressions. @@ -1897,6 +1903,13 @@ and please refer [Pipeline](7.01.Controllers.md#pipeline) for what is `namespace For example, if the request of the `DEFAULT` namespace is an HTTP one, we can access its method via `.requests.DEFAULT.Method`. +For convenience, shorthand notations simplify access to the properties of the default request and response. In this notation: + +- `.req.Host` is equal to `.requests.DEFAULT.Host` +- `.resp.Body` is equal to `.responses.DEFAULT.Body` + +Here, `.req` is a shorthand for `.requests.DEFAULT`, and similarly, `.resp` is a shorthand for `.responses.DEFAULT`. + Easegress also injects other data into the template engine, which can be accessed with `.data.`, for example, we can use `.data.PIPELINE` to read the data defined in the pipeline spec. @@ -1904,6 +1917,25 @@ read the data defined in the pipeline spec. The `template` should generate a string in YAML format, the schema of the result YAML varies from filters and protocols.
+Use `RequestAdaptor` as an example: + +```yaml +kind: RequestAdaptor +name: request-adaptor +template: | + header: + set: + Content: '{{ header .req.Header "Content-Length" }}' + Content-Type: '{{ header .req.Header "Content-Type" }}' + Host: '{{ .req.Host }}' + Method: '{{ .req.Method }}' + Remote-Addr: '{{ .req.RemoteAddr }}' + Remote-User: '{{ username .req }}' + Request-Body: '{{ .req.Body }}' + Request-URI: '{{ .req.RequestURI }}' + Scheme: '{{ .req.URL.Scheme }}' +``` + #### HTTP Specific * **Available fields of existing requests** diff --git a/go.mod b/go.mod index 1690410873..400bf03690 100644 --- a/go.mod +++ b/go.mod @@ -41,6 +41,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/nacos-group/nacos-sdk-go v1.1.4 github.com/nacos-group/nacos-sdk-go/v2 v2.2.3 + github.com/nginxinc/nginx-go-crossplane v0.4.33 github.com/open-policy-agent/opa v0.58.0 github.com/openzipkin/zipkin-go v0.4.2 github.com/patrickmn/go-cache v2.1.0+incompatible @@ -108,8 +109,10 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/huandu/xstrings v1.4.0 // indirect + github.com/jstemmer/go-junit-report v1.0.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/maxbrunsfeld/counterfeiter/v6 v6.6.1 // indirect github.com/onsi/ginkgo/v2 v2.13.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect diff --git a/go.sum b/go.sum index 2b16e57bb8..a87fe79a27 100644 --- a/go.sum +++ b/go.sum @@ -501,6 +501,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jstemmer/go-junit-report v1.0.0 h1:8X1gzZpR+nVQLAht+L/foqOeX2l9DTZoaIPbEQHxsds= +github.com/jstemmer/go-junit-report v1.0.0/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtblin/go-ldap-client v0.0.0-20170223121919-b73f66626b33 h1:XDpFOMOZq0u0Ar4F0p/wklqQXp/AMV1pTF5T5bDoUfQ= github.com/jtblin/go-ldap-client v0.0.0-20170223121919-b73f66626b33/go.mod h1:+0BcLY5d54TVv6irFzHoiFvwAHR6T0g9B+by/UaS9T0= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -573,6 +575,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/maxbrunsfeld/counterfeiter/v6 v6.6.1 h1:9XE5ykDiC8eNSqIPkxx0EsV3kMX1oe4kQWRZjIgytUA= +github.com/maxbrunsfeld/counterfeiter/v6 v6.6.1/go.mod h1:qbKwBR+qQODzH2WD/s53mdgp/xVcXMlJb59GRFOp6Z4= github.com/megaease/easemesh-api v1.4.4 h1:E18mtLfj8ffuPTeN7MqZeakJgT/tJ92JNIZsY2k2GE0= github.com/megaease/easemesh-api v1.4.4/go.mod h1:GuAE5DwqK6lI/ovoRKjyPxBCSoMhj0NLp9PRejj0Hnw= github.com/megaease/grace v1.0.0 h1:b44R3j6e/iaN62F4ZUnru9nzL1VaIcxxUZjSPVtTVzI= @@ -609,6 +613,8 @@ 
github.com/nacos-group/nacos-sdk-go v1.1.4 h1:qyrZ7HTWM4aeymFfqnbgNRERh7TWuER10p github.com/nacos-group/nacos-sdk-go v1.1.4/go.mod h1:cBv9wy5iObs7khOqov1ERFQrCuTR4ILpgaiaVMxEmGI= github.com/nacos-group/nacos-sdk-go/v2 v2.2.3 h1:sUQx4f1bXDeeOOEQZjGAitzxYApbYY9fVDbxVCaBW+I= github.com/nacos-group/nacos-sdk-go/v2 v2.2.3/go.mod h1:UL4U89WYdnyajgKJUMpuT1Rr6iNmbjrxOO40JRgtA00= +github.com/nginxinc/nginx-go-crossplane v0.4.33 h1:Sr6ptE+xzQ7Vc5xs1SXNDDBWvVwKA7BTtQJZIGTS7RY= +github.com/nginxinc/nginx-go-crossplane v0.4.33/go.mod h1:UzbZnyFv0vPlt1Urbnp/mrFCzBL4tYCReFuNBpFQEfI= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nrdcg/dnspod-go v0.4.0 h1:c/jn1mLZNKF3/osJ6mz3QPxTudvPArXTjpkmYj0uK6U= github.com/nrdcg/dnspod-go v0.4.0/go.mod h1:vZSoFSFeQVm2gWLMkyX61LZ8HI3BaqtHZWgPTGKr6KQ= @@ -711,6 +717,7 @@ github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9c github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= diff --git a/pkg/filters/builder/builder.go b/pkg/filters/builder/builder.go index ebabbf763f..d675283f79 100644 --- a/pkg/filters/builder/builder.go +++ b/pkg/filters/builder/builder.go @@ -39,9 +39,9 @@ type ( // Spec is the spec of Builder. 
Spec struct { - LeftDelim string `json:"leftDelim" jsonschema:"omitempty"` - RightDelim string `json:"rightDelim" jsonschema:"omitempty"` - Template string `json:"template" jsonschema:"omitempty"` + LeftDelim string `json:"leftDelim,omitempty" jsonschema:"omitempty"` + RightDelim string `json:"rightDelim,omitempty" jsonschema:"omitempty"` + Template string `json:"template,omitempty" jsonschema:"omitempty"` } ) @@ -79,15 +79,24 @@ func prepareBuilderData(ctx *context.Context) (map[string]interface{}, error) { requests := make(map[string]interface{}) responses := make(map[string]interface{}) + var defaultReq, defaultResp interface{} for k, v := range ctx.Requests() { requests[k] = v.ToBuilderRequest(k) + if k == context.DefaultNamespace { + defaultReq = requests[k] + } } for k, v := range ctx.Responses() { responses[k] = v.ToBuilderResponse(k) + if k == context.DefaultNamespace { + defaultResp = responses[k] + } } return map[string]interface{}{ + "req": defaultReq, + "resp": defaultResp, "requests": requests, "responses": responses, "data": ctx.Data(), diff --git a/pkg/filters/builder/extrafuncs.go b/pkg/filters/builder/extrafuncs.go index 65c642eb88..ea90b7a168 100644 --- a/pkg/filters/builder/extrafuncs.go +++ b/pkg/filters/builder/extrafuncs.go @@ -20,6 +20,7 @@ package builder import ( "encoding/json" "fmt" + "net/http" "strconv" "strings" "text/template" @@ -141,4 +142,16 @@ var extraFuncs = template.FuncMap{ "panic": func(v interface{}) interface{} { panic(v) }, + + "header": func(header http.Header, key string) string { + return header.Get(key) + }, + + "username": func(req interface{}) string { + type BasicAuth interface { + BasicAuth() (username, password string, ok bool) + } + username, _, _ := req.(BasicAuth).BasicAuth() + return username + }, } diff --git a/pkg/filters/builder/requestadaptor.go b/pkg/filters/builder/requestadaptor.go index ef9cb8f7b0..a74d1f0d8c 100644 --- a/pkg/filters/builder/requestadaptor.go +++ b/pkg/filters/builder/requestadaptor.go @@ -145,18 +145,18 @@ type ( Spec `json:",inline"` RequestAdaptorTemplate `json:",inline"` - Compress string `json:"compress" jsonschema:"omitempty"` - Decompress string `json:"decompress" jsonschema:"omitempty"` + Compress string `json:"compress,omitempty" jsonschema:"omitempty"` + Decompress string `json:"decompress,omitempty" jsonschema:"omitempty"` Sign *SignerSpec `json:"sign,omitempty" jsonschema:"omitempty"` } // RequestAdaptorTemplate is the template of the request adaptor. RequestAdaptorTemplate struct { - Host string `json:"host" jsonschema:"omitempty"` - Method string `json:"method" jsonschema:"omitempty,format=httpmethod"` + Host string `json:"host,omitempty" jsonschema:"omitempty"` + Method string `json:"method,omitempty" jsonschema:"omitempty,format=httpmethod"` Path *pathadaptor.Spec `json:"path,omitempty" jsonschema:"omitempty"` Header *httpheader.AdaptSpec `json:"header,omitempty" jsonschema:"omitempty"` - Body string `json:"body" jsonschema:"omitempty"` + Body string `json:"body,omitempty" jsonschema:"omitempty"` } // SignerSpec is the spec of the request signer. 
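The extrafuncs.go hunk above adds two template helpers, `header` and `username`, and prepareBuilderData now exposes the DEFAULT-namespace request and response as `.req` and `.resp`. The standalone sketch below only illustrates the semantics of those helpers with Go's text/template; the helper names mirror the diff, but the bare `*http.Request` used as `.req` is an assumption for illustration (in Easegress the value is the builder's request wrapper, not the raw request).

```go
package main

import (
	"net/http"
	"os"
	"text/template"
)

// funcs mirrors the two helpers added to extraFuncs in the diff:
// "header" reads a single header value, "username" extracts the basic-auth user.
var funcs = template.FuncMap{
	"header": func(h http.Header, key string) string { return h.Get(key) },
	"username": func(req interface{}) string {
		type basicAuth interface {
			BasicAuth() (username, password string, ok bool)
		}
		u, _, _ := req.(basicAuth).BasicAuth()
		return u
	},
}

func main() {
	// A plain *http.Request stands in for the ".req" object that
	// prepareBuilderData injects for builder templates.
	req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil)
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth("alice", "secret")

	tmpl := template.Must(template.New("t").Funcs(funcs).Parse(
		"type: {{ header .req.Header \"Content-Type\" }}, user: {{ username .req }}\n"))
	// Prints: type: application/json, user: alice
	_ = tmpl.Execute(os.Stdout, map[string]interface{}{"req": req})
}
```

The RequestAdaptor template shown earlier in the 7.02.Filters.md hunk resolves `header`, `username`, and `.req` through this same lookup path.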
diff --git a/pkg/filters/proxies/httpproxy/pool.go b/pkg/filters/proxies/httpproxy/pool.go index 6ed24f3c5e..6abd9d4251 100644 --- a/pkg/filters/proxies/httpproxy/pool.go +++ b/pkg/filters/proxies/httpproxy/pool.go @@ -181,16 +181,16 @@ type ServerPool struct { type ServerPoolSpec struct { BaseServerPoolSpec `json:",inline"` - Filter *RequestMatcherSpec `json:"filter" jsonschema:"omitempty"` - SpanName string `json:"spanName" jsonschema:"omitempty"` - ServerMaxBodySize int64 `json:"serverMaxBodySize" jsonschema:"omitempty"` - Timeout string `json:"timeout" jsonschema:"omitempty,format=duration"` - RetryPolicy string `json:"retryPolicy" jsonschema:"omitempty"` - CircuitBreakerPolicy string `json:"circuitBreakerPolicy" jsonschema:"omitempty"` + Filter *RequestMatcherSpec `json:"filter,omitempty" jsonschema:"omitempty"` + SpanName string `json:"spanName,omitempty" jsonschema:"omitempty"` + ServerMaxBodySize int64 `json:"serverMaxBodySize,omitempty" jsonschema:"omitempty"` + Timeout string `json:"timeout,omitempty" jsonschema:"omitempty,format=duration"` + RetryPolicy string `json:"retryPolicy,omitempty" jsonschema:"omitempty"` + CircuitBreakerPolicy string `json:"circuitBreakerPolicy,omitempty" jsonschema:"omitempty"` MemoryCache *MemoryCacheSpec `json:"memoryCache,omitempty" jsonschema:"omitempty"` // FailureCodes would be 5xx if it isn't assigned any value. - FailureCodes []int `json:"failureCodes" jsonschema:"omitempty,uniqueItems=true"` + FailureCodes []int `json:"failureCodes,omitempty" jsonschema:"omitempty,uniqueItems=true"` } // ServerPoolStatus is the status of Pool. diff --git a/pkg/filters/proxies/httpproxy/proxy.go b/pkg/filters/proxies/httpproxy/proxy.go index e67cb19eba..834ee2a0de 100644 --- a/pkg/filters/proxies/httpproxy/proxy.go +++ b/pkg/filters/proxies/httpproxy/proxy.go @@ -110,10 +110,10 @@ type ( MirrorPool *ServerPoolSpec `json:"mirrorPool,omitempty" jsonschema:"omitempty"` Compression *CompressionSpec `json:"compression,omitempty" jsonschema:"omitempty"` MTLS *MTLS `json:"mtls,omitempty" jsonschema:"omitempty"` - MaxIdleConns int `json:"maxIdleConns" jsonschema:"omitempty"` - MaxIdleConnsPerHost int `json:"maxIdleConnsPerHost" jsonschema:"omitempty"` - MaxRedirection int `json:"maxRedirection" jsonschema:"omitempty"` - ServerMaxBodySize int64 `json:"serverMaxBodySize" jsonschema:"omitempty"` + MaxIdleConns int `json:"maxIdleConns,omitempty" jsonschema:"omitempty"` + MaxIdleConnsPerHost int `json:"maxIdleConnsPerHost,omitempty" jsonschema:"omitempty"` + MaxRedirection int `json:"maxRedirection,omitempty" jsonschema:"omitempty"` + ServerMaxBodySize int64 `json:"serverMaxBodySize,omitempty" jsonschema:"omitempty"` } // Status is the status of Proxy. diff --git a/pkg/filters/proxies/httpproxy/wspool.go b/pkg/filters/proxies/httpproxy/wspool.go index 35a4a9ce49..d645f0991c 100644 --- a/pkg/filters/proxies/httpproxy/wspool.go +++ b/pkg/filters/proxies/httpproxy/wspool.go @@ -47,11 +47,11 @@ type WebSocketServerPool struct { // WebSocketServerPoolSpec is the spec for a server pool. 
type WebSocketServerPoolSpec struct { BaseServerPoolSpec `json:",inline"` - ClientMaxMsgSize int64 `json:"clientMaxMsgSize" jsonschema:"omitempty"` - ServerMaxMsgSize int64 `json:"serverMaxMsgSize" jsonschema:"omitempty"` - Filter *RequestMatcherSpec `json:"filter" jsonschema:"omitempty"` - InsecureSkipVerify bool `json:"insecureSkipVerify" jsonschema:"omitempty"` - OriginPatterns []string `json:"originPatterns" jsonschema:"omitempty"` + ClientMaxMsgSize int64 `json:"clientMaxMsgSize,omitempty" jsonschema:"omitempty"` + ServerMaxMsgSize int64 `json:"serverMaxMsgSize,omitempty" jsonschema:"omitempty"` + Filter *RequestMatcherSpec `json:"filter,omitempty" jsonschema:"omitempty"` + InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty" jsonschema:"omitempty"` + OriginPatterns []string `json:"originPatterns,omitempty" jsonschema:"omitempty"` } // NewWebSocketServerPool creates a new server pool according to spec. diff --git a/pkg/filters/proxies/loadbalance.go b/pkg/filters/proxies/loadbalance.go index 8a31fca916..e591ffb064 100644 --- a/pkg/filters/proxies/loadbalance.go +++ b/pkg/filters/proxies/loadbalance.go @@ -54,11 +54,11 @@ type LoadBalancer interface { // this is not good as new policies could be added in the future, we should // convert it to a map later. type LoadBalanceSpec struct { - Policy string `json:"policy" jsonschema:"omitempty"` - HeaderHashKey string `json:"headerHashKey" jsonschema:"omitempty"` - ForwardKey string `json:"forwardKey" jsonschema:"omitempty"` - StickySession *StickySessionSpec `json:"stickySession" jsonschema:"omitempty"` - HealthCheck *HealthCheckSpec `json:"healthCheck" jsonschema:"omitempty"` + Policy string `json:"policy,omitempty" jsonschema:"omitempty"` + HeaderHashKey string `json:"headerHashKey,omitempty" jsonschema:"omitempty"` + ForwardKey string `json:"forwardKey,omitempty" jsonschema:"omitempty"` + StickySession *StickySessionSpec `json:"stickySession,omitempty" jsonschema:"omitempty"` + HealthCheck *HealthCheckSpec `json:"healthCheck,omitempty" jsonschema:"omitempty"` } // LoadBalancePolicy is the interface of a load balance policy. diff --git a/pkg/filters/proxies/server.go b/pkg/filters/proxies/server.go index 6ea5ff2a28..76bfd44493 100644 --- a/pkg/filters/proxies/server.go +++ b/pkg/filters/proxies/server.go @@ -27,9 +27,9 @@ import ( // Server is a backend proxy server. type Server struct { URL string `json:"url" jsonschema:"required,format=url"` - Tags []string `json:"tags" jsonschema:"omitempty,uniqueItems=true"` - Weight int `json:"weight" jsonschema:"omitempty,minimum=0,maximum=100"` - KeepHost bool `json:"keepHost" jsonschema:"omitempty,default=false"` + Tags []string `json:"tags,omitempty" jsonschema:"omitempty,uniqueItems=true"` + Weight int `json:"weight,omitempty" jsonschema:"omitempty,minimum=0,maximum=100"` + KeepHost bool `json:"keepHost,omitempty" jsonschema:"omitempty,default=false"` AddrIsHostName bool `json:"-"` Unhealth bool `json:"-"` // HealthCounter is used to count the number of successive health checks diff --git a/pkg/filters/proxies/serverpool.go b/pkg/filters/proxies/serverpool.go index 720943df45..c0d152f137 100644 --- a/pkg/filters/proxies/serverpool.go +++ b/pkg/filters/proxies/serverpool.go @@ -45,11 +45,11 @@ type ServerPoolBase struct { // ServerPoolBaseSpec is the spec for a base server pool. 
type ServerPoolBaseSpec struct { - ServerTags []string `json:"serverTags" jsonschema:"omitempty,uniqueItems=true"` - Servers []*Server `json:"servers" jsonschema:"omitempty"` - ServiceRegistry string `json:"serviceRegistry" jsonschema:"omitempty"` - ServiceName string `json:"serviceName" jsonschema:"omitempty"` - LoadBalance *LoadBalanceSpec `json:"loadBalance" jsonschema:"omitempty"` + ServerTags []string `json:"serverTags,omitempty" jsonschema:"omitempty,uniqueItems=true"` + Servers []*Server `json:"servers,omitempty" jsonschema:"omitempty"` + ServiceRegistry string `json:"serviceRegistry,omitempty" jsonschema:"omitempty"` + ServiceName string `json:"serviceName,omitempty" jsonschema:"omitempty"` + LoadBalance *LoadBalanceSpec `json:"loadBalance,omitempty" jsonschema:"omitempty"` } // Validate validates ServerPoolSpec. diff --git a/pkg/object/httpserver/routers/spec.go b/pkg/object/httpserver/routers/spec.go index 1647df72bc..cf12d58993 100644 --- a/pkg/object/httpserver/routers/spec.go +++ b/pkg/object/httpserver/routers/spec.go @@ -22,6 +22,7 @@ import ( "net/http" "net/url" "regexp" + "strings" "github.com/megaease/easegress/v2/pkg/logger" "github.com/megaease/easegress/v2/pkg/util/ipfilter" @@ -38,6 +39,8 @@ type Paths []*Path type Host struct { IsRegexp bool `json:"isRegexp" jsonschema:"omitempty"` Value string `json:"value" jsonschema:"required"` + prefix string `json:"-"` + suffix string `json:"-"` re *regexp.Regexp } @@ -124,6 +127,20 @@ func (rule *Rule) Init() { for i := range rule.Hosts { h := &rule.Hosts[i] if !h.IsRegexp { + if h.Value != "" { + count := strings.Count(h.Value, "*") + if count > 1 { + logger.Errorf("invalid host %s, only one wildcard is allowed", h.Value) + continue + } + if h.Value[0] == '*' { + h.suffix = h.Value[1:] + } else if h.Value[len(h.Value)-1] == '*' { + h.prefix = h.Value[:len(h.Value)-1] + } else { + logger.Errorf("invalid host %s, only wildcard prefix or suffix is allowed", h.Value) + } + } continue } if re, err := regexp.Compile(h.Value); err != nil { @@ -154,6 +171,10 @@ func (rule *Rule) MatchHost(ctx *RouteContext) bool { } } else if host == h.Value { return true + } else if h.prefix != "" && strings.HasPrefix(host, h.prefix) { + return true + } else if h.suffix != "" && strings.HasSuffix(host, h.suffix) { + return true } } diff --git a/pkg/object/httpserver/routers/spec_test.go b/pkg/object/httpserver/routers/spec_test.go index f27dedaf20..2ceff529ef 100644 --- a/pkg/object/httpserver/routers/spec_test.go +++ b/pkg/object/httpserver/routers/spec_test.go @@ -126,6 +126,29 @@ func TestRuleMatch(t *testing.T) { rule.Init() assert.NotNil(rule) assert.False(rule.MatchHost(ctx)) + + testCases := []struct { + request string + value string + result bool + }{ + {request: "http://www.megaease.com:8080", value: "www.megaease.com", result: true}, + {request: "http://www.megaease.com:8080", value: "*.megaease.com", result: true}, + {request: "http://www.sub.megaease.com:8080", value: "*.megaease.com", result: true}, + {request: "http://www.example.megaease.com:8080", value: "*.megaease.com", result: true}, + {request: "http://www.megaease.com:8080", value: "www.megaease.*", result: true}, + {request: "http://www.megaease.cn:8080", value: "www.megaease.*", result: true}, + {request: "http://www.google.com:8080", value: "*.megaease.com", result: false}, + } + for _, tc := range testCases { + stdr, _ := http.NewRequest(http.MethodGet, tc.request, nil) + req, _ := httpprot.NewRequest(stdr) + ctx := NewContext(req) + + rule = &Rule{Hosts: []Host{{Value: 
tc.value}}} + rule.Init() + assert.Equal(tc.result, rule.MatchHost(ctx)) + } } func TestRuleAllowIP(t *testing.T) { diff --git a/pkg/object/httpserver/runtime.go b/pkg/object/httpserver/runtime.go index c25f1f4998..33abe988cd 100644 --- a/pkg/object/httpserver/runtime.go +++ b/pkg/object/httpserver/runtime.go @@ -268,7 +268,7 @@ func (r *runtime) startHTTP3Server() { } r.server3 = &http3.Server{ - Addr: fmt.Sprintf(":%d", r.spec.Port), + Addr: fmt.Sprintf("%s:%d", r.spec.Address, r.spec.Port), Handler: r.mux, TLSConfig: tlsConfig, QuicConfig: &quic.Config{ @@ -303,14 +303,14 @@ func (r *runtime) startHTTP1And2Server() { return !bytes.Contains(p, []byte("TLS handshake error")) }) r.server = &http.Server{ - Addr: fmt.Sprintf(":%d", r.spec.Port), + Addr: fmt.Sprintf("%s:%d", r.spec.Address, r.spec.Port), Handler: r.mux, IdleTimeout: keepAliveTimeout, ErrorLog: log.New(fw, "", log.LstdFlags), } r.server.SetKeepAlivesEnabled(r.spec.KeepAlive) - listener, err := gnet.Listen("tcp", fmt.Sprintf(":%d", r.spec.Port)) + listener, err := gnet.Listen("tcp", fmt.Sprintf("%s:%d", r.spec.Address, r.spec.Port)) if err != nil { logger.Errorf("httpserver %s failed to listen: %v", r.superSpec.Name(), err) r.setState(stateFailed) diff --git a/pkg/object/httpserver/spec.go b/pkg/object/httpserver/spec.go index 3a92d90e60..c79c6ce79b 100644 --- a/pkg/object/httpserver/spec.go +++ b/pkg/object/httpserver/spec.go @@ -37,6 +37,7 @@ type ( HTTPS bool `json:"https" jsonschema:"required"` AutoCert bool `json:"autoCert" jsonschema:"omitempty"` XForwardedFor bool `json:"xForwardedFor" jsonschema:"omitempty"` + Address string `json:"address" jsonschema:"omitempty"` Port uint16 `json:"port" jsonschema:"required,minimum=1"` ClientMaxBodySize int64 `json:"clientMaxBodySize" jsonschema:"omitempty"` KeepAliveTimeout string `json:"keepAliveTimeout" jsonschema:"omitempty,format=duration"` diff --git a/pkg/protocols/httpprot/httpheader/httpheader.go b/pkg/protocols/httpprot/httpheader/httpheader.go index 140d7d0613..755ad10745 100644 --- a/pkg/protocols/httpprot/httpheader/httpheader.go +++ b/pkg/protocols/httpprot/httpheader/httpheader.go @@ -31,11 +31,11 @@ type ( // AdaptSpec describes rules for adapting. AdaptSpec struct { - Del []string `json:"del" jsonschema:"omitempty,uniqueItems=true"` + Del []string `json:"del,omitempty" jsonschema:"omitempty,uniqueItems=true"` // NOTE: Set and Add allow empty value. - Set map[string]string `json:"set" jsonschema:"omitempty"` - Add map[string]string `json:"add" jsonschema:"omitempty"` + Set map[string]string `json:"set,omitempty" jsonschema:"omitempty"` + Add map[string]string `json:"add,omitempty" jsonschema:"omitempty"` } ) diff --git a/pkg/supervisor/spec.go b/pkg/supervisor/spec.go index 09695fbd76..0bed2ae20d 100644 --- a/pkg/supervisor/spec.go +++ b/pkg/supervisor/spec.go @@ -47,7 +47,7 @@ type ( Version string `json:"version" jsonschema:"required"` // RFC3339 format - CreatedAt string `json:"createdAt" jsonschema:"omitempty"` + CreatedAt string `json:"createdAt,omitempty" jsonschema:"omitempty"` } )
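The routers/spec.go hunk above lets a non-regexp host value carry at most one `*`, interpreted as a hostname prefix or suffix. Below is a minimal standalone sketch of that matching rule, reusing cases from the new spec_test.go table; `matchWildcardHost` is an illustrative name, not part of the Easegress API.

```go
package main

import (
	"fmt"
	"strings"
)

// matchWildcardHost restates the rule added to Rule.Init/MatchHost: a leading
// '*' matches any hostname with the remaining suffix, a trailing '*' matches
// any hostname with the remaining prefix, anything else must match exactly.
func matchWildcardHost(pattern, host string) bool {
	if strings.Count(pattern, "*") > 1 {
		// The diff logs an error for more than one '*'; treat such
		// patterns as non-matching in this sketch.
		return false
	}
	switch {
	case strings.HasPrefix(pattern, "*"):
		return strings.HasSuffix(host, pattern[1:])
	case strings.HasSuffix(pattern, "*"):
		return strings.HasPrefix(host, pattern[:len(pattern)-1])
	default:
		return host == pattern
	}
}

func main() {
	cases := []struct{ pattern, host string }{
		{"*.megaease.com", "www.megaease.com"}, // true
		{"www.megaease.*", "www.megaease.cn"},  // true
		{"*.megaease.com", "www.google.com"},   // false
	}
	for _, c := range cases {
		fmt.Printf("%-16s vs %-18s -> %v\n", c.pattern, c.host, matchWildcardHost(c.pattern, c.host))
	}
}
```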