diff --git a/go.mod b/go.mod
index 4097464..7ad219c 100644
--- a/go.mod
+++ b/go.mod
@@ -1,19 +1,22 @@
-module github.com/nginxinc/nginx-go-crossplane
+module github.com/UangDesign/nginx-go-crossplane
 
-go 1.19
+go 1.23.0
+
+toolchain go1.24.6
 
 require (
     github.com/jstemmer/go-junit-report v1.0.0
+    github.com/nginxinc/nginx-go-crossplane v0.4.80
     github.com/stretchr/testify v1.10.0
     golang.org/x/tools v0.32.0
 )
 
 require (
     github.com/davecgh/go-spew v1.1.1 // indirect
-    github.com/kr/pretty v0.3.1 // indirect
+    github.com/kr/text v0.2.0 // indirect
     github.com/pmezard/go-difflib v1.0.0 // indirect
+    github.com/rogpeppe/go-internal v1.9.0 // indirect
     golang.org/x/mod v0.24.0 // indirect
     golang.org/x/sync v0.13.0 // indirect
-    gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
     gopkg.in/yaml.v3 v3.0.1 // indirect
 )
diff --git a/go.sum b/go.sum
index 91acfef..83161ff 100644
--- a/go.sum
+++ b/go.sum
@@ -2,16 +2,15 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/jstemmer/go-junit-report v1.0.0 h1:8X1gzZpR+nVQLAht+L/foqOeX2l9DTZoaIPbEQHxsds=
 github.com/jstemmer/go-junit-report v1.0.0/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/nginxinc/nginx-go-crossplane v0.4.80 h1:9DnNZE5hH9qlvYXJsBb64H1xRxns4CG2vwmdfjS2Akc=
+github.com/nginxinc/nginx-go-crossplane v0.4.80/go.mod h1:YW/lk3F6/HUSQyfB6bFPnL9TkLcyfRXWfBNgirZmFfI=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
diff --git a/parse_with_fs.go b/parse_with_fs.go
new file mode 100644
index 0000000..89b3350
--- /dev/null
+++ b/parse_with_fs.go
@@ -0,0 +1,355 @@
+package crossplane
+
+import (
+    "errors"
+    "fmt"
+    "io"
+    "io/fs"
+    "path/filepath"
+    "sort"
+    "strings"
+)
+
+// memFsOpen is the default open function: it reads files straight from the supplied fs.FS.
+var (
+    memFsOpen = func(memfs fs.FS, path string) (io.ReadCloser, error) { return memfs.Open(path) }
+)
+
+// openFunc holds the Open hook from MemFsParseOptions. Because it is package-level
+// state, concurrent ParseWithMemFs calls that use different Open hooks are not safe.
+var openFunc func(memfs fs.FS, path string) (io.ReadCloser, error)
+
+// MemFsParseOptions extends ParseOptions with an optional Open hook used to read
+// files out of the in-memory filesystem.
+type MemFsParseOptions struct {
+    ParseOptions
+    Open func(memfs fs.FS, path string) (io.ReadCloser, error)
+}
+
+// ParseWithMemFs parses an nginx config rooted at filename, resolving includes
+// against the provided fs.FS instead of the host filesystem.
+func ParseWithMemFs(memfs fs.FS, filename string, options *MemFsParseOptions) (*Payload, error) {
+    payload := &Payload{
+        Status: "ok",
+        Errors: []PayloadError{},
+        Config: []Config{},
+    }
+    if options.Glob == nil {
+        // resolve include globs against the in-memory filesystem, not the OS
+        options.Glob = func(pattern string) ([]string, error) { return fs.Glob(memfs, pattern) }
+    }
+
+    handleError := func(config *Config, err error) {
+        var line *int
+        if e, ok := err.(*ParseError); ok {
+            line = e.Line
+        }
+        cerr := ConfigError{Line: line, Error: err}
+        perr := PayloadError{Line: line, Error: err, File: config.File}
+        if options.ErrorCallback != nil {
+            perr.Callback = options.ErrorCallback(err)
+        }
+
+        const failedSts = "failed"
+        config.Status = failedSts
+        config.Errors = append(config.Errors, cerr)
+
+        payload.Status = failedSts
+        payload.Errors = append(payload.Errors, perr)
+    }
+
+    // set (or reset) the Open hook on every call so a previous caller's hook
+    // does not leak into this parse
+    openFunc = options.Open
+
+    p := parser{
+        configDir:   filepath.Dir(filename),
+        options:     &options.ParseOptions,
+        handleError: handleError,
+        includes:    []fileCtx{{path: filename, ctx: blockCtx{}}},
+        included:    map[string]int{filename: 0},
+        // adjacency list where an edge exists between a file and the file it includes
+        includeEdges: map[string][]string{},
+        // number of times a file is included by another file
+        includeInDegree: map[string]int{filename: 0},
+    }
+    for len(p.includes) > 0 {
+        incl := p.includes[0]
+        p.includes = p.includes[1:]
+
+        file, err := p.openMemfsFile(memfs, incl.path)
+        if err != nil {
+            return nil, err
+        }
+
+        defer file.Close()
+
+        tokens := LexWithOptions(file, options.LexOptions)
+        config := Config{
+            File:   incl.path,
+            Status: "ok",
+            Errors: []ConfigError{},
+            Parsed: Directives{},
+        }
+        parsed, err := p.memfsParse(memfs, &config, tokens, incl.ctx, false)
+        if err != nil {
+            if options.StopParsingOnError {
+                return nil, err
+            }
+            handleError(&config, err)
+        } else {
+            config.Parsed = parsed
+        }
+
+        payload.Config = append(payload.Config, config)
+    }
+    if p.isAcyclic() {
+        return nil, errors.New("configs contain include cycle")
+    }
+
+    if options.CombineConfigs {
+        return payload.Combined()
+    }
+
+    return payload, nil
+}
+
+// openMemfsFile opens path from memfs, using the configured Open hook when one was provided.
+func (p *parser) openMemfsFile(memfs fs.FS, path string) (io.ReadCloser, error) {
+    open := memFsOpen
+    if openFunc != nil {
+        open = openFunc
+    }
+    return open(memfs, path)
+}
+
+// memfsParse recursively parses directives from an nginx config context.
+//
+//nolint:gocyclo,funlen,gocognit,maintidx,nonamedreturns
+func (p *parser) memfsParse(memfs fs.FS, parsing *Config, tokens <-chan NgxToken, ctx blockCtx, consume bool) (parsed Directives, err error) {
+    var tokenOk bool
+    // parse recursively by pulling from a flat stream of tokens
+    for t := range tokens {
+        if t.Error != nil {
+            var perr *ParseError
+            if errors.As(t.Error, &perr) {
+                perr.File = &parsing.File
+                perr.BlockCtx = ctx.getLastBlock()
+                return nil, perr
+            }
+            return nil, &ParseError{
+                What:        t.Error.Error(),
+                File:        &parsing.File,
+                Line:        &t.Line,
+                originalErr: t.Error,
+                BlockCtx:    ctx.getLastBlock(),
+            }
+        }
+
+        var commentsInArgs []string
+
+        // we are parsing a block, so break if it's closing
+        if t.Value == "}" && !t.IsQuoted {
+            break
+        }
+
+        // if we are consuming, then just continue until end of context
+        if consume {
+            // if we find a block inside this context, consume it too
+            if t.Value == "{" && !t.IsQuoted {
+                _, _ = p.memfsParse(memfs, parsing, tokens, nil, true)
+            }
+            continue
+        }
+
+        var fileName string
+        if p.options.CombineConfigs {
+            fileName = parsing.File
+        }
+
+        // the first token should always be an nginx directive
+        stmt := &Directive{
+            Directive: t.Value,
+            Line:      t.Line,
+            Args:      []string{},
+            File:      fileName,
+        }
+
+        // if token is comment
+        if strings.HasPrefix(t.Value, "#") && !t.IsQuoted {
+            if p.options.ParseComments {
+                comment := t.Value[1:]
+                stmt.Directive = "#"
+                stmt.Comment = &comment
+                parsed = append(parsed, stmt)
+            }
+            continue
+        }
+
+        // parse arguments by reading tokens
+        t, tokenOk = <-tokens
+        if !tokenOk {
+            return nil, &ParseError{
+                What:        ErrPrematureLexEnd.Error(),
+                File:        &parsing.File,
+                Line:        &stmt.Line,
+                originalErr: ErrPrematureLexEnd,
+                BlockCtx:    ctx.getLastBlock(),
+            }
+        }
+        for t.IsQuoted || (t.Value != "{" && t.Value != ";" && t.Value != "}") {
+            if !strings.HasPrefix(t.Value, "#") || t.IsQuoted {
+                stmt.Args = append(stmt.Args, t.Value)
+            } else if p.options.ParseComments {
+                commentsInArgs = append(commentsInArgs, t.Value[1:])
+            }
+            t, tokenOk = <-tokens
+            if !tokenOk {
+                return nil, &ParseError{
+                    What:        ErrPrematureLexEnd.Error(),
+                    File:        &parsing.File,
+                    Line:        &stmt.Line,
+                    originalErr: ErrPrematureLexEnd,
+                    BlockCtx:    ctx.getLastBlock(),
+                }
+            }
+        }
+
+        // if inside "map-like" block - add contents to payload, but do not parse further
+        if len(ctx) > 0 {
+            if _, ok := mapBodies[ctx[len(ctx)-1]]; ok {
+                mapErr := analyzeMapBody(parsing.File, stmt, t.Value, ctx[len(ctx)-1])
+                if mapErr != nil && p.options.StopParsingOnError {
+                    return nil, mapErr
+                } else if mapErr != nil {
+                    p.handleError(parsing, mapErr)
+                    // consume invalid block
+                    if t.Value == "{" && !t.IsQuoted {
+                        _, _ = p.memfsParse(memfs, parsing, tokens, nil, true)
+                    }
+                    continue
+                }
+                stmt.IsMapBlockParameter = true
+                parsed = append(parsed, stmt)
+                continue
+            }
+        }
+
+        // consume the directive if it is ignored and move on
+        if contains(p.options.IgnoreDirectives, stmt.Directive) {
+            // if this directive was a block consume it too
+            if t.Value == "{" && !t.IsQuoted {
+                _, _ = p.memfsParse(memfs, parsing, tokens, nil, true)
+            }
+            continue
+        }
+
+        // raise errors if this statement is invalid
+        err = analyze(parsing.File, stmt, t.Value, ctx, p.options)
+
+        if perr, ok := err.(*ParseError); ok && !p.options.StopParsingOnError {
+            p.handleError(parsing, perr)
+            // if it was a block but shouldn't have been then consume
+            if strings.HasSuffix(perr.What, ` is not terminated by ";"`) {
+                if t.Value != "}" && !t.IsQuoted {
+                    _, _ = p.memfsParse(memfs, parsing, tokens, nil, true)
+                } else {
+                    break
+                }
+            }
+            // keep on parsin'
+            continue
+        } else if err != nil {
+            return nil, err
+        }
+
+        // prepare arguments - strip parentheses
+        if stmt.Directive == "if" {
+            stmt = prepareIfArgs(stmt)
+        }
+
+        // add "includes" to the payload if this is an include statement
+        if !p.options.SingleFile && stmt.Directive == "include" {
+            if len(stmt.Args) == 0 {
+                return nil, &ParseError{
+                    What: fmt.Sprintf(`invalid number of arguments in "%s" directive in %s:%d`,
+                        stmt.Directive,
+                        parsing.File,
+                        stmt.Line,
+                    ),
+                    File:      &parsing.File,
+                    Line:      &stmt.Line,
+                    Statement: stmt.String(),
+                    BlockCtx:  ctx.getLastBlock(),
+                }
+            }
+
+            pattern := stmt.Args[0]
+            if !filepath.IsAbs(pattern) {
+                pattern = filepath.Join(p.configDir, pattern)
+            }
+
+            // get names of all included files
+            var fnames []string
+            if hasMagic.MatchString(pattern) {
+                fnames, err = p.options.Glob(pattern)
+                if err != nil {
+                    return nil, err
+                }
+                sort.Strings(fnames)
+            } else {
+                // if the file pattern was explicit, nginx will check
+                // that the included file can be opened and read
+                if f, err := p.openMemfsFile(memfs, pattern); err != nil {
+                    perr := &ParseError{
+                        What:      err.Error(),
+                        File:      &parsing.File,
+                        Line:      &stmt.Line,
+                        Statement: stmt.String(),
+                        BlockCtx:  ctx.getLastBlock(),
+                    }
+                    if !p.options.StopParsingOnError {
+                        p.handleError(parsing, perr)
+                    } else {
+                        return nil, perr
+                    }
+                } else {
+                    defer f.Close()
+                    fnames = []string{pattern}
+                }
+            }
+
+            for _, fname := range fnames {
+                // the included set keeps files from being parsed twice
+                // TODO: handle files included from multiple contexts
+                if _, ok := p.included[fname]; !ok {
+                    p.included[fname] = len(p.included)
+                    p.includes = append(p.includes, fileCtx{fname, ctx})
+                }
+                stmt.Includes = append(stmt.Includes, p.included[fname])
+                // add an edge between the current file and its included file and
+                // increase the included file's in-degree
+                p.includeEdges[parsing.File] = append(p.includeEdges[parsing.File], fname)
+                p.includeInDegree[fname]++
+            }
+        }
+
+        // if this statement terminated with "{" then it is a block
+        if t.Value == "{" && !t.IsQuoted {
+            stmt.Block = make(Directives, 0)
+            inner := enterBlockCtx(stmt, ctx) // get context for block
+            blocks, err := p.memfsParse(memfs, parsing, tokens, inner, false)
+            if err != nil {
+                return nil, err
+            }
+            stmt.Block = append(stmt.Block, blocks...)
+        }
+
+        parsed = append(parsed, stmt)
+
+        // add all comments found inside args after stmt is added
+        for _, comment := range commentsInArgs {
+            comment := comment
+            parsed = append(parsed, &Directive{
+                Directive: "#",
+                Line:      stmt.Line,
+                Args:      []string{},
+                File:      fileName,
+                Comment:   &comment,
+            })
+        }
+    }
+
+    return parsed, nil
+}
diff --git a/parse_with_fs_test.go b/parse_with_fs_test.go
new file mode 100644
index 0000000..fe76843
--- /dev/null
+++ b/parse_with_fs_test.go
@@ -0,0 +1,285 @@
+package crossplane
+
+import (
+    "encoding/json"
+    "testing"
+    "testing/fstest"
+)
+
+const nginxConf = `# Global configuration section
+user nginx;
+worker_processes auto; # set worker process count automatically from CPU cores
+error_log /var/log/nginx/error.log warn; # error log path and level
+pid /var/run/nginx.pid;
+
+# Events module configuration
+events {
+    worker_connections 1024; # maximum connections per worker process
+    multi_accept on; # accept all new connections at once
+    use epoll; # use the efficient epoll model (Linux)
+}
+
+# HTTP module configuration
+http {
+    include mime.types; # MIME type definitions
+    default_type application/octet-stream; # default MIME type
+
+    # Log format definitions
+    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+                    '$status $body_bytes_sent "$http_referer" '
+                    '"$http_user_agent" "$http_x_forwarded_for"';
+
+    log_format detailed '$remote_addr - $remote_user [$time_local] "$request" '
+                        '$status $body_bytes_sent "$http_referer" '
+                        '"$http_user_agent" "$http_x_forwarded_for" '
+                        'rt=$request_time uct="$upstream_connect_time" uht="$upstream_header_time" urt="$upstream_response_time"';
+
+    access_log /var/log/nginx/access.log main; # access log
+
+    sendfile on; # efficient file transfer
+    tcp_nopush on; # optimize packet delivery
+    tcp_nodelay on; # disable Nagle's algorithm
+
+    keepalive_timeout 65; # keep-alive timeout
+    types_hash_max_size 2048;
+
+    # Enable gzip compression
+    gzip on;
+    gzip_disable "msie6";
+    gzip_vary on;
+    gzip_proxied any;
+    gzip_comp_level 6;
+    gzip_buffers 16 8k;
+    gzip_http_version 1.1;
+    gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
+
+    # Security-related headers
+    add_header X-Frame-Options "SAMEORIGIN" always;
+    add_header X-Content-Type-Options "nosniff" always;
+    add_header Referrer-Policy "no-referrer-when-downgrade" always;
+    add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;
+    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
+
+    # Upload file size limit
+    client_max_body_size 100M;
+
+    # Load-balancing upstream server configuration
+    upstream backend {
+        least_conn; # least-connections load-balancing algorithm
+        server backend1.example.com:8080 weight=5;
+        server backend2.example.com:8080;
+        server backup.backend.example.com:8080 backup; # backup server
+    }
+
+    # Another upstream - used for WebSockets
+    upstream websocket {
+        server ws1.example.com:8080;
+        server ws2.example.com:8080;
+    }
+
+    # Static file cache path configuration
+    proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=STATIC:10m inactive=24h max_size=1g use_temp_path=off;
+
+    # Virtual host configuration (HTTP)
+    server {
+        listen 80;
+        server_name example.com www.example.com;
+        root /var/www/html;
+
+        # Global error pages
+        error_page 404 /404.html;
+        error_page 500 502 503 504 /50x.html;
+
+        # Redirect all HTTP to HTTPS
+        return 301 https://$host$request_uri;
+    }
+
+    # Virtual host configuration (HTTPS)
+    server {
+        listen 443 ssl http2;
+        server_name example.com www.example.com;
+
+        # SSL certificate configuration
+        ssl_certificate /etc/ssl/certs/example.com.crt;
+        ssl_certificate_key /etc/ssl/private/example.com.key;
+        ssl_trusted_certificate /etc/ssl/certs/example.com.ca.crt;
+
+        # SSL tuning
+        ssl_protocols TLSv1.2 TLSv1.3;
+        ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256';
+        ssl_prefer_server_ciphers on;
+        ssl_session_cache shared:SSL:10m;
+        ssl_session_timeout 10m;
+        ssl_session_tickets off;
+        ssl_stapling on;
+        ssl_stapling_verify on;
+
+        # OCSP Stapling
+        resolver 8.8.8.8 8.8.4.4 valid=300s;
+        resolver_timeout 5s;
+
+        root /var/www/html;
+
+        # Static file serving configuration
+        location / {
+            try_files $uri $uri/ /index.html;
+            expires 1d; # cache control
+            add_header Cache-Control "public";
+        }
+
+        # Static assets directory
+        location /static/ {
+            alias /var/www/static/;
+            expires 1y;
+            access_log off;
+            add_header Cache-Control "public";
+        }
+
+        # API reverse proxy configuration
+        location /api/ {
+            proxy_pass http://backend/;
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection 'upgrade';
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Proto $scheme;
+            proxy_cache STATIC;
+            proxy_cache_valid 200 1h;
+            proxy_cache_use_stale error timeout invalid_header updating http_500 http_502 http_503 http_504;
+        }
+
+        # WebSocket configuration
+        location /ws/ {
+            proxy_pass http://websocket;
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection "Upgrade";
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_read_timeout 86400; # long-lived WebSocket connection timeout
+        }
+
+        # Deny access to hidden files
+        location ~ /\. {
+            deny all;
+            access_log off;
+            log_not_found off;
+        }
+
+        # Admin area protected by basic auth
+        location /admin/ {
+            auth_basic "Admin Area";
+            auth_basic_user_file /etc/nginx/.htpasswd;
+            try_files $uri $uri/ /admin/index.html;
+        }
+
+        # Health check endpoint
+        location /health {
+            access_log off;
+            return 200 "OK\n";
+            add_header Content-Type text/plain;
+        }
+
+        # Block common vulnerability scans
+        location ~* (wp-admin|wp-login|\.git) {
+            deny all;
+        }
+    }
+
+    # Subdomain configuration
+    server {
+        listen 80;
+        server_name blog.example.com;
+
+        location / {
+            proxy_pass http://blog-backend;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+        }
+    }
+
+    # Redirect configuration
+    server {
+        listen 80;
+        server_name old.example.com;
+        return 301 https://example.com$request_uri;
+    }
+
+    # Default server configuration - catches all unmatched requests
+    server {
+        listen 80 default_server;
+        listen [::]:80 default_server;
+        server_name _;
+        return 444; # close the connection without sending response headers
+    }
+}
+
+# Mail proxy configuration example (optional)
+mail {
+    server_name mail.example.com;
+    auth_http localhost:9000/auth;
+
+    proxy_pass_error_message on;
+
+    server {
+        listen 25;
+        protocol smtp;
+        smtp_auth login plain cram-md5;
+    }
+
+    server {
+        listen 110;
+        protocol pop3;
+        pop3_auth plain apop cram-md5;
+    }
+
+    server {
+        listen 143;
+        protocol imap;
+    }
+}
+
+# TCP/UDP proxy configuration example (nginx 1.9.0+ or NGINX Plus)
+stream {
+    upstream dns_servers {
+        server 192.168.1.1:53;
+        server 192.168.1.2:53;
+    }
+
+    server {
+        listen 53 udp;
+        proxy_pass dns_servers;
+        proxy_timeout 1s;
+        proxy_responses 1;
+    }
+
+    server {
+        listen 3306;
+        proxy_pass db_master;
+    }
+}`
+
+const mimeTypes = ``
+
+func TestParseWithMemFs(t *testing.T) {
+    memfs := fstest.MapFS{
+        "nginx.conf": {
+            Data: []byte(nginxConf),
+        },
+        "mime.types": {
+            Data: []byte(mimeTypes),
+        },
+    }
+
+    payload, err := ParseWithMemFs(memfs, "nginx.conf", &MemFsParseOptions{})
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    data, err := json.Marshal(payload)
+    if err != nil {
+        t.Fatalf("failed to marshal payload: %v", err)
+    }
+    t.Log(string(data))
+}
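Usage note (not part of the diff): a minimal sketch of how a caller might pair ParseWithMemFs with a config tree embedded via go:embed. The "conf" directory, the main package, and the printed Status field are illustrative assumptions here, not something this change prescribes.

    package main

    import (
        "embed"
        "fmt"
        "io/fs"

        crossplane "github.com/UangDesign/nginx-go-crossplane"
    )

    // conf/ is a hypothetical directory holding nginx.conf and its includes.
    //
    //go:embed conf
    var confFS embed.FS

    func main() {
        // Strip the leading "conf/" so relative includes resolve against nginx.conf's directory.
        sub, err := fs.Sub(confFS, "conf")
        if err != nil {
            panic(err)
        }

        payload, err := crossplane.ParseWithMemFs(sub, "nginx.conf", &crossplane.MemFsParseOptions{})
        if err != nil {
            panic(err)
        }
        fmt.Println(payload.Status) // "ok" when every file parsed cleanly
    }

Because includes never touch the host filesystem, the same binary can validate configs it ships with, with no temp files needed.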