// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyfile

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	"go.uber.org/zap"

	"github.com/caddyserver/caddy/v2"
)

// Parse parses the input just enough to group tokens, in
// order, by server block. No further parsing is performed.
// Server blocks are returned in the order in which they appear.
//
// Environment variables in {$ENVIRONMENT_VARIABLE} notation
// will be replaced before parsing begins.
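//
// Illustrative use (a sketch; the Caddyfile content here is hypothetical):
//
//	input := []byte("example.com {\n\trespond \"hello\"\n}")
//	blocks, err := Parse("Caddyfile", input)
//
// Each returned ServerBlock carries its keys (here, example.com) and
// one Segment per directive line.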
func Parse(filename string, input []byte) ([]ServerBlock, error) {
	// unfortunately, we must copy the input because parsing must
	// remain a read-only operation, but we have to expand environment
	// variables before we parse, which changes the underlying array (#4422)
	inputCopy := make([]byte, len(input))
	copy(inputCopy, input)

	tokens, err := allTokens(filename, inputCopy)
	if err != nil {
		return nil, err
	}
	p := parser{
		Dispenser: NewDispenser(tokens),
		importGraph: importGraph{
			nodes: make(map[string]struct{}),
			edges: make(adjacency),
		},
	}
	return p.parseAll()
}

// allTokens lexes the entire input, but does not parse it.
// It returns all the tokens from the input, unstructured
// and in order. It may mutate input as it expands env vars.
func allTokens(filename string, input []byte) ([]Token, error) {
	return Tokenize(replaceEnvVars(input), filename)
}

// replaceEnvVars replaces all occurrences of environment variables.
// It mutates the underlying array and returns the updated slice.
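//
// For example, with the ":" default delimiter defined at the bottom of
// this file, "{$PORT:8080}" becomes the value of $PORT, or "8080" if
// $PORT is unset.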
func replaceEnvVars(input []byte) []byte {
	var offset int
	for {
		begin := bytes.Index(input[offset:], spanOpen)
		if begin < 0 {
			break
		}
		begin += offset // make beginning relative to input, not offset
		end := bytes.Index(input[begin+len(spanOpen):], spanClose)
		if end < 0 {
			break
		}
		end += begin + len(spanOpen) // make end relative to input, not begin

		// get the name; if there is no name, skip it
		envString := input[begin+len(spanOpen) : end]
		if len(envString) == 0 {
			offset = end + len(spanClose)
			continue
		}

		// split the string into a key and an optional default
		envParts := strings.SplitN(string(envString), envVarDefaultDelimiter, 2)

		// do a lookup for the env var, replace with the default if not found
		envVarValue, found := os.LookupEnv(envParts[0])
		if !found && len(envParts) == 2 {
			envVarValue = envParts[1]
		}

		// get the value of the environment variable
		// note that this causes one-level deep chaining
		envVarBytes := []byte(envVarValue)

		// splice in the value
		input = append(input[:begin],
			append(envVarBytes, input[end+len(spanClose):]...)...)

		// continue at the end of the replacement
		offset = begin + len(envVarBytes)
	}
	return input
}

type parser struct {
	*Dispenser
	block           ServerBlock // current server block being parsed
	eof             bool        // if we encounter a valid EOF in a hard place
	definedSnippets map[string][]Token
	nesting         int
	importGraph     importGraph
}

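// parseAll parses the whole token stream into a list of server blocks,
// one parseOne call at a time, until EOF.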
func (p *parser) parseAll() ([]ServerBlock, error) {
	var blocks []ServerBlock

	for p.Next() {
		err := p.parseOne()
		if err != nil {
			return blocks, err
		}
		if len(p.block.Keys) > 0 || len(p.block.Segments) > 0 {
			blocks = append(blocks, p.block)
		}
		if p.nesting > 0 {
			return blocks, p.EOFErr()
		}
	}

	return blocks, nil
}

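// parseOne parses the next server block from the token stream into p.block.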
func (p *parser) parseOne() error {
	p.block = ServerBlock{}
	return p.begin()
}

func (p *parser) begin() error {
	if len(p.tokens) == 0 {
		return nil
	}

	err := p.addresses()
	if err != nil {
		return err
	}

	if p.eof {
		// this happens if the Caddyfile consists of only
		// a line of addresses and nothing else
		return nil
	}

	if ok, name := p.isNamedRoute(); ok {
		// we just need a dummy leading token to ease parsing later
		nameToken := p.Token()
		nameToken.Text = name

		// named routes only have one key, the route name
		p.block.Keys = []Token{nameToken}
		p.block.IsNamedRoute = true

		// get all the tokens from the block, including the braces
		tokens, err := p.blockTokens(true)
		if err != nil {
			return err
		}
		tokens = append([]Token{nameToken}, tokens...)
		p.block.Segments = []Segment{tokens}
		return nil
	}

	if ok, name := p.isSnippet(); ok {
		if p.definedSnippets == nil {
			p.definedSnippets = map[string][]Token{}
		}
		if _, found := p.definedSnippets[name]; found {
			return p.Errf("redeclaration of previously declared snippet %s", name)
		}
		// consume all tokens until the matched closing brace
		tokens, err := p.blockTokens(false)
		if err != nil {
			return err
		}
		// Just as we need to track which file the token comes from, we need to
		// keep track of which snippet the token comes from. This is helpful
		// in tracking import cycles across files/snippets by namespacing them.
		// Without this, we end up with false positives in cycle detection.
		for k, v := range tokens {
			v.snippetName = name
			tokens[k] = v
		}
		p.definedSnippets[name] = tokens
		// empty the block keys so we don't save this block as a real server
		p.block.Keys = nil
		return nil
	}

	return p.blockContents()
}

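// addresses parses the site addresses at the head of a server block,
// up to the opening curly brace or the end of the line; for example,
// "a.example.com, b.example.com {" yields two keys.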
func (p *parser) addresses() error {
	var expectingAnother bool

	for {
		value := p.Val()
		token := p.Token()

		// Reject request matchers if trying to define them globally
		if strings.HasPrefix(value, "@") {
			return p.Errf("request matchers may not be defined globally, they must be in a site block; found %s", value)
		}

		// Special case: import directive replaces tokens during parse-time
		if value == "import" && p.isNewLine() {
			err := p.doImport(0)
			if err != nil {
				return err
			}
			continue
		}

		// Open brace definitely indicates end of addresses
		if value == "{" {
			if expectingAnother {
				return p.Errf("Expected another address but had '%s' - check for extra comma", value)
			}
			// Mark this server block as being defined with braces.
			// This is used to provide a better error message when
			// the user may have tried to define two server blocks
			// without having used braces, which are required in
			// that case.
			p.block.HasBraces = true
			break
		}

		// Users commonly forget to place a space between the address and the '{'
		if strings.HasSuffix(value, "{") {
			return p.Errf("Site addresses cannot end with a curly brace: '%s' - put a space between the token and the brace", value)
		}

		if value != "" { // empty token possible if user typed ""
			// Trailing comma indicates another address will follow, which
			// may possibly be on the next line
			if value[len(value)-1] == ',' {
				value = value[:len(value)-1]
				expectingAnother = true
			} else {
				expectingAnother = false // but we may still see another one on this line
			}

			// If there's a comma here, it's probably because they didn't use a space
			// between their two domains, e.g. "foo.com,bar.com", which would not be
			// parsed as two separate site addresses.
			if strings.Contains(value, ",") {
				return p.Errf("Site addresses cannot contain a comma ',': '%s' - put a space after the comma to separate site addresses", value)
			}

			// After the above, a comma surrounded by spaces would result
			// in an empty token, which we should ignore
			if value != "" {
				// Add the token as a site address
				token.Text = value
				p.block.Keys = append(p.block.Keys, token)
			}
		}

		// Advance token and possibly break out of loop or return error
		hasNext := p.Next()
		if expectingAnother && !hasNext {
			return p.EOFErr()
		}
		if !hasNext {
			p.eof = true
			break // EOF
		}
		if !expectingAnother && p.isNewLine() {
			break
		}
	}

	return nil
}

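// blockContents parses the contents of a server block: its directives,
// and the enclosing curly braces when the block was defined with them.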
func (p *parser) blockContents() error {
	errOpenCurlyBrace := p.openCurlyBrace()
	if errOpenCurlyBrace != nil {
		// single-server configs don't need curly braces
		p.cursor--
	}

	err := p.directives()
	if err != nil {
		return err
	}

	// only look for a closing curly brace if there was an opening one
	if errOpenCurlyBrace == nil {
		err = p.closeCurlyBrace()
		if err != nil {
			return err
		}
	}

	return nil
}

// directives parses through all the lines for directives,
// and it expects the next token to be the first
// directive. It goes until EOF or the closing curly brace
// that ends the server block.
func (p *parser) directives() error {
	for p.Next() {
		// end of server block
		if p.Val() == "}" {
			// p.nesting has already been decremented
			break
		}

		// special case: import directive replaces tokens during parse-time
		if p.Val() == "import" {
			err := p.doImport(1)
			if err != nil {
				return err
			}
			p.cursor-- // cursor is advanced when we continue, so roll back one more
			continue
		}

		// normal case: parse a directive as a new segment
		// (a "segment" is a line which starts with a directive
		// and which ends at the end of the line or at the end of
		// the block that is opened at the end of the line)
		if err := p.directive(); err != nil {
			return err
		}
	}

	return nil
}

// doImport swaps out the import directive and its argument
// (a total of 2 tokens) with the tokens in the specified file
// or globbing pattern. When the function returns, the cursor
// is on the token before where the import directive was. In
// other words, call Next() to access the first token that was
// imported.
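//
// For example, "import common" is replaced in place by the tokens of a
// snippet named common if one was defined, or otherwise by the tokens of
// the file(s) matching "common" relative to the importing file.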
func (p *parser) doImport(nesting int) error {
	// syntax checks
	if !p.NextArg() {
		return p.ArgErr()
	}
	importPattern := p.Val()
	if importPattern == "" {
		return p.Err("Import requires a non-empty filepath")
	}

	// grab remaining args as placeholder replacements
	args := p.RemainingArgs()

	// set up a replacer for non-variadic args replacement
	repl := makeArgsReplacer(args)
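	// (import arguments are referenced as {args[N]} placeholders in the
	// imported tokens, e.g. "respond {args[0]}"; they are substituted by
	// parseVariadic/ReplaceKnown further below)
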
	// grab all the tokens (if any) from within a block that follows the import
	var blockTokens []Token
	for currentNesting := p.Nesting(); p.NextBlock(currentNesting); {
		blockTokens = append(blockTokens, p.Token())
	}
	// initialize with size 1
	blockMapping := make(map[string][]Token, 1)
	if len(blockTokens) > 0 {
		// use such tokens to create a new dispenser, and then use it to parse each block
		bd := NewDispenser(blockTokens)
		for bd.Next() {
			// see if we can grab a key
			var currentMappingKey string
			if bd.Val() == "{" {
				return p.Err("anonymous blocks are not supported")
			}
			currentMappingKey = bd.Val()
			currentMappingTokens := []Token{}
			// read all args until end of line / {
			if bd.NextArg() {
				currentMappingTokens = append(currentMappingTokens, bd.Token())
				for bd.NextArg() {
					currentMappingTokens = append(currentMappingTokens, bd.Token())
				}
				// TODO(elee1766): we don't enter another mapping here because it's annoying to extract the { and } properly.
				// maybe someone can do that in the future
			} else {
				// attempt to enter a block and add tokens to the currentMappingTokens
				for mappingNesting := bd.Nesting(); bd.NextBlock(mappingNesting); {
					currentMappingTokens = append(currentMappingTokens, bd.Token())
				}
			}
			blockMapping[currentMappingKey] = currentMappingTokens
		}
	}

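	// (e.g. "import mysnippet { foo bar }" collects "foo bar" as the block
	// tokens, so {block} expands to the whole block and {blocks.foo} expands
	// to "bar" inside the imported snippet; see the switch further below)
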
	// splice out the import directive and its arguments
	// (2 tokens, plus the length of args and any block tokens)
	tokensBefore := p.tokens[:p.cursor-1-len(args)-len(blockTokens)]
	tokensAfter := p.tokens[p.cursor+1:]
	var importedTokens []Token
	var nodes []string

	// first check snippets. That is a simple, non-recursive replacement
	if p.definedSnippets != nil && p.definedSnippets[importPattern] != nil {
		importedTokens = p.definedSnippets[importPattern]
		if len(importedTokens) > 0 {
			// just grab the first one
			nodes = append(nodes, fmt.Sprintf("%s:%s", importedTokens[0].File, importedTokens[0].snippetName))
		}
	} else {
		// make path relative to the file of the _token_ being processed rather
		// than current working directory (issue #867) and then use glob to get
		// list of matching filenames
		absFile, err := filepath.Abs(p.Dispenser.File())
		if err != nil {
			return p.Errf("Failed to get absolute path of file: %s: %v", p.Dispenser.File(), err)
		}

		var matches []string
		var globPattern string
		if !filepath.IsAbs(importPattern) {
			globPattern = filepath.Join(filepath.Dir(absFile), importPattern)
		} else {
			globPattern = importPattern
		}
		if strings.Count(globPattern, "*") > 1 || strings.Count(globPattern, "?") > 1 ||
			(strings.Contains(globPattern, "[") && strings.Contains(globPattern, "]")) {
			// See issue #2096 - a pattern with many glob expansions can hang for too long
			return p.Errf("Glob pattern may only contain one wildcard (*), but has others: %s", globPattern)
		}
		matches, err = filepath.Glob(globPattern)
		if err != nil {
			return p.Errf("Failed to use import pattern %s: %v", importPattern, err)
		}
		if len(matches) == 0 {
			if strings.ContainsAny(globPattern, "*?[]") {
				caddy.Log().Warn("No files matching import glob pattern", zap.String("pattern", importPattern))
			} else {
				return p.Errf("File to import not found: %s", importPattern)
			}
		} else {
			// See issue #5295 - should skip any files that start with a . when iterating over them.
			sep := string(filepath.Separator)
			segGlobPattern := strings.Split(globPattern, sep)
			if strings.HasPrefix(segGlobPattern[len(segGlobPattern)-1], "*") {
				var tmpMatches []string
				for _, m := range matches {
					seg := strings.Split(m, sep)
					if !strings.HasPrefix(seg[len(seg)-1], ".") {
						tmpMatches = append(tmpMatches, m)
					}
				}
				matches = tmpMatches
			}
		}

		// collect all the imported tokens
		for _, importFile := range matches {
			newTokens, err := p.doSingleImport(importFile)
			if err != nil {
				return err
			}
			importedTokens = append(importedTokens, newTokens...)
		}
		nodes = matches
	}

	nodeName := p.File()
	if p.Token().snippetName != "" {
		nodeName += fmt.Sprintf(":%s", p.Token().snippetName)
	}
	p.importGraph.addNode(nodeName)
	p.importGraph.addNodes(nodes)
	if err := p.importGraph.addEdges(nodeName, nodes); err != nil {
		p.importGraph.removeNodes(nodes)
		return err
	}

	// copy the tokens so we don't overwrite p.definedSnippets
	tokensCopy := make([]Token, 0, len(importedTokens))

	var (
		maybeSnippet   bool
		maybeSnippetId bool
		index          int
	)

	// run the argument replacer on the tokens
	// (Go's for-range over a slice and append both copy values,
	// so the tokens in p.definedSnippets are not mutated here)
	for i, token := range importedTokens {
		// update the token's imports to refer to the import directive's filename, line number, and snippet name if there is one
		if token.snippetName != "" {
			token.imports = append(token.imports, fmt.Sprintf("%s:%d (import %s)", p.File(), p.Line(), token.snippetName))
		} else {
			token.imports = append(token.imports, fmt.Sprintf("%s:%d (import)", p.File(), p.Line()))
		}

		// naive way of determining snippets, as snippet definitions can only follow the
		// name + block format; this won't check for nesting correctness or any other
		// error - that's the parser's job.
		if !maybeSnippet && nesting == 0 {
			// first of the line
			if i == 0 || isNextOnNewLine(tokensCopy[i-1], token) {
				index = 0
			} else {
				index++
			}

			if index == 0 && len(token.Text) >= 3 && strings.HasPrefix(token.Text, "(") && strings.HasSuffix(token.Text, ")") {
				maybeSnippetId = true
			}
		}

		switch token.Text {
		case "{":
			nesting++
			if index == 1 && maybeSnippetId && nesting == 1 {
				maybeSnippet = true
				maybeSnippetId = false
			}
		case "}":
			nesting--
			if nesting == 0 && maybeSnippet {
				maybeSnippet = false
			}
		}
		// if it is {block}, we substitute with all tokens in the block
		// if it is {blocks.*}, we substitute with the tokens in the mapping for the *
		var skip bool
		var tokensToAdd []Token
		switch {
		case token.Text == "{block}":
			tokensToAdd = blockTokens
		case strings.HasPrefix(token.Text, "{blocks.") && strings.HasSuffix(token.Text, "}"):
			// {blocks.foo.bar} will be extracted to key `foo.bar`
			blockKey := strings.TrimPrefix(strings.TrimSuffix(token.Text, "}"), "{blocks.")
			val, ok := blockMapping[blockKey]
			if ok {
				tokensToAdd = val
			}
		default:
			skip = true
		}
		if !skip {
			if len(tokensToAdd) == 0 {
				// if there is no content in the snippet block, don't do any replacement;
				// this allows snippets which contained {block}/{blocks.*} before this change
				// to continue functioning as normal
				tokensCopy = append(tokensCopy, token)
			} else {
				tokensCopy = append(tokensCopy, tokensToAdd...)
			}
			continue
		}

		if maybeSnippet {
			tokensCopy = append(tokensCopy, token)
			continue
		}

		foundVariadic, startIndex, endIndex := parseVariadic(token, len(args))
		if foundVariadic {
			for _, arg := range args[startIndex:endIndex] {
				token.Text = arg
				tokensCopy = append(tokensCopy, token)
			}
		} else {
			token.Text = repl.ReplaceKnown(token.Text, "")
			tokensCopy = append(tokensCopy, token)
		}
	}

	// splice the imported tokens in the place of the import statement
	// and rewind cursor so Next() will land on first imported token
	p.tokens = append(tokensBefore, append(tokensCopy, tokensAfter...)...)
	p.cursor -= len(args) + len(blockTokens) + 1

	return nil
}

// doSingleImport lexes the individual file at importFile and returns
// its tokens or an error, if any.
func (p *parser) doSingleImport(importFile string) ([]Token, error) {
	file, err := os.Open(importFile)
	if err != nil {
		return nil, p.Errf("Could not import %s: %v", importFile, err)
	}
	defer file.Close()

	if info, err := file.Stat(); err != nil {
		return nil, p.Errf("Could not import %s: %v", importFile, err)
	} else if info.IsDir() {
		return nil, p.Errf("Could not import %s: is a directory", importFile)
	}

	input, err := io.ReadAll(file)
	if err != nil {
		return nil, p.Errf("Could not read imported file %s: %v", importFile, err)
	}

	// only warn in the case of empty files
	if len(input) == 0 || len(strings.TrimSpace(string(input))) == 0 {
		caddy.Log().Warn("Import file is empty", zap.String("file", importFile))
		return []Token{}, nil
	}

	importedTokens, err := allTokens(importFile, input)
	if err != nil {
		return nil, p.Errf("Could not read tokens while importing %s: %v", importFile, err)
	}

	// Tack the file path onto these tokens so errors show the imported file's name
	// (we use the full, absolute path to avoid bugs: issue #1892)
	filename, err := filepath.Abs(importFile)
	if err != nil {
		return nil, p.Errf("Failed to get absolute path of file: %s: %v", importFile, err)
	}
	for i := 0; i < len(importedTokens); i++ {
		importedTokens[i].File = filename
	}

	return importedTokens, nil
}

// directive collects tokens until the directive's scope
// closes (either end of line or end of curly brace block).
// It expects the currently-loaded token to be a directive
// (or } that ends a server block). The collected tokens
// are loaded into the current server block for later use
// by directive setup functions.
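//
// For example, a line like `respond "hello"` is collected as one segment,
// and so is a `handle {` line together with everything up to its matching `}`.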
func (p *parser) directive() error {
	// a segment is a list of tokens associated with this directive
	var segment Segment

	// the directive itself is appended as a relevant token
	segment = append(segment, p.Token())

	for p.Next() {
		if p.Val() == "{" {
			p.nesting++
			if !p.isNextOnNewLine() && p.Token().wasQuoted == 0 {
				return p.Err("Unexpected next token after '{' on same line")
			}
			if p.isNewLine() {
				return p.Err("Unexpected '{' on a new line; did you mean to place the '{' on the previous line?")
			}
		} else if p.Val() == "{}" {
			if p.isNextOnNewLine() && p.Token().wasQuoted == 0 {
				return p.Err("Unexpected '{}' at end of line")
			}
		} else if p.isNewLine() && p.nesting == 0 {
			p.cursor-- // read too far
			break
		} else if p.Val() == "}" && p.nesting > 0 {
			p.nesting--
		} else if p.Val() == "}" && p.nesting == 0 {
			return p.Err("Unexpected '}' because no matching opening brace")
		} else if p.Val() == "import" && p.isNewLine() {
			if err := p.doImport(1); err != nil {
				return err
			}
			p.cursor-- // cursor is advanced when we continue, so roll back one more
			continue
		}

		segment = append(segment, p.Token())
	}

	p.block.Segments = append(p.block.Segments, segment)

	if p.nesting > 0 {
		return p.EOFErr()
	}

	return nil
}

// openCurlyBrace expects the current token to be an
// opening curly brace. This acts like an assertion
// because it returns an error if the token is not
// an opening curly brace. It does NOT advance the token.
func (p *parser) openCurlyBrace() error {
	if p.Val() != "{" {
		return p.SyntaxErr("{")
	}
	return nil
}

// closeCurlyBrace expects the current token to be
// a closing curly brace. This acts like an assertion
// because it returns an error if the token is not
// a closing curly brace. It does NOT advance the token.
func (p *parser) closeCurlyBrace() error {
	if p.Val() != "}" {
		return p.SyntaxErr("}")
	}
	return nil
}

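// isNamedRoute reports whether the current block is a named route
// definition, e.g. "&(myroute) { ... }", and returns the route name.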
func (p *parser) isNamedRoute() (bool, string) {
	keys := p.block.Keys
	// A named route block is a single key with parens, prefixed with &.
	if len(keys) == 1 && strings.HasPrefix(keys[0].Text, "&(") && strings.HasSuffix(keys[0].Text, ")") {
		return true, strings.TrimSuffix(keys[0].Text[2:], ")")
	}
	return false, ""
}

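// isSnippet reports whether the current block is a snippet
// definition, e.g. "(common) { ... }", and returns the snippet name.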
func (p *parser) isSnippet() (bool, string) {
	keys := p.block.Keys
	// A snippet block is a single key with parens. Nothing else qualifies.
	if len(keys) == 1 && strings.HasPrefix(keys[0].Text, "(") && strings.HasSuffix(keys[0].Text, ")") {
		return true, strings.TrimSuffix(keys[0].Text[1:], ")")
	}
	return false, ""
}

// blockTokens reads and stores everything in a block for later replay.
func (p *parser) blockTokens(retainCurlies bool) ([]Token, error) {
	// the block must have curlies
	err := p.openCurlyBrace()
	if err != nil {
		return nil, err
	}
	nesting := 1 // count our own nesting
	tokens := []Token{}
	if retainCurlies {
		tokens = append(tokens, p.Token())
	}
	for p.Next() {
		if p.Val() == "}" {
			nesting--
			if nesting == 0 {
				if retainCurlies {
					tokens = append(tokens, p.Token())
				}
				break
			}
		}
		if p.Val() == "{" {
			nesting++
		}
		tokens = append(tokens, p.tokens[p.cursor])
	}
	// make sure we're matched up
	if nesting != 0 {
		return nil, p.SyntaxErr("}")
	}
	return tokens, nil
}

// ServerBlock associates any number of keys from the
// head of the server block with tokens, which are
// grouped by segments.
type ServerBlock struct {
	HasBraces    bool
	Keys         []Token
	Segments     []Segment
	IsNamedRoute bool
}

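// GetKeysText returns the text of each key in the server block, in order.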
func (sb ServerBlock) GetKeysText() []string {
	res := []string{}
	for _, k := range sb.Keys {
		res = append(res, k.Text)
	}
	return res
}

// DispenseDirective returns a dispenser that contains
// the tokens of every segment in the server block that
// begins with the directive dir.
func (sb ServerBlock) DispenseDirective(dir string) *Dispenser {
	var tokens []Token
	for _, seg := range sb.Segments {
		if len(seg) > 0 && seg[0].Text == dir {
			tokens = append(tokens, seg...)
		}
	}
	return NewDispenser(tokens)
}

// Segment is a list of tokens which begins with a directive
// and ends at the end of the directive (either at the end of
// the line, or at the end of a block it opens).
type Segment []Token

// Directive returns the directive name for the segment.
// The directive name is the text of the first token.
func (s Segment) Directive() string {
	if len(s) > 0 {
		return s[0].Text
	}
	return ""
}

// spanOpen and spanClose are used to bound spans that
// contain the name of an environment variable.
var (
	spanOpen, spanClose    = []byte{'{', '$'}, []byte{'}'}
	envVarDefaultDelimiter = ":"
)