2017-09-23 13:56:58 +08:00
|
|
|
// Copyright 2015 Light Code Labs, LLC
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2016-06-06 11:51:56 +08:00
|
|
|
package proxy
|
|
|
|
|
|
|
|
import (
|
2017-06-29 05:54:29 +08:00
|
|
|
"bytes"
|
2017-11-06 14:01:10 +08:00
|
|
|
"context"
|
2019-02-06 01:16:08 +08:00
|
|
|
"crypto/x509"
|
2016-06-06 11:51:56 +08:00
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
2017-05-14 06:49:06 +08:00
|
|
|
"net"
|
2016-06-06 11:51:56 +08:00
|
|
|
"net/http"
|
2019-03-07 05:35:07 +08:00
|
|
|
"net/textproto"
|
2016-06-06 11:51:56 +08:00
|
|
|
"net/url"
|
|
|
|
"path"
|
2019-03-07 05:35:07 +08:00
|
|
|
"regexp"
|
2016-06-06 11:51:56 +08:00
|
|
|
"strconv"
|
|
|
|
"strings"
|
2017-04-03 04:58:15 +08:00
|
|
|
"sync"
|
2017-02-15 23:09:42 +08:00
|
|
|
"sync/atomic"
|
2016-06-06 11:51:56 +08:00
|
|
|
"time"
|
|
|
|
|
2017-04-04 05:16:32 +08:00
|
|
|
"crypto/tls"
|
|
|
|
|
2016-06-06 11:51:56 +08:00
|
|
|
"github.com/mholt/caddy/caddyfile"
|
|
|
|
"github.com/mholt/caddy/caddyhttp/httpserver"
|
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// supportedPolicies maps a policy name (as written in the Caddyfile)
	// to a factory that builds the corresponding load-balancing Policy.
	// Entries are installed via RegisterPolicy.
	supportedPolicies = make(map[string]func(string) Policy)
)
|
|
|
|
|
|
|
|
// staticUpstream is an Upstream whose backend hosts are fixed at
// configuration time (parsed from the Caddyfile), as opposed to being
// discovered dynamically. SRV locators are the one exception: they are
// re-resolved during health checks via the resolver field.
type staticUpstream struct {
	from              string      // base request path this upstream serves
	upstreamHeaders   http.Header // header mutations applied to requests going upstream
	downstreamHeaders http.Header // header mutations applied to responses going downstream

	stop chan struct{}  // Signals running goroutines to stop.
	wg   sync.WaitGroup // Used to wait for running goroutines to stop.

	Hosts  HostPool
	Policy Policy // load-balancing policy; defaults to &Random{}

	KeepAlive     int           // keep-alive setting passed to NewSingleHostReverseProxy
	FallbackDelay time.Duration // passed to NewSingleHostReverseProxy when building each host's proxy
	Timeout       time.Duration // request timeout passed to NewSingleHostReverseProxy
	FailTimeout   time.Duration // copied onto each UpstreamHost
	TryDuration   time.Duration // exposed via GetTryDuration
	TryInterval   time.Duration // exposed via GetTryInterval; defaults to 250ms
	MaxConns      int64         // per-host connection cap copied onto each UpstreamHost

	// HealthCheck configures active health checking; it is enabled when
	// Path is non-empty (see NewStaticUpstreams).
	HealthCheck struct {
		Client        http.Client   // client used to issue health check requests
		Path          string        // URL path requested on each host
		Interval      time.Duration // time between checks
		Timeout       time.Duration // per-request timeout for the check client
		Host          string        // Host header to send with check requests
		Port          string        // overrides the host's port for checks (non-SRV only)
		ContentString string        // if set, response body must contain this string
	}

	WithoutPathPrefix  string
	IgnoredSubPaths    []string // "except" sub-paths that bypass the proxy
	insecureSkipVerify bool     // disables upstream TLS verification
	MaxFails           int32    // failures after which a host is considered down

	resolver   srvResolver    // expands SRV locators; net.DefaultResolver by default
	CaCertPool *x509.CertPool // custom CA roots for upstream TLS, from ca_certificates

	upstreamHeaderReplacements   headerReplacements // regex rewrite rules for upstream headers
	downstreamHeaderReplacements headerReplacements // regex rewrite rules for downstream headers
}
|
|
|
|
|
|
|
|
// srvResolver is the subset of the resolver API needed to look up SRV
// records; net.DefaultResolver satisfies it and is used by default,
// while the interface allows a substitute resolver to be injected.
type srvResolver interface {
	LookupSRV(context.Context, string, string, string) (string, []*net.SRV, error)
}
|
|
|
|
|
2019-03-07 05:35:07 +08:00
|
|
|
// headerReplacement stores a compiled regex matcher and a string replacer, for replacement rules
type headerReplacement struct {
	regexp *regexp.Regexp // pattern matched against the header value
	to     string         // replacement string
}
|
|
|
|
|
|
|
|
// headerReplacements stores a mapping of canonical MIME header to headerReplacement.
// Implements a subset of http.Header functions (Add, Del), to allow
// convenient addition and deletion of replacement rules.
type headerReplacements map[string][]headerReplacement
|
|
|
|
|
|
|
|
func (h headerReplacements) Add(key string, value headerReplacement) {
|
|
|
|
key = textproto.CanonicalMIMEHeaderKey(key)
|
|
|
|
h[key] = append(h[key], value)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (h headerReplacements) Del(key string) {
|
|
|
|
delete(h, textproto.CanonicalMIMEHeaderKey(key))
|
|
|
|
}
|
|
|
|
|
2016-06-06 11:51:56 +08:00
|
|
|
// NewStaticUpstreams parses the configuration input and sets up
// static upstreams for the proxy middleware. The host string parameter,
// if not empty, is used for setting the upstream Host header for the
// health checks if the upstream header config requires it.
func NewStaticUpstreams(c caddyfile.Dispenser, host string) ([]Upstream, error) {
	var upstreams []Upstream
	for c.Next() {

		// Start from defaults; directives in the block below override them.
		upstream := &staticUpstream{
			from:                         "",
			stop:                         make(chan struct{}),
			upstreamHeaders:              make(http.Header),
			downstreamHeaders:            make(http.Header),
			Hosts:                        nil,
			Policy:                       &Random{},
			MaxFails:                     1,
			TryInterval:                  250 * time.Millisecond,
			MaxConns:                     0,
			KeepAlive:                    http.DefaultMaxIdleConnsPerHost,
			Timeout:                      30 * time.Second,
			resolver:                     net.DefaultResolver,
			upstreamHeaderReplacements:   make(headerReplacements),
			downstreamHeaderReplacements: make(headerReplacements),
		}

		// First argument is the base path this upstream serves.
		if !c.Args(&upstream.from) {
			return upstreams, c.ArgErr()
		}

		var to []string
		hasSrv := false

		// Remaining arguments on the directive line are backend addresses.
		// An SRV locator must be the one and only backend.
		for _, t := range c.RemainingArgs() {
			if len(to) > 0 && hasSrv {
				return upstreams, c.Err("only one upstream is supported when using SRV locator")
			}

			if strings.HasPrefix(t, "srv://") || strings.HasPrefix(t, "srv+https://") {
				if len(to) > 0 {
					return upstreams, c.Err("service locator upstreams can not be mixed with host names")
				}

				hasSrv = true
			}

			// Port ranges expand into multiple backends.
			parsed, err := parseUpstream(t)
			if err != nil {
				return upstreams, err
			}
			to = append(to, parsed...)
		}

		// Parse the directive's sub-block. "upstream" adds more backends;
		// everything else is handled by parseBlock.
		for c.NextBlock() {
			switch c.Val() {
			case "upstream":
				if !c.NextArg() {
					return upstreams, c.ArgErr()
				}

				if hasSrv {
					return upstreams, c.Err("upstream directive is not supported when backend is service locator")
				}

				parsed, err := parseUpstream(c.Val())
				if err != nil {
					return upstreams, err
				}
				to = append(to, parsed...)
			default:
				if err := parseBlock(&c, upstream, hasSrv); err != nil {
					return upstreams, err
				}
			}
		}

		// At least one backend is required.
		if len(to) == 0 {
			return upstreams, c.ArgErr()
		}

		upstream.Hosts = make([]*UpstreamHost, len(to))
		for i, host := range to {
			uh, err := upstream.NewHost(host)
			if err != nil {
				return upstreams, err
			}
			upstream.Hosts[i] = uh
		}

		// Health checking is enabled by the presence of a check path.
		if upstream.HealthCheck.Path != "" {
			upstream.HealthCheck.Client = http.Client{
				Timeout: upstream.HealthCheck.Timeout,
				Transport: &http.Transport{
					TLSClientConfig: &tls.Config{InsecureSkipVerify: upstream.insecureSkipVerify},
				},
			}

			// set up health check upstream host if we have one
			if host != "" {
				hostHeader := upstream.upstreamHeaders.Get("Host")
				if strings.Contains(hostHeader, "{host}") {
					upstream.HealthCheck.Host = strings.Replace(hostHeader, "{host}", host, -1)
				}
			}
			// Run the health check worker until Stop closes upstream.stop;
			// the WaitGroup lets Stop wait for it to exit.
			upstream.wg.Add(1)
			go func() {
				defer upstream.wg.Done()
				upstream.HealthCheckWorker(upstream.stop)
			}()
		}
		upstreams = append(upstreams, upstream)
	}
	return upstreams, nil
}
|
|
|
|
|
|
|
|
// From returns the base request path this upstream is configured to
// serve (the first argument of the proxy directive).
func (u *staticUpstream) From() string {
	return u.from
}
|
|
|
|
|
|
|
|
// NewHost creates an UpstreamHost for the given backend address,
// copying this upstream's header, limit and timeout configuration onto
// it and wiring up its reverse proxy. Addresses without a recognized
// scheme ("http...", "unix:", "quic:", "srv://", "srv+https://") are
// assumed to be plain HTTP.
func (u *staticUpstream) NewHost(host string) (*UpstreamHost, error) {
	if !strings.HasPrefix(host, "http") &&
		!strings.HasPrefix(host, "unix:") &&
		!strings.HasPrefix(host, "quic:") &&
		!strings.HasPrefix(host, "srv://") &&
		!strings.HasPrefix(host, "srv+https://") {
		host = "http://" + host
	}
	uh := &UpstreamHost{
		Name:              host,
		Conns:             0,
		Fails:             0,
		FailTimeout:       u.FailTimeout,
		Unhealthy:         0,
		UpstreamHeaders:   u.upstreamHeaders,
		DownstreamHeaders: u.downstreamHeaders,
		// CheckDown is built via an immediately-applied closure so it
		// captures this upstream's MaxFails setting. A host counts as
		// down when it is flagged unhealthy or has failed too often.
		CheckDown: func(u *staticUpstream) UpstreamHostDownFunc {
			return func(uh *UpstreamHost) bool {
				if atomic.LoadInt32(&uh.Unhealthy) != 0 {
					return true
				}
				if atomic.LoadInt32(&uh.Fails) >= u.MaxFails {
					return true
				}
				return false
			}
		}(u),
		WithoutPathPrefix:            u.WithoutPathPrefix,
		MaxConns:                     u.MaxConns,
		HealthCheckResult:            atomic.Value{},
		UpstreamHeaderReplacements:   u.upstreamHeaderReplacements,
		DownstreamHeaderReplacements: u.downstreamHeaderReplacements,
	}

	baseURL, err := url.Parse(uh.Name)
	if err != nil {
		return nil, err
	}

	uh.ReverseProxy = NewSingleHostReverseProxy(baseURL, uh.WithoutPathPrefix, u.KeepAlive, u.Timeout, u.FallbackDelay)
	if u.insecureSkipVerify {
		uh.ReverseProxy.UseInsecureTransport()
	}

	if u.CaCertPool != nil {
		uh.ReverseProxy.UseOwnCACertificates(u.CaCertPool)
	}

	return uh, nil
}
|
|
|
|
|
|
|
|
// parseUpstream expands a single upstream token into one or more host
// addresses. A token of the form "host:portA-portB" yields one address
// per port in the (strictly increasing) range, preserving any path
// suffix after the ports. Unix sockets and tokens without a port pass
// through unchanged; SRV locators must not specify a port at all.
func parseUpstream(u string) ([]string, error) {
	// Unix sockets carry no port to expand.
	if strings.HasPrefix(u, "unix:") {
		return []string{u}, nil
	}

	isSrv := strings.HasPrefix(u, "srv://") || strings.HasPrefix(u, "srv+https://")
	lastColon := strings.LastIndex(u, ":")
	schemeColon := strings.Index(u, "://")

	// No port component (the only colon, if any, belongs to the scheme).
	if lastColon == -1 || lastColon == schemeColon {
		return []string{u}, nil
	}

	if isSrv {
		return nil, fmt.Errorf("service locator %s can not have port specified", u)
	}

	// Split the token into prefix (host part), the port section, and an
	// optional path suffix following the ports.
	prefix := u[:lastColon]
	suffix := ""
	rangeEnd := len(u)
	if slash := strings.Index(u[lastColon:], "/"); slash != -1 {
		rangeEnd = lastColon + slash
		suffix = u[rangeEnd:]
	}

	ports := u[lastColon+1 : rangeEnd]
	dashes := strings.Count(ports, "-")

	// A single port needs no expansion.
	if dashes == 0 {
		return []string{u}, nil
	}
	if dashes > 1 {
		return nil, fmt.Errorf("port range [%s] has %d separators", ports, dashes)
	}

	bounds := strings.SplitN(ports, "-", 2)
	low, err := strconv.Atoi(bounds[0])
	if err != nil {
		return nil, err
	}
	high, err := strconv.Atoi(bounds[1])
	if err != nil {
		return nil, err
	}
	if high <= low {
		return nil, fmt.Errorf("port range [%s] is invalid", ports)
	}

	hosts := []string{}
	for port := low; port <= high; port++ {
		hosts = append(hosts, fmt.Sprintf("%s:%d%s", prefix, port, suffix))
	}
	return hosts, nil
}
|
|
|
|
|
2017-11-06 14:01:10 +08:00
|
|
|
// parseBlock parses a single directive inside a proxy configuration
// block and applies it to u. hasSrv reports whether the backend is an
// SRV service locator, which makes some directives (health_check_port)
// invalid. Returns an error for unknown or malformed directives.
func parseBlock(c *caddyfile.Dispenser, u *staticUpstream, hasSrv bool) error {
	// isUpstream distinguishes header_upstream from header_downstream
	// after the fallthrough in the switch below.
	var isUpstream bool

	switch c.Val() {
	case "policy":
		if !c.NextArg() {
			return c.ArgErr()
		}
		policyCreateFunc, ok := supportedPolicies[c.Val()]
		if !ok {
			return c.ArgErr()
		}
		// The policy may take one optional argument.
		arg := ""
		if c.NextArg() {
			arg = c.Val()
		}
		u.Policy = policyCreateFunc(arg)
	case "fallback_delay":
		if !c.NextArg() {
			return c.ArgErr()
		}
		dur, err := time.ParseDuration(c.Val())
		if err != nil {
			return err
		}
		u.FallbackDelay = dur
	case "fail_timeout":
		if !c.NextArg() {
			return c.ArgErr()
		}
		dur, err := time.ParseDuration(c.Val())
		if err != nil {
			return err
		}
		u.FailTimeout = dur
	case "max_fails":
		if !c.NextArg() {
			return c.ArgErr()
		}
		n, err := strconv.Atoi(c.Val())
		if err != nil {
			return err
		}
		if n < 1 {
			return c.Err("max_fails must be at least 1")
		}
		u.MaxFails = int32(n)
	case "try_duration":
		if !c.NextArg() {
			return c.ArgErr()
		}
		dur, err := time.ParseDuration(c.Val())
		if err != nil {
			return err
		}
		u.TryDuration = dur
	case "try_interval":
		if !c.NextArg() {
			return c.ArgErr()
		}
		interval, err := time.ParseDuration(c.Val())
		if err != nil {
			return err
		}
		u.TryInterval = interval
	case "max_conns":
		if !c.NextArg() {
			return c.ArgErr()
		}
		n, err := strconv.ParseInt(c.Val(), 10, 64)
		if err != nil {
			return err
		}
		u.MaxConns = n
	case "health_check":
		if !c.NextArg() {
			return c.ArgErr()
		}
		u.HealthCheck.Path = c.Val()

		// Set defaults
		if u.HealthCheck.Interval == 0 {
			u.HealthCheck.Interval = 30 * time.Second
		}
		if u.HealthCheck.Timeout == 0 {
			u.HealthCheck.Timeout = 60 * time.Second
		}
	case "health_check_interval":
		var interval string
		if !c.Args(&interval) {
			return c.ArgErr()
		}
		dur, err := time.ParseDuration(interval)
		if err != nil {
			return err
		}
		u.HealthCheck.Interval = dur
	case "health_check_timeout":
		var interval string
		if !c.Args(&interval) {
			return c.ArgErr()
		}
		dur, err := time.ParseDuration(interval)
		if err != nil {
			return err
		}
		u.HealthCheck.Timeout = dur
	case "health_check_port":
		if !c.NextArg() {
			return c.ArgErr()
		}

		// SRV records already carry their own ports.
		if hasSrv {
			return c.Err("health_check_port directive is not allowed when upstream is SRV locator")
		}

		port := c.Val()
		n, err := strconv.Atoi(port)
		if err != nil {
			return err
		}

		if n < 0 {
			return c.Errf("invalid health_check_port '%s'", port)
		}
		u.HealthCheck.Port = port
	case "health_check_contains":
		if !c.NextArg() {
			return c.ArgErr()
		}
		u.HealthCheck.ContentString = c.Val()
	case "header_upstream":
		isUpstream = true
		fallthrough
	case "header_downstream":
		var header, value, replaced string
		// Three arguments form a regex replacement rule; fewer form a
		// plain header add (or a removal when prefixed with "-").
		if c.Args(&header, &value, &replaced) {
			// Don't allow - or + in replacements
			if strings.HasPrefix(header, "-") || strings.HasPrefix(header, "+") {
				return c.ArgErr()
			}
			r, err := regexp.Compile(value)
			if err != nil {
				return err
			}
			if isUpstream {
				u.upstreamHeaderReplacements.Add(header, headerReplacement{r, replaced})
			} else {
				u.downstreamHeaderReplacements.Add(header, headerReplacement{r, replaced})
			}
		} else {
			if len(value) == 0 {
				// When removing a header, the value can be optional.
				if !strings.HasPrefix(header, "-") {
					return c.ArgErr()
				}
			}
			if isUpstream {
				u.upstreamHeaders.Add(header, value)
			} else {
				u.downstreamHeaders.Add(header, value)
			}
		}
	case "transparent":
		// Note: X-Forwarded-For header is always being appended for proxy connections
		// See implementation of createUpstreamRequest in proxy.go
		u.upstreamHeaders.Add("Host", "{host}")
		u.upstreamHeaders.Add("X-Real-IP", "{remote}")
		u.upstreamHeaders.Add("X-Forwarded-Proto", "{scheme}")
		u.upstreamHeaders.Add("X-Forwarded-Port", "{server_port}")
	case "websocket":
		u.upstreamHeaders.Add("Connection", "{>Connection}")
		u.upstreamHeaders.Add("Upgrade", "{>Upgrade}")
	case "without":
		if !c.NextArg() {
			return c.ArgErr()
		}
		u.WithoutPathPrefix = c.Val()
	case "except":
		ignoredPaths := c.RemainingArgs()
		if len(ignoredPaths) == 0 {
			return c.ArgErr()
		}
		u.IgnoredSubPaths = ignoredPaths
	case "insecure_skip_verify":
		u.insecureSkipVerify = true
	case "ca_certificates":
		caCertificates := c.RemainingArgs()
		if len(caCertificates) == 0 {
			return c.ArgErr()
		}

		pool := x509.NewCertPool()
		caCertificatesAdded := make(map[string]struct{})
		for _, caFile := range caCertificates {
			// don't add cert to pool more than once
			if _, ok := caCertificatesAdded[caFile]; ok {
				continue
			}
			caCertificatesAdded[caFile] = struct{}{}

			// any client with a certificate from this CA will be allowed to connect
			caCrt, err := ioutil.ReadFile(caFile)
			if err != nil {
				return c.Err(err.Error())
			}

			// attempt to parse pem and append to cert pool
			if ok := pool.AppendCertsFromPEM(caCrt); !ok {
				return c.Errf("loading CA certificate '%s': no certificates were successfully parsed", caFile)
			}
		}

		u.CaCertPool = pool
	case "keepalive":
		if !c.NextArg() {
			return c.ArgErr()
		}
		n, err := strconv.Atoi(c.Val())
		if err != nil {
			return err
		}
		if n < 0 {
			return c.ArgErr()
		}
		u.KeepAlive = n
	case "timeout":
		if !c.NextArg() {
			return c.ArgErr()
		}
		dur, err := time.ParseDuration(c.Val())
		if err != nil {
			return c.Errf("unable to parse timeout duration '%s'", c.Val())
		}
		u.Timeout = dur
	default:
		return c.Errf("unknown property '%s'", c.Val())
	}

	// these settings are at odds with one another. insecure_skip_verify disables security features over HTTPS
	// which is what we are trying to achieve with ca_certificates
	if u.insecureSkipVerify && u.CaCertPool != nil {
		return c.Errf("both insecure_skip_verify and ca_certificates cannot be set in the proxy directive")
	}

	return nil
}
|
|
|
|
|
2017-11-06 14:01:10 +08:00
|
|
|
func (u *staticUpstream) resolveHost(h string) ([]string, bool, error) {
|
|
|
|
names := []string{}
|
|
|
|
proto := "http"
|
|
|
|
if !strings.HasPrefix(h, "srv://") && !strings.HasPrefix(h, "srv+https://") {
|
|
|
|
return []string{h}, false, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if strings.HasPrefix(h, "srv+https://") {
|
|
|
|
proto = "https"
|
|
|
|
}
|
|
|
|
|
|
|
|
_, addrs, err := u.resolver.LookupSRV(context.Background(), "", "", h)
|
|
|
|
if err != nil {
|
|
|
|
return names, true, err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, addr := range addrs {
|
|
|
|
names = append(names, fmt.Sprintf("%s://%s:%d", proto, addr.Target, addr.Port))
|
|
|
|
}
|
|
|
|
|
|
|
|
return names, true, nil
|
|
|
|
}
|
|
|
|
|
2016-06-06 11:51:56 +08:00
|
|
|
// healthCheck probes every host in the pool once and updates each
// host's Unhealthy flag and HealthCheckResult. SRV-backed hosts are
// re-resolved first; a host is marked unhealthy only when every
// resolved candidate fails its check.
func (u *staticUpstream) healthCheck() {
	for _, host := range u.Hosts {
		candidates, isSrv, err := u.resolveHost(host.Name)
		if err != nil {
			// Resolution failure counts as unhealthy; record the error.
			host.HealthCheckResult.Store(err.Error())
			atomic.StoreInt32(&host.Unhealthy, 1)
			continue
		}

		unhealthyCount := 0
		for _, addr := range candidates {
			hostURL := addr
			// health_check_port overrides the backend port, but only for
			// non-SRV hosts (SRV records carry their own ports).
			if !isSrv && u.HealthCheck.Port != "" {
				hostURL = replacePort(hostURL, u.HealthCheck.Port)
			}
			hostURL += u.HealthCheck.Path

			// The closure returns true when this candidate is unhealthy:
			// request failure, non-2xx/3xx status, or missing content string.
			unhealthy := func() bool {
				// set up request, needed to be able to modify headers
				// possible errors are bad HTTP methods or un-parsable urls
				req, err := http.NewRequest("GET", hostURL, nil)
				if err != nil {
					return true
				}
				// set host for request going upstream
				if u.HealthCheck.Host != "" {
					req.Host = u.HealthCheck.Host
				}
				r, err := u.HealthCheck.Client.Do(req)
				if err != nil {
					return true
				}
				// Drain the body so the connection can be reused.
				defer func() {
					io.Copy(ioutil.Discard, r.Body)
					r.Body.Close()
				}()
				if r.StatusCode < 200 || r.StatusCode >= 400 {
					return true
				}
				if u.HealthCheck.ContentString == "" { // don't check for content string
					return false
				}
				// TODO ReadAll will be replaced if deemed necessary
				// See https://github.com/mholt/caddy/pull/1691
				buf, err := ioutil.ReadAll(r.Body)
				if err != nil {
					return true
				}
				if bytes.Contains(buf, []byte(u.HealthCheck.ContentString)) {
					return false
				}
				return true
			}()

			if unhealthy {
				unhealthyCount++
			}
		}

		// Healthy if at least one candidate passed.
		if unhealthyCount == len(candidates) {
			atomic.StoreInt32(&host.Unhealthy, 1)
			host.HealthCheckResult.Store("Failed")
		} else {
			atomic.StoreInt32(&host.Unhealthy, 0)
			host.HealthCheckResult.Store("OK")
		}
	}
}
|
|
|
|
|
|
|
|
func (u *staticUpstream) HealthCheckWorker(stop chan struct{}) {
|
|
|
|
ticker := time.NewTicker(u.HealthCheck.Interval)
|
|
|
|
u.healthCheck()
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-ticker.C:
|
|
|
|
u.healthCheck()
|
|
|
|
case <-stop:
|
2017-04-03 04:58:15 +08:00
|
|
|
ticker.Stop()
|
|
|
|
return
|
2016-06-06 11:51:56 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-31 17:04:54 +08:00
|
|
|
func (u *staticUpstream) Select(r *http.Request) *UpstreamHost {
|
2016-06-06 11:51:56 +08:00
|
|
|
pool := u.Hosts
|
|
|
|
if len(pool) == 1 {
|
|
|
|
if !pool[0].Available() {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return pool[0]
|
|
|
|
}
|
|
|
|
allUnavailable := true
|
|
|
|
for _, host := range pool {
|
|
|
|
if host.Available() {
|
|
|
|
allUnavailable = false
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if allUnavailable {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if u.Policy == nil {
|
2016-07-31 17:04:54 +08:00
|
|
|
return (&Random{}).Select(pool, r)
|
2016-06-06 11:51:56 +08:00
|
|
|
}
|
2016-07-31 17:04:54 +08:00
|
|
|
return u.Policy.Select(pool, r)
|
2016-06-06 11:51:56 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
func (u *staticUpstream) AllowedPath(requestPath string) bool {
|
|
|
|
for _, ignoredSubPath := range u.IgnoredSubPaths {
|
2019-02-16 02:53:14 +08:00
|
|
|
p := path.Clean(requestPath)
|
|
|
|
e := path.Join(u.From(), ignoredSubPath)
|
|
|
|
// Re-add a trailing slashes if the original
|
|
|
|
// paths had one and the cleaned paths don't
|
|
|
|
if strings.HasSuffix(requestPath, "/") && !strings.HasSuffix(p, "/") {
|
|
|
|
p = p + "/"
|
|
|
|
}
|
|
|
|
if strings.HasSuffix(ignoredSubPath, "/") && !strings.HasSuffix(e, "/") {
|
|
|
|
e = e + "/"
|
|
|
|
}
|
|
|
|
if httpserver.Path(p).Matches(e) {
|
2016-06-06 11:51:56 +08:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
2016-09-25 06:03:22 +08:00
|
|
|
|
2018-10-31 02:02:59 +08:00
|
|
|
// GetFallbackDelay returns u.FallbackDelay, the delay configured via
// the fallback_delay directive.
func (u *staticUpstream) GetFallbackDelay() time.Duration {
	return u.FallbackDelay
}
|
|
|
|
|
2016-09-25 06:03:22 +08:00
|
|
|
// GetTryDuration returns u.TryDuration, the duration configured via
// the try_duration directive.
func (u *staticUpstream) GetTryDuration() time.Duration {
	return u.TryDuration
}
|
|
|
|
|
|
|
|
// GetTryInterval returns u.TryInterval, the interval configured via
// the try_interval directive (default 250ms).
func (u *staticUpstream) GetTryInterval() time.Duration {
	return u.TryInterval
}
|
|
|
|
|
2018-04-17 22:09:22 +08:00
|
|
|
// GetTimeout returns u.Timeout, the request timeout configured via the
// timeout directive (default 30s).
func (u *staticUpstream) GetTimeout() time.Duration {
	return u.Timeout
}
|
|
|
|
|
2017-01-12 02:38:14 +08:00
|
|
|
// GetHostCount returns the number of hosts in this upstream's pool.
func (u *staticUpstream) GetHostCount() int {
	return len(u.Hosts)
}
|
|
|
|
|
2017-04-03 04:58:15 +08:00
|
|
|
// Stop sends a signal to all goroutines started by this staticUpstream to exit
// and waits for them to finish before returning. Closing the stop
// channel terminates the health check worker; the WaitGroup was
// incremented when the worker was launched in NewStaticUpstreams.
func (u *staticUpstream) Stop() error {
	close(u.stop)
	u.wg.Wait()
	return nil
}
|
|
|
|
|
2016-09-25 06:03:22 +08:00
|
|
|
// RegisterPolicy adds a custom policy to the proxy. name is the
// keyword that selects the policy in the Caddyfile; policy is a
// factory that receives the policy's (possibly empty) argument.
func RegisterPolicy(name string, policy func(string) Policy) {
	supportedPolicies[name] = policy
}
|
2017-05-14 06:49:06 +08:00
|
|
|
|
|
|
|
// replacePort returns originalURL with its port replaced by newPort,
// adding the port when the URL had none. If the URL cannot be parsed,
// the input is returned unchanged.
func replacePort(originalURL string, newPort string) string {
	parsed, err := url.Parse(originalURL)
	if err != nil {
		return originalURL
	}

	// SplitHostPort fails when the host carries no port (e.g. just
	// "localhost"); in that case keep the whole host as-is.
	hostOnly, _, splitErr := net.SplitHostPort(parsed.Host)
	if splitErr != nil {
		hostOnly = parsed.Host
	}

	parsed.Host = net.JoinHostPort(hostOnly, newPort)
	return parsed.String()
}
|