// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyhttp

import (
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"net/netip"
	"net/url"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyevents"
	"github.com/caddyserver/caddy/v2/modules/caddytls"
	"github.com/caddyserver/certmagic"
	"github.com/lucas-clemente/quic-go"
	"github.com/lucas-clemente/quic-go/http3"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// Server describes an HTTP server.
type Server struct {
	activeRequests int64 // accessed atomically

	// Socket addresses to which to bind listeners. Accepts
	// [network addresses](/docs/conventions#network-addresses)
	// that may include port ranges. Listener addresses must
	// be unique; they cannot be repeated across all defined
	// servers.
	Listen []string `json:"listen,omitempty"`

	// A list of listener wrapper modules, which can modify the behavior
	// of the base listener. They are applied in the given order.
	ListenerWrappersRaw []json.RawMessage `json:"listener_wrappers,omitempty" caddy:"namespace=caddy.listeners inline_key=wrapper"`

	// How long to allow a read from a client's upload. Setting this
	// to a short, non-zero value can mitigate slowloris attacks, but
	// may also affect legitimately slow clients.
	ReadTimeout caddy.Duration `json:"read_timeout,omitempty"`

	// ReadHeaderTimeout is like ReadTimeout but for request headers.
	ReadHeaderTimeout caddy.Duration `json:"read_header_timeout,omitempty"`

	// WriteTimeout is how long to allow a write to a client. Note
	// that setting this to a small value when serving large files
	// may negatively affect legitimately slow clients.
	WriteTimeout caddy.Duration `json:"write_timeout,omitempty"`

	// IdleTimeout is the maximum time to wait for the next request
	// when keep-alives are enabled. If zero, a default timeout of
	// 5m is applied to help avoid resource exhaustion.
	IdleTimeout caddy.Duration `json:"idle_timeout,omitempty"`

	// KeepAliveInterval is the interval at which TCP keepalive packets
	// are sent to keep the connection alive at the TCP layer when no other
	// data is being transmitted. The default is 15s.
	KeepAliveInterval caddy.Duration `json:"keepalive_interval,omitempty"`

	// MaxHeaderBytes is the maximum size to parse from a client's
	// HTTP request headers.
	MaxHeaderBytes int `json:"max_header_bytes,omitempty"`

	// Routes describes how this server will handle requests.
	// Routes are executed sequentially. First a route's matchers
	// are evaluated, then its grouping. If it matches and has
	// not been mutually-excluded by its grouping, then its
	// handlers are executed sequentially. The sequence of invoked
	// handlers comprises a compiled middleware chain that flows
	// from each matching route and its handlers to the next.
	//
	// By default, all unrouted requests receive a 200 OK response
	// to indicate the server is working.
	Routes RouteList `json:"routes,omitempty"`

	// Errors is how this server will handle errors returned from any
	// of the handlers in the primary routes. If the primary handler
	// chain returns an error, the error along with its recommended
	// status code are bubbled back up to the HTTP server which
	// executes a separate error route, specified using this property.
	// The error routes work exactly like the normal routes.
	Errors *HTTPErrorConfig `json:"errors,omitempty"`

	// How to handle TLS connections. At least one policy is
	// required to enable HTTPS on this server if automatic
	// HTTPS is disabled or does not apply.
	TLSConnPolicies caddytls.ConnectionPolicies `json:"tls_connection_policies,omitempty"`

	// AutoHTTPS configures or disables automatic HTTPS within this server.
	// HTTPS is enabled automatically and by default when qualifying names
	// are present in a Host matcher and/or when the server is listening
	// only on the HTTPS port.
	AutoHTTPS *AutoHTTPSConfig `json:"automatic_https,omitempty"`

	// If true, will require that a request's Host header match
	// the value of the ServerName sent by the client's TLS
	// ClientHello; often a necessary safeguard when using TLS
	// client authentication.
	StrictSNIHost *bool `json:"strict_sni_host,omitempty"`

	// A list of IP ranges (supports CIDR notation) from which
	// requests should be trusted. By default, no proxies are
	// trusted.
	//
	// On its own, this configuration will not do anything,
	// but it can be used as a default set of ranges for
	// handlers or matchers in routes to pick up, instead
	// of needing to configure each of them. See the
	// `reverse_proxy` handler for example, which uses this
	// to trust sensitive incoming `X-Forwarded-*` headers.
	TrustedProxies []string `json:"trusted_proxies,omitempty"`

	// Enables access logging and configures how access logs are handled
	// in this server. To minimally enable access logs, simply set this
	// to a non-null, empty struct.
	Logs *ServerLogConfig `json:"logs,omitempty"`

	// Protocols specifies which HTTP protocols to enable.
	// Supported values are:
	//
	// - `h1` (HTTP/1.1)
	// - `h2` (HTTP/2)
	// - `h2c` (cleartext HTTP/2)
	// - `h3` (HTTP/3)
	//
	// If enabling `h2` or `h2c`, `h1` must also be enabled;
	// this is due to current limitations in the Go standard
	// library.
	//
	// HTTP/2 operates only over TLS (HTTPS). HTTP/3 opens
	// a UDP socket to serve QUIC connections.
	//
	// H2C operates over plain TCP if the client supports it;
	// however, because this is not implemented by the Go
	// standard library, other server options are not compatible
	// and will not be applied to H2C requests. Do not enable this
	// only to achieve maximum client compatibility. In practice,
	// very few clients implement H2C, and even fewer require it.
	// Enabling H2C can be useful for serving/proxying gRPC
	// if encryption is not possible or desired.
	//
	// We recommend for most users to simply let Caddy use the
	// default settings.
	//
	// Default: `[h1 h2 h3]`
	Protocols []string `json:"protocols,omitempty"`

	// If set, metrics observations will be enabled.
	// This setting is EXPERIMENTAL and subject to change.
	Metrics *Metrics `json:"metrics,omitempty"`

	name string

	primaryHandlerChain Handler
	errorHandlerChain   Handler
	listenerWrappers    []caddy.ListenerWrapper
	listeners           []net.Listener

	tlsApp       *caddytls.TLS
	events       *caddyevents.App
	logger       *zap.Logger
	accessLogger *zap.Logger
	errorLogger  *zap.Logger
	ctx          caddy.Context

	server      *http.Server
	h3server    *http3.Server
	h3listeners []net.PacketConn // TODO: we have to hold these because quic-go won't close listeners it didn't create
	addresses   []caddy.NetworkAddress

	// Holds the parsed CIDR ranges from TrustedProxies
	trustedProxies []netip.Prefix

	shutdownAt   time.Time
	shutdownAtMu *sync.RWMutex

	// registered callback functions
	connStateFuncs   []func(net.Conn, http.ConnState)
	connContextFuncs []func(ctx context.Context, c net.Conn) context.Context
	onShutdownFuncs  []func()
}
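
// For orientation, a minimal JSON configuration of one server using the
// field names tagged above might look roughly like the following. This is
// a hedged sketch, not a canonical example: the surrounding
// apps.http.servers wrapper, the "host" matcher, and the "static_response"
// handler are defined elsewhere in Caddy, not in this file.
//
//	{
//	  "apps": {
//	    "http": {
//	      "servers": {
//	        "example": {
//	          "listen": [":443"],
//	          "read_header_timeout": "10s",
//	          "idle_timeout": "5m",
//	          "protocols": ["h1", "h2", "h3"],
//	          "trusted_proxies": ["192.168.0.0/16"],
//	          "logs": {},
//	          "routes": [
//	            {
//	              "match": [{"host": ["example.com"]}],
//	              "handle": [{"handler": "static_response", "body": "Hello"}]
//	            }
//	          ]
//	        }
//	      }
//	    }
//	  }
//	}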

// ServeHTTP is the entry point for all HTTP requests.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Server", "Caddy")

	// advertise HTTP/3, if enabled
	if s.h3server != nil {
		// keep track of active requests for QUIC transport purposes
		atomic.AddInt64(&s.activeRequests, 1)
		defer atomic.AddInt64(&s.activeRequests, -1)

		if r.ProtoMajor < 3 {
			err := s.h3server.SetQuicHeaders(w.Header())
			if err != nil {
				s.logger.Error("setting HTTP/3 Alt-Svc header", zap.Error(err))
			}
		}
	}

	// reject very long methods; probably a mistake or an attack
	if len(r.Method) > 32 {
		if s.shouldLogRequest(r) {
			s.accessLogger.Debug("rejecting request with long method",
				zap.String("method_trunc", r.Method[:32]),
				zap.String("remote_addr", r.RemoteAddr))
		}
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	repl := caddy.NewReplacer()
	r = PrepareRequest(r, repl, w, s)

	// encode the request for logging purposes before
	// it enters any handler chain; this is necessary
	// to capture the original request in case it gets
	// modified during handling
	shouldLogCredentials := s.Logs != nil && s.Logs.ShouldLogCredentials
	loggableReq := zap.Object("request", LoggableHTTPRequest{
		Request:              r,
		ShouldLogCredentials: shouldLogCredentials,
	})
	errLog := s.errorLogger.With(loggableReq)

	var duration time.Duration

	if s.shouldLogRequest(r) {
		wrec := NewResponseRecorder(w, nil, nil)
		w = wrec

		// capture the original version of the request
		accLog := s.accessLogger.With(loggableReq)

		defer func() {
			// this request may be flagged as omitted from the logs
			if skipLog, ok := GetVar(r.Context(), SkipLogVar).(bool); ok && skipLog {
				return
			}

			repl.Set("http.response.status", wrec.Status()) // will be 0 if no response is written by us (Go will write 200 to client)
			repl.Set("http.response.size", wrec.Size())
			repl.Set("http.response.duration", duration)
			repl.Set("http.response.duration_ms", duration.Seconds()*1e3) // multiply seconds to preserve decimal (see #4666)

			logger := accLog
			if s.Logs != nil {
				logger = s.Logs.wrapLogger(logger, r.Host)
			}

			log := logger.Info
			if wrec.Status() >= 400 {
				log = logger.Error
			}

			userID, _ := repl.GetString("http.auth.user.id")

			log("handled request",
				zap.String("user_id", userID),
				zap.Duration("duration", duration),
				zap.Int("size", wrec.Size()),
				zap.Int("status", wrec.Status()),
				zap.Object("resp_headers", LoggableHTTPHeader{
					Header:               wrec.Header(),
					ShouldLogCredentials: shouldLogCredentials,
				}),
			)
		}()
	}

	start := time.Now()

	// guarantee ACME HTTP challenges; handle them
	// separately from any user-defined handlers
	if s.tlsApp.HandleHTTPChallenge(w, r) {
		duration = time.Since(start)
		return
	}

	// execute the primary handler chain
	err := s.primaryHandlerChain.ServeHTTP(w, r)
	duration = time.Since(start)

	// if no errors, we're done!
	if err == nil {
		return
	}

	// restore original request before invoking error handler chain (issue #3717)
	// TODO: this does not restore original headers, if modified (for efficiency)
	origReq := r.Context().Value(OriginalRequestCtxKey).(http.Request)
	r.Method = origReq.Method
	r.RemoteAddr = origReq.RemoteAddr
	r.RequestURI = origReq.RequestURI
	cloneURL(origReq.URL, r.URL)

	// prepare the error log
	logger := errLog
	if s.Logs != nil {
		logger = s.Logs.wrapLogger(logger, r.Host)
	}
	logger = logger.With(zap.Duration("duration", duration))

	// get the values that will be used to log the error
	errStatus, errMsg, errFields := errLogValues(err)

	// add HTTP error information to request context
	r = s.Errors.WithError(r, err)

	if s.Errors != nil && len(s.Errors.Routes) > 0 {
		// execute user-defined error handling route
		err2 := s.errorHandlerChain.ServeHTTP(w, r)
		if err2 == nil {
			// user's error route handled the error response
			// successfully, so now just log the error
			logger.Debug(errMsg, errFields...)
		} else {
			// well... this is awkward
			errFields = append([]zapcore.Field{
				zap.String("error", err2.Error()),
				zap.Namespace("first_error"),
				zap.String("msg", errMsg),
			}, errFields...)
			logger.Error("error handling handler error", errFields...)
			if handlerErr, ok := err.(HandlerError); ok {
				w.WriteHeader(handlerErr.StatusCode)
			} else {
				w.WriteHeader(http.StatusInternalServerError)
			}
		}
	} else {
		if errStatus >= 500 {
			logger.Error(errMsg, errFields...)
		} else {
			logger.Debug(errMsg, errFields...)
		}
		w.WriteHeader(errStatus)
	}
}

// wrapPrimaryRoute wraps stack (a compiled middleware handler chain)
// in s.enforcementHandler which performs crucial security checks, etc.
func (s *Server) wrapPrimaryRoute(stack Handler) Handler {
	return HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
		return s.enforcementHandler(w, r, stack)
	})
}

// enforcementHandler is an implicit middleware which performs
// standard checks before executing the HTTP middleware chain.
func (s *Server) enforcementHandler(w http.ResponseWriter, r *http.Request, next Handler) error {
	// enforce strict host matching, which ensures that the SNI
	// value (if any), matches the Host header; essential for
	// servers that rely on TLS ClientAuth sharing a listener
	// with servers that do not; if not enforced, client could
	// bypass by sending benign SNI then restricted Host header
	if s.StrictSNIHost != nil && *s.StrictSNIHost && r.TLS != nil {
		hostname, _, err := net.SplitHostPort(r.Host)
		if err != nil {
			hostname = r.Host // OK; probably lacked port
		}
		if !strings.EqualFold(r.TLS.ServerName, hostname) {
			err := fmt.Errorf("strict host matching: TLS ServerName (%s) and HTTP Host (%s) values differ",
				r.TLS.ServerName, hostname)
			r.Close = true
			return Error(http.StatusMisdirectedRequest, err)
		}
	}
	return next.ServeHTTP(w, r)
}

// listenersUseAnyPortOtherThan returns true if there are any
// listeners in s that use a port which is not otherPort.
func (s *Server) listenersUseAnyPortOtherThan(otherPort int) bool {
	for _, lnAddr := range s.Listen {
		laddrs, err := caddy.ParseNetworkAddress(lnAddr)
		if err != nil {
			continue
		}
		if uint(otherPort) > laddrs.EndPort || uint(otherPort) < laddrs.StartPort {
			return true
		}
	}
	return false
}

// hasListenerAddress returns true if s has a listener
// at the given address fullAddr. Currently, fullAddr
// must represent exactly one socket address (port
// ranges are not supported)
func (s *Server) hasListenerAddress(fullAddr string) bool {
	laddrs, err := caddy.ParseNetworkAddress(fullAddr)
	if err != nil {
		return false
	}
	if laddrs.PortRangeSize() != 1 {
		return false // TODO: support port ranges
	}

	for _, lnAddr := range s.Listen {
		thisAddrs, err := caddy.ParseNetworkAddress(lnAddr)
		if err != nil {
			continue
		}
		if thisAddrs.Network != laddrs.Network {
			continue
		}

		// Apparently, Linux requires all bound ports to be distinct
		// *regardless of host interface* even if the addresses are
		// in fact different; binding "192.168.0.1:9000" and then
		// ":9000" will fail for ":9000" because "address is already
		// in use" even though it's not, and the same bindings work
		// fine on macOS. I also found on Linux that listening on
		// "[::]:9000" would fail with a similar error, except with
		// the address "0.0.0.0:9000", as if deliberately ignoring
		// that I specified the IPv6 interface explicitly. This seems
		// to be a major bug in the Linux network stack and I don't
		// know why it hasn't been fixed yet, so for now we have to
		// special-case ourselves around Linux like a doting parent.
		// The second issue seems very similar to a discussion here:
		// https://github.com/nodejs/node/issues/9390
		//
		// This is very easy to reproduce by creating an HTTP server
		// that listens to both addresses or just one with a host
		// interface; or for a more confusing reproduction, try
		// listening on "127.0.0.1:80" and ":443" and you'll see
		// the error, if you take away the GOOS condition below.
		//
		// So, an address is equivalent if the port is in the port
		// range, and if not on Linux, the host is the same... sigh.
		if (runtime.GOOS == "linux" || thisAddrs.Host == laddrs.Host) &&
			(laddrs.StartPort <= thisAddrs.EndPort) &&
			(laddrs.StartPort >= thisAddrs.StartPort) {
			return true
		}
	}
	return false
}

func (s *Server) hasTLSClientAuth() bool {
	for _, cp := range s.TLSConnPolicies {
		if cp.ClientAuthentication != nil && cp.ClientAuthentication.Active() {
			return true
		}
	}
	return false
}

// findLastRouteWithHostMatcher returns the index of the last route
// in the server which has a host matcher. Used during Automatic HTTPS
// to determine where to insert the HTTP->HTTPS redirect route, such
// that it is after any other host matcher but before any "catch-all"
// route without a host matcher.
func (s *Server) findLastRouteWithHostMatcher() int {
	foundHostMatcher := false
	lastIndex := len(s.Routes)

	for i, route := range s.Routes {
		// since we want to break out of an inner loop, use a closure
		// to allow us to use 'return' when we found a host matcher
		found := (func() bool {
			for _, sets := range route.MatcherSets {
				for _, matcher := range sets {
					switch matcher.(type) {
					case *MatchHost:
						foundHostMatcher = true
						return true
					}
				}
			}
			return false
		})()

		// if we found the host matcher, change the lastIndex to
		// just after the current route
		if found {
			lastIndex = i + 1
		}
	}

	// If we didn't actually find a host matcher, return 0
	// because that means every defined route was a "catch-all".
	// See https://caddy.community/t/how-to-set-priority-in-caddyfile/13002/8
	if !foundHostMatcher {
		return 0
	}

	return lastIndex
}

// serveHTTP3 creates a QUIC listener, configures an HTTP/3 server if
// not already done, and then uses that server to serve HTTP/3 over
// the listener, with Server s as the handler.
func (s *Server) serveHTTP3(addr caddy.NetworkAddress, tlsCfg *tls.Config) error {
	switch addr.Network {
	case "unix":
		addr.Network = "unixgram"
	case "tcp4":
		addr.Network = "udp4"
	case "tcp6":
		addr.Network = "udp6"
	default:
		addr.Network = "udp" // TODO: Maybe a better default is to not enable HTTP/3 if we do not know the network?
	}

	lnAny, err := addr.Listen(s.ctx, 0, net.ListenConfig{})
	if err != nil {
		return err
	}
	ln := lnAny.(net.PacketConn)

	h3ln, err := caddy.ListenQUIC(ln, tlsCfg, &s.activeRequests)
	if err != nil {
		return fmt.Errorf("starting HTTP/3 QUIC listener: %v", err)
	}

	// create HTTP/3 server if not done already
	if s.h3server == nil {
		s.h3server = &http3.Server{
			Handler:        s,
			TLSConfig:      tlsCfg,
			MaxHeaderBytes: s.MaxHeaderBytes,
			// TODO: remove this config when draft versions are no longer supported (we have no need to support drafts)
			QuicConfig: &quic.Config{
				Versions: []quic.VersionNumber{quic.Version1, quic.Version2},
			},
		}
	}

	s.h3listeners = append(s.h3listeners, lnAny.(net.PacketConn))

	//nolint:errcheck
	go s.h3server.ServeListener(h3ln)

	return nil
}

// configureServer applies/binds the registered callback functions to the server.
func (s *Server) configureServer(server *http.Server) {
	for _, f := range s.connStateFuncs {
		if server.ConnState != nil {
			baseConnStateFunc := server.ConnState
			server.ConnState = func(conn net.Conn, state http.ConnState) {
				baseConnStateFunc(conn, state)
				f(conn, state)
			}
		} else {
			server.ConnState = f
		}
	}

	for _, f := range s.connContextFuncs {
		if server.ConnContext != nil {
			baseConnContextFunc := server.ConnContext
			server.ConnContext = func(ctx context.Context, c net.Conn) context.Context {
				return f(baseConnContextFunc(ctx, c), c)
			}
		} else {
			server.ConnContext = f
		}
	}

	for _, f := range s.onShutdownFuncs {
		server.RegisterOnShutdown(f)
	}
}

// RegisterConnState registers f to be invoked on s.ConnState.
func (s *Server) RegisterConnState(f func(net.Conn, http.ConnState)) {
	s.connStateFuncs = append(s.connStateFuncs, f)
}

// RegisterConnContext registers f to be invoked as part of s.ConnContext.
func (s *Server) RegisterConnContext(f func(ctx context.Context, c net.Conn) context.Context) {
	s.connContextFuncs = append(s.connContextFuncs, f)
}

// RegisterOnShutdown registers f to be invoked on server shutdown.
func (s *Server) RegisterOnShutdown(f func()) {
	s.onShutdownFuncs = append(s.onShutdownFuncs, f)
}
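
// As a usage sketch (hypothetical caller, not part of this file), another
// module holding a *Server could register callbacks like so; configureServer
// later binds them onto the underlying *http.Server. The variable srv and
// the context key myCtxKey are illustrative assumptions only.
//
//	srv.RegisterOnShutdown(func() {
//		// release resources tied to this server
//	})
//	srv.RegisterConnContext(func(ctx context.Context, c net.Conn) context.Context {
//		// stash per-connection data for handlers to read later
//		return context.WithValue(ctx, myCtxKey, c.RemoteAddr())
//	})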

// HTTPErrorConfig determines how to handle errors
// from the HTTP handlers.
type HTTPErrorConfig struct {
	// The routes to evaluate after the primary handler
	// chain returns an error. In an error route, extra
	// placeholders are available:
	//
	// Placeholder | Description
	// ------------|---------------
	// `{http.error.status_code}` | The recommended HTTP status code
	// `{http.error.status_text}` | The status text associated with the recommended status code
	// `{http.error.message}` | The error message
	// `{http.error.trace}` | The origin of the error
	// `{http.error.id}` | An identifier for this occurrence of the error
	Routes RouteList `json:"routes,omitempty"`
}
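
// A hedged sketch of an error route that uses these placeholders; the
// "static_response" handler and its status_code/body fields are defined in
// another module, so this is illustrative only, not a canonical example:
//
//	"errors": {
//	  "routes": [
//	    {
//	      "handle": [{
//	        "handler": "static_response",
//	        "status_code": "{http.error.status_code}",
//	        "body": "{http.error.status_code} {http.error.status_text}: {http.error.message}"
//	      }]
//	    }
//	  ]
//	}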

// WithError makes a shallow copy of r to add the error to its
// context, and sets placeholders on the request's replacer
// related to err. It returns the modified request which has
// the error information in its context and replacer. It
// overwrites any existing error values that are stored.
func (*HTTPErrorConfig) WithError(r *http.Request, err error) *http.Request {
	// add the raw error value to the request context
	// so it can be accessed by error handlers
	c := context.WithValue(r.Context(), ErrorCtxKey, err)
	r = r.WithContext(c)

	// add error values to the replacer
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
	repl.Set("http.error", err)
	if handlerErr, ok := err.(HandlerError); ok {
		repl.Set("http.error.status_code", handlerErr.StatusCode)
		repl.Set("http.error.status_text", http.StatusText(handlerErr.StatusCode))
		repl.Set("http.error.id", handlerErr.ID)
		repl.Set("http.error.trace", handlerErr.Trace)
		if handlerErr.Err != nil {
			repl.Set("http.error.message", handlerErr.Err.Error())
		} else {
			repl.Set("http.error.message", http.StatusText(handlerErr.StatusCode))
		}
	}

	return r
}

// shouldLogRequest returns true if this request should be logged.
func (s *Server) shouldLogRequest(r *http.Request) bool {
	if s.accessLogger == nil || s.Logs == nil {
		// logging is disabled
		return false
	}
	if _, ok := s.Logs.LoggerNames[r.Host]; ok {
		// this host is mapped to a particular logger name
		return true
	}
	for _, dh := range s.Logs.SkipHosts {
		// logging for this particular host is disabled
		if certmagic.MatchWildcard(r.Host, dh) {
			return false
		}
	}
	// if configured, this host is not mapped and thus must not be logged
	return !s.Logs.SkipUnmappedHosts
}
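
// For reference, a hedged sketch of the ServerLogConfig JSON that this logic
// consults; the field names come from this package's logging configuration,
// which lives in another file, so treat the exact keys as assumptions:
//
//	"logs": {
//	  "logger_names": {"example.com": "example_logger"},
//	  "skip_hosts": ["health.internal"],
//	  "skip_unmapped_hosts": true
//	}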

// protocol returns true if the protocol proto is configured/enabled.
func (s *Server) protocol(proto string) bool {
	for _, p := range s.Protocols {
		if p == proto {
			return true
		}
	}
	return false
}

// Listeners returns the server's listeners. These are active listeners,
// so calling Accept() or Close() on them will probably break things.
// They are made available here for read-only purposes (e.g. Addr())
// and for type-asserting for purposes where you know what you're doing.
//
// EXPERIMENTAL: Subject to change or removal.
func (s *Server) Listeners() []net.Listener { return s.listeners }
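
// Read-only usage sketch (hypothetical caller with a *Server named srv):
// inspect the bound addresses without accepting or closing anything:
//
//	for _, ln := range srv.Listeners() {
//		fmt.Println("listening on", ln.Addr())
//	}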

// PrepareRequest fills the request r for use in a Caddy HTTP handler chain. w and s can
// be nil, but the handlers will lose response placeholders and access to the server.
func PrepareRequest(r *http.Request, repl *caddy.Replacer, w http.ResponseWriter, s *Server) *http.Request {
	// set up the context for the request
	ctx := context.WithValue(r.Context(), caddy.ReplacerCtxKey, repl)
	ctx = context.WithValue(ctx, ServerCtxKey, s)
	ctx = context.WithValue(ctx, VarsCtxKey, map[string]any{
		TrustedProxyVarKey: determineTrustedProxy(r, s),
	})
	ctx = context.WithValue(ctx, routeGroupCtxKey, make(map[string]struct{}))
	var url2 url.URL // avoid letting this escape to the heap
	ctx = context.WithValue(ctx, OriginalRequestCtxKey, originalRequest(r, &url2))
	r = r.WithContext(ctx)

	// once the pointer to the request won't change
	// anymore, finish setting up the replacer
	addHTTPVarsToReplacer(repl, r, w)

	return r
}
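
// Usage sketch (e.g. from a test or an embedding module; r and w are
// assumed to exist in the caller): prime a request so Caddy handlers can
// run against it:
//
//	repl := caddy.NewReplacer()
//	r = PrepareRequest(r, repl, w, nil) // nil server: handlers lose access to server-scoped values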

// originalRequest returns a partial, shallow copy of
// req, including: req.Method, deep copy of req.URL
// (into the urlCopy parameter, which should be on the
// stack), req.RequestURI, and req.RemoteAddr. Notably,
// headers are not copied. This function is designed to
// be very fast and efficient, and useful primarily for
// read-only/logging purposes.
func originalRequest(req *http.Request, urlCopy *url.URL) http.Request {
	cloneURL(req.URL, urlCopy)
	return http.Request{
		Method:     req.Method,
		RemoteAddr: req.RemoteAddr,
		RequestURI: req.RequestURI,
		URL:        urlCopy,
	}
}

// determineTrustedProxy parses the remote IP address of
// the request, and determines (if the server configured it)
// if the client is a trusted proxy.
func determineTrustedProxy(r *http.Request, s *Server) bool {
	// If there's no server, then we can't check anything
	if s == nil {
		return false
	}

	// Parse the remote IP, ignore the error as non-fatal,
	// but the remote IP is required to continue, so we
	// just return early. This should probably never happen
	// though, unless some other module manipulated the request's
	// remote address and used an invalid value.
	clientIP, _, err := net.SplitHostPort(r.RemoteAddr)
	if err != nil {
		return false
	}

	// Client IP may contain a zone if IPv6, so we need
	// to pull that out before parsing the IP
	clientIP, _, _ = strings.Cut(clientIP, "%")
	ipAddr, err := netip.ParseAddr(clientIP)
	if err != nil {
		return false
	}

	// Check if the client is a trusted proxy
	for _, ipRange := range s.trustedProxies {
		if ipRange.Contains(ipAddr) {
			return true
		}
	}

	return false
}
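
// Handlers can read the result computed here from the request's variable
// table; a minimal sketch using this package's own helpers:
//
//	if trusted, ok := GetVar(r.Context(), TrustedProxyVarKey).(bool); ok && trusted {
//		// the immediate client is within TrustedProxies
//	}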

// cloneURL copies the contents of from into to, including a
// copy of the Userinfo, so that to does not share pointer
// values with the original URL.
func cloneURL(from, to *url.URL) {
	*to = *from
	if from.User != nil {
		userInfo := new(url.Userinfo)
		*userInfo = *from.User
		to.User = userInfo
	}
}

// PrivateRangesCIDR returns a list of private CIDR range
// strings, which can be used as a configuration shortcut.
func PrivateRangesCIDR() []string {
	return []string{
		"192.168.0.0/16",
		"172.16.0.0/12",
		"10.0.0.0/8",
		"127.0.0.1/8",
		"fd00::/8",
		"::1",
	}
}
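
// Shortcut usage sketch (hypothetical caller): code that accepts CIDR
// strings, such as the TrustedProxies field above, can default to these
// private ranges:
//
//	srv.TrustedProxies = append(srv.TrustedProxies, PrivateRangesCIDR()...)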

// Context keys for HTTP request context values.
const (
	// For referencing the server instance
	ServerCtxKey caddy.CtxKey = "server"

	// For the request's variable table
	VarsCtxKey caddy.CtxKey = "vars"

	// For a partial copy of the unmodified request that
	// originally came into the server's entry handler
	OriginalRequestCtxKey caddy.CtxKey = "original_request"

	// For tracking whether the client is a trusted proxy
	TrustedProxyVarKey string = "trusted_proxy"
)