// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddy

import (
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"net"
	"net/netip"
	"os"
	"strconv"
	"strings"
	"sync/atomic"
	"syscall"

	"github.com/lucas-clemente/quic-go"
	"github.com/lucas-clemente/quic-go/http3"
	"go.uber.org/zap"
)

// Listen is like net.Listen, except Caddy's listeners can overlap
// each other: multiple listeners may be created on the same socket
// at the same time. This is useful because during config changes,
// the new config is started while the old config is still running.
// When Caddy listeners are closed, the closing logic is virtualized
// so the underlying socket isn't actually closed until all uses of
// the socket have been finished. Always be sure to close listeners
// when you are done with them, just like normal listeners.
func Listen(network, addr string) (net.Listener, error) {
	// a 0 timeout means Go uses its default
	return ListenTimeout(network, addr, 0)
}
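
// A minimal usage sketch (hypothetical module code; error handling
// abbreviated). Because listeners can overlap, two configs may hold the
// same socket at once, and it is only released when both have closed it:
//
//	ln, err := caddy.Listen("tcp", ":8080")
//	if err != nil {
//		return err
//	}
//	defer ln.Close() // virtual close; the socket stays open while still shared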

// getListenerFromPlugin returns a listener on the given network and address
// if a plugin has registered the network name. It may return (nil, nil) if
// no plugin can provide a listener.
func getListenerFromPlugin(network, addr string) (net.Listener, error) {
	network = strings.TrimSpace(strings.ToLower(network))

	// get listener from plugin if network type is registered
	if getListener, ok := networkTypes[network]; ok {
		Log().Debug("getting listener from plugin", zap.String("network", network))
		return getListener(network, addr)
	}

	return nil, nil
}

// ListenPacket returns a net.PacketConn suitable for use in a Caddy module.
// It is like Listen except for PacketConns.
// Always be sure to close the PacketConn when you are done.
func ListenPacket(network, addr string) (net.PacketConn, error) {
	lnKey := listenerKey(network, addr)

	sharedPc, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
		pc, err := net.ListenPacket(network, addr)
		if err != nil {
			// https://github.com/caddyserver/caddy/pull/4534
			if isUnixNetwork(network) && isListenBindAddressAlreadyInUseError(err) {
				return nil, fmt.Errorf("%w: this can happen if Caddy was forcefully killed", err)
			}
			return nil, err
		}
		return &sharedPacketConn{PacketConn: pc, key: lnKey}, nil
	})
	if err != nil {
		return nil, err
	}

	return &fakeClosePacketConn{sharedPacketConn: sharedPc.(*sharedPacketConn)}, nil
}
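
// A minimal usage sketch (hypothetical module code):
//
//	pc, err := caddy.ListenPacket("udp", ":53")
//	if err != nil {
//		return err
//	}
//	defer pc.Close() // virtual close, like Listen's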

// ListenQUIC returns a quic.EarlyListener suitable for use in a Caddy module.
// Note that the context passed to Accept is currently ignored, so using
// a context other than context.Background is meaningless.
func ListenQUIC(addr string, tlsConf *tls.Config, activeRequests *int64) (quic.EarlyListener, error) {
	lnKey := listenerKey("udp", addr)

	sharedEl, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
		el, err := quic.ListenAddrEarly(addr, http3.ConfigureTLSConfig(tlsConf), &quic.Config{
			RequireAddressValidation: func(clientAddr net.Addr) bool {
				var highLoad bool
				if activeRequests != nil {
					highLoad = atomic.LoadInt64(activeRequests) > 1000 // TODO: make tunable?
				}
				return highLoad
			},
		})
		if err != nil {
			return nil, err
		}
		return &sharedQuicListener{EarlyListener: el, key: lnKey}, nil
	})
	// without this check, a failed LoadOrNew would leave sharedEl nil and
	// the type assertion below would panic
	if err != nil {
		return nil, err
	}

	ctx, cancel := context.WithCancel(context.Background())
	return &fakeCloseQuicListener{
		sharedQuicListener: sharedEl.(*sharedQuicListener),
		context:            ctx,
		contextCancel:      cancel,
	}, nil
}
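
// A minimal usage sketch (hypothetical module code; tlsConf is assumed to
// be a *tls.Config prepared by the caller, and activeRequests the server's
// own request counter used to gate address validation under load):
//
//	var activeRequests int64
//	el, err := caddy.ListenQUIC(":443", tlsConf, &activeRequests)
//	if err != nil {
//		return err
//	}
//	defer el.Close()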

// ListenerUsage returns the current usage count of the given listener address.
func ListenerUsage(network, addr string) int {
	count, _ := listenerPool.References(listenerKey(network, addr))
	return count
}

func listenerKey(network, addr string) string {
	return network + "/" + addr
}
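
// For example, listenerKey("udp", ":443") yields "udp/:443"; this key is
// how the pool and ListenerUsage decide whether two callers are asking
// for the same socket.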

type fakeCloseQuicListener struct {
	closed              int32 // accessed atomically; belongs to this struct only
	*sharedQuicListener       // embedded, so we also become a quic.EarlyListener
	context             context.Context
	contextCancel       context.CancelFunc
}

// Currently, Accept ignores the passed context. A situation where someone
// would need a hot-swappable, QUIC-only server (not HTTP/3, which uses
// context.Background here) whose Accept is called with non-empty contexts
// (note that the standard net listeners' Accept doesn't take a context
// argument at all) seems far too rare to justify sacrificing efficiency here.
func (fcql *fakeCloseQuicListener) Accept(_ context.Context) (quic.EarlyConnection, error) {
	conn, err := fcql.sharedQuicListener.Accept(fcql.context)
	if err == nil {
		return conn, nil
	}

	// if the listener is "closed", return a fake closed error instead
	if atomic.LoadInt32(&fcql.closed) == 1 && errors.Is(err, context.Canceled) {
		return nil, fakeClosedErr(fcql)
	}
	return nil, err
}

func (fcql *fakeCloseQuicListener) Close() error {
	if atomic.CompareAndSwapInt32(&fcql.closed, 0, 1) {
		fcql.contextCancel()
		_, _ = listenerPool.Delete(fcql.sharedQuicListener.key)
	}
	return nil
}

// fakeClosedErr returns an error value that is not temporary
// nor a timeout, suitable for making the caller think the
// listener is actually closed
func fakeClosedErr(l interface{ Addr() net.Addr }) error {
	return &net.OpError{
		Op:   "accept",
		Net:  l.Addr().Network(),
		Addr: l.Addr(),
		Err:  errFakeClosed,
	}
}

// errFakeClosed is the underlying error value returned by
// fakeCloseListener.Accept() after Close() has been called,
// indicating that it is pretending to be closed so that the
// server using it can terminate, while the underlying
// socket is actually left open.
var errFakeClosed = fmt.Errorf("listener 'closed' 😉")
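
// A sketch of the effect on a server's accept loop (hypothetical caller):
//
//	for {
//		conn, err := ln.Accept()
//		if err != nil {
//			// a fake-closed listener returns a permanent (non-temporary,
//			// non-timeout) error, so the old server's loop exits here
//			// while the shared socket stays open for the new config
//			break
//		}
//		go handle(conn)
//	}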

// fakeClosePacketConn is like fakeCloseListener, but for PacketConns.
type fakeClosePacketConn struct {
	closed            int32 // accessed atomically; belongs to this struct only
	*sharedPacketConn       // embedded, so we also become a net.PacketConn
}

func (fcpc *fakeClosePacketConn) Close() error {
	if atomic.CompareAndSwapInt32(&fcpc.closed, 0, 1) {
		_, _ = listenerPool.Delete(fcpc.sharedPacketConn.key)
	}
	return nil
}

// Supports QUIC implementation: https://github.com/caddyserver/caddy/issues/3998
func (fcpc fakeClosePacketConn) SetReadBuffer(bytes int) error {
	if conn, ok := fcpc.PacketConn.(interface{ SetReadBuffer(int) error }); ok {
		return conn.SetReadBuffer(bytes)
	}
	return fmt.Errorf("SetReadBuffer() not implemented for %T", fcpc.PacketConn)
}

// Supports QUIC implementation: https://github.com/caddyserver/caddy/issues/3998
func (fcpc fakeClosePacketConn) SyscallConn() (syscall.RawConn, error) {
	if conn, ok := fcpc.PacketConn.(interface {
		SyscallConn() (syscall.RawConn, error)
	}); ok {
		return conn.SyscallConn()
	}
	return nil, fmt.Errorf("SyscallConn() not implemented for %T", fcpc.PacketConn)
}

// sharedQuicListener is like sharedListener, but for quic.EarlyListeners.
type sharedQuicListener struct {
	quic.EarlyListener
	key string
}

// Destruct closes the underlying QUIC listener.
func (sql *sharedQuicListener) Destruct() error {
	return sql.EarlyListener.Close()
}

// sharedPacketConn is like sharedListener, but for net.PacketConns.
type sharedPacketConn struct {
	net.PacketConn
	key string
}

// Destruct closes the underlying socket.
func (spc *sharedPacketConn) Destruct() error {
	return spc.PacketConn.Close()
}
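
// A sketch of the sharing pattern these types enable (hypothetical key and
// constructor; UsagePool and Destructor are defined elsewhere in this
// package):
//
//	// first caller constructs the conn and stores it in the pool
//	val, loaded, err := listenerPool.LoadOrNew("udp/:443", newSharedConn) // loaded == false
//	// a second caller gets the same value and bumps the reference count
//	val, loaded, err = listenerPool.LoadOrNew("udp/:443", newSharedConn) // loaded == true
//	// each Delete decrements the count; Destruct runs only when it hits zero
//	listenerPool.Delete("udp/:443")
//	listenerPool.Delete("udp/:443") // now the real Close happens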

// NetworkAddress contains the individual components
// for a parsed network address of the form accepted
// by ParseNetworkAddress(). Network should be a
// network value accepted by Go's net package. Port
// ranges are given by [StartPort, EndPort].
type NetworkAddress struct {
	Network   string
	Host      string
	StartPort uint
	EndPort   uint
}

// IsUnixNetwork returns true if na.Network is
// unix, unixgram, or unixpacket.
func (na NetworkAddress) IsUnixNetwork() bool {
	return isUnixNetwork(na.Network)
}

// JoinHostPort is like net.JoinHostPort, but where the port
// is StartPort + offset.
func (na NetworkAddress) JoinHostPort(offset uint) string {
	if na.IsUnixNetwork() {
		return na.Host
	}
	return net.JoinHostPort(na.Host, strconv.Itoa(int(na.StartPort+offset)))
}

// Expand returns one NetworkAddress for each port in na's port range.
func (na NetworkAddress) Expand() []NetworkAddress {
	size := na.PortRangeSize()
	addrs := make([]NetworkAddress, size)
	for portOffset := uint(0); portOffset < size; portOffset++ {
		na2 := na
		na2.StartPort, na2.EndPort = na.StartPort+portOffset, na.StartPort+portOffset
		addrs[portOffset] = na2
	}
	return addrs
}
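
// For example (a sketch; error handling elided):
//
//	na, _ := caddy.ParseNetworkAddress("tcp/localhost:8080-8082")
//	for _, single := range na.Expand() {
//		fmt.Println(single.JoinHostPort(0)) // localhost:8080, :8081, :8082
//	}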

// PortRangeSize returns how many ports are in
// na's port range. Port ranges are inclusive,
// so the size is the difference of start and
// end ports plus one.
func (na NetworkAddress) PortRangeSize() uint {
	if na.EndPort < na.StartPort {
		return 0
	}
	return (na.EndPort - na.StartPort) + 1
}

func (na NetworkAddress) isLoopback() bool {
	if na.IsUnixNetwork() {
		return true
	}
	if na.Host == "localhost" {
		return true
	}
	if ip, err := netip.ParseAddr(na.Host); err == nil {
		return ip.IsLoopback()
	}
	return false
}

func (na NetworkAddress) isWildcardInterface() bool {
	if na.Host == "" {
		return true
	}
	if ip, err := netip.ParseAddr(na.Host); err == nil {
		return ip.IsUnspecified()
	}
	return false
}

func (na NetworkAddress) port() string {
	if na.StartPort == na.EndPort {
		return strconv.FormatUint(uint64(na.StartPort), 10)
	}
	return fmt.Sprintf("%d-%d", na.StartPort, na.EndPort)
}

// String reconstructs the address string for human display.
// The output can be parsed by ParseNetworkAddress(). If the
// address is a unix socket, any non-zero port will be dropped.
func (na NetworkAddress) String() string {
	if na.Network == "tcp" && (na.Host != "" || na.port() != "") {
		na.Network = "" // omit default network value for brevity
	}
	return JoinNetworkAddress(na.Network, na.Host, na.port())
}

func isUnixNetwork(netw string) bool {
	return netw == "unix" || netw == "unixgram" || netw == "unixpacket"
}

func isListenBindAddressAlreadyInUseError(err error) bool {
	switch networkOperationError := err.(type) {
	case *net.OpError:
		switch syscallError := networkOperationError.Err.(type) {
		case *os.SyscallError:
			if syscallError.Syscall == "bind" {
				return true
			}
		}
	}

	return false
}

// ParseNetworkAddress parses addr into its individual
// components. The input string is expected to be of
// the form "network/host:port-range" where any part is
// optional. The default network, if unspecified, is tcp.
// Port ranges are inclusive.
//
// Network addresses are distinct from URLs and do not
// use URL syntax.
func ParseNetworkAddress(addr string) (NetworkAddress, error) {
	var host, port string
	network, host, port, err := SplitNetworkAddress(addr)
	if err != nil {
		return NetworkAddress{}, err
	}
	if network == "" {
		network = "tcp"
	}
	if isUnixNetwork(network) {
		return NetworkAddress{
			Network: network,
			Host:    host,
		}, nil
	}
	var start, end uint64
	if port != "" {
		before, after, found := strings.Cut(port, "-")
		if !found {
			after = before
		}
		start, err = strconv.ParseUint(before, 10, 16)
		if err != nil {
			return NetworkAddress{}, fmt.Errorf("invalid start port: %v", err)
		}
		end, err = strconv.ParseUint(after, 10, 16)
		if err != nil {
			return NetworkAddress{}, fmt.Errorf("invalid end port: %v", err)
		}
		if end < start {
			return NetworkAddress{}, fmt.Errorf("end port must not be less than start port")
		}
		if (end - start) > maxPortSpan {
			return NetworkAddress{}, fmt.Errorf("port range exceeds %d ports", maxPortSpan)
		}
	}
	return NetworkAddress{
		Network:   network,
		Host:      host,
		StartPort: uint(start),
		EndPort:   uint(end),
	}, nil
}
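
// A few illustrative inputs and results (a sketch, not exhaustive):
//
//	caddy.ParseNetworkAddress("localhost:8080")          // tcp, ports 8080-8080
//	caddy.ParseNetworkAddress("udp/:53")                 // udp, wildcard host
//	caddy.ParseNetworkAddress("tcp/127.0.0.1:2000-2004") // inclusive five-port range
//	caddy.ParseNetworkAddress("unix//run/caddy.sock")    // socket path becomes Host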

// SplitNetworkAddress splits a into its network, host, and port components.
// Note that port may be a port range (:X-Y), or omitted for unix sockets.
func SplitNetworkAddress(a string) (network, host, port string, err error) {
	beforeSlash, afterSlash, slashFound := strings.Cut(a, "/")
	if slashFound {
		network = strings.ToLower(strings.TrimSpace(beforeSlash))
		a = afterSlash
	}
	if isUnixNetwork(network) {
		host = a
		return
	}

	host, port, err = net.SplitHostPort(a)
	if err == nil || a == "" {
		return
	}
	// in general, if there was an error, it was likely "missing port",
	// so try adding a bogus port to take advantage of standard library's
	// robust parser, then strip the artificial port before returning
	// (don't overwrite original error though; might still be relevant)
	var err2 error
	host, port, err2 = net.SplitHostPort(a + ":0")
	if err2 == nil {
		err = nil
		port = ""
	}
	return
}

// JoinNetworkAddress combines network, host, and port into a single
// address string of the form accepted by ParseNetworkAddress(). For
// unix sockets, the network should be "unix" (or "unixgram" or
// "unixpacket") and the path to the socket should be given as the
// host parameter.
func JoinNetworkAddress(network, host, port string) string {
	var a string
	if network != "" {
		a = network + "/"
	}
	if (host != "" && port == "") || isUnixNetwork(network) {
		a += host
	} else if port != "" {
		a += net.JoinHostPort(host, port)
	}
	return a
}
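
// For instance (a sketch):
//
//	caddy.JoinNetworkAddress("udp", "127.0.0.1", "53")      // "udp/127.0.0.1:53"
//	caddy.JoinNetworkAddress("unix", "/run/caddy.sock", "") // "unix//run/caddy.sock"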

// RegisterNetwork registers a network type with Caddy so that if a listener is
// created for that network type, getListener will be invoked to get the listener.
// This should be called during init() and will panic if the network type is standard
// or reserved, or if it is already registered. EXPERIMENTAL and subject to change.
func RegisterNetwork(network string, getListener ListenerFunc) {
	network = strings.TrimSpace(strings.ToLower(network))

	if network == "tcp" || network == "tcp4" || network == "tcp6" ||
		network == "udp" || network == "udp4" || network == "udp6" ||
		network == "unix" || network == "unixpacket" || network == "unixgram" ||
		strings.HasPrefix(network, "ip:") || strings.HasPrefix(network, "ip4:") || strings.HasPrefix(network, "ip6:") {
		panic("network type " + network + " is reserved")
	}

	if _, ok := networkTypes[network]; ok {
		panic("network type " + network + " is already registered")
	}

	networkTypes[network] = getListener
}
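
// A minimal registration sketch (hypothetical plugin; "proxyproto" and
// wrapWithProxyProtocol are made-up names for illustration):
//
//	func init() {
//		caddy.RegisterNetwork("proxyproto", func(network, addr string) (net.Listener, error) {
//			ln, err := net.Listen("tcp", addr)
//			if err != nil {
//				return nil, err
//			}
//			return wrapWithProxyProtocol(ln), nil
//		})
//	}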

// ListenerFunc is a function that can return a listener given a network and address.
// The listeners must be capable of overlapping: with Caddy, new configs are loaded
// before old ones are unloaded, so listeners may overlap briefly if the configs
// both need the same listener. EXPERIMENTAL and subject to change.
type ListenerFunc func(network, addr string) (net.Listener, error)

var networkTypes = map[string]ListenerFunc{}

// ListenerWrapper is a type that wraps a listener
// so it can modify the input listener's methods.
// Modules that implement this interface are found
// in the caddy.listeners namespace. Usually, to
// wrap a listener, you will define your own struct
// type that embeds the input listener, then
// implement your own methods that you want to wrap,
// calling the underlying listener's methods where
// appropriate.
type ListenerWrapper interface {
	WrapListener(net.Listener) net.Listener
}
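
// A sketch of the embed-and-override pattern described above (hypothetical
// module type; only Accept is wrapped):
//
//	type loggingWrapper struct{}
//
//	func (loggingWrapper) WrapListener(ln net.Listener) net.Listener {
//		return loggingListener{ln}
//	}
//
//	type loggingListener struct {
//		net.Listener // embedded; Addr, Close, etc. pass through
//	}
//
//	func (ll loggingListener) Accept() (net.Conn, error) {
//		conn, err := ll.Listener.Accept()
//		if err == nil {
//			caddy.Log().Debug("accepted connection")
//		}
//		return conn, err
//	}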

// listenerPool stores and allows reuse of active listeners.
var listenerPool = NewUsagePool()

const maxPortSpan = 65535

// Interface guards (see https://github.com/caddyserver/caddy/issues/3998)
var (
	_ (interface{ SetReadBuffer(int) error }) = (*fakeClosePacketConn)(nil)
	_ (interface {
		SyscallConn() (syscall.RawConn, error)
	}) = (*fakeClosePacketConn)(nil)
)