tls: Change struct fields to pointers, add nil checks; rate.Burst update
Making these fields pointers makes for cleaner JSON when adapting configs: if a struct is empty, it is now omitted entirely. The x/time/rate package was updated to support changing the burst, so we've incorporated that here and removed a TODO.
commit b249b45d10 (parent c12bf4054c)
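To illustrate the JSON point, here is a minimal standalone sketch using simplified stand-in types (not the actual caddytls structs): with a value field, omitempty still emits an empty object, while a nil pointer field is dropped from the output entirely.

package main

import (
	"encoding/json"
	"fmt"
)

type challenges struct {
	Disabled bool `json:"disabled,omitempty"`
}

type withValue struct {
	Challenges challenges `json:"challenges,omitempty"` // value: never omitted
}

type withPointer struct {
	Challenges *challenges `json:"challenges,omitempty"` // pointer: omitted when nil
}

func main() {
	v, _ := json.Marshal(withValue{})
	p, _ := json.Marshal(withPointer{})
	fmt.Println(string(v)) // {"challenges":{}}
	fmt.Println(string(p)) // {}
}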
go.mod
@@ -26,5 +26,5 @@ require (
 	github.com/starlight-go/starlight v0.0.0-20181207205707-b06f321544f3
 	go.starlark.net v0.0.0-20190604130855-6ddc71c0ba77
 	golang.org/x/net v0.0.0-20190603091049-60506f45cf65
-	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
+	golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0
 )
go.sum
@@ -298,6 +298,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 h1:xQwXv67TxFo9nC1GJFyab5eq/5B590r6RlnL/G8Sz7w=
+golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -329,15 +329,18 @@ func (app *App) automaticHTTPS() error {
 	// to tell the TLS app to manage these certs by honoring
 	// those port configurations
 	acmeManager := &caddytls.ACMEManagerMaker{
-		Challenges: caddytls.ChallengesConfig{
-			HTTP: caddytls.HTTPChallengeConfig{
+		Challenges: &caddytls.ChallengesConfig{
+			HTTP: &caddytls.HTTPChallengeConfig{
 				AlternatePort: app.HTTPPort, // we specifically want the user-configured port, if any
 			},
-			TLSALPN: caddytls.TLSALPNChallengeConfig{
+			TLSALPN: &caddytls.TLSALPNChallengeConfig{
 				AlternatePort: app.HTTPSPort, // we specifically want the user-configured port, if any
 			},
 		},
 	}
+	if tlsApp.Automation == nil {
+		tlsApp.Automation = new(caddytls.AutomationConfig)
+	}
 	tlsApp.Automation.Policies = append(tlsApp.Automation.Policies,
 		caddytls.AutomationPolicy{
 			Hosts: domainsForCerts,
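The new nil check above is the price of the pointer change: once Automation is a pointer, it has to be allocated before its Policies slice is appended to. A minimal sketch of the same lazy-initialization pattern, again with simplified stand-in types:

package main

import "fmt"

type automation struct{ Policies []string }

type app struct{ Automation *automation }

func addPolicy(a *app, policy string) {
	// allocate the optional config on first use; otherwise the
	// append below would dereference a nil pointer
	if a.Automation == nil {
		a.Automation = new(automation)
	}
	a.Automation.Policies = append(a.Automation.Policies, policy)
}

func main() {
	var a app
	addPolicy(&a, "example.com")
	fmt.Println(a.Automation.Policies)
}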
@@ -40,16 +40,16 @@ func init() {
 // after you have configured this struct
 // to your liking.
 type ACMEManagerMaker struct {
-	CA                   string           `json:"ca,omitempty"`
-	Email                string           `json:"email,omitempty"`
-	RenewAhead           caddy.Duration   `json:"renew_ahead,omitempty"`
-	KeyType              string           `json:"key_type,omitempty"`
-	ACMETimeout          caddy.Duration   `json:"acme_timeout,omitempty"`
-	MustStaple           bool             `json:"must_staple,omitempty"`
-	Challenges           ChallengesConfig `json:"challenges,omitempty"`
-	OnDemand             bool             `json:"on_demand,omitempty"`
-	Storage              json.RawMessage  `json:"storage,omitempty"`
-	TrustedRootsPEMFiles []string         `json:"trusted_roots_pem_files,omitempty"`
+	CA                   string            `json:"ca,omitempty"`
+	Email                string            `json:"email,omitempty"`
+	RenewAhead           caddy.Duration    `json:"renew_ahead,omitempty"`
+	KeyType              string            `json:"key_type,omitempty"`
+	ACMETimeout          caddy.Duration    `json:"acme_timeout,omitempty"`
+	MustStaple           bool              `json:"must_staple,omitempty"`
+	Challenges           *ChallengesConfig `json:"challenges,omitempty"`
+	OnDemand             bool              `json:"on_demand,omitempty"`
+	Storage              json.RawMessage   `json:"storage,omitempty"`
+	TrustedRootsPEMFiles []string          `json:"trusted_roots_pem_files,omitempty"`
 
 	storage  certmagic.Storage
 	rootPool *x509.CertPool
@@ -72,7 +72,7 @@ func (m ACMEManagerMaker) NewManager(interactive bool) (certmagic.Manager, error
 // Provision sets up m.
 func (m *ACMEManagerMaker) Provision(ctx caddy.Context) error {
 	// DNS providers
-	if m.Challenges.DNSRaw != nil {
+	if m.Challenges != nil && m.Challenges.DNSRaw != nil {
 		val, err := ctx.LoadModuleInline("provider", "tls.dns", m.Challenges.DNSRaw)
 		if err != nil {
 			return fmt.Errorf("loading DNS provider module: %s", err)
@@ -125,7 +125,7 @@ func (m *ACMEManagerMaker) makeCertMagicConfig(ctx caddy.Context) certmagic.Conf
 	if m.OnDemand {
 		var onDemand *OnDemandConfig
 		appVal, err := ctx.App("tls")
-		if err == nil {
+		if err == nil && appVal.(*TLS).Automation != nil {
 			onDemand = appVal.(*TLS).Automation.OnDemand
 		}
@@ -153,24 +153,33 @@ func (m *ACMEManagerMaker) makeCertMagicConfig(ctx caddy.Context) certmagic.Conf
 		}
 	}
 
-	return certmagic.Config{
-		CA:                      m.CA,
-		Email:                   m.Email,
-		Agreed:                  true,
-		DisableHTTPChallenge:    m.Challenges.HTTP.Disabled,
-		DisableTLSALPNChallenge: m.Challenges.TLSALPN.Disabled,
-		RenewDurationBefore:     time.Duration(m.RenewAhead),
-		AltHTTPPort:             m.Challenges.HTTP.AlternatePort,
-		AltTLSALPNPort:          m.Challenges.TLSALPN.AlternatePort,
-		DNSProvider:             m.Challenges.DNS,
-		KeyType:                 supportedCertKeyTypes[m.KeyType],
-		CertObtainTimeout:       time.Duration(m.ACMETimeout),
-		OnDemand:                ond,
-		MustStaple:              m.MustStaple,
-		Storage:                 storage,
-		TrustedRoots:            m.rootPool,
+	cfg := certmagic.Config{
+		CA:                  m.CA,
+		Email:               m.Email,
+		Agreed:              true,
+		RenewDurationBefore: time.Duration(m.RenewAhead),
+		KeyType:             supportedCertKeyTypes[m.KeyType],
+		CertObtainTimeout:   time.Duration(m.ACMETimeout),
+		OnDemand:            ond,
+		MustStaple:          m.MustStaple,
+		Storage:             storage,
+		TrustedRoots:        m.rootPool,
 		// TODO: listenHost
 	}
+
+	if m.Challenges != nil {
+		if m.Challenges.HTTP != nil {
+			cfg.DisableHTTPChallenge = m.Challenges.HTTP.Disabled
+			cfg.AltHTTPPort = m.Challenges.HTTP.AlternatePort
+		}
+		if m.Challenges.TLSALPN != nil {
+			cfg.DisableTLSALPNChallenge = m.Challenges.TLSALPN.Disabled
+			cfg.AltTLSALPNPort = m.Challenges.TLSALPN.AlternatePort
+		}
+		cfg.DNSProvider = m.Challenges.DNS
+	}
+
+	return cfg
 }
 
 // onDemandAskRequest makes a request to the ask URL
@@ -155,17 +155,19 @@ func (p *ConnectionPolicy) buildStandardTLSConfig(ctx caddy.Context) error {
 	}
 
 	// session tickets support
-	cfg.SessionTicketsDisabled = tlsApp.SessionTickets.Disabled
-
-	// session ticket key rotation
-	tlsApp.SessionTickets.register(cfg)
-	ctx.OnCancel(func() {
-		// do cleanup when the context is cancelled because,
-		// though unlikely, it is possible that a context
-		// needing a TLS server config could exist for less
-		// than the lifetime of the whole app
-		tlsApp.SessionTickets.unregister(cfg)
-	})
+	if tlsApp.SessionTickets != nil {
+		cfg.SessionTicketsDisabled = tlsApp.SessionTickets.Disabled
+
+		// session ticket key rotation
+		tlsApp.SessionTickets.register(cfg)
+		ctx.OnCancel(func() {
+			// do cleanup when the context is cancelled because,
+			// though unlikely, it is possible that a context
+			// needing a TLS server config could exist for less
+			// than the lifetime of the whole app
+			tlsApp.SessionTickets.unregister(cfg)
+		})
+	}
 
 	// TODO: Clean up session ticket active locks in storage if app (or process) is being closed!
@@ -36,8 +36,8 @@ func init() {
 // TLS represents a process-wide TLS configuration.
 type TLS struct {
 	Certificates   map[string]json.RawMessage `json:"certificates,omitempty"`
-	Automation     AutomationConfig           `json:"automation"`
-	SessionTickets SessionTicketService       `json:"session_tickets"`
+	Automation     *AutomationConfig          `json:"automation,omitempty"`
+	SessionTickets *SessionTicketService      `json:"session_tickets,omitempty"`
 
 	certificateLoaders []CertificateLoader
 	certCache          *certmagic.Cache
@@ -58,26 +58,28 @@ func (TLS) CaddyModule() caddy.ModuleInfo {
 func (t *TLS) Provision(ctx caddy.Context) error {
 	t.ctx = ctx
 
-	// set up the certificate cache
-	// TODO: this makes a new cache every time; better to only make a new
-	// cache (or even better, add/remove only what is necessary) if the
-	// certificates config has been updated
-	t.certCache = certmagic.NewCache(certmagic.CacheOptions{
+	// set up a new certificate cache; this (re)loads all certificates
+	cacheOpts := certmagic.CacheOptions{
 		GetConfigForCert: func(cert certmagic.Certificate) (certmagic.Config, error) {
 			return t.getConfigForName(cert.Names[0])
 		},
-		OCSPCheckInterval:  time.Duration(t.Automation.OCSPCheckInterval),
-		RenewCheckInterval: time.Duration(t.Automation.RenewCheckInterval),
-	})
+	}
+	if t.Automation != nil {
+		cacheOpts.OCSPCheckInterval = time.Duration(t.Automation.OCSPCheckInterval)
+		cacheOpts.RenewCheckInterval = time.Duration(t.Automation.RenewCheckInterval)
+	}
+	t.certCache = certmagic.NewCache(cacheOpts)
 
 	// automation/management policies
-	for i, ap := range t.Automation.Policies {
-		val, err := ctx.LoadModuleInline("module", "tls.management", ap.ManagementRaw)
-		if err != nil {
-			return fmt.Errorf("loading TLS automation management module: %s", err)
+	if t.Automation != nil {
+		for i, ap := range t.Automation.Policies {
+			val, err := ctx.LoadModuleInline("module", "tls.management", ap.ManagementRaw)
+			if err != nil {
+				return fmt.Errorf("loading TLS automation management module: %s", err)
+			}
+			t.Automation.Policies[i].Management = val.(ManagerMaker)
+			t.Automation.Policies[i].ManagementRaw = nil // allow GC to deallocate
 		}
-		t.Automation.Policies[i].Management = val.(ManagerMaker)
-		t.Automation.Policies[i].ManagementRaw = nil // allow GC to deallocate
 	}
 
 	// certificate loaders
@@ -93,19 +95,22 @@ func (t *TLS) Provision(ctx caddy.Context) error {
 	}
 
 	// session ticket ephemeral keys (STEK) service and provider
-	err := t.SessionTickets.provision(ctx)
-	if err != nil {
-		return fmt.Errorf("provisioning session tickets configuration: %v", err)
+	if t.SessionTickets != nil {
+		err := t.SessionTickets.provision(ctx)
+		if err != nil {
+			return fmt.Errorf("provisioning session tickets configuration: %v", err)
+		}
 	}
 
 	// on-demand rate limiting
-	if t.Automation.OnDemand != nil && t.Automation.OnDemand.RateLimit != nil {
+	if t.Automation != nil && t.Automation.OnDemand != nil && t.Automation.OnDemand.RateLimit != nil {
 		limit := rate.Every(time.Duration(t.Automation.OnDemand.RateLimit.Interval))
-		// TODO: Burst size is not updated, see https://github.com/golang/go/issues/23575
 		onDemandRateLimiter.SetLimit(limit)
+		onDemandRateLimiter.SetBurst(t.Automation.OnDemand.RateLimit.Burst)
 	} else {
 		// if no rate limit is specified, be sure to remove any existing limit
 		onDemandRateLimiter.SetLimit(0)
+		onDemandRateLimiter.SetBurst(0)
 	}
 
 	// load manual/static (unmanaged) certificates - we do this in
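The SetBurst call that replaces the TODO comes from the updated golang.org/x/time/rate package. A small standalone sketch of adjusting both rate and burst on a long-lived limiter at runtime:

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// a shared, long-lived limiter, similar in spirit to onDemandRateLimiter above
	limiter := rate.NewLimiter(rate.Every(1*time.Minute), 10)

	// with the newer x/time, the burst can be changed after creation,
	// not just the rate
	limiter.SetLimit(rate.Every(10 * time.Second))
	limiter.SetBurst(20)

	fmt.Println(limiter.Limit(), limiter.Burst(), limiter.Allow())
}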
@@ -127,9 +132,6 @@ func (t *TLS) Provision(ctx caddy.Context) error {
 		}
 	}
 
-	t.storageCleanTicker = time.NewTicker(storageCleanInterval)
-	t.storageCleanStop = make(chan struct{})
-
 	return nil
 }
 
@@ -156,17 +158,23 @@ func (t *TLS) Start() error {
 
+// Stop stops the TLS module and cleans up any allocations.
+func (t *TLS) Stop() error {
+	// stop the storage cleaner goroutine and ticker
+	close(t.storageCleanStop)
+	t.storageCleanTicker.Stop()
+	return nil
+}
+
 // Cleanup frees up resources allocated during Provision.
 func (t *TLS) Cleanup() error {
 	// stop the certificate cache
 	if t.certCache != nil {
 		t.certCache.Stop()
 	}
 
 	// stop the session ticket rotation goroutine
-	t.SessionTickets.stop()
-
-	// stop the storage cleaner goroutine and ticker
-	close(t.storageCleanStop)
-	t.storageCleanTicker.Stop()
+	if t.SessionTickets != nil {
+		t.SessionTickets.stop()
+	}
 
 	return nil
 }
@@ -202,15 +210,17 @@ func (t *TLS) getConfigForName(name string) (certmagic.Config, error) {
 }
 
 func (t *TLS) getAutomationPolicyForName(name string) AutomationPolicy {
-	for _, ap := range t.Automation.Policies {
-		if len(ap.Hosts) == 0 {
-			// no host filter is an automatic match
-			return ap
-		}
-		for _, h := range ap.Hosts {
-			if h == name {
-				return ap
-			}
-		}
-	}
+	if t.Automation != nil {
+		for _, ap := range t.Automation.Policies {
+			if len(ap.Hosts) == 0 {
+				// no host filter is an automatic match
+				return ap
+			}
+			for _, h := range ap.Hosts {
+				if h == name {
+					return ap
+				}
+			}
+		}
+	}
 
@@ -228,6 +238,8 @@ func (t *TLS) AllMatchingCertificates(san string) []certmagic.Certificate {
 // if it was not recently done, and starts a goroutine that runs
 // the operation at every tick from t.storageCleanTicker.
 func (t *TLS) keepStorageClean() {
+	t.storageCleanTicker = time.NewTicker(storageCleanInterval)
+	t.storageCleanStop = make(chan struct{})
 	go func() {
 		for {
 			select {
@@ -259,10 +271,12 @@ func (t *TLS) cleanStorageUnits() {
 	certmagic.CleanStorage(t.ctx.Storage(), options)
 
 	// then clean each storage defined in ACME automation policies
-	for _, ap := range t.Automation.Policies {
-		if acmeMgmt, ok := ap.Management.(ACMEManagerMaker); ok {
-			if acmeMgmt.storage != nil {
-				certmagic.CleanStorage(acmeMgmt.storage, options)
+	if t.Automation != nil {
+		for _, ap := range t.Automation.Policies {
+			if acmeMgmt, ok := ap.Management.(ACMEManagerMaker); ok {
+				if acmeMgmt.storage != nil {
+					certmagic.CleanStorage(acmeMgmt.storage, options)
+				}
 			}
 		}
 	}
@@ -321,9 +335,9 @@ func (ap AutomationPolicy) makeCertMagicConfig(ctx caddy.Context) certmagic.Conf
 
 // ChallengesConfig configures the ACME challenges.
 type ChallengesConfig struct {
-	HTTP    HTTPChallengeConfig    `json:"http"`
-	TLSALPN TLSALPNChallengeConfig `json:"tls-alpn"`
-	DNSRaw  json.RawMessage        `json:"dns,omitempty"`
+	HTTP    *HTTPChallengeConfig    `json:"http,omitempty"`
+	TLSALPN *TLSALPNChallengeConfig `json:"tls-alpn,omitempty"`
+	DNSRaw  json.RawMessage         `json:"dns,omitempty"`
 
 	DNS challenge.Provider `json:"-"`
 }
@@ -377,4 +391,11 @@ var (
 	storageCleanMu sync.Mutex
 )
 
+// Interface guards
+var (
+	_ caddy.App          = (*TLS)(nil)
+	_ caddy.Provisioner  = (*TLS)(nil)
+	_ caddy.CleanerUpper = (*TLS)(nil)
+)
+
 const automateKey = "automate"
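The added var block is the standard compile-time interface guard idiom: assigning a typed nil to the blank identifier forces the compiler to verify that *TLS still satisfies each interface, at zero runtime cost. A tiny self-contained sketch of the same idiom with made-up types:

package main

type App interface{ Start() error }

type myApp struct{}

func (myApp) Start() error { return nil }

// compile-time guard: this line fails to build if myApp ever
// stops implementing App
var _ App = (*myApp)(nil)

func main() {}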
|
Loading…
Reference in New Issue
Block a user