Mirror of https://github.com/caddyserver/caddy.git (synced 2024-11-29 12:16:16 +08:00)

Merge branch 'master' into md_changes
Commit: c431a07af5

.gitattributes (vendored), 19 changed lines
@@ -1,7 +1,14 @@
*.bash text eol=lf whitespace=blank-at-eol,space-before-tab,tab-in-indent,trailing-space,tabwidth=4
*.sh text eol=lf whitespace=blank-at-eol,space-before-tab,tab-in-indent,trailing-space,tabwidth=4
# shell scripts should not use tabs to indent!
*.bash text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2
*.sh text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2

# files for systemd
*.path text eol=lf whitespace=blank-at-eol,space-before-tab,tab-in-indent,trailing-space,tabwidth=4
*.service text eol=lf whitespace=blank-at-eol,space-before-tab,tab-in-indent,trailing-space,tabwidth=4
*.timer text eol=lf whitespace=blank-at-eol,space-before-tab,tab-in-indent,trailing-space,tabwidth=4
# files for systemd (shell-similar)
*.path text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2
*.service text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2
*.timer text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2

# go fmt will enforce this, but in case a user has not called "go fmt" allow GIT to catch this:
*.go text eol=lf core.whitespace whitespace=indent-with-non-tab,trailing-space,tabwidth=4

*.yml text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2
.git* text eol=auto core.whitespace whitespace=trailing-space
@@ -20,10 +20,10 @@ anything about Web development

### Bug reports

First, please [search this repository](https://github.com/mholt/caddy/search?q=&type=Issues&utf8=%E2%9C%93)
Please [search this repository](https://github.com/mholt/caddy/search?q=&type=Issues&utf8=%E2%9C%93)
with a variety of keywords to ensure your bug is not already reported.

If not, [open an issue](https://github.com/mholt/caddy/issues) and answer the
If unique, [open an issue](https://github.com/mholt/caddy/issues) and answer the
questions so we can understand and reproduce the problematic behavior.

The burden is on you to convince us that it is actually a bug in Caddy. This is
@@ -39,12 +39,16 @@ getting free help. If we helped you, please consider

### Minor improvements and new tests

Submit [pull requests](https://github.com/mholt/caddy/pulls) at any time. Make
sure to write tests to assert your change is working properly and is thoroughly
covered. We'll ask most pull requests to be
Submit [pull requests](https://github.com/mholt/caddy/pulls) at any time for
minor changes or new tests. Make sure to write tests to assert your change is
working properly and is thoroughly covered. We'll ask most pull requests to be
[squashed](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html),
especially with small commits.

Your pull request may be thoroughly reviewed. This is because if we accept the
PR, we also assume responsibility for it, although we would prefer you to
help maintain your code after it gets merged.


### Proposals, suggestions, ideas, new features

@@ -54,17 +58,23 @@ with a variety of keywords to ensure your suggestion/proposal is new.
If so, you may open either an issue or a pull request for discussion and
feedback.

The advantage of issues is that you don't have to spend time actually
implementing your idea, but you should still describe it thoroughly. The
advantage of a pull request is that we can immediately see the impact the change
will have on the project, what the code will look like, and how to improve it.
The disadvantage of pull requests is that they are unlikely to get accepted
without significant changes, or it may be rejected entirely. Don't worry, that
won't happen without an open discussion first.
The advantage of issues is that you don't have to spend time implementing your
idea, but you should still describe it thoroughly as if someone reading it would
implement the whole thing starting from scratch.

If you are going to spend significant time implementing code for a pull request,
best to open an issue first and "claim" it and get feedback before you invest
a lot of time.
The advantage of pull requests is that we can immediately see the impact the
change will have on the project, what the code will look like, and how to
improve it. The disadvantage of pull requests is that they are unlikely to get
accepted without significant changes first, or it may be rejected entirely.
Don't worry, that won't happen without an open discussion first.

If you are going to spend significant time writing code for a new pull request,
best to open an issue to "claim" it and get feedback before you invest a lot of
time.

Remember: pull requests should always be thoroughly documented both via godoc
and with at least a rough draft of documentation that might go on the website
for users to read.


### Collaborator status
@@ -75,6 +85,18 @@ push to the repository and merge other pull requests. We hope that you will
stay involved by reviewing pull requests, submitting more of your own, and
resolving issues as you are able to. Thanks for making Caddy amazing!

We ask that collaborators will conduct thorough code reviews and be nice to
new contributors. Before merging a PR, it's best to get the approval of
at least one or two other collaborators and/or the project owner. We prefer
squashed commits instead of many little, semantically-unimportant commits. Also,
CI and other post-commit hooks must pass before being merged except in certain
unusual circumstances.

Collaborator status may be removed for inactive users from time to time as
we see fit; this is not an insult, just a basic security precaution in case
the account becomes inactive or abandoned. Privileges can always be restored
later.


### Vulnerabilities

@@ -18,3 +18,7 @@

#### 6. What did you see instead (give full error messages and/or log)?


#### 7. How can someone who is starting from scratch reproduce this behavior as minimally as possible?

@@ -17,6 +17,7 @@ set -euo pipefail
: ${output_filename:="ecaddy"}

: ${git_repo:="${2:-}"}
: ${git_repo:="."}

pkg=main
ldflags=()
@@ -8,6 +8,7 @@ import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"errors"
	"os"
	"runtime"
	"testing"
@@ -95,17 +96,25 @@ func TestSaveAndLoadECCPrivateKey(t *testing.T) {

// PrivateKeysSame compares the bytes of a and b and returns true if they are the same.
func PrivateKeysSame(a, b crypto.PrivateKey) bool {
	return bytes.Equal(PrivateKeyBytes(a), PrivateKeyBytes(b))
	var abytes, bbytes []byte
	var err error

	if abytes, err = PrivateKeyBytes(a); err != nil {
		return false
	}
	if bbytes, err = PrivateKeyBytes(b); err != nil {
		return false
	}
	return bytes.Equal(abytes, bbytes)
}

// PrivateKeyBytes returns the bytes of DER-encoded key.
func PrivateKeyBytes(key crypto.PrivateKey) []byte {
	var keyBytes []byte
func PrivateKeyBytes(key crypto.PrivateKey) ([]byte, error) {
	switch key := key.(type) {
	case *rsa.PrivateKey:
		keyBytes = x509.MarshalPKCS1PrivateKey(key)
		return x509.MarshalPKCS1PrivateKey(key), nil
	case *ecdsa.PrivateKey:
		keyBytes, _ = x509.MarshalECPrivateKey(key)
		return x509.MarshalECPrivateKey(key)
	}
	return keyBytes
	return nil, errors.New("Unknown private key type")
}
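Note: with the new ([]byte, error) signature, callers of PrivateKeyBytes have to handle the marshaling error explicitly. Below is a minimal, self-contained sketch of that calling pattern; the helper body is copied from the diff above, while the key generation and printing around it are illustrative assumptions, not part of this commit.

package main

import (
	"bytes"
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"errors"
	"fmt"
	"log"
)

// PrivateKeyBytes mirrors the updated helper from the diff: it returns the
// DER encoding of the key, or an error for unsupported key types.
func PrivateKeyBytes(key crypto.PrivateKey) ([]byte, error) {
	switch key := key.(type) {
	case *rsa.PrivateKey:
		return x509.MarshalPKCS1PrivateKey(key), nil
	case *ecdsa.PrivateKey:
		return x509.MarshalECPrivateKey(key)
	}
	return nil, errors.New("unknown private key type")
}

func main() {
	// Generate two distinct ECDSA keys (illustrative only).
	k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	k2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// Callers now check the error before comparing the DER bytes.
	b1, err := PrivateKeyBytes(k1)
	if err != nil {
		log.Fatal(err)
	}
	b2, err := PrivateKeyBytes(k2)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("same key?", bytes.Equal(b1, b2)) // prints: same key? false
}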
@@ -112,12 +112,21 @@ func renewManagedCertificates(allowPrompts bool) (err error) {

	// Apply changes to the cache
	for _, cert := range renewed {
		if cert.Names[len(cert.Names)-1] == "" {
			// Special case: This is the default certificate, so we must
			// ensure it gets updated as well, otherwise the renewal
			// routine will find it and think it still needs to be renewed,
			// even though we already renewed it...
			certCacheMu.Lock()
			delete(certCache, "")
			certCacheMu.Unlock()
		}
		_, err := cacheManagedCertificate(cert.Names[0], cert.OnDemand)
		if err != nil {
			if client.AllowPrompts {
				return err // operator is present, so report error immediately
			}
			log.Printf("[ERROR] %v", err)
			log.Printf("[ERROR] Caching renewed certificate: %v", err)
		}
	}
	for _, cert := range deleted {

@@ -178,7 +187,7 @@ func updateOCSPStaples() {
		if err != nil {
			if cert.OCSP != nil {
				// if it was no staple before, that's fine, otherwise we should log the error
				log.Printf("[ERROR] Checking OCSP for %s: %v", name, err)
				log.Printf("[ERROR] Checking OCSP for %v: %v", cert.Names, err)
			}
			continue
		}
@@ -11,7 +11,7 @@ import (
	"net"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"sync/atomic"

	"github.com/mholt/caddy/caddy/https"

@@ -138,7 +138,7 @@ func Restart(newCaddyfile Input) error {
func getCertsForNewCaddyfile(newCaddyfile Input) error {
	// parse the new caddyfile only up to (and including) TLS
	// so we can know what we need to get certs for.
	configs, _, _, err := loadConfigsUpToIncludingTLS(path.Base(newCaddyfile.Path()), bytes.NewReader(newCaddyfile.Body()))
	configs, _, _, err := loadConfigsUpToIncludingTLS(filepath.Base(newCaddyfile.Path()), bytes.NewReader(newCaddyfile.Body()))
	if err != nil {
		return errors.New("loading Caddyfile: " + err.Error())
	}
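Note: the switch from path.Base to filepath.Base matters mainly on Windows, where the Caddyfile path contains backslashes; package path only splits on forward slashes, while path/filepath uses the OS-specific separator. A small illustrative comparison follows (the example path is hypothetical):

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	p := `C:\caddy\Caddyfile` // hypothetical Windows-style path

	// package path only splits on '/', so the whole string comes back unchanged.
	fmt.Println(path.Base(p)) // C:\caddy\Caddyfile

	// package filepath splits on the OS separator; on Windows this would
	// print just "Caddyfile". On Unix it behaves like path.Base.
	fmt.Println(filepath.Base(p))
}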
@@ -3,6 +3,7 @@ package setup
import (
	"fmt"
	"io/ioutil"
	"net/http"
	"text/template"

	"github.com/mholt/caddy/middleware"

@@ -17,7 +18,6 @@ func Browse(c *Controller) (middleware.Middleware, error) {
	}

	browse := browse.Browse{
		Root:          c.Root,
		Configs:       configs,
		IgnoreIndexes: false,
	}

@@ -50,6 +50,16 @@ func browseParse(c *Controller) ([]browse.Config, error) {
		} else {
			bc.PathScope = "/"
		}
		bc.Root = http.Dir(c.Root)
		theRoot, err := bc.Root.Open("/") // catch a missing path early
		if err != nil {
			return configs, err
		}
		defer theRoot.Close()
		_, err = theRoot.Readdir(-1)
		if err != nil {
			return configs, err
		}

		// Second argument would be the template file to use
		var tplText string
@ -85,7 +95,6 @@ const defaultTemplate = `<!DOCTYPE html>
|
|||
<html>
|
||||
<head>
|
||||
<title>{{.Name}}</title>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<style>
|
||||
* { padding: 0; margin: 0; }
|
||||
|
@ -106,7 +115,7 @@ h1 a:hover {
|
|||
}
|
||||
|
||||
header,
|
||||
.content {
|
||||
#summary {
|
||||
padding-left: 5%;
|
||||
padding-right: 5%;
|
||||
}
|
||||
|
@ -306,43 +315,49 @@ footer {
|
|||
</header>
|
||||
<main>
|
||||
<div class="meta">
|
||||
<div class="content">
|
||||
<div id="summary">
|
||||
<span class="meta-item"><b>{{.NumDirs}}</b> director{{if eq 1 .NumDirs}}y{{else}}ies{{end}}</span>
|
||||
<span class="meta-item"><b>{{.NumFiles}}</b> file{{if ne 1 .NumFiles}}s{{end}}</span>
|
||||
{{- if ne 0 .ItemsLimitedTo}}
|
||||
<span class="meta-item">(of which only <b>{{.ItemsLimitedTo}}</b> are displayed)</span>
|
||||
{{- end}}
|
||||
</div>
|
||||
</div>
|
||||
<div class="listing">
|
||||
<table>
|
||||
<table aria-describedby="summary">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>
|
||||
{{if and (eq .Sort "name") (ne .Order "desc")}}
|
||||
<a href="?sort=name&order=desc">Name <svg width="1em" height=".4em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a>
|
||||
{{else if and (eq .Sort "name") (ne .Order "asc")}}
|
||||
<a href="?sort=name&order=asc">Name <svg width="1em" height=".4em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a>
|
||||
{{else}}
|
||||
<a href="?sort=name&order=asc">Name</a>
|
||||
{{end}}
|
||||
{{- if and (eq .Sort "name") (ne .Order "desc")}}
|
||||
<a href="?sort=name&order=desc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Name <svg width="1em" height=".4em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a>
|
||||
{{- else if and (eq .Sort "name") (ne .Order "asc")}}
|
||||
<a href="?sort=name&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Name <svg width="1em" height=".4em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a>
|
||||
{{- else}}
|
||||
<a href="?sort=name&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Name</a>
|
||||
{{- end}}
|
||||
</th>
|
||||
<th>
|
||||
{{if and (eq .Sort "size") (ne .Order "desc")}}
|
||||
<a href="?sort=size&order=desc">Size <svg width="1em" height=".4em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a></a>
|
||||
{{else if and (eq .Sort "size") (ne .Order "asc")}}
|
||||
<a href="?sort=size&order=asc">Size <svg width="1em" height=".4em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a></a>
|
||||
{{else}}
|
||||
<a href="?sort=size&order=asc">Size</a>
|
||||
{{end}}
|
||||
{{- if and (eq .Sort "size") (ne .Order "desc")}}
|
||||
<a href="?sort=size&order=desc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Size <svg width="1em" height=".4em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a></a>
|
||||
{{- else if and (eq .Sort "size") (ne .Order "asc")}}
|
||||
<a href="?sort=size&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Size <svg width="1em" height=".4em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a></a>
|
||||
{{- else}}
|
||||
<a href="?sort=size&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Size</a>
|
||||
{{- end}}
|
||||
</th>
|
||||
<th class="hideable">
|
||||
{{if and (eq .Sort "time") (ne .Order "desc")}}
|
||||
<a href="?sort=time&order=desc">Modified <svg width="1em" height=".4em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a></a>
|
||||
{{else if and (eq .Sort "time") (ne .Order "asc")}}
|
||||
<a href="?sort=time&order=asc">Modified <svg width="1em" height=".4em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a></a>
|
||||
{{else}}
|
||||
<a href="?sort=time&order=asc">Modified</a>
|
||||
{{end}}
|
||||
{{- if and (eq .Sort "time") (ne .Order "desc")}}
|
||||
<a href="?sort=time&order=desc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Modified <svg width="1em" height=".4em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a></a>
|
||||
{{- else if and (eq .Sort "time") (ne .Order "asc")}}
|
||||
<a href="?sort=time&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Modified <svg width="1em" height=".4em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a></a>
|
||||
{{- else}}
|
||||
<a href="?sort=time&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Modified</a>
|
||||
{{- end}}
|
||||
</th>
|
||||
</tr>
|
||||
{{if .CanGoUp}}
|
||||
</thead>
|
||||
<tbody>
|
||||
{{- if .CanGoUp}}
|
||||
<tr>
|
||||
<td>
|
||||
<a href="..">
|
||||
|
@ -350,30 +365,52 @@ footer {
|
|||
</a>
|
||||
</td>
|
||||
<td>—</td>
|
||||
<td>—</td>
|
||||
<td class="hideable">—</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{range .Items}}
|
||||
{{- end}}
|
||||
{{- range .Items}}
|
||||
<tr>
|
||||
<td>
|
||||
<a href="{{.URL}}">
|
||||
{{if .IsDir}}
|
||||
{{- if .IsDir}}
|
||||
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 35.678803 28.527945"><use xlink:href="#folder"></use></svg>
|
||||
{{else}}
|
||||
{{- else}}
|
||||
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 26.604381 29.144726"><use xlink:href="#file"></use></svg>
|
||||
{{end}}
|
||||
{{- end}}
|
||||
<span class="name">{{.Name}}</span>
|
||||
</a>
|
||||
</td>
|
||||
<td>{{.HumanSize}}</td>
|
||||
<td class="hideable">{{.HumanModTime "01/02/2006 03:04:05 PM"}}</td>
|
||||
{{- if .IsDir}}
|
||||
<td data-order="-1">—</td>
|
||||
{{- else}}
|
||||
<td data-order="{{.Size}}">{{.HumanSize}}</td>
|
||||
{{- end}}
|
||||
<td class="hideable"><time datetime="{{.HumanModTime "2006-01-02T15:04:05Z"}}">{{.HumanModTime "01/02/2006 03:04:05 PM -07:00"}}</time></td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{- end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</main>
|
||||
<footer>
|
||||
Served with <a href="https://caddyserver.com">Caddy</a>
|
||||
</footer>
|
||||
<script type="text/javascript">
|
||||
function localizeDatetime(e, index, ar) {
|
||||
if (e.textContent === undefined) {
|
||||
return;
|
||||
}
|
||||
var d = new Date(e.getAttribute('datetime'));
|
||||
if (isNaN(d)) {
|
||||
d = new Date(e.textContent);
|
||||
if (isNaN(d)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
e.textContent = d.toLocaleString();
|
||||
}
|
||||
var timeList = Array.prototype.slice.call(document.getElementsByTagName("time"));
|
||||
timeList.forEach(localizeDatetime);
|
||||
</script>
|
||||
</body>
|
||||
</html>`
|
||||
|
|
|
@ -2,6 +2,7 @@ package setup
|
|||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/mholt/caddy/middleware"
|
||||
"github.com/mholt/caddy/middleware/extensions"
|
||||
|
@ -47,7 +48,7 @@ func extParse(c *Controller) ([]string, error) {
|
|||
// resourceExists returns true if the file specified at
|
||||
// root + path exists; false otherwise.
|
||||
func resourceExists(root, path string) bool {
|
||||
_, err := os.Stat(root + path)
|
||||
_, err := os.Stat(filepath.Join(root, path))
|
||||
// technically we should use os.IsNotExist(err)
|
||||
// but we don't handle any other kinds of errors anyway
|
||||
return err == nil
|
||||
|
|
|
@ -38,14 +38,14 @@ func TestHeadersParse(t *testing.T) {
|
|||
{`header /foo Foo "Bar Baz"`,
|
||||
false, []headers.Rule{
|
||||
{Path: "/foo", Headers: []headers.Header{
|
||||
{"Foo", "Bar Baz"},
|
||||
{Name: "Foo", Value: "Bar Baz"},
|
||||
}},
|
||||
}},
|
||||
{`header /bar { Foo "Bar Baz" Baz Qux }`,
|
||||
false, []headers.Rule{
|
||||
{Path: "/bar", Headers: []headers.Header{
|
||||
{"Foo", "Bar Baz"},
|
||||
{"Baz", "Qux"},
|
||||
{Name: "Foo", Value: "Bar Baz"},
|
||||
{Name: "Baz", Value: "Qux"},
|
||||
}},
|
||||
}},
|
||||
}
|
||||
|
|
dist/CHANGES.txt (vendored), 18 changed lines
@@ -1,18 +1,26 @@
CHANGES

<master>
- Built with Go 1.6.1
- New pprof directive for exposing process performance profile
- New expvar directive for exposing memory/GC performance
- ...


0.8.3 (April 26, 2016)
- Built with Go 1.6.2
- New pprof middleware for exposing process profiling endpoints
- New expvar middleware for exposing memory/GC performance
- New -restart option to force in-process restarts on Unix systems
- Only fail to start if managed certificate is expired (issue #642)
- Toggle case-sensitive path matching with environment variable
- File server now adds ETag header for static files
- browse: Replace .LinkedPath action with .BreadcrumbMap
- fastcgi: New except clause to exclude paths
- proxy: New max_conns setting to limit max connections per upstream
- proxy: Enables replaceable value for name of upstream host
- proxy: New replaceable value for name of upstream host
- templates: New utility actions for dealing with strings
- tls: Customize certificate key with key_type (+ECC)
- Internal improvements and bug fixes
- tls: Session ticket keys are now rotated
- Many other minor internal improvements and bug fixes


0.8.2 (February 25, 2016)
- On-demand TLS can obtain certificates during handshakes

dist/README.txt (vendored), 2 changed lines
@@ -1,4 +1,4 @@
CADDY 0.8.2
CADDY 0.8.3

Website
https://caddyserver.com
|
|
@ -5,7 +5,6 @@ package browse
|
|||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
|
@ -24,7 +23,6 @@ import (
|
|||
// directories in the given paths are specified.
|
||||
type Browse struct {
|
||||
Next middleware.Handler
|
||||
Root string
|
||||
Configs []Config
|
||||
IgnoreIndexes bool
|
||||
}
|
||||
|
@ -32,6 +30,7 @@ type Browse struct {
|
|||
// Config is a configuration for browsing in a particular path.
|
||||
type Config struct {
|
||||
PathScope string
|
||||
Root http.FileSystem
|
||||
Variables interface{}
|
||||
Template *template.Template
|
||||
}
|
||||
|
@ -62,6 +61,9 @@ type Listing struct {
|
|||
// And which order
|
||||
Order string
|
||||
|
||||
// If ≠0 then Items have been limited to that many elements
|
||||
ItemsLimitedTo int
|
||||
|
||||
// Optional custom variables for use in browse templates
|
||||
User interface{}
|
||||
|
||||
|
@@ -132,9 +134,20 @@ func (l byName) Less(i, j int) bool {
}

// By Size
func (l bySize) Len() int { return len(l.Items) }
func (l bySize) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }
func (l bySize) Less(i, j int) bool { return l.Items[i].Size < l.Items[j].Size }
func (l bySize) Len() int { return len(l.Items) }
func (l bySize) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }

const directoryOffset = -1 << 31 // = math.MinInt32
func (l bySize) Less(i, j int) bool {
	iSize, jSize := l.Items[i].Size, l.Items[j].Size
	if l.Items[i].IsDir {
		iSize = directoryOffset + iSize
	}
	if l.Items[j].IsDir {
		jSize = directoryOffset + jSize
	}
	return iSize < jSize
}

// By Time
func (l byTime) Len() int { return len(l.Items) }
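Note: directoryOffset shifts every directory's size far into the negative range, so directories always sort ahead of files regardless of their byte counts. A standalone sketch of the comparison with made-up items follows; the item type here is only a stand-in for the package's FileInfo, not the real one.

package main

import "fmt"

// Minimal stand-in for the browse package's list items (illustrative only).
type item struct {
	Name  string
	Size  int64
	IsDir bool
}

const directoryOffset = -1 << 31 // same constant as in the diff

// less reproduces the comparison logic of bySize.Less for two items.
func less(a, b item) bool {
	aSize, bSize := a.Size, b.Size
	if a.IsDir {
		aSize = directoryOffset + aSize
	}
	if b.IsDir {
		bSize = directoryOffset + bSize
	}
	return aSize < bSize
}

func main() {
	dir := item{Name: "photos/", Size: 4096, IsDir: true}
	file := item{Name: "notes.txt", Size: 12, IsDir: false}

	// The directory sorts first even though its raw size is larger.
	fmt.Println(less(dir, file)) // true
	fmt.Println(less(file, dir)) // false
}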
@ -172,20 +185,20 @@ func (l Listing) applySort() {
|
|||
}
|
||||
}
|
||||
|
||||
func directoryListing(files []os.FileInfo, r *http.Request, canGoUp bool, root string, ignoreIndexes bool, vars interface{}) (Listing, error) {
|
||||
var fileinfos []FileInfo
|
||||
var dirCount, fileCount int
|
||||
var urlPath = r.URL.Path
|
||||
func directoryListing(files []os.FileInfo, canGoUp bool, urlPath string) (Listing, bool) {
|
||||
var (
|
||||
fileinfos []FileInfo
|
||||
dirCount, fileCount int
|
||||
hasIndexFile bool
|
||||
)
|
||||
|
||||
for _, f := range files {
|
||||
name := f.Name()
|
||||
|
||||
// Directory is not browsable if it contains index file
|
||||
if !ignoreIndexes {
|
||||
for _, indexName := range middleware.IndexPages {
|
||||
if name == indexName {
|
||||
return Listing{}, errors.New("Directory contains index file, not browsable!")
|
||||
}
|
||||
for _, indexName := range middleware.IndexPages {
|
||||
if name == indexName {
|
||||
hasIndexFile = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -203,7 +216,7 @@ func directoryListing(files []os.FileInfo, r *http.Request, canGoUp bool, root s
|
|||
Name: f.Name(),
|
||||
Size: f.Size(),
|
||||
URL: url.String(),
|
||||
ModTime: f.ModTime(),
|
||||
ModTime: f.ModTime().UTC(),
|
||||
Mode: f.Mode(),
|
||||
})
|
||||
}
|
||||
|
@ -215,154 +228,204 @@ func directoryListing(files []os.FileInfo, r *http.Request, canGoUp bool, root s
|
|||
Items: fileinfos,
|
||||
NumDirs: dirCount,
|
||||
NumFiles: fileCount,
|
||||
Context: middleware.Context{
|
||||
Root: http.Dir(root),
|
||||
Req: r,
|
||||
URL: r.URL,
|
||||
},
|
||||
User: vars,
|
||||
}, nil
|
||||
}, hasIndexFile
|
||||
}
|
||||
|
||||
// ServeHTTP implements the middleware.Handler interface.
|
||||
// ServeHTTP determines if the request is for this plugin, and if all prerequisites are met.
|
||||
// If so, control is handed over to ServeListing.
|
||||
func (b Browse) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
|
||||
filename := b.Root + r.URL.Path
|
||||
info, err := os.Stat(filename)
|
||||
if err != nil {
|
||||
return b.Next.ServeHTTP(w, r)
|
||||
var bc *Config
|
||||
// See if there's a browse configuration to match the path
|
||||
for i := range b.Configs {
|
||||
if middleware.Path(r.URL.Path).Matches(b.Configs[i].PathScope) {
|
||||
bc = &b.Configs[i]
|
||||
goto inScope
|
||||
}
|
||||
}
|
||||
return b.Next.ServeHTTP(w, r)
|
||||
inScope:
|
||||
|
||||
// Browse works on existing directories; delegate everything else
|
||||
requestedFilepath, err := bc.Root.Open(r.URL.Path)
|
||||
if err != nil {
|
||||
switch {
|
||||
case os.IsPermission(err):
|
||||
return http.StatusForbidden, err
|
||||
case os.IsExist(err):
|
||||
return http.StatusNotFound, err
|
||||
default:
|
||||
return b.Next.ServeHTTP(w, r)
|
||||
}
|
||||
}
|
||||
defer requestedFilepath.Close()
|
||||
|
||||
info, err := requestedFilepath.Stat()
|
||||
if err != nil {
|
||||
switch {
|
||||
case os.IsPermission(err):
|
||||
return http.StatusForbidden, err
|
||||
case os.IsExist(err):
|
||||
return http.StatusGone, err
|
||||
default:
|
||||
return b.Next.ServeHTTP(w, r)
|
||||
}
|
||||
}
|
||||
if !info.IsDir() {
|
||||
return b.Next.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
// See if there's a browse configuration to match the path
|
||||
for _, bc := range b.Configs {
|
||||
if !middleware.Path(r.URL.Path).Matches(bc.PathScope) {
|
||||
continue
|
||||
}
|
||||
switch r.Method {
|
||||
case http.MethodGet, http.MethodHead:
|
||||
default:
|
||||
return http.StatusMethodNotAllowed, nil
|
||||
}
|
||||
|
||||
// Browsing navigation gets messed up if browsing a directory
|
||||
// that doesn't end in "/" (which it should, anyway)
|
||||
if r.URL.Path[len(r.URL.Path)-1] != '/' {
|
||||
http.Redirect(w, r, r.URL.Path+"/", http.StatusTemporaryRedirect)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Load directory contents
|
||||
file, err := os.Open(b.Root + r.URL.Path)
|
||||
if err != nil {
|
||||
if os.IsPermission(err) {
|
||||
return http.StatusForbidden, err
|
||||
}
|
||||
return http.StatusNotFound, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
files, err := file.Readdir(-1)
|
||||
if err != nil {
|
||||
return http.StatusForbidden, err
|
||||
}
|
||||
|
||||
// Determine if user can browse up another folder
|
||||
var canGoUp bool
|
||||
curPath := strings.TrimSuffix(r.URL.Path, "/")
|
||||
for _, other := range b.Configs {
|
||||
if strings.HasPrefix(path.Dir(curPath), other.PathScope) {
|
||||
canGoUp = true
|
||||
break
|
||||
}
|
||||
}
|
||||
// Assemble listing of directory contents
|
||||
listing, err := directoryListing(files, r, canGoUp, b.Root, b.IgnoreIndexes, bc.Variables)
|
||||
if err != nil { // directory isn't browsable
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the query vales and store them in the Listing struct
|
||||
listing.Sort, listing.Order = r.URL.Query().Get("sort"), r.URL.Query().Get("order")
|
||||
|
||||
// If the query 'sort' or 'order' is empty, check the cookies
|
||||
if listing.Sort == "" {
|
||||
sortCookie, sortErr := r.Cookie("sort")
|
||||
// if there's no sorting values in the cookies, default to "name" and "asc"
|
||||
if sortErr != nil {
|
||||
listing.Sort = "name"
|
||||
} else { // if we have values in the cookies, use them
|
||||
listing.Sort = sortCookie.Value
|
||||
}
|
||||
} else { // save the query value of 'sort' and 'order' as cookies
|
||||
http.SetCookie(w, &http.Cookie{Name: "sort", Value: listing.Sort, Path: "/"})
|
||||
http.SetCookie(w, &http.Cookie{Name: "order", Value: listing.Order, Path: "/"})
|
||||
}
|
||||
|
||||
if listing.Order == "" {
|
||||
orderCookie, orderErr := r.Cookie("order")
|
||||
// if there's no sorting values in the cookies, default to "name" and "asc"
|
||||
if orderErr != nil {
|
||||
listing.Order = "asc"
|
||||
} else { // if we have values in the cookies, use them
|
||||
listing.Order = orderCookie.Value
|
||||
}
|
||||
} else { // save the query value of 'sort' and 'order' as cookies
|
||||
http.SetCookie(w, &http.Cookie{Name: "order", Value: listing.Order, Path: "/"})
|
||||
}
|
||||
|
||||
// Apply the sorting
|
||||
listing.applySort()
|
||||
|
||||
var buf bytes.Buffer
|
||||
// check if we should provide json
|
||||
acceptHeader := strings.Join(r.Header["Accept"], ",")
|
||||
if strings.Contains(strings.ToLower(acceptHeader), "application/json") {
|
||||
var marsh []byte
|
||||
// check if we are limited
|
||||
if limitQuery := r.URL.Query().Get("limit"); limitQuery != "" {
|
||||
limit, err := strconv.Atoi(limitQuery)
|
||||
if err != nil { // if the 'limit' query can't be interpreted as a number, return err
|
||||
return http.StatusBadRequest, err
|
||||
}
|
||||
// if `limit` is equal or less than len(listing.Items) and bigger than 0, list them
|
||||
if limit <= len(listing.Items) && limit > 0 {
|
||||
marsh, err = json.Marshal(listing.Items[:limit])
|
||||
} else { // if the 'limit' query is empty, or has the wrong value, list everything
|
||||
marsh, err = json.Marshal(listing.Items)
|
||||
}
|
||||
if err != nil {
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
} else { // there's no 'limit' query, list them all
|
||||
marsh, err = json.Marshal(listing.Items)
|
||||
if err != nil {
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
}
|
||||
|
||||
// write the marshaled json to buf
|
||||
if _, err = buf.Write(marsh); err != nil {
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
|
||||
} else { // there's no 'application/json' in the 'Accept' header, browse normally
|
||||
err = bc.Template.Execute(&buf, listing)
|
||||
if err != nil {
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
|
||||
}
|
||||
|
||||
buf.WriteTo(w)
|
||||
|
||||
return http.StatusOK, nil
|
||||
// Do not reply to anything else because it might be nonsensical
|
||||
switch r.Method {
|
||||
case http.MethodGet, http.MethodHead:
|
||||
// proceed, noop
|
||||
case "PROPFIND", http.MethodOptions:
|
||||
return http.StatusNotImplemented, nil
|
||||
default:
|
||||
return b.Next.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
// Didn't qualify; pass-thru
|
||||
return b.Next.ServeHTTP(w, r)
|
||||
// Browsing navigation gets messed up if browsing a directory
|
||||
// that doesn't end in "/" (which it should, anyway)
|
||||
if !strings.HasSuffix(r.URL.Path, "/") {
|
||||
http.Redirect(w, r, r.URL.Path+"/", http.StatusTemporaryRedirect)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
return b.ServeListing(w, r, requestedFilepath, bc)
|
||||
}
|
||||
|
||||
func (b Browse) loadDirectoryContents(requestedFilepath http.File, urlPath string) (*Listing, bool, error) {
|
||||
files, err := requestedFilepath.Readdir(-1)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
// Determine if user can browse up another folder
|
||||
var canGoUp bool
|
||||
curPathDir := path.Dir(strings.TrimSuffix(urlPath, "/"))
|
||||
for _, other := range b.Configs {
|
||||
if strings.HasPrefix(curPathDir, other.PathScope) {
|
||||
canGoUp = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Assemble listing of directory contents
|
||||
listing, hasIndex := directoryListing(files, canGoUp, urlPath)
|
||||
|
||||
return &listing, hasIndex, nil
|
||||
}
|
||||
|
||||
// handleSortOrder gets and stores for a Listing the 'sort' and 'order',
|
||||
// and reads 'limit' if given. The latter is 0 if not given.
|
||||
//
|
||||
// This sets Cookies.
|
||||
func (b Browse) handleSortOrder(w http.ResponseWriter, r *http.Request, scope string) (sort string, order string, limit int, err error) {
|
||||
sort, order, limitQuery := r.URL.Query().Get("sort"), r.URL.Query().Get("order"), r.URL.Query().Get("limit")
|
||||
|
||||
// If the query 'sort' or 'order' is empty, use defaults or any values previously saved in Cookies
|
||||
switch sort {
|
||||
case "":
|
||||
sort = "name"
|
||||
if sortCookie, sortErr := r.Cookie("sort"); sortErr == nil {
|
||||
sort = sortCookie.Value
|
||||
}
|
||||
case "name", "size", "type":
|
||||
http.SetCookie(w, &http.Cookie{Name: "sort", Value: sort, Path: scope, Secure: r.TLS != nil})
|
||||
}
|
||||
|
||||
switch order {
|
||||
case "":
|
||||
order = "asc"
|
||||
if orderCookie, orderErr := r.Cookie("order"); orderErr == nil {
|
||||
order = orderCookie.Value
|
||||
}
|
||||
case "asc", "desc":
|
||||
http.SetCookie(w, &http.Cookie{Name: "order", Value: order, Path: scope, Secure: r.TLS != nil})
|
||||
}
|
||||
|
||||
if limitQuery != "" {
|
||||
limit, err = strconv.Atoi(limitQuery)
|
||||
if err != nil { // if the 'limit' query can't be interpreted as a number, return err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ServeListing returns a formatted view of 'requestedFilepath' contents'.
|
||||
func (b Browse) ServeListing(w http.ResponseWriter, r *http.Request, requestedFilepath http.File, bc *Config) (int, error) {
|
||||
listing, containsIndex, err := b.loadDirectoryContents(requestedFilepath, r.URL.Path)
|
||||
if err != nil {
|
||||
switch {
|
||||
case os.IsPermission(err):
|
||||
return http.StatusForbidden, err
|
||||
case os.IsExist(err):
|
||||
return http.StatusGone, err
|
||||
default:
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
}
|
||||
if containsIndex && !b.IgnoreIndexes { // directory isn't browsable
|
||||
return b.Next.ServeHTTP(w, r)
|
||||
}
|
||||
listing.Context = middleware.Context{
|
||||
Root: bc.Root,
|
||||
Req: r,
|
||||
URL: r.URL,
|
||||
}
|
||||
listing.User = bc.Variables
|
||||
|
||||
// Copy the query values into the Listing struct
|
||||
var limit int
|
||||
listing.Sort, listing.Order, limit, err = b.handleSortOrder(w, r, bc.PathScope)
|
||||
if err != nil {
|
||||
return http.StatusBadRequest, err
|
||||
}
|
||||
|
||||
listing.applySort()
|
||||
|
||||
if limit > 0 && limit <= len(listing.Items) {
|
||||
listing.Items = listing.Items[:limit]
|
||||
listing.ItemsLimitedTo = limit
|
||||
}
|
||||
|
||||
var buf *bytes.Buffer
|
||||
acceptHeader := strings.ToLower(strings.Join(r.Header["Accept"], ","))
|
||||
switch {
|
||||
case strings.Contains(acceptHeader, "application/json"):
|
||||
if buf, err = b.formatAsJSON(listing, bc); err != nil {
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
|
||||
default: // There's no 'application/json' in the 'Accept' header; browse normally
|
||||
if buf, err = b.formatAsHTML(listing, bc); err != nil {
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
|
||||
}
|
||||
|
||||
buf.WriteTo(w)
|
||||
|
||||
return http.StatusOK, nil
|
||||
}
|
||||
|
||||
func (b Browse) formatAsJSON(listing *Listing, bc *Config) (*bytes.Buffer, error) {
|
||||
marsh, err := json.Marshal(listing.Items)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_, err = buf.Write(marsh)
|
||||
return buf, err
|
||||
}
|
||||
|
||||
func (b Browse) formatAsHTML(listing *Listing, bc *Config) (*bytes.Buffer, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
err := bc.Template.Execute(buf, listing)
|
||||
return buf, err
|
||||
}
|
||||
|
|
|
@ -112,13 +112,12 @@ func TestBrowseHTTPMethods(t *testing.T) {
|
|||
|
||||
b := Browse{
|
||||
Next: middleware.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) {
|
||||
t.Fatalf("Next shouldn't be called")
|
||||
return 0, nil
|
||||
return http.StatusTeapot, nil // not t.Fatalf, or we will not see what other methods yield
|
||||
}),
|
||||
Root: "./testdata",
|
||||
Configs: []Config{
|
||||
{
|
||||
PathScope: "/photos",
|
||||
Root: http.Dir("./testdata"),
|
||||
Template: tmpl,
|
||||
},
|
||||
},
|
||||
|
@ -128,14 +127,8 @@ func TestBrowseHTTPMethods(t *testing.T) {
|
|||
for method, expected := range map[string]int{
|
||||
http.MethodGet: http.StatusOK,
|
||||
http.MethodHead: http.StatusOK,
|
||||
http.MethodOptions: http.StatusMethodNotAllowed,
|
||||
http.MethodPost: http.StatusMethodNotAllowed,
|
||||
http.MethodPut: http.StatusMethodNotAllowed,
|
||||
http.MethodPatch: http.StatusMethodNotAllowed,
|
||||
http.MethodDelete: http.StatusMethodNotAllowed,
|
||||
"COPY": http.StatusMethodNotAllowed,
|
||||
"MOVE": http.StatusMethodNotAllowed,
|
||||
"MKCOL": http.StatusMethodNotAllowed,
|
||||
http.MethodOptions: http.StatusNotImplemented,
|
||||
"PROPFIND": http.StatusNotImplemented,
|
||||
} {
|
||||
req, err := http.NewRequest(method, "/photos/", nil)
|
||||
if err != nil {
|
||||
|
@ -160,10 +153,10 @@ func TestBrowseTemplate(t *testing.T) {
|
|||
t.Fatalf("Next shouldn't be called")
|
||||
return 0, nil
|
||||
}),
|
||||
Root: "./testdata",
|
||||
Configs: []Config{
|
||||
{
|
||||
PathScope: "/photos",
|
||||
Root: http.Dir("./testdata"),
|
||||
Template: tmpl,
|
||||
},
|
||||
},
|
||||
|
@ -215,16 +208,16 @@ func TestBrowseJson(t *testing.T) {
|
|||
t.Fatalf("Next shouldn't be called")
|
||||
return 0, nil
|
||||
}),
|
||||
Root: "./testdata",
|
||||
Configs: []Config{
|
||||
{
|
||||
PathScope: "/photos/",
|
||||
Root: http.Dir("./testdata"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
//Getting the listing from the ./testdata/photos, the listing returned will be used to validate test results
|
||||
testDataPath := b.Root + "/photos/"
|
||||
testDataPath := filepath.Join("./testdata", "photos")
|
||||
file, err := os.Open(testDataPath)
|
||||
if err != nil {
|
||||
if os.IsPermission(err) {
|
||||
|
@ -245,7 +238,7 @@ func TestBrowseJson(t *testing.T) {
|
|||
// Tests fail in CI environment because all file mod times are the same for
|
||||
// some reason, making the sorting unpredictable. To hack around this,
|
||||
// we ensure here that each file has a different mod time.
|
||||
chTime := f.ModTime().Add(-(time.Duration(i) * time.Second))
|
||||
chTime := f.ModTime().UTC().Add(-(time.Duration(i) * time.Second))
|
||||
if err := os.Chtimes(filepath.Join(testDataPath, name), chTime, chTime); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -322,7 +315,7 @@ func TestBrowseJson(t *testing.T) {
|
|||
code, err := b.ServeHTTP(rec, req)
|
||||
|
||||
if code != http.StatusOK {
|
||||
t.Fatalf("Wrong status, expected %d, got %d", http.StatusOK, code)
|
||||
t.Fatalf("In test %d: Wrong status, expected %d, got %d", i, http.StatusOK, code)
|
||||
}
|
||||
if rec.HeaderMap.Get("Content-Type") != "application/json; charset=utf-8" {
|
||||
t.Fatalf("Expected Content type to be application/json; charset=utf-8, but got %s ", rec.HeaderMap.Get("Content-Type"))
|
||||
|
|
|
@ -235,7 +235,7 @@ func (c Context) ToUpper(s string) string {
|
|||
return strings.ToUpper(s)
|
||||
}
|
||||
|
||||
// Split is a passthrough to strings.Split. It will split the first argument at each instance of the seperator and return a slice of strings.
|
||||
// Split is a passthrough to strings.Split. It will split the first argument at each instance of the separator and return a slice of strings.
|
||||
func (c Context) Split(s string, sep string) []string {
|
||||
return strings.Split(s, sep)
|
||||
}
|
||||
|
|
|
@ -200,7 +200,7 @@ func DisabledTest(t *testing.T) {
|
|||
listener, err := net.Listen("tcp", ipPort)
|
||||
if err != nil {
|
||||
// handle error
|
||||
log.Println("listener creatation failed: ", err)
|
||||
log.Println("listener creation failed: ", err)
|
||||
}
|
||||
|
||||
srv := new(FastCGIServer)
|
||||
|
|
|
@ -2,10 +2,12 @@ package middleware
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
|
@ -40,12 +42,11 @@ type fileHandler struct {
|
|||
}
|
||||
|
||||
func (fh *fileHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
|
||||
upath := r.URL.Path
|
||||
if !strings.HasPrefix(upath, "/") {
|
||||
upath = "/" + upath
|
||||
r.URL.Path = upath
|
||||
// r.URL.Path has already been cleaned in caddy/server by path.Clean().
|
||||
if r.URL.Path == "" {
|
||||
r.URL.Path = "/"
|
||||
}
|
||||
return fh.serveFile(w, r, path.Clean(upath))
|
||||
return fh.serveFile(w, r, r.URL.Path)
|
||||
}
|
||||
|
||||
// serveFile writes the specified file to the HTTP response.
|
||||
|
@@ -66,7 +67,8 @@ func (fh *fileHandler) serveFile(w http.ResponseWriter, r *http.Request, name st
			return http.StatusForbidden, err
		}
		// Likely the server is under load and ran out of file descriptors
		w.Header().Set("Retry-After", "5") // TODO: 5 seconds enough delay? Or too much?
		backoff := int(3 + rand.Int31()%3) // 3–5 seconds to prevent a stampede
		w.Header().Set("Retry-After", strconv.Itoa(backoff))
		return http.StatusServiceUnavailable, err
	}
	defer f.Close()
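Note: the replacement picks a small random backoff so that many clients hitting an overloaded server do not all retry at the same instant. A standalone sketch of the same jitter calculation follows; the response recorder and printing are illustrative only.

package main

import (
	"fmt"
	"math/rand"
	"net/http/httptest"
	"strconv"
)

func main() {
	w := httptest.NewRecorder()

	// Same calculation as in the diff: a 3 to 5 second backoff.
	backoff := int(3 + rand.Int31()%3)
	w.Header().Set("Retry-After", strconv.Itoa(backoff))

	fmt.Println(w.Header().Get("Retry-After")) // "3", "4", or "5"
}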
@ -86,13 +88,13 @@ func (fh *fileHandler) serveFile(w http.ResponseWriter, r *http.Request, name st
|
|||
url := r.URL.Path
|
||||
if d.IsDir() {
|
||||
// Ensure / at end of directory url
|
||||
if url[len(url)-1] != '/' {
|
||||
if !strings.HasSuffix(url, "/") {
|
||||
redirect(w, r, path.Base(url)+"/")
|
||||
return http.StatusMovedPermanently, nil
|
||||
}
|
||||
} else {
|
||||
// Ensure no / at end of file url
|
||||
if url[len(url)-1] == '/' {
|
||||
if strings.HasSuffix(url, "/") {
|
||||
redirect(w, r, "../"+path.Base(url))
|
||||
return http.StatusMovedPermanently, nil
|
||||
}
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"errors"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
@ -11,23 +12,30 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
var testDir = filepath.Join(os.TempDir(), "caddy_testdir")
|
||||
var ErrCustom = errors.New("Custom Error")
|
||||
var (
|
||||
ErrCustom = errors.New("Custom Error")
|
||||
|
||||
testDir = filepath.Join(os.TempDir(), "caddy_testdir")
|
||||
testWebRoot = filepath.Join(testDir, "webroot")
|
||||
)
|
||||
|
||||
// testFiles is a map with relative paths to test files as keys and file content as values.
|
||||
// The map represents the following structure:
|
||||
// - $TEMP/caddy_testdir/
|
||||
// '-- file1.html
|
||||
// '-- dirwithindex/
|
||||
// '---- index.html
|
||||
// '-- dir/
|
||||
// '---- file2.html
|
||||
// '---- hidden.html
|
||||
// '-- unreachable.html
|
||||
// '-- webroot/
|
||||
// '---- file1.html
|
||||
// '---- dirwithindex/
|
||||
// '------ index.html
|
||||
// '---- dir/
|
||||
// '------ file2.html
|
||||
// '------ hidden.html
|
||||
var testFiles = map[string]string{
|
||||
"file1.html": "<h1>file1.html</h1>",
|
||||
filepath.Join("dirwithindex", "index.html"): "<h1>dirwithindex/index.html</h1>",
|
||||
filepath.Join("dir", "file2.html"): "<h1>dir/file2.html</h1>",
|
||||
filepath.Join("dir", "hidden.html"): "<h1>dir/hidden.html</h1>",
|
||||
"unreachable.html": "<h1>must not leak</h1>",
|
||||
filepath.Join("webroot", "file1.html"): "<h1>file1.html</h1>",
|
||||
filepath.Join("webroot", "dirwithindex", "index.html"): "<h1>dirwithindex/index.html</h1>",
|
||||
filepath.Join("webroot", "dir", "file2.html"): "<h1>dir/file2.html</h1>",
|
||||
filepath.Join("webroot", "dir", "hidden.html"): "<h1>dir/hidden.html</h1>",
|
||||
}
|
||||
|
||||
// TestServeHTTP covers positive scenarios when serving files.
|
||||
|
@ -36,7 +44,7 @@ func TestServeHTTP(t *testing.T) {
|
|||
beforeServeHTTPTest(t)
|
||||
defer afterServeHTTPTest(t)
|
||||
|
||||
fileserver := FileServer(http.Dir(testDir), []string{"dir/hidden.html"})
|
||||
fileserver := FileServer(http.Dir(testWebRoot), []string{"dir/hidden.html"})
|
||||
|
||||
movedPermanently := "Moved Permanently"
|
||||
|
||||
|
@ -142,11 +150,20 @@ func TestServeHTTP(t *testing.T) {
|
|||
url: "https://foo/hidden.html",
|
||||
expectedStatus: http.StatusNotFound,
|
||||
},
|
||||
// Test 17 - try to get below the root directory.
|
||||
{
|
||||
url: "https://foo/%2f..%2funreachable.html",
|
||||
expectedStatus: http.StatusNotFound,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
responseRecorder := httptest.NewRecorder()
|
||||
request, err := http.NewRequest("GET", test.url, strings.NewReader(""))
|
||||
request, err := http.NewRequest("GET", test.url, nil)
|
||||
// prevent any URL sanitization within Go: we need unmodified paths here
|
||||
if u, _ := url.Parse(test.url); u.RawPath != "" {
|
||||
request.URL.Path = u.RawPath
|
||||
}
|
||||
status, err := fileserver.ServeHTTP(responseRecorder, request)
|
||||
etag := responseRecorder.Header().Get("Etag")
|
||||
|
||||
|
@ -176,7 +193,7 @@ func TestServeHTTP(t *testing.T) {
|
|||
// beforeServeHTTPTest creates a test directory with the structure, defined in the variable testFiles
|
||||
func beforeServeHTTPTest(t *testing.T) {
|
||||
// make the root test dir
|
||||
err := os.Mkdir(testDir, os.ModePerm)
|
||||
err := os.MkdirAll(testWebRoot, os.ModePerm)
|
||||
if err != nil {
|
||||
if !os.IsExist(err) {
|
||||
t.Fatalf("Failed to create test dir. Error was: %v", err)
|
||||
|
|
|
@ -20,13 +20,14 @@ type Headers struct {
|
|||
// ServeHTTP implements the middleware.Handler interface and serves requests,
|
||||
// setting headers on the response according to the configured rules.
|
||||
func (h Headers) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
|
||||
replacer := middleware.NewReplacer(r, nil, "")
|
||||
for _, rule := range h.Rules {
|
||||
if middleware.Path(r.URL.Path).Matches(rule.Path) {
|
||||
for _, header := range rule.Headers {
|
||||
if strings.HasPrefix(header.Name, "-") {
|
||||
w.Header().Del(strings.TrimLeft(header.Name, "-"))
|
||||
} else {
|
||||
w.Header().Set(header.Name, header.Value)
|
||||
w.Header().Set(header.Name, replacer.Replace(header.Value))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3,12 +3,17 @@ package headers
|
|||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/mholt/caddy/middleware"
|
||||
)
|
||||
|
||||
func TestHeaders(t *testing.T) {
|
||||
hostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
t.Fatalf("Could not determine hostname: %v", err)
|
||||
}
|
||||
for i, test := range []struct {
|
||||
from string
|
||||
name string
|
||||
|
@ -17,6 +22,7 @@ func TestHeaders(t *testing.T) {
|
|||
{"/a", "Foo", "Bar"},
|
||||
{"/a", "Bar", ""},
|
||||
{"/a", "Baz", ""},
|
||||
{"/a", "ServerName", hostname},
|
||||
{"/b", "Foo", ""},
|
||||
{"/b", "Bar", "Removed in /a"},
|
||||
} {
|
||||
|
@ -27,6 +33,7 @@ func TestHeaders(t *testing.T) {
|
|||
Rules: []Rule{
|
||||
{Path: "/a", Headers: []Header{
|
||||
{Name: "Foo", Value: "Bar"},
|
||||
{Name: "ServerName", Value: "{hostname}"},
|
||||
{Name: "-Bar"},
|
||||
}},
|
||||
},
|
||||
|
|
|
@ -3,8 +3,10 @@ package proxy
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
|
@ -41,7 +43,8 @@ type UpstreamHost struct {
|
|||
Fails int32
|
||||
FailTimeout time.Duration
|
||||
Unhealthy bool
|
||||
ExtraHeaders http.Header
|
||||
UpstreamHeaders http.Header
|
||||
DownstreamHeaders http.Header
|
||||
CheckDown UpstreamHostDownFunc
|
||||
WithoutPathPrefix string
|
||||
MaxConns int64
|
||||
|
@ -75,71 +78,160 @@ var tryDuration = 60 * time.Second
|
|||
// ServeHTTP satisfies the middleware.Handler interface.
|
||||
func (p Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
|
||||
for _, upstream := range p.Upstreams {
|
||||
if middleware.Path(r.URL.Path).Matches(upstream.From()) && upstream.AllowedPath(r.URL.Path) {
|
||||
var replacer middleware.Replacer
|
||||
start := time.Now()
|
||||
requestHost := r.Host
|
||||
|
||||
// Since Select() should give us "up" hosts, keep retrying
|
||||
// hosts until timeout (or until we get a nil host).
|
||||
for time.Now().Sub(start) < tryDuration {
|
||||
host := upstream.Select()
|
||||
if host == nil {
|
||||
return http.StatusBadGateway, errUnreachable
|
||||
}
|
||||
proxy := host.ReverseProxy
|
||||
r.Host = host.Name
|
||||
if rr, ok := w.(*middleware.ResponseRecorder); ok && rr.Replacer != nil {
|
||||
rr.Replacer.Set("upstream", host.Name)
|
||||
}
|
||||
|
||||
if baseURL, err := url.Parse(host.Name); err == nil {
|
||||
r.Host = baseURL.Host
|
||||
if proxy == nil {
|
||||
proxy = NewSingleHostReverseProxy(baseURL, host.WithoutPathPrefix)
|
||||
}
|
||||
} else if proxy == nil {
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
var extraHeaders http.Header
|
||||
if host.ExtraHeaders != nil {
|
||||
extraHeaders = make(http.Header)
|
||||
if replacer == nil {
|
||||
rHost := r.Host
|
||||
r.Host = requestHost
|
||||
replacer = middleware.NewReplacer(r, nil, "")
|
||||
r.Host = rHost
|
||||
}
|
||||
for header, values := range host.ExtraHeaders {
|
||||
for _, value := range values {
|
||||
extraHeaders.Add(header,
|
||||
replacer.Replace(value))
|
||||
if header == "Host" {
|
||||
r.Host = replacer.Replace(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
atomic.AddInt64(&host.Conns, 1)
|
||||
backendErr := proxy.ServeHTTP(w, r, extraHeaders)
|
||||
atomic.AddInt64(&host.Conns, -1)
|
||||
if backendErr == nil {
|
||||
return 0, nil
|
||||
}
|
||||
timeout := host.FailTimeout
|
||||
if timeout == 0 {
|
||||
timeout = 10 * time.Second
|
||||
}
|
||||
atomic.AddInt32(&host.Fails, 1)
|
||||
go func(host *UpstreamHost, timeout time.Duration) {
|
||||
time.Sleep(timeout)
|
||||
atomic.AddInt32(&host.Fails, -1)
|
||||
}(host, timeout)
|
||||
}
|
||||
return http.StatusBadGateway, errUnreachable
|
||||
if !middleware.Path(r.URL.Path).Matches(upstream.From()) ||
|
||||
!upstream.AllowedPath(r.URL.Path) {
|
||||
continue
|
||||
}
|
||||
|
||||
var replacer middleware.Replacer
|
||||
start := time.Now()
|
||||
|
||||
outreq := createUpstreamRequest(r)
|
||||
|
||||
// Since Select() should give us "up" hosts, keep retrying
|
||||
// hosts until timeout (or until we get a nil host).
|
||||
for time.Now().Sub(start) < tryDuration {
|
||||
host := upstream.Select()
|
||||
if host == nil {
|
||||
return http.StatusBadGateway, errUnreachable
|
||||
}
|
||||
if rr, ok := w.(*middleware.ResponseRecorder); ok && rr.Replacer != nil {
|
||||
rr.Replacer.Set("upstream", host.Name)
|
||||
}
|
||||
|
||||
outreq.Host = host.Name
|
||||
if host.UpstreamHeaders != nil {
|
||||
if replacer == nil {
|
||||
rHost := r.Host
|
||||
replacer = middleware.NewReplacer(r, nil, "")
|
||||
outreq.Host = rHost
|
||||
}
|
||||
if v, ok := host.UpstreamHeaders["Host"]; ok {
|
||||
r.Host = replacer.Replace(v[len(v)-1])
|
||||
}
|
||||
// Modify headers for request that will be sent to the upstream host
|
||||
upHeaders := createHeadersByRules(host.UpstreamHeaders, r.Header, replacer)
|
||||
for k, v := range upHeaders {
|
||||
outreq.Header[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
var downHeaderUpdateFn respUpdateFn
|
||||
if host.DownstreamHeaders != nil {
|
||||
if replacer == nil {
|
||||
rHost := r.Host
|
||||
replacer = middleware.NewReplacer(r, nil, "")
|
||||
outreq.Host = rHost
|
||||
}
|
||||
//Creates a function that is used to update headers the response received by the reverse proxy
|
||||
downHeaderUpdateFn = createRespHeaderUpdateFn(host.DownstreamHeaders, replacer)
|
||||
}
|
||||
|
||||
proxy := host.ReverseProxy
|
||||
if baseURL, err := url.Parse(host.Name); err == nil {
|
||||
r.Host = baseURL.Host
|
||||
if proxy == nil {
|
||||
proxy = NewSingleHostReverseProxy(baseURL, host.WithoutPathPrefix)
|
||||
}
|
||||
} else if proxy == nil {
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
|
||||
atomic.AddInt64(&host.Conns, 1)
|
||||
backendErr := proxy.ServeHTTP(w, outreq, downHeaderUpdateFn)
|
||||
atomic.AddInt64(&host.Conns, -1)
|
||||
if backendErr == nil {
|
||||
return 0, nil
|
||||
}
|
||||
timeout := host.FailTimeout
|
||||
if timeout == 0 {
|
||||
timeout = 10 * time.Second
|
||||
}
|
||||
atomic.AddInt32(&host.Fails, 1)
|
||||
go func(host *UpstreamHost, timeout time.Duration) {
|
||||
time.Sleep(timeout)
|
||||
atomic.AddInt32(&host.Fails, -1)
|
||||
}(host, timeout)
|
||||
}
|
||||
return http.StatusBadGateway, errUnreachable
|
||||
}
|
||||
|
||||
return p.Next.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
// createUpstreamRequest shallow-copies r into a new request
// that can be sent upstream.
func createUpstreamRequest(r *http.Request) *http.Request {
	outreq := new(http.Request)
	*outreq = *r // includes shallow copies of maps, but okay

	// Remove hop-by-hop headers to the backend. Especially
	// important is "Connection" because we want a persistent
	// connection, regardless of what the client sent to us. This
	// is modifying the same underlying map from r (shallow
	// copied above) so we only copy it if necessary.
	for _, h := range hopHeaders {
		if outreq.Header.Get(h) != "" {
			outreq.Header = make(http.Header)
			copyHeader(outreq.Header, r.Header)
			outreq.Header.Del(h)
		}
	}

	if clientIP, _, err := net.SplitHostPort(r.RemoteAddr); err == nil {
		// If we aren't the first proxy, retain prior
		// X-Forwarded-For information as a comma+space
		// separated list and fold multiple headers into one.
		if prior, ok := outreq.Header["X-Forwarded-For"]; ok {
			clientIP = strings.Join(prior, ", ") + ", " + clientIP
		}
		outreq.Header.Set("X-Forwarded-For", clientIP)
	}

	return outreq
}

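Note: the X-Forwarded-For handling above folds any prior proxy hops plus the current client address into a single comma-separated header. A self-contained sketch of just that step, with made-up addresses (the header copy loop stands in for copyHeader from the diff):

package main

import (
	"fmt"
	"net"
	"net/http"
	"strings"
)

func main() {
	// Pretend this request already passed through another proxy.
	r, _ := http.NewRequest("GET", "http://example.com/", nil)
	r.RemoteAddr = "2.2.2.2:5678"
	r.Header.Set("X-Forwarded-For", "1.1.1.1")

	outHeader := make(http.Header)
	// Copy the existing headers (stand-in for copyHeader in the diff).
	for k, v := range r.Header {
		outHeader[k] = append([]string(nil), v...)
	}

	if clientIP, _, err := net.SplitHostPort(r.RemoteAddr); err == nil {
		if prior, ok := outHeader["X-Forwarded-For"]; ok {
			clientIP = strings.Join(prior, ", ") + ", " + clientIP
		}
		outHeader.Set("X-Forwarded-For", clientIP)
	}

	fmt.Println(outHeader.Get("X-Forwarded-For")) // 1.1.1.1, 2.2.2.2
}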
func createRespHeaderUpdateFn(rules http.Header, replacer middleware.Replacer) respUpdateFn {
|
||||
return func(resp *http.Response) {
|
||||
newHeaders := createHeadersByRules(rules, resp.Header, replacer)
|
||||
for h, v := range newHeaders {
|
||||
resp.Header[h] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createHeadersByRules(rules http.Header, base http.Header, repl middleware.Replacer) http.Header {
|
||||
newHeaders := make(http.Header)
|
||||
for header, values := range rules {
|
||||
if strings.HasPrefix(header, "+") {
|
||||
header = strings.TrimLeft(header, "+")
|
||||
add(newHeaders, header, base[header])
|
||||
applyEach(values, repl.Replace)
|
||||
add(newHeaders, header, values)
|
||||
} else if strings.HasPrefix(header, "-") {
|
||||
base.Del(strings.TrimLeft(header, "-"))
|
||||
} else if _, ok := base[header]; ok {
|
||||
applyEach(values, repl.Replace)
|
||||
for _, v := range values {
|
||||
newHeaders.Set(header, v)
|
||||
}
|
||||
} else {
|
||||
applyEach(values, repl.Replace)
|
||||
add(newHeaders, header, values)
|
||||
add(newHeaders, header, base[header])
|
||||
}
|
||||
}
|
||||
return newHeaders
|
||||
}
|
||||
|
||||
func applyEach(values []string, mapFn func(string) string) {
|
||||
for i, v := range values {
|
||||
values[i] = mapFn(v)
|
||||
}
|
||||
}
|
||||
|
||||
func add(base http.Header, header string, values []string) {
|
||||
for _, v := range values {
|
||||
base.Add(header, v)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -348,6 +348,141 @@ func TestUnixSocketProxyPaths(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestUpstreamHeadersUpdate(t *testing.T) {
	log.SetOutput(ioutil.Discard)
	defer log.SetOutput(os.Stderr)

	var actualHeaders http.Header
	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("Hello, client"))
		actualHeaders = r.Header
	}))
	defer backend.Close()

	upstream := newFakeUpstream(backend.URL, false)
	upstream.host.UpstreamHeaders = http.Header{
		"Connection": {"{>Connection}"},
		"Upgrade":    {"{>Upgrade}"},
		"+Merge-Me":  {"Merge-Value"},
		"+Add-Me":    {"Add-Value"},
		"-Remove-Me": {""},
		"Replace-Me": {"{hostname}"},
	}
	// set up proxy
	p := &Proxy{
		Upstreams: []Upstream{upstream},
	}

	// create request and response recorder
	r, err := http.NewRequest("GET", "/", nil)
	if err != nil {
		t.Fatalf("Failed to create request: %v", err)
	}
	w := httptest.NewRecorder()

	// add initial headers
	r.Header.Add("Merge-Me", "Initial")
	r.Header.Add("Remove-Me", "Remove-Value")
	r.Header.Add("Replace-Me", "Replace-Value")

	p.ServeHTTP(w, r)

	replacer := middleware.NewReplacer(r, nil, "")

	headerKey := "Merge-Me"
	values, ok := actualHeaders[headerKey]
	if !ok {
		t.Errorf("Request sent to upstream backend does not contain expected %v header. Expected header to be added", headerKey)
	} else if len(values) < 2 || values[0] != "Initial" || values[1] != "Merge-Value" {
		t.Errorf("Values for proxy header `+Merge-Me` should be merged. Got %v", values)
	}

	headerKey = "Add-Me"
	if _, ok := actualHeaders[headerKey]; !ok {
		t.Errorf("Request sent to upstream backend does not contain expected %v header", headerKey)
	}

	headerKey = "Remove-Me"
	if _, ok := actualHeaders[headerKey]; ok {
		t.Errorf("Request sent to upstream backend should not contain %v header", headerKey)
	}

	headerKey = "Replace-Me"
	headerValue := replacer.Replace("{hostname}")
	value, ok := actualHeaders[headerKey]
	if !ok {
		t.Errorf("Request sent to upstream backend should not remove %v header", headerKey)
	} else if len(value) == 0 || headerValue != value[0] {
		t.Errorf("Request sent to upstream backend should replace value of %v header with %v. Instead value was %v", headerKey, headerValue, value)
	}
}

func TestDownstreamHeadersUpdate(t *testing.T) {
	log.SetOutput(ioutil.Discard)
	defer log.SetOutput(os.Stderr)

	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Add("Merge-Me", "Initial")
		w.Header().Add("Remove-Me", "Remove-Value")
		w.Header().Add("Replace-Me", "Replace-Value")
		w.Write([]byte("Hello, client"))
	}))
	defer backend.Close()

	upstream := newFakeUpstream(backend.URL, false)
	upstream.host.DownstreamHeaders = http.Header{
		"+Merge-Me":  {"Merge-Value"},
		"+Add-Me":    {"Add-Value"},
		"-Remove-Me": {""},
		"Replace-Me": {"{hostname}"},
	}
	// set up proxy
	p := &Proxy{
		Upstreams: []Upstream{upstream},
	}

	// create request and response recorder
	r, err := http.NewRequest("GET", "/", nil)
	if err != nil {
		t.Fatalf("Failed to create request: %v", err)
	}
	w := httptest.NewRecorder()

	p.ServeHTTP(w, r)

	replacer := middleware.NewReplacer(r, nil, "")
	actualHeaders := w.Header()

	headerKey := "Merge-Me"
	values, ok := actualHeaders[headerKey]
	if !ok {
		t.Errorf("Downstream response does not contain expected %v header. Expected header to be added", headerKey)
	} else if len(values) < 2 || values[0] != "Initial" || values[1] != "Merge-Value" {
		t.Errorf("Values for header `+Merge-Me` should be merged. Got %v", values)
	}

	headerKey = "Add-Me"
	if _, ok := actualHeaders[headerKey]; !ok {
		t.Errorf("Downstream response does not contain expected %v header", headerKey)
	}

	headerKey = "Remove-Me"
	if _, ok := actualHeaders[headerKey]; ok {
		t.Errorf("Downstream response should not contain %v header received from upstream", headerKey)
	}

	headerKey = "Replace-Me"
	headerValue := replacer.Replace("{hostname}")
	value, ok := actualHeaders[headerKey]
	if !ok {
		t.Errorf("Downstream response should contain %v header and not remove it", headerKey)
	} else if len(value) == 0 || headerValue != value[0] {
		t.Errorf("Downstream response should have header %v with value %v. Instead value was %v", headerKey, headerValue, value)
	}
}

func newFakeUpstream(name string, insecure bool) *fakeUpstream {
	uri, _ := url.Parse(name)
	u := &fakeUpstream{

@@ -410,7 +545,7 @@ func (u *fakeWsUpstream) Select() *UpstreamHost {
	return &UpstreamHost{
		Name:         u.name,
		ReverseProxy: NewSingleHostReverseProxy(uri, u.without),
		ExtraHeaders: http.Header{
		UpstreamHeaders: http.Header{
			"Connection": {"{>Connection}"},
			"Upgrade":    {"{>Upgrade}"}},
	}

@@ -154,57 +154,25 @@ var InsecureTransport http.RoundTripper = &http.Transport{
	TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}

func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request, extraHeaders http.Header) error {
type respUpdateFn func(resp *http.Response)

func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, outreq *http.Request, respUpdateFn respUpdateFn) error {
	transport := p.Transport
	if transport == nil {
		transport = http.DefaultTransport
	}

	outreq := new(http.Request)
	*outreq = *req // includes shallow copies of maps, but okay

	p.Director(outreq)
	outreq.Proto = "HTTP/1.1"
	outreq.ProtoMajor = 1
	outreq.ProtoMinor = 1
	outreq.Close = false

	// Remove hop-by-hop headers to the backend. Especially
	// important is "Connection" because we want a persistent
	// connection, regardless of what the client sent to us. This
	// is modifying the same underlying map from req (shallow
	// copied above) so we only copy it if necessary.
	copiedHeaders := false
	for _, h := range hopHeaders {
		if outreq.Header.Get(h) != "" {
			if !copiedHeaders {
				outreq.Header = make(http.Header)
				copyHeader(outreq.Header, req.Header)
				copiedHeaders = true
			}
			outreq.Header.Del(h)
		}
	}

	if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
		// If we aren't the first proxy retain prior
		// X-Forwarded-For information as a comma+space
		// separated list and fold multiple headers into one.
		if prior, ok := outreq.Header["X-Forwarded-For"]; ok {
			clientIP = strings.Join(prior, ", ") + ", " + clientIP
		}
		outreq.Header.Set("X-Forwarded-For", clientIP)
	}

	if extraHeaders != nil {
		for k, v := range extraHeaders {
			outreq.Header[k] = v
		}
	}

	res, err := transport.RoundTrip(outreq)
	if err != nil {
		return err
	} else if respUpdateFn != nil {
		respUpdateFn(res)
	}

	if res.StatusCode == http.StatusSwitchingProtocols && strings.ToLower(res.Header.Get("Upgrade")) == "websocket" {

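// Editor's illustration, not part of this change set: how a caller is now
// expected to pass a response-update callback instead of extra headers. The
// helper names come from proxy.go earlier in this diff; the rule values are
// hypothetical.
func respUpdateWiringSketch(rp *ReverseProxy, w http.ResponseWriter, outreq *http.Request, repl middleware.Replacer) error {
	downstreamRules := http.Header{
		"+Via":    {"caddy"},
		"-Server": {""},
	}
	updateFn := createRespHeaderUpdateFn(downstreamRules, repl)
	return rp.ServeHTTP(w, outreq, updateFn)
}
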
@@ -237,9 +205,7 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request, extr
	for _, h := range hopHeaders {
		res.Header.Del(h)
	}

	copyHeader(rw.Header(), res.Header)

	rw.WriteHeader(res.StatusCode)
	p.copyResponse(rw, res.Body)
}

@@ -260,7 +226,6 @@ func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) {
			dst = mlw
		}
	}

	io.Copy(dst, src)
}

@@ -20,7 +20,8 @@ var (

type staticUpstream struct {
	from               string
	proxyHeaders       http.Header
	upstreamHeaders    http.Header
	downstreamHeaders  http.Header
	Hosts              HostPool
	Policy             Policy
	insecureSkipVerify bool

@@ -42,13 +43,14 @@ func NewStaticUpstreams(c parse.Dispenser) ([]Upstream, error) {
	var upstreams []Upstream
	for c.Next() {
		upstream := &staticUpstream{
			from:         "",
			proxyHeaders: make(http.Header),
			Hosts:        nil,
			Policy:       &Random{},
			FailTimeout:  10 * time.Second,
			MaxFails:     1,
			MaxConns:     0,
			from:              "",
			upstreamHeaders:   make(http.Header),
			downstreamHeaders: make(http.Header),
			Hosts:             nil,
			Policy:            &Random{},
			FailTimeout:       10 * time.Second,
			MaxFails:          1,
			MaxConns:          0,
		}

		if !c.Args(&upstream.from) {

@@ -97,12 +99,13 @@ func (u *staticUpstream) NewHost(host string) (*UpstreamHost, error) {
		host = "http://" + host
	}
	uh := &UpstreamHost{
		Name:         host,
		Conns:        0,
		Fails:        0,
		FailTimeout:  u.FailTimeout,
		Unhealthy:    false,
		ExtraHeaders: u.proxyHeaders,
		Name:              host,
		Conns:             0,
		Fails:             0,
		FailTimeout:       u.FailTimeout,
		Unhealthy:         false,
		UpstreamHeaders:   u.upstreamHeaders,
		DownstreamHeaders: u.downstreamHeaders,
		CheckDown: func(u *staticUpstream) UpstreamHostDownFunc {
			return func(uh *UpstreamHost) bool {
				if uh.Unhealthy {

@@ -182,15 +185,23 @@ func parseBlock(c *parse.Dispenser, u *staticUpstream) error {
			}
			u.HealthCheck.Interval = dur
		}
	case "header_upstream":
		fallthrough
	case "proxy_header":
		var header, value string
		if !c.Args(&header, &value) {
			return c.ArgErr()
		}
		u.proxyHeaders.Add(header, value)
		u.upstreamHeaders.Add(header, value)
	case "header_downstream":
		var header, value string
		if !c.Args(&header, &value) {
			return c.ArgErr()
		}
		u.downstreamHeaders.Add(header, value)
	case "websocket":
		u.proxyHeaders.Add("Connection", "{>Connection}")
		u.proxyHeaders.Add("Upgrade", "{>Upgrade}")
		u.upstreamHeaders.Add("Connection", "{>Connection}")
		u.upstreamHeaders.Add("Upgrade", "{>Upgrade}")
	case "without":
		if !c.NextArg() {
			return c.ArgErr()

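// Editor's illustration, not part of this change set: the effect of the new
// directives on a staticUpstream, written out by hand rather than parsed
// from a Caddyfile. The header names and values are hypothetical.
func directiveEffectSketch(u *staticUpstream) {
	// "header_upstream X-Real-IP {remote}" (and the older "proxy_header"
	// spelling, which falls through to the same case) adds an upstream rule:
	u.upstreamHeaders.Add("X-Real-IP", "{remote}")

	// "header_downstream Server my-proxy" adds a downstream rule:
	u.downstreamHeaders.Add("Server", "my-proxy")

	// "websocket" is shorthand for two upstream header rules:
	u.upstreamHeaders.Add("Connection", "{>Connection}")
	u.upstreamHeaders.Add("Upgrade", "{>Upgrade}")
}
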
@@ -4,6 +4,7 @@ import (
	"net"
	"net/http"
	"net/url"
	"os"
	"path"
	"strconv"
	"strings"

@@ -52,6 +53,13 @@ func NewReplacer(r *http.Request, rr *ResponseRecorder, emptyValue string) Repla
			}
			return "http"
		}(),
		"{hostname}": func() string {
			name, err := os.Hostname()
			if err != nil {
				return ""
			}
			return name
		}(),
		"{host}":         r.Host,
		"{path}":         r.URL.Path,
		"{path_escaped}": url.QueryEscape(r.URL.Path),

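// Editor's note (illustrative, not part of this change): {hostname} resolves
// to the machine's os.Hostname(), whereas {host} is the request's Host
// header. The request and machine name below are hypothetical.
func hostnamePlaceholderSketch(r *http.Request) string {
	repl := NewReplacer(r, nil, "")
	// For a request to http://example.com/ served from a machine named
	// "web-1", this returns "example.com served from web-1".
	return repl.Replace("{host} served from {hostname}")
}
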
@@ -3,6 +3,7 @@ package middleware
import (
	"net/http"
	"net/http/httptest"
	"os"
	"strings"
	"testing"
)

@@ -53,6 +54,14 @@ func TestReplace(t *testing.T) {
	request.Header.Set("ShorterVal", "1")
	repl := NewReplacer(request, recordRequest, "-")

	hostname, err := os.Hostname()
	if err != nil {
		t.Fatal("Failed to determine hostname\n")
	}
	if expected, actual := "This hostname is "+hostname, repl.Replace("This hostname is {hostname}"); expected != actual {
		t.Errorf("{hostname} replacement: expected '%s', got '%s'", expected, actual)
	}

	if expected, actual := "This host is localhost.", repl.Replace("This host is {host}."); expected != actual {
		t.Errorf("{host} replacement: expected '%s', got '%s'", expected, actual)
	}

@@ -14,7 +14,7 @@ import (
	"net"
	"net/http"
	"os"
	"path/filepath"
	"path"
	"runtime"
	"strings"
	"sync"

@@ -336,11 +336,18 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Use URL.RawPath if you need the original, "raw" URL.Path in your middleware.
	// Collapse any ./ ../ /// madness here instead of doing that in every plugin.
	if r.URL.Path != "/" {
		path := filepath.Clean(r.URL.Path)
		if !strings.HasPrefix(path, "/") {
			path = "/" + path
		cleanedPath := path.Clean(r.URL.Path)
		if cleanedPath == "." {
			r.URL.Path = "/"
		} else {
			if !strings.HasPrefix(cleanedPath, "/") {
				cleanedPath = "/" + cleanedPath
			}
			if strings.HasSuffix(r.URL.Path, "/") && !strings.HasSuffix(cleanedPath, "/") {
				cleanedPath = cleanedPath + "/"
			}
			r.URL.Path = cleanedPath
		}
		r.URL.Path = path
	}

	// Execute the optional request callback if it exists and it's not disabled

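// cleanPathSketch is an editor's illustration (not part of this change) of
// the new cleaning behaviour: dot segments and duplicate slashes collapse, a
// trailing slash survives path.Clean, and a "." result becomes "/". For
// example: "/a/b/../c/" -> "/a/c/", "//foo/./bar" -> "/foo/bar", "" -> "/".
func cleanPathSketch(p string) string {
	cleaned := path.Clean(p)
	if cleaned == "." {
		return "/"
	}
	if !strings.HasPrefix(cleaned, "/") {
		cleaned = "/" + cleaned
	}
	if strings.HasSuffix(p, "/") && !strings.HasSuffix(cleaned, "/") {
		cleaned += "/"
	}
	return cleaned
}
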
@@ -438,6 +445,7 @@ func standaloneTLSTicketKeyRotation(c *tls.Config, timer *time.Ticker, exitChan
		c.SessionTicketsDisabled = true // bail if we don't have the entropy for the first one
		return
	}
	c.SessionTicketKey = keys[0] // SetSessionTicketKeys doesn't set a 'tls.keysAlreadySet'
	c.SetSessionTicketKeys(setSessionTicketKeysTestHook(keys))

	for {