diff --git a/.travis.yml b/.travis.yml
index c121a7025..7f45cbed7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,12 +1,9 @@
 language: go
 
 go:
-  - 1.6.2
+  - 1.6.3
   - tip
 
-env:
-  - CGO_ENABLED=0
-
 before_install:
   # Decrypts a script that installs an authenticated cookie
   # for git to use when cloning from googlesource.com.
@@ -24,7 +21,7 @@ script:
   - diff <(echo -n) <(gofmt -s -d .)
   - ineffassign .
   - go vet ./...
-  - go test ./...
+  - go test -race ./...
 
 after_script:
   - golint ./...
diff --git a/appveyor.yml b/appveyor.yml
index 436881033..ba9131791 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -6,12 +6,11 @@ clone_folder: c:\gopath\src\github.com\mholt\caddy
 
 environment:
   GOPATH: c:\gopath
-  CGO_ENABLED: 0
 
 install:
   - rmdir c:\go /s /q
-  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.6.2.windows-amd64.zip
-  - 7z x go1.6.2.windows-amd64.zip -y -oC:\ > NUL
+  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.6.3.windows-amd64.zip
+  - 7z x go1.6.3.windows-amd64.zip -y -oC:\ > NUL
   - go version
   - go env
   - go get -t ./...
@@ -23,7 +22,7 @@ build: off
 
 test_script:
   - go vet ./...
-  - go test ./...
+  - go test -race ./...
   - ineffassign .
 
 after_test:
diff --git a/caddy.go b/caddy.go
index 401c6fc6f..1b1caf54d 100644
--- a/caddy.go
+++ b/caddy.go
@@ -21,8 +21,6 @@ import (
 	"log"
 	"net"
 	"os"
-	"os/exec"
-	"runtime"
 	"strconv"
 	"strings"
 	"sync"
@@ -725,24 +723,6 @@ func IsLoopback(addr string) bool {
 		strings.HasPrefix(host, "127.")
 }
 
-// checkFdlimit issues a warning if the OS limit for
-// max file descriptors is below a recommended minimum.
-func checkFdlimit() {
-	const min = 8192
-
-	// Warn if ulimit is too low for production sites
-	if runtime.GOOS == "linux" || runtime.GOOS == "darwin" {
-		out, err := exec.Command("sh", "-c", "ulimit -n").Output() // use sh because ulimit isn't in Linux $PATH
-		if err == nil {
-			lim, err := strconv.Atoi(string(bytes.TrimSpace(out)))
-			if err == nil && lim < min {
-				fmt.Printf("WARNING: File descriptor limit %d is too low for production servers. "+
-					"At least %d is recommended. Fix with \"ulimit -n %d\".\n", lim, min, min)
-			}
-		}
-	}
-}
-
 // Upgrade re-launches the process, preserving the listeners
 // for a graceful restart. It does NOT load new configuration;
 // it only starts the process anew with a fresh binary.
diff --git a/caddyhttp/proxy/policy.go b/caddyhttp/proxy/policy.go
index 3a11b3ce1..1737c6c58 100644
--- a/caddyhttp/proxy/policy.go
+++ b/caddyhttp/proxy/policy.go
@@ -1,8 +1,11 @@
 package proxy
 
 import (
+	"hash/fnv"
 	"math"
 	"math/rand"
+	"net"
+	"net/http"
 	"sync"
 )
 
@@ -11,20 +14,21 @@ type HostPool []*UpstreamHost
 
 // Policy decides how a host will be selected from a pool.
 type Policy interface {
-	Select(pool HostPool) *UpstreamHost
+	Select(pool HostPool, r *http.Request) *UpstreamHost
 }
 
 func init() {
 	RegisterPolicy("random", func() Policy { return &Random{} })
 	RegisterPolicy("least_conn", func() Policy { return &LeastConn{} })
 	RegisterPolicy("round_robin", func() Policy { return &RoundRobin{} })
+	RegisterPolicy("ip_hash", func() Policy { return &IPHash{} })
 }
 
 // Random is a policy that selects up hosts from a pool at random.
 type Random struct{}
 
 // Select selects an up host at random from the specified pool.
-func (r *Random) Select(pool HostPool) *UpstreamHost {
+func (r *Random) Select(pool HostPool, request *http.Request) *UpstreamHost {
 
 	// Because the number of available hosts isn't known
 	// up front, the host is selected via reservoir sampling
@@ -53,7 +57,7 @@ type LeastConn struct{}
 // Select selects the up host with the least number of connections in the
 // pool.  If more than one host has the same least number of connections,
 // one of the hosts is chosen at random.
-func (r *LeastConn) Select(pool HostPool) *UpstreamHost {
+func (r *LeastConn) Select(pool HostPool, request *http.Request) *UpstreamHost {
 	var bestHost *UpstreamHost
 	count := 0
 	leastConn := int64(math.MaxInt64)
@@ -86,7 +90,7 @@ type RoundRobin struct {
 }
 
 // Select selects an up host from the pool using a round robin ordering scheme.
-func (r *RoundRobin) Select(pool HostPool) *UpstreamHost {
+func (r *RoundRobin) Select(pool HostPool, request *http.Request) *UpstreamHost {
 	poolLen := uint32(len(pool))
 	r.mutex.Lock()
 	defer r.mutex.Unlock()
@@ -100,3 +104,35 @@ func (r *RoundRobin) Select(pool HostPool) *UpstreamHost {
 	}
 	return nil
 }
+
+// IPHash is a policy that selects hosts based on hashing the request's client IP.
+type IPHash struct{}
+
+func hash(s string) uint32 {
+	h := fnv.New32a()
+	h.Write([]byte(s))
+	return h.Sum32()
+}
+
+// Select selects an up host from the pool based on a hash of the request's client IP, falling back to other hosts if that one is unavailable.
+func (r *IPHash) Select(pool HostPool, request *http.Request) *UpstreamHost {
+	poolLen := uint32(len(pool))
+	clientIP, _, err := net.SplitHostPort(request.RemoteAddr)
+	if err != nil {
+		clientIP = request.RemoteAddr
+	}
+	hash := hash(clientIP)
+	for {
+		if poolLen == 0 {
+			break
+		}
+		index := hash % poolLen
+		host := pool[index]
+		if host.Available() {
+			return host
+		}
+		pool = append(pool[:index], pool[index+1:]...)
+		poolLen--
+	}
+	return nil
+}
diff --git a/caddyhttp/proxy/policy_test.go b/caddyhttp/proxy/policy_test.go
index 8da1cadb8..a736d8cac 100644
--- a/caddyhttp/proxy/policy_test.go
+++ b/caddyhttp/proxy/policy_test.go
@@ -21,7 +21,7 @@ func TestMain(m *testing.M) {
 
 type customPolicy struct{}
 
-func (r *customPolicy) Select(pool HostPool) *UpstreamHost {
+func (r *customPolicy) Select(pool HostPool, request *http.Request) *UpstreamHost {
 	return pool[0]
 }
 
@@ -43,37 +43,39 @@ func testPool() HostPool {
 func TestRoundRobinPolicy(t *testing.T) {
 	pool := testPool()
 	rrPolicy := &RoundRobin{}
-	h := rrPolicy.Select(pool)
+	request, _ := http.NewRequest("GET", "/", nil)
+
+	h := rrPolicy.Select(pool, request)
 	// First selected host is 1, because counter starts at 0
 	// and increments before host is selected
 	if h != pool[1] {
 		t.Error("Expected first round robin host to be second host in the pool.")
 	}
-	h = rrPolicy.Select(pool)
+	h = rrPolicy.Select(pool, request)
 	if h != pool[2] {
 		t.Error("Expected second round robin host to be third host in the pool.")
 	}
-	h = rrPolicy.Select(pool)
+	h = rrPolicy.Select(pool, request)
 	if h != pool[0] {
 		t.Error("Expected third round robin host to be first host in the pool.")
 	}
 	// mark host as down
 	pool[1].Unhealthy = true
-	h = rrPolicy.Select(pool)
+	h = rrPolicy.Select(pool, request)
 	if h != pool[2] {
 		t.Error("Expected to skip down host.")
 	}
 	// mark host as up
 	pool[1].Unhealthy = false
 
-	h = rrPolicy.Select(pool)
+	h = rrPolicy.Select(pool, request)
 	if h == pool[2] {
 		t.Error("Expected to balance evenly among healthy hosts")
 	}
 	// mark host as full
 	pool[1].Conns = 1
 	pool[1].MaxConns = 1
-	h = rrPolicy.Select(pool)
+	h = rrPolicy.Select(pool, request)
 	if h != pool[2] {
 		t.Error("Expected to skip full host.")
 	}
@@ -82,14 +84,16 @@ func TestRoundRobinPolicy(t *testing.T) {
 func TestLeastConnPolicy(t *testing.T) {
 	pool := testPool()
 	lcPolicy := &LeastConn{}
+	request, _ := http.NewRequest("GET", "/", nil)
+
 	pool[0].Conns = 10
 	pool[1].Conns = 10
-	h := lcPolicy.Select(pool)
+	h := lcPolicy.Select(pool, request)
 	if h != pool[2] {
 		t.Error("Expected least connection host to be third host.")
 	}
 	pool[2].Conns = 100
-	h = lcPolicy.Select(pool)
+	h = lcPolicy.Select(pool, request)
 	if h != pool[0] && h != pool[1] {
 		t.Error("Expected least connection host to be first or second host.")
 	}
@@ -98,8 +102,127 @@ func TestLeastConnPolicy(t *testing.T) {
 func TestCustomPolicy(t *testing.T) {
 	pool := testPool()
 	customPolicy := &customPolicy{}
-	h := customPolicy.Select(pool)
+	request, _ := http.NewRequest("GET", "/", nil)
+
+	h := customPolicy.Select(pool, request)
 	if h != pool[0] {
 		t.Error("Expected custom policy host to be the first host.")
 	}
 }
+
+func TestIPHashPolicy(t *testing.T) {
+	pool := testPool()
+	ipHash := &IPHash{}
+	request, _ := http.NewRequest("GET", "/", nil)
+	// We should be able to predict where every request is routed.
+	request.RemoteAddr = "172.0.0.1:80"
+	h := ipHash.Select(pool, request)
+	if h != pool[1] {
+		t.Error("Expected ip hash policy host to be the second host.")
+	}
+	request.RemoteAddr = "172.0.0.2:80"
+	h = ipHash.Select(pool, request)
+	if h != pool[1] {
+		t.Error("Expected ip hash policy host to be the second host.")
+	}
+	request.RemoteAddr = "172.0.0.3:80"
+	h = ipHash.Select(pool, request)
+	if h != pool[2] {
+		t.Error("Expected ip hash policy host to be the third host.")
+	}
+	request.RemoteAddr = "172.0.0.4:80"
+	h = ipHash.Select(pool, request)
+	if h != pool[1] {
+		t.Error("Expected ip hash policy host to be the second host.")
+	}
+
+	// We should get the same results without a port.
+	request.RemoteAddr = "172.0.0.1"
+	h = ipHash.Select(pool, request)
+	if h != pool[1] {
+		t.Error("Expected ip hash policy host to be the second host.")
+	}
+	request.RemoteAddr = "172.0.0.2"
+	h = ipHash.Select(pool, request)
+	if h != pool[1] {
+		t.Error("Expected ip hash policy host to be the second host.")
+	}
+	request.RemoteAddr = "172.0.0.3"
+	h = ipHash.Select(pool, request)
+	if h != pool[2] {
+		t.Error("Expected ip hash policy host to be the third host.")
+	}
+	request.RemoteAddr = "172.0.0.4"
+	h = ipHash.Select(pool, request)
+	if h != pool[1] {
+		t.Error("Expected ip hash policy host to be the second host.")
+	}
+
+	// We should get a healthy host if the original host is unhealthy and a
+	// healthy host is available
+	request.RemoteAddr = "172.0.0.1"
+	pool[1].Unhealthy = true
+	h = ipHash.Select(pool, request)
+	if h != pool[0] {
+		t.Error("Expected ip hash policy host to be the first host.")
+	}
+
+	request.RemoteAddr = "172.0.0.2"
+	h = ipHash.Select(pool, request)
+	if h != pool[1] {
+		t.Error("Expected ip hash policy host to be the second host.")
+	}
+	pool[1].Unhealthy = false
+
+	request.RemoteAddr = "172.0.0.3"
+	pool[2].Unhealthy = true
+	h = ipHash.Select(pool, request)
+	if h != pool[0] {
+		t.Error("Expected ip hash policy host to be the first host.")
+	}
+	request.RemoteAddr = "172.0.0.4"
+	h = ipHash.Select(pool, request)
+	if h != pool[0] {
+		t.Error("Expected ip hash policy host to be the first host.")
+	}
+
+	// We should be able to resize the host pool and still be able to predict
+	// where a request will be routed with the same IP's used above
+	pool = []*UpstreamHost{
+		{
+			Name: workableServer.URL, // this should resolve (healthcheck test)
+		},
+		{
+			Name: "http://localhost:99998", // this shouldn't
+		},
+	}
+	pool = HostPool(pool)
+	request.RemoteAddr = "172.0.0.1:80"
+	h = ipHash.Select(pool, request)
+	if h != pool[0] {
+		t.Error("Expected ip hash policy host to be the first host.")
+	}
+	request.RemoteAddr = "172.0.0.2:80"
+	h = ipHash.Select(pool, request)
+	if h != pool[1] {
+		t.Error("Expected ip hash policy host to be the second host.")
+	}
+	request.RemoteAddr = "172.0.0.3:80"
+	h = ipHash.Select(pool, request)
+	if h != pool[0] {
+		t.Error("Expected ip hash policy host to be the first host.")
+	}
+	request.RemoteAddr = "172.0.0.4:80"
+	h = ipHash.Select(pool, request)
+	if h != pool[1] {
+		t.Error("Expected ip hash policy host to be the second host.")
+	}
+
+	// We should get nil when there are no healthy hosts
+	pool[0].Unhealthy = true
+	pool[1].Unhealthy = true
+	h = ipHash.Select(pool, request)
+	if h != nil {
+		t.Error("Expected ip hash policy host to be nil.")
+	}
+}
diff --git a/caddyhttp/proxy/proxy.go b/caddyhttp/proxy/proxy.go
index d1d695413..89fa21ae1 100644
--- a/caddyhttp/proxy/proxy.go
+++ b/caddyhttp/proxy/proxy.go
@@ -27,7 +27,7 @@ type Upstream interface {
 	// The path this upstream host should be routed on
 	From() string
 	// Selects an upstream host to be routed to.
-	Select() *UpstreamHost
+	Select(*http.Request) *UpstreamHost
 	// Checks if subpath is not an ignored path
 	AllowedPath(string) bool
 }
@@ -93,7 +93,7 @@ func (p Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
 	// hosts until timeout (or until we get a nil host).
 	start := time.Now()
 	for time.Now().Sub(start) < tryDuration {
-		host := upstream.Select()
+		host := upstream.Select(r)
 		if host == nil {
 			return http.StatusBadGateway, errUnreachable
 		}
diff --git a/caddyhttp/proxy/proxy_test.go b/caddyhttp/proxy/proxy_test.go
index 9b94e6ec7..5739f4ee0 100644
--- a/caddyhttp/proxy/proxy_test.go
+++ b/caddyhttp/proxy/proxy_test.go
@@ -357,9 +357,11 @@ func TestUpstreamHeadersUpdate(t *testing.T) {
 	defer log.SetOutput(os.Stderr)
 
 	var actualHeaders http.Header
+	var actualHost string
 	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		w.Write([]byte("Hello, client"))
 		actualHeaders = r.Header
+		actualHost = r.Host
 	}))
 	defer backend.Close()
 
@@ -371,6 +373,7 @@ func TestUpstreamHeadersUpdate(t *testing.T) {
 		"+Add-Me":    {"Add-Value"},
 		"-Remove-Me": {""},
 		"Replace-Me": {"{hostname}"},
+		"Host":       {"{>Host}"},
 	}
 	// set up proxy
 	p := &Proxy{
@@ -385,10 +388,12 @@ func TestUpstreamHeadersUpdate(t *testing.T) {
 	}
 	w := httptest.NewRecorder()
 
+	const expectHost = "example.com"
 	//add initial headers
 	r.Header.Add("Merge-Me", "Initial")
 	r.Header.Add("Remove-Me", "Remove-Value")
 	r.Header.Add("Replace-Me", "Replace-Value")
+	r.Header.Add("Host", expectHost)
 
 	p.ServeHTTP(w, r)
 
@@ -421,6 +426,10 @@ func TestUpstreamHeadersUpdate(t *testing.T) {
 		t.Errorf("Request sent to upstream backend should replace value of %v header with %v. Instead value was %v", headerKey, headerValue, value)
 	}
 
+	if actualHost != expectHost {
+		t.Errorf("Request sent to upstream backend should have value of Host with %s, but got %s", expectHost, actualHost)
+	}
+
 }
 
 func TestDownstreamHeadersUpdate(t *testing.T) {
@@ -736,7 +745,7 @@ func (u *fakeUpstream) From() string {
 	return u.from
 }
 
-func (u *fakeUpstream) Select() *UpstreamHost {
+func (u *fakeUpstream) Select(r *http.Request) *UpstreamHost {
 	if u.host == nil {
 		uri, err := url.Parse(u.name)
 		if err != nil {
@@ -781,7 +790,7 @@ func (u *fakeWsUpstream) From() string {
 	return "/"
 }
 
-func (u *fakeWsUpstream) Select() *UpstreamHost {
+func (u *fakeWsUpstream) Select(r *http.Request) *UpstreamHost {
 	uri, _ := url.Parse(u.name)
 	return &UpstreamHost{
 		Name:         u.name,
diff --git a/caddyhttp/proxy/upstream.go b/caddyhttp/proxy/upstream.go
index 36620995f..b69ef1a59 100644
--- a/caddyhttp/proxy/upstream.go
+++ b/caddyhttp/proxy/upstream.go
@@ -360,7 +360,7 @@ func (u *staticUpstream) HealthCheckWorker(stop chan struct{}) {
 	}
 }
 
-func (u *staticUpstream) Select() *UpstreamHost {
+func (u *staticUpstream) Select(r *http.Request) *UpstreamHost {
 	pool := u.Hosts
 	if len(pool) == 1 {
 		if !pool[0].Available() {
@@ -378,11 +378,10 @@ func (u *staticUpstream) Select() *UpstreamHost {
 	if allUnavailable {
 		return nil
 	}
-
 	if u.Policy == nil {
-		return (&Random{}).Select(pool)
+		return (&Random{}).Select(pool, r)
 	}
-	return u.Policy.Select(pool)
+	return u.Policy.Select(pool, r)
 }
 
 func (u *staticUpstream) AllowedPath(requestPath string) bool {
diff --git a/caddyhttp/proxy/upstream_test.go b/caddyhttp/proxy/upstream_test.go
index 4fb990f6d..1cf0e041a 100644
--- a/caddyhttp/proxy/upstream_test.go
+++ b/caddyhttp/proxy/upstream_test.go
@@ -1,11 +1,11 @@
 package proxy
 
 import (
+	"github.com/mholt/caddy/caddyfile"
+	"net/http"
 	"strings"
 	"testing"
 	"time"
-
-	"github.com/mholt/caddy/caddyfile"
 )
 
 func TestNewHost(t *testing.T) {
@@ -72,14 +72,15 @@ func TestSelect(t *testing.T) {
 		FailTimeout: 10 * time.Second,
 		MaxFails:    1,
 	}
+	r, _ := http.NewRequest("GET", "/", nil)
 	upstream.Hosts[0].Unhealthy = true
 	upstream.Hosts[1].Unhealthy = true
 	upstream.Hosts[2].Unhealthy = true
-	if h := upstream.Select(); h != nil {
+	if h := upstream.Select(r); h != nil {
 		t.Error("Expected select to return nil as all host are down")
 	}
 	upstream.Hosts[2].Unhealthy = false
-	if h := upstream.Select(); h == nil {
+	if h := upstream.Select(r); h == nil {
 		t.Error("Expected select to not return nil")
 	}
 	upstream.Hosts[0].Conns = 1
@@ -88,11 +89,11 @@ func TestSelect(t *testing.T) {
 	upstream.Hosts[1].MaxConns = 1
 	upstream.Hosts[2].Conns = 1
 	upstream.Hosts[2].MaxConns = 1
-	if h := upstream.Select(); h != nil {
+	if h := upstream.Select(r); h != nil {
 		t.Error("Expected select to return nil as all hosts are full")
 	}
 	upstream.Hosts[2].Conns = 0
-	if h := upstream.Select(); h == nil {
+	if h := upstream.Select(r); h == nil {
 		t.Error("Expected select to not return nil")
 	}
 }
@@ -188,6 +189,7 @@ func TestParseBlockHealthCheck(t *testing.T) {
 }
 
 func TestParseBlock(t *testing.T) {
+	r, _ := http.NewRequest("GET", "/", nil)
 	tests := []struct {
 		config string
 	}{
@@ -207,7 +209,7 @@ func TestParseBlock(t *testing.T) {
 			t.Error("Expected no error. Got:", err.Error())
 		}
 		for _, upstream := range upstreams {
-			headers := upstream.Select().UpstreamHeaders
+			headers := upstream.Select(r).UpstreamHeaders
 
 			if _, ok := headers["Host"]; !ok {
 				t.Errorf("Test %d: Could not find the Host header", i+1)
diff --git a/caddytls/crypto_test.go b/caddytls/crypto_test.go
index e4697ec46..bc96bd3f6 100644
--- a/caddytls/crypto_test.go
+++ b/caddytls/crypto_test.go
@@ -79,19 +79,22 @@ func PrivateKeyBytes(key crypto.PrivateKey) []byte {
 }
 
 func TestStandaloneTLSTicketKeyRotation(t *testing.T) {
+	type syncPkt struct {
+		ticketKey [32]byte
+		keysInUse int
+	}
+
 	tlsGovChan := make(chan struct{})
 	defer close(tlsGovChan)
-	callSync := make(chan bool, 1)
+	callSync := make(chan *syncPkt, 1)
 	defer close(callSync)
 
 	oldHook := setSessionTicketKeysTestHook
 	defer func() {
 		setSessionTicketKeysTestHook = oldHook
 	}()
-	var keysInUse [][32]byte
 	setSessionTicketKeysTestHook = func(keys [][32]byte) [][32]byte {
-		keysInUse = keys
-		callSync <- true
+		callSync <- &syncPkt{keys[0], len(keys)}
 		return keys
 	}
 
@@ -104,17 +107,17 @@ func TestStandaloneTLSTicketKeyRotation(t *testing.T) {
 	var lastTicketKey [32]byte
 	for {
 		select {
-		case <-callSync:
-			if lastTicketKey == keysInUse[0] {
+		case pkt := <-callSync:
+			if lastTicketKey == pkt.ticketKey {
 				close(tlsGovChan)
 				t.Errorf("The same TLS ticket key has been used again (not rotated): %x.", lastTicketKey)
 				return
 			}
-			lastTicketKey = keysInUse[0]
+			lastTicketKey = pkt.ticketKey
 			rounds++
-			if rounds <= NumTickets && len(keysInUse) != rounds {
+			if rounds <= NumTickets && pkt.keysInUse != rounds {
 				close(tlsGovChan)
-				t.Errorf("Expected TLS ticket keys in use: %d; Got instead: %d.", rounds, len(keysInUse))
+				t.Errorf("Expected TLS ticket keys in use: %d; Got instead: %d.", rounds, pkt.keysInUse)
 				return
 			}
 			if c.SessionTicketsDisabled == true {
diff --git a/dist/init/linux-sysvinit/caddy b/dist/init/linux-sysvinit/caddy
index 384a27257..70ddd3226 100644
--- a/dist/init/linux-sysvinit/caddy
+++ b/dist/init/linux-sysvinit/caddy
@@ -20,9 +20,9 @@ DAEMONUSER=www-data
 PIDFILE=/var/run/$NAME.pid
 LOGFILE=/var/log/$NAME.log
 CONFIGFILE=/etc/caddy/Caddyfile
-DAEMONOPTS="-agree=true --pidfile=$PIDFILE log=$LOGFILE -conf=$CONFIGFILE"
+DAEMONOPTS="-agree=true -pidfile=$PIDFILE -log=$LOGFILE -conf=$CONFIGFILE"
 
-USERBIND="$(which setcap) cap_net_bind_service=+ep"
+USERBIND="setcap cap_net_bind_service=+ep"
 STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
 
 test -x $DAEMON || exit 0
@@ -37,12 +37,13 @@ ulimit -n 8192
 start() {
     $USERBIND $DAEMON
     start-stop-daemon --start --quiet --make-pidfile --pidfile $PIDFILE \
-        --background --chuid $DAEMONUSER --exec $DAEMON -- $DAEMONOPTS
+        --background --chuid $DAEMONUSER --oknodo --exec $DAEMON -- $DAEMONOPTS
 }
 
 stop() {
-    start-stop-daemon --stop --quiet --remove-pidfile --pidfile $PIDFILE \
-        --retry=$STOP_SCHEDULE --name $NAME --oknodo
+    start-stop-daemon --stop --quiet --pidfile $PIDFILE --retry=$STOP_SCHEDULE \
+        --name $NAME --oknodo
+    rm -f $PIDFILE
 }
 
 reload() {
diff --git a/rlimit_posix.go b/rlimit_posix.go
new file mode 100644
index 000000000..e63987767
--- /dev/null
+++ b/rlimit_posix.go
@@ -0,0 +1,23 @@
+// +build !windows
+
+package caddy
+
+import (
+	"fmt"
+	"syscall"
+)
+
+// checkFdlimit issues a warning if the OS limit for
+// max file descriptors is below a recommended minimum.
+func checkFdlimit() {
+	const min = 8192
+
+	// Warn if ulimit is too low for production sites
+	rlimit := &syscall.Rlimit{}
+	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, rlimit)
+	if err == nil && rlimit.Cur < min {
+		fmt.Printf("WARNING: File descriptor limit %d is too low for production servers. "+
+			"At least %d is recommended. Fix with \"ulimit -n %d\".\n", rlimit.Cur, min, min)
+	}
+
+}
diff --git a/rlimit_windows.go b/rlimit_windows.go
new file mode 100644
index 000000000..0288102f5
--- /dev/null
+++ b/rlimit_windows.go
@@ -0,0 +1,6 @@
+package caddy
+
+// checkFdlimit issues a warning if the OS limit for
+// max file descriptors is below a recommended minimum.
+func checkFdlimit() {
+}