// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddy

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"strings"
	"sync"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"golang.org/x/crypto/ssh/terminal"
)

func init() {
	RegisterModule(StdoutWriter{})
	RegisterModule(StderrWriter{})
	RegisterModule(DiscardWriter{})
}

// Logging facilitates logging within Caddy. The default log is
// called "default" and you can customize it. You can also define
// additional logs.
//
// By default, all logs at INFO level and higher are written to
// standard error ("stderr" writer) in a human-readable format
// ("console" encoder if stdout is an interactive terminal, "json"
// encoder otherwise).
//
// All defined logs accept all log entries by default, but you
// can filter by level and module/logger names. A logger's name
// is the same as the module's name, but a module may append to
// logger names for more specificity. For example, you can
// filter logs emitted only by HTTP handlers using the name
// "http.handlers", because all HTTP handler module names have
// that prefix.
//
// Caddy logs (except the sink) are zero-allocation, so they are
// very high-performing in terms of memory and CPU time. Enabling
// sampling can further increase throughput on extremely high-load
// servers.
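//
// A sketch of what a logging config could look like in Caddy's JSON
// config (field names follow the struct tags in this file; the values
// are only illustrative and assume the standard "stderr" writer and
// "json" encoder modules):
//
//     {
//         "logging": {
//             "logs": {
//                 "default": {
//                     "exclude": ["http.log.access"]
//                 },
//                 "access": {
//                     "writer": {"output": "stderr"},
//                     "encoder": {"format": "json"},
//                     "level": "INFO",
//                     "include": ["http.log.access"]
//                 }
//             }
//         }
//     }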
type Logging struct {
	// Sink is the destination for all unstructured logs emitted
	// from Go's standard library logger. These logs are common
	// in dependencies that are not designed specifically for use
	// in Caddy. Because it is global and unstructured, the sink
	// lacks most advanced features and customizations.
	Sink *StandardLibLog `json:"sink,omitempty"`

	// Logs are your logs, keyed by an arbitrary name of your
	// choosing. The default log can be customized by defining
	// a log called "default". You can further define other logs
	// and filter what kinds of entries they accept.
	Logs map[string]*CustomLog `json:"logs,omitempty"`

	// a list of all keys for open writers; all writers
	// that are opened to provision this logging config
	// must have their keys added to this list so they
	// can be closed when cleaning up
	writerKeys []string
}

// openLogs sets up the config and opens all the configured writers.
// It closes its logs when ctx is canceled, so it should clean up
// after itself.
func (logging *Logging) openLogs(ctx Context) error {
	// make sure to deallocate resources when context is done
	ctx.OnCancel(func() {
		err := logging.closeLogs()
		if err != nil {
			Log().Error("closing logs", zap.Error(err))
		}
	})

	// set up the "sink" log first (std lib's default global logger)
	if logging.Sink != nil {
		err := logging.Sink.provision(ctx, logging)
		if err != nil {
			return fmt.Errorf("setting up sink log: %v", err)
		}
	}

	// as a special case, set up the default structured Caddy log next
	if err := logging.setupNewDefault(ctx); err != nil {
		return err
	}

	// then set up any other custom logs
	for name, l := range logging.Logs {
		// the default log is already set up
		if name == "default" {
			continue
		}

		err := l.provision(ctx, logging)
		if err != nil {
			return fmt.Errorf("setting up custom log '%s': %v", name, err)
		}

		// Any other logs that use the discard writer can be deleted
		// entirely. This avoids encoding and processing of each
		// log entry that would just be thrown away anyway. Notably,
		// we do not reach this point for the default log, which MUST
		// exist, otherwise core log emissions would panic because
		// they use the Log() function directly which expects a non-nil
		// logger. Even if we keep logs with a discard writer, they
		// have a nop core, and keeping them at all seems unnecessary.
		if _, ok := l.writerOpener.(*DiscardWriter); ok {
			delete(logging.Logs, name)
			continue
		}
	}

	return nil
}

func (logging *Logging) setupNewDefault(ctx Context) error {
	if logging.Logs == nil {
		logging.Logs = make(map[string]*CustomLog)
	}

	// extract the user-defined default log, if any
	newDefault := new(defaultCustomLog)
	if userDefault, ok := logging.Logs["default"]; ok {
		newDefault.CustomLog = userDefault
	} else {
		// if none, make one with our own default settings
		var err error
		newDefault, err = newDefaultProductionLog()
		if err != nil {
			return fmt.Errorf("setting up default Caddy log: %v", err)
		}
		logging.Logs["default"] = newDefault.CustomLog
	}

	// set up this new log
	err := newDefault.CustomLog.provision(ctx, logging)
	if err != nil {
		return fmt.Errorf("setting up default log: %v", err)
	}
	newDefault.logger = zap.New(newDefault.CustomLog.core)

	// redirect the default caddy logs
	defaultLoggerMu.Lock()
	oldDefault := defaultLogger
	defaultLogger = newDefault
	defaultLoggerMu.Unlock()

	// if the new writer is different, indicate it in the logs for convenience
	var newDefaultLogWriterKey, currentDefaultLogWriterKey string
	var newDefaultLogWriterStr, currentDefaultLogWriterStr string
	if newDefault.writerOpener != nil {
		newDefaultLogWriterKey = newDefault.writerOpener.WriterKey()
		newDefaultLogWriterStr = newDefault.writerOpener.String()
	}
	if oldDefault.writerOpener != nil {
		currentDefaultLogWriterKey = oldDefault.writerOpener.WriterKey()
		currentDefaultLogWriterStr = oldDefault.writerOpener.String()
	}
	if newDefaultLogWriterKey != currentDefaultLogWriterKey {
		oldDefault.logger.Info("redirected default logger",
			zap.String("from", currentDefaultLogWriterStr),
			zap.String("to", newDefaultLogWriterStr),
		)
	}

	return nil
}

// closeLogs cleans up resources allocated during openLogs.
// A successful call to openLogs calls this automatically
// when the context is canceled.
func (logging *Logging) closeLogs() error {
	for _, key := range logging.writerKeys {
		_, err := writers.Delete(key)
		if err != nil {
			log.Printf("[ERROR] Closing log writer %v: %v", key, err)
		}
	}
	return nil
}

// Logger returns a logger that is ready for the module to use.
func (logging *Logging) Logger(mod Module) *zap.Logger {
	modID := string(mod.CaddyModule().ID)

	var cores []zapcore.Core

	if logging != nil {
		for _, l := range logging.Logs {
			if l.matchesModule(modID) {
				if len(l.Include) == 0 && len(l.Exclude) == 0 {
					cores = append(cores, l.core)
					continue
				}
				cores = append(cores, &filteringCore{Core: l.core, cl: l})
			}
		}
	}

	multiCore := zapcore.NewTee(cores...)

	return zap.New(multiCore).Named(modID)
}
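
// For example, a module whose ID is "http.handlers.templates" receives a
// logger named "http.handlers.templates"; anything it logs is fanned out
// to every configured log whose include/exclude rules allow that name.
// A hypothetical caller might look like this (the module value is only
// illustrative):
//
//     logger := logging.Logger(myTemplatesModule)
//     logger.Debug("rendered template", zap.String("file", "index.html"))
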
// openWriter opens a writer using opener, and returns true if
// the writer is new, or false if the writer already exists.
func (logging *Logging) openWriter(opener WriterOpener) (io.WriteCloser, bool, error) {
	key := opener.WriterKey()
	writer, loaded, err := writers.LoadOrNew(key, func() (Destructor, error) {
		w, err := opener.OpenWriter()
		return writerDestructor{w}, err
	})
	if err != nil {
		return nil, false, err
	}
	logging.writerKeys = append(logging.writerKeys, key)
	return writer.(io.WriteCloser), !loaded, nil
}

// WriterOpener is a module that can open a log writer.
// It can return a human-readable string representation
// of itself so that operators can understand where
// the logs are going.
type WriterOpener interface {
	fmt.Stringer

	// WriterKey is a string that uniquely identifies this
	// writer configuration. It is not shown to humans.
	WriterKey() string

	// OpenWriter opens a log for writing. The writer
	// should be safe for concurrent use but need not
	// be synchronous.
	OpenWriter() (io.WriteCloser, error)
}
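
// exampleFileWriter is a minimal sketch of what a custom WriterOpener
// implementation could look like; it is NOT one of Caddy's real log
// writer modules, and its name and field are purely illustrative. A real
// writer module would also implement CaddyModule() and register itself
// with RegisterModule() in an init function so it can be referenced
// from a config.
type exampleFileWriter struct {
	Filename string `json:"filename,omitempty"`
}

// String returns a human-readable description of where the logs go.
func (w exampleFileWriter) String() string { return "file:" + w.Filename }

// WriterKey uniquely identifies this writer config so that identical
// configs share one underlying file handle via the writers usage pool.
func (w exampleFileWriter) WriterKey() string { return "file:" + w.Filename }

// OpenWriter opens the log file for appending, creating it if needed.
func (w exampleFileWriter) OpenWriter() (io.WriteCloser, error) {
	return os.OpenFile(w.Filename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
}
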
type writerDestructor struct {
	io.WriteCloser
}

func (wdest writerDestructor) Destruct() error {
	return wdest.Close()
}

// StandardLibLog configures the default Go standard library
// global logger in the log package. This is necessary because
// module dependencies which are not built specifically for
// Caddy will use the standard logger. This is also known as
// the "sink" logger.
type StandardLibLog struct {
	// The module that writes out log entries for the sink.
	WriterRaw json.RawMessage `json:"writer,omitempty" caddy:"namespace=caddy.logging.writers inline_key=output"`

	writer io.WriteCloser
}

func (sll *StandardLibLog) provision(ctx Context, logging *Logging) error {
	if sll.WriterRaw != nil {
		mod, err := ctx.LoadModule(sll, "WriterRaw")
		if err != nil {
			return fmt.Errorf("loading sink log writer module: %v", err)
		}
		wo := mod.(WriterOpener)

		var isNew bool
		sll.writer, isNew, err = logging.openWriter(wo)
		if err != nil {
			return fmt.Errorf("opening sink log writer %#v: %v", mod, err)
		}

		if isNew {
			log.Printf("[INFO] Redirecting sink to: %s", wo)
			log.SetOutput(sll.writer)
			log.Printf("[INFO] Redirected sink to here (%s)", wo)
		}
	}

	return nil
}

// CustomLog represents a custom logger configuration.
//
// By default, a log will emit all log entries. Some entries
// will be skipped if sampling is enabled. Further, the Include
// and Exclude parameters define which loggers (by name) are
// allowed or rejected from emitting in this log. If both Include
// and Exclude are populated, their values must be mutually
// exclusive, and longer namespaces have priority. If neither
// are populated, all logs are emitted.
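//
// For example, to receive entries from all HTTP handler modules except
// the reverse proxy, you could include "http.handlers" and exclude
// "http.handlers.reverse_proxy"; the longer (more specific) excluded
// namespace takes priority for the names it matches.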
type CustomLog struct {
	// The writer defines where log entries are emitted.
	WriterRaw json.RawMessage `json:"writer,omitempty" caddy:"namespace=caddy.logging.writers inline_key=output"`

	// The encoder is how the log entries are formatted or encoded.
	EncoderRaw json.RawMessage `json:"encoder,omitempty" caddy:"namespace=caddy.logging.encoders inline_key=format"`

	// Level is the minimum level to emit, and is inclusive.
	// Possible levels: DEBUG, INFO, WARN, ERROR, PANIC, and FATAL
	Level string `json:"level,omitempty"`

	// Sampling configures log entry sampling. If enabled,
	// only some log entries will be emitted. This is useful
	// for improving performance on extremely high-pressure
	// servers.
	Sampling *LogSampling `json:"sampling,omitempty"`

	// Include defines the names of loggers to emit in this
	// log. For example, to include only logs emitted by the
	// admin API, you would include "admin.api".
	Include []string `json:"include,omitempty"`

	// Exclude defines the names of loggers that should be
	// skipped by this log. For example, to exclude only
	// HTTP access logs, you would exclude "http.log.access".
	Exclude []string `json:"exclude,omitempty"`

	writerOpener WriterOpener
	writer       io.WriteCloser
	encoder      zapcore.Encoder
	levelEnabler zapcore.LevelEnabler
	core         zapcore.Core
}

func (cl *CustomLog) provision(ctx Context, logging *Logging) error {
	// Replace placeholder for log level
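	// (for example, a Level of "{env.LOG_LEVEL}" would be resolved from
	// the environment here; this assumes the global replacer's standard
	// env placeholders are available)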
	repl := NewReplacer()
	level, err := repl.ReplaceOrErr(cl.Level, true, true)
	if err != nil {
		return fmt.Errorf("invalid log level: %v", err)
	}
	level = strings.ToLower(level)

	// set up the log level
	switch level {
	case "debug":
		cl.levelEnabler = zapcore.DebugLevel
	case "", "info":
		cl.levelEnabler = zapcore.InfoLevel
	case "warn":
		cl.levelEnabler = zapcore.WarnLevel
	case "error":
		cl.levelEnabler = zapcore.ErrorLevel
	case "panic":
		cl.levelEnabler = zapcore.PanicLevel
	case "fatal":
		cl.levelEnabler = zapcore.FatalLevel
	default:
		return fmt.Errorf("unrecognized log level: %s", cl.Level)
	}

	// If both Include and Exclude lists are populated, then each item must
	// be a superspace or subspace of an item in the other list, because
	// populating both lists means that any given item is either a rule
	// or an exception to another rule. But if the item is not a super-
	// or sub-space of any item in the other list, it is neither a rule
	// nor an exception, and is a contradiction. Ensure, too, that the
	// sets do not intersect, which is also a contradiction.
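	// For example, including "http.handlers" while excluding "admin.api"
	// is rejected, because the exclusion could never apply to an included
	// name; including "http.handlers" while excluding
	// "http.handlers.reverse_proxy" is fine, since the exclusion carves
	// out a subspace of the inclusion.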
	if len(cl.Include) > 0 && len(cl.Exclude) > 0 {
		// prevent intersections
		for _, allow := range cl.Include {
			for _, deny := range cl.Exclude {
				if allow == deny {
					return fmt.Errorf("include and exclude must not intersect, but found %s in both lists", allow)
				}
			}
		}

		// ensure namespaces are nested
	outer:
		for _, allow := range cl.Include {
			for _, deny := range cl.Exclude {
				if strings.HasPrefix(allow+".", deny+".") ||
					strings.HasPrefix(deny+".", allow+".") {
					continue outer
				}
			}
			return fmt.Errorf("when both include and exclude are populated, each element must be a superspace or subspace of one in the other list; check '%s' in include", allow)
		}
	}

	if cl.EncoderRaw != nil {
		mod, err := ctx.LoadModule(cl, "EncoderRaw")
		if err != nil {
			return fmt.Errorf("loading log encoder module: %v", err)
		}
		cl.encoder = mod.(zapcore.Encoder)
	}
	if cl.encoder == nil {
		cl.encoder = newDefaultProductionLogEncoder()
	}

	if cl.WriterRaw != nil {
		mod, err := ctx.LoadModule(cl, "WriterRaw")
		if err != nil {
			return fmt.Errorf("loading log writer module: %v", err)
		}
		cl.writerOpener = mod.(WriterOpener)
	}
	if cl.writerOpener == nil {
		cl.writerOpener = StderrWriter{}
	}

	cl.writer, _, err = logging.openWriter(cl.writerOpener)
	if err != nil {
		return fmt.Errorf("opening log writer using %#v: %v", cl.writerOpener, err)
	}

	cl.buildCore()

	return nil
}

func (cl *CustomLog) buildCore() {
	// logs which only discard their output don't need
	// to perform encoding or any other processing steps
	// at all, so just shortcut to a nop core instead
	if _, ok := cl.writerOpener.(*DiscardWriter); ok {
		cl.core = zapcore.NewNopCore()
		return
	}
	c := zapcore.NewCore(
		cl.encoder,
		zapcore.AddSync(cl.writer),
		cl.levelEnabler,
	)
	if cl.Sampling != nil {
		if cl.Sampling.Interval == 0 {
			cl.Sampling.Interval = 1 * time.Second
		}
		if cl.Sampling.First == 0 {
			cl.Sampling.First = 100
		}
		if cl.Sampling.Thereafter == 0 {
			cl.Sampling.Thereafter = 100
		}
		c = zapcore.NewSampler(c, cl.Sampling.Interval,
			cl.Sampling.First, cl.Sampling.Thereafter)
	}
	cl.core = c
}

func (cl *CustomLog) matchesModule(moduleID string) bool {
	return cl.loggerAllowed(moduleID, true)
}

// loggerAllowed returns true if name is allowed to emit
// to cl. isModule should be true if name is the name of
// a module and you want to see if ANY of that module's
// logs would be permitted.
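//
// For example, with Include ["http.handlers"] and Exclude
// ["http.handlers.reverse_proxy"], the logger name
// "http.handlers.templates" is allowed (its longest match is in the
// include list), while "http.handlers.reverse_proxy" is rejected
// because its longest match comes from the exclude list.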
func (cl *CustomLog) loggerAllowed(name string, isModule bool) bool {
	// accept all loggers by default
	if len(cl.Include) == 0 && len(cl.Exclude) == 0 {
		return true
	}

	// append a dot so that partial names don't match
	// (i.e. we don't want "foo.b" to match "foo.bar"); we
	// will also have to append a dot when we do HasPrefix
	// below to compensate for when namespaces are equal
	if name != "" && name != "*" && name != "." {
		name += "."
	}

	var longestAccept, longestReject int

	if len(cl.Include) > 0 {
		for _, namespace := range cl.Include {
			var hasPrefix bool
			if isModule {
				hasPrefix = strings.HasPrefix(namespace+".", name)
			} else {
				hasPrefix = strings.HasPrefix(name, namespace+".")
			}
			if hasPrefix && len(namespace) > longestAccept {
				longestAccept = len(namespace)
			}
		}

		// the include list was populated, meaning that
		// a match in this list is absolutely required
		// if we are to accept the entry
		if longestAccept == 0 {
			return false
		}
	}

	if len(cl.Exclude) > 0 {
		for _, namespace := range cl.Exclude {
			// * == all logs emitted by modules
			// . == all logs emitted by core
			if (namespace == "*" && name != ".") ||
				(namespace == "." && name == ".") {
				return false
			}
			if strings.HasPrefix(name, namespace+".") &&
				len(namespace) > longestReject {
				longestReject = len(namespace)
			}
		}

		// the reject list is populated, so we have to
		// reject this entry if its match is better
		// than the best from the accept list
		if longestReject > longestAccept {
			return false
		}
	}

	return (longestAccept > longestReject) ||
		(len(cl.Include) == 0 && longestReject == 0)
}

// filteringCore filters log entries based on logger name,
// according to the rules of a CustomLog.
type filteringCore struct {
	zapcore.Core
	cl *CustomLog
}

// With properly wraps With.
func (fc *filteringCore) With(fields []zapcore.Field) zapcore.Core {
	return &filteringCore{
		Core: fc.Core.With(fields),
		cl:   fc.cl,
	}
}

// Check only allows the log entry if its logger name
// is allowed from the include/exclude rules of fc.cl.
func (fc *filteringCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
	if fc.cl.loggerAllowed(e.LoggerName, false) {
		return fc.Core.Check(e, ce)
	}
	return ce
}

// LogSampling configures log entry sampling.
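//
// For example, with an Interval of 1s, a First of 100, and a Thereafter
// of 100, the first 100 entries sharing a level and message within each
// one-second window are emitted, and after that only every 100th such
// entry is kept until the window resets.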
type LogSampling struct {
	// The window over which to conduct sampling.
	Interval time.Duration `json:"interval,omitempty"`

	// Log this many entries within a given level and
	// message for each interval.
	First int `json:"first,omitempty"`

	// If more entries with the same level and message
	// are seen during the same interval, keep one in
	// this many entries until the end of the interval.
	Thereafter int `json:"thereafter,omitempty"`
}

type (
	// StdoutWriter writes logs to standard out.
	StdoutWriter struct{}

	// StderrWriter writes logs to standard error.
	StderrWriter struct{}

	// DiscardWriter discards all writes.
	DiscardWriter struct{}
)

// CaddyModule returns the Caddy module information.
func (StdoutWriter) CaddyModule() ModuleInfo {
	return ModuleInfo{
		ID:  "caddy.logging.writers.stdout",
		New: func() Module { return new(StdoutWriter) },
	}
}

// CaddyModule returns the Caddy module information.
func (StderrWriter) CaddyModule() ModuleInfo {
	return ModuleInfo{
		ID:  "caddy.logging.writers.stderr",
		New: func() Module { return new(StderrWriter) },
	}
}

// CaddyModule returns the Caddy module information.
func (DiscardWriter) CaddyModule() ModuleInfo {
	return ModuleInfo{
		ID:  "caddy.logging.writers.discard",
		New: func() Module { return new(DiscardWriter) },
	}
}

func (StdoutWriter) String() string  { return "stdout" }
func (StderrWriter) String() string  { return "stderr" }
func (DiscardWriter) String() string { return "discard" }

// WriterKey returns a unique key representing stdout.
func (StdoutWriter) WriterKey() string { return "std:out" }

// WriterKey returns a unique key representing stderr.
func (StderrWriter) WriterKey() string { return "std:err" }

// WriterKey returns a unique key representing discard.
func (DiscardWriter) WriterKey() string { return "discard" }

// OpenWriter returns os.Stdout that can't be closed.
func (StdoutWriter) OpenWriter() (io.WriteCloser, error) {
	return notClosable{os.Stdout}, nil
}

// OpenWriter returns os.Stderr that can't be closed.
func (StderrWriter) OpenWriter() (io.WriteCloser, error) {
	return notClosable{os.Stderr}, nil
}

// OpenWriter returns ioutil.Discard that can't be closed.
func (DiscardWriter) OpenWriter() (io.WriteCloser, error) {
	return notClosable{ioutil.Discard}, nil
}

// notClosable is an io.WriteCloser that can't be closed.
type notClosable struct{ io.Writer }

func (fc notClosable) Close() error { return nil }

type defaultCustomLog struct {
	*CustomLog
	logger *zap.Logger
}

// newDefaultProductionLog configures a custom log that is
// intended for use by default if no other log is specified
// in a config. It writes to stderr, uses the console encoder
// if stdout is an interactive terminal (JSON otherwise),
// and enables INFO-level logs and higher.
func newDefaultProductionLog() (*defaultCustomLog, error) {
	cl := new(CustomLog)
	cl.writerOpener = StderrWriter{}
	var err error
	cl.writer, err = cl.writerOpener.OpenWriter()
	if err != nil {
		return nil, err
	}
	cl.encoder = newDefaultProductionLogEncoder()
	cl.levelEnabler = zapcore.InfoLevel

	cl.buildCore()

	return &defaultCustomLog{
		CustomLog: cl,
		logger:    zap.New(cl.core),
	}, nil
}

func newDefaultProductionLogEncoder() zapcore.Encoder {
	encCfg := zap.NewProductionEncoderConfig()
	if terminal.IsTerminal(int(os.Stdout.Fd())) {
		// if interactive terminal, make output more human-readable by default
		encCfg.EncodeTime = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) {
			encoder.AppendString(ts.UTC().Format("2006/01/02 15:04:05.000"))
		}
		encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
		return zapcore.NewConsoleEncoder(encCfg)
	}
	return zapcore.NewJSONEncoder(encCfg)
}

// Log returns the current default logger.
func Log() *zap.Logger {
	defaultLoggerMu.RLock()
	defer defaultLoggerMu.RUnlock()
	return defaultLogger.logger
}

var (
	defaultLogger, _ = newDefaultProductionLog()
	defaultLoggerMu  sync.RWMutex
)

var writers = NewUsagePool()

// Interface guards
var (
	_ io.WriteCloser = (*notClosable)(nil)
	_ WriterOpener   = (*StdoutWriter)(nil)
	_ WriterOpener   = (*StderrWriter)(nil)
)