
zap: how a Logger is created and how it is used

Table of contents

    • Building a Logger from Config
      • buildEncoder
      • openSinks
      • CombineWriteSyncers
    • Using the logger
      • check level
      • Write
        • EncodeEntry
      • c.out.Write
    • Sync
    • Summary

Building a Logger from Config

// Build constructs a logger from the Config and Options.
func (cfg Config) Build(opts ...Option) (*Logger, error) {
    // build the encoder from the encoder configuration
    enc, err := cfg.buildEncoder()
    if err != nil {
        return nil, err
    }
    // open OutputPaths and ErrorOutputPaths and wrap them as sinks
    sink, errSink, err := cfg.openSinks()
    if err != nil {
        return nil, err
    }
    // create the Logger
    log := New(
        zapcore.NewCore(enc, sink, cfg.Level),
        cfg.buildOptions(errSink)...,
    )
    if len(opts) > 0 {
        log = log.WithOptions(opts...)
    }
    return log, nil
}

func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core {
    return &ioCore{
        LevelEnabler: enab,
        enc:          enc,
        out:          ws,
    }
}
// New constructs a new Logger from the provided zapcore.Core and Options. If
// the passed zapcore.Core is nil, it falls back to using a no-op
// implementation.
//
// This is the most flexible way to construct a Logger, but also the most
// verbose. For typical use cases, the highly-opinionated presets
// (NewProduction, NewDevelopment, and NewExample) or the Config struct are
// more convenient.
//
// For sample code, see the package-level AdvancedConfiguration example.
func New(core zapcore.Core, options ...Option) *Logger {
    if core == nil {
        return NewNop()
    }
    // create the Logger from the arguments
    log := &Logger{
        core:        core,
        errorOutput: zapcore.Lock(os.Stderr),
        addStack:    zapcore.FatalLevel + 1,
    }
    return log.WithOptions(options...)
}
// WithOptions clones the current Logger, applies the supplied Options, and
// returns the resulting Logger. It's safe to use concurrently.
func (log *Logger) WithOptions(opts ...Option) *Logger {
    c := log.clone()
    for _, opt := range opts {
        opt.apply(c) // apply the option (e.g. hooks)
    }
    return c
}
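
Before looking at the individual helpers, here is a minimal usage sketch (not part of the zap source above; it only uses the public API, and /tmp/app.log is just an example path) showing how Config.Build is typically driven from application code:

package main

import "go.uber.org/zap"

func main() {
    // Start from the production preset and adjust it; Build then runs the path
    // shown above: buildEncoder -> openSinks -> zapcore.NewCore -> New.
    cfg := zap.NewProductionConfig()
    cfg.OutputPaths = []string{"stdout", "/tmp/app.log"} // each path becomes one sink
    cfg.ErrorOutputPaths = []string{"stderr"}            // the logger's own internal errors go here

    logger, err := cfg.Build()
    if err != nil {
        panic(err)
    }
    defer logger.Sync() // flush buffered output on exit

    logger.Info("logger built from Config", zap.String("encoding", cfg.Encoding))
}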
           

buildEncoder

buildEncoder looks up the encoder that matches Encoding. Encoders are registered in a map ahead of time, so the lookup is a simple map access.

func (cfg Config) buildEncoder() (zapcore.Encoder, error) {
    return newEncoder(cfg.Encoding, cfg.EncoderConfig)
}

func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
    _encoderMutex.RLock()
    defer _encoderMutex.RUnlock()
    if name == "" {
        return nil, errNoEncoderNameSpecified
    }
    constructor, ok := _encoderNameToConstructor[name]
    if !ok {
        return nil, fmt.Errorf("no encoder registered for name %q", name)
    }
    return constructor(encoderConfig)
}

var (
    errNoEncoderNameSpecified = errors.New("no encoder name specified")

    _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){
        "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
            return zapcore.NewConsoleEncoder(encoderConfig), nil
        },
        "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
            return zapcore.NewJSONEncoder(encoderConfig), nil
        },
    }
    _encoderMutex sync.RWMutex
)
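
The map above is populated through zap.RegisterEncoder, which is how a custom encoding name becomes resolvable by buildEncoder. A small sketch, where the name "myconsole" is purely illustrative and "go.uber.org/zap" / "go.uber.org/zap/zapcore" are assumed to be imported:

func init() {
    // After this, a Config with Encoding: "myconsole" resolves through newEncoder as shown above.
    if err := zap.RegisterEncoder("myconsole", func(c zapcore.EncoderConfig) (zapcore.Encoder, error) {
        // For illustration we simply reuse the console encoder; a real custom
        // encoder would return its own zapcore.Encoder implementation.
        return zapcore.NewConsoleEncoder(c), nil
    }); err != nil {
        panic(err) // registering an already-used name returns an error
    }
}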
           

openSinks

openSinks opens OutputPaths and ErrorOutputPaths so that log data can later be written straight to those files.

func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) {
    sink, closeOut, err := Open(cfg.OutputPaths...)
    if err != nil {
        return nil, nil, err
    }
    errSink, _, err := Open(cfg.ErrorOutputPaths...)
    if err != nil {
        closeOut()
        return nil, nil, err
    }
    return sink, errSink, nil
}

func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
    writers, close, err := open(paths)
    if err != nil {
        return nil, nil, err
    }
    // combine the writers into one WriteSyncer
    writer := CombineWriteSyncers(writers...)
    return writer, close, nil
}

func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
    writers := make([]zapcore.WriteSyncer, 0, len(paths))
    closers := make([]io.Closer, 0, len(paths))
    close := func() { // used later to close all the sinks
        for _, c := range closers {
            c.Close()
        }
    }

    var openErr error
    for _, path := range paths {
        sink, err := newSink(path)
        if err != nil {
            openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err))
            continue
        }
        writers = append(writers, sink)
        closers = append(closers, sink)
    }
    if openErr != nil {
        close()
        return writers, nil, openErr
    }

    return writers, close, nil
}

func newSink(rawURL string) (Sink, error) {
    ...
    _sinkMutex.RLock()
    // again, look up the factory in the pre-registered map
    factory, ok := _sinkFactories[u.Scheme]
    _sinkMutex.RUnlock()
    if !ok {
        return nil, &errSinkNotFound{u.Scheme}
    }
    return factory(u)
}

var (
    _sinkMutex     sync.RWMutex
    _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme
)

func init() {
    resetSinkRegistry()
}

func resetSinkRegistry() {
    _sinkMutex.Lock()
    defer _sinkMutex.Unlock()

    _sinkFactories = map[string]func(*url.URL) (Sink, error){
        schemeFile: newFileSink,
    }
}

// handling of concrete files (including network files)
func newFileSink(u *url.URL) (Sink, error) {
    ...
    switch u.Path {
    case "stdout":
        return nopCloserSink{os.Stdout}, nil
    case "stderr":
        return nopCloserSink{os.Stderr}, nil
    }
    // Open the file directly. *os.File already has Write, Close, and Sync methods, so it satisfies the Sink interface. The Sink design is quite elegant: it reuses the ready-made File, and later Sync calls go straight down to the underlying implementation.
    return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
}
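
The _sinkFactories map can likewise be extended with zap.RegisterSink, so OutputPaths may contain URLs with custom schemes. A hedged sketch: the "memory" scheme and the memorySink type are made up for illustration, and "bytes", "net/url" and "go.uber.org/zap" are assumed to be imported.

// A made-up in-memory sink: bytes.Buffer provides Write, and we add the
// Sync and Close methods required by the Sink interface.
type memorySink struct{ bytes.Buffer }

func (*memorySink) Sync() error  { return nil }
func (*memorySink) Close() error { return nil }

func init() {
    // After this, OutputPaths: []string{"memory://"} resolves via newSink above.
    if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
        return &memorySink{}, nil
    }); err != nil {
        panic(err)
    }
}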
           

CombineWriteSyncers

The writers are first combined into a single WriteSyncer (a multiWriteSyncer), then wrapped in a lockedWriteSyncer, and finally stored in the out field of ioCore, which is what later Write calls go through.

func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
    if len(writers) == 0 {
        return zapcore.AddSync(ioutil.Discard)
    }
    return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
}

// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes
// and sync calls, much like io.MultiWriter.
func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer {
    if len(ws) == 1 {
        return ws[0]
    }
    // Copy to protect against https://github.com/golang/go/issues/7809
    return multiWriteSyncer(append([]WriteSyncer(nil), ws...))
}

type lockedWriteSyncer struct {
    sync.Mutex
    ws WriteSyncer
}

// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In
// particular, *os.Files must be locked before use.
func Lock(ws WriteSyncer) WriteSyncer {
    if _, ok := ws.(*lockedWriteSyncer); ok {
        // no need to layer on another lock
        return ws
    }
    return &lockedWriteSyncer{ws: ws}
}
           

Let's lay out how a file is wrapped step by step; it will make the later logic easier to follow.

File->Sink->[]WriteSyncer->WriteSyncer->multiWriteSyncer->lockedWriteSyncer->ioCore.out->Logger.core
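
The same wrapping chain can be assembled by hand with zap's public helpers. A small sketch, assuming "os", "go.uber.org/zap" and "go.uber.org/zap/zapcore" are imported; the function name and file path are only illustrative.

func buildCoreByHand() *zap.Logger {
    file, err := os.Create("/tmp/app.log")
    if err != nil {
        panic(err)
    }
    // []WriteSyncer -> multiWriteSyncer -> lockedWriteSyncer, exactly as in the chain above.
    ws := zap.CombineWriteSyncers(zapcore.AddSync(os.Stdout), zapcore.AddSync(file))
    enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
    core := zapcore.NewCore(enc, ws, zapcore.InfoLevel)
    return zap.New(core) // ws ends up as ioCore.out inside Logger.core
}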
           

Using the logger

Take Info as an example; the other level methods work the same way.

// Info logs a message at InfoLevel. The message includes any fields passed
// at the log site, as well as any fields accumulated on the logger.
func (log *Logger) Info(msg string, fields ...Field) {
    if ce := log.check(InfoLevel, msg); ce != nil {
        ce.Write(fields...)
    }
}
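
The check/Write split used by Info is also available directly through the public API. A small sketch of the equivalent explicit form (logger is the *zap.Logger built earlier):

if ce := logger.Check(zap.InfoLevel, "request handled"); ce != nil {
    // Fields are only assembled and written when the level check passes.
    ce.Write(zap.Int("status", 200))
}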
           

check level

func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
    // check must always be called directly by a method in the Logger interface
    // (e.g., Check, Info, Fatal).
    const callerSkipOffset = 2

    // assemble the log entry
    ent := zapcore.Entry{
        LoggerName: log.name,
        Time:       time.Now(),
        Level:      lvl,
        Message:    msg,
    }
    ce := log.core.Check(ent, nil)
    willWrite := ce != nil

    // levels that need special handling: they trigger a panic or exit
    switch ent.Level {
    case zapcore.PanicLevel:
        ce = ce.Should(ent, zapcore.WriteThenPanic)
    case zapcore.FatalLevel:
        ce = ce.Should(ent, zapcore.WriteThenFatal)
    case zapcore.DPanicLevel:
        if log.development {
            ce = ce.Should(ent, zapcore.WriteThenPanic)
        }
    }

    if !willWrite {
        return ce
    }

    // Thread the error output through to the CheckedEntry.
    ce.ErrorOutput = log.errorOutput
    if log.addCaller {
        ce.Entry.Caller = zapcore.NewEntryCaller(runtime.Caller(log.callerSkip + callerSkipOffset))
        if !ce.Entry.Caller.Defined {
            fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC())
            log.errorOutput.Sync()
        }
    }
    if log.addStack.Enabled(ce.Entry.Level) {
        ce.Entry.Stack = Stack("").String
    }

    return ce
}

func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
    if c.Enabled(ent.Level) { // decide by level whether to handle this entry; entries below the threshold are not written
        return ce.AddCore(ent, c)
    }
    return ce
}

// Enabled returns true if the given level is at or above this level.
// this is where the actual level comparison happens
func (l Level) Enabled(lvl Level) bool {
    return lvl >= l
}

type LevelEnabler interface {
    Enabled(Level) bool
}

// store ent and the core into ce
func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
    if ce == nil {
        ce = getCheckedEntry()
        ce.Entry = ent
    }
    ce.cores = append(ce.cores, core)
    return ce
}

// get a CheckedEntry from the pool to increase reuse and improve performance
func getCheckedEntry() *CheckedEntry {
    ce := _cePool.Get().(*CheckedEntry)
    ce.reset()
    return ce
}
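
The LevelEnabler handed to NewCore is what Check consults. The comparison in Level.Enabled can also be supplied as a custom function via zap.LevelEnablerFunc; a small sketch, where the function name newWarnCore is hypothetical and enc/ws stand for an encoder and WriteSyncer built as in the earlier sections:

func newWarnCore(enc zapcore.Encoder, ws zapcore.WriteSyncer) zapcore.Core {
    // Only WarnLevel and above pass the check; this is the same lvl >= l
    // comparison as Level.Enabled shown above.
    warnOrAbove := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
        return lvl >= zapcore.WarnLevel
    })
    return zapcore.NewCore(enc, ws, warnOrAbove)
}

With such a core, Enabled(zapcore.InfoLevel) returns false, Check adds no core to the CheckedEntry, and an Info entry is dropped before any formatting happens.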
           

Write

func (ce *CheckedEntry) Write(fields ...Field) {
    ...
    var err error
    for i := range ce.cores {
        err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
    }
   ...
}

func (c *ioCore) Write(ent Entry, fields []Field) error {
    buf, err := c.enc.EncodeEntry(ent, fields)
    if err != nil {
        return err
    }
    _, err = c.out.Write(buf.Bytes())
    buf.Free()
    if err != nil {
        return err
    }
    if ent.Level > ErrorLevel {
        // Since we may be crashing the program, sync the output. Ignore Sync
        // errors, pending a clean solution to issue #370.
        c.Sync()
    }
    return nil
}
           

ioCore.Write first formats the entry with EncodeEntry, then writes the formatted log data out.

EncodeEntry

Take jsonEncoder as an example. Note how the JSON keys are handled: they are emitted in a fixed order.

func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) {
    final := enc.clone() // the clone is taken from a pool and carries the encoder's configuration
    final.buf.AppendByte('{')

    if final.LevelKey != "" {
        final.addKey(final.LevelKey)
        cur := final.buf.Len()
        final.EncodeLevel(ent.Level, final)
        if cur == final.buf.Len() {
            // User-supplied EncodeLevel was a no-op. Fall back to strings to keep
            // output JSON valid.
            final.AppendString(ent.Level.String())
        }
    }
    if final.TimeKey != "" {
        final.AddTime(final.TimeKey, ent.Time)
    }
    if ent.LoggerName != "" && final.NameKey != "" {
        final.addKey(final.NameKey)
        cur := final.buf.Len()
        nameEncoder := final.EncodeName

        // if no name encoder provided, fall back to FullNameEncoder for backwards
        // compatibility
        if nameEncoder == nil {
            nameEncoder = FullNameEncoder
        }

        nameEncoder(ent.LoggerName, final)
        if cur == final.buf.Len() {
            // User-supplied EncodeName was a no-op. Fall back to strings to
            // keep output JSON valid.
            final.AppendString(ent.LoggerName)
        }
    }
    if ent.Caller.Defined && final.CallerKey != "" {
        final.addKey(final.CallerKey)
        cur := final.buf.Len()
        final.EncodeCaller(ent.Caller, final)
        if cur == final.buf.Len() {
            // User-supplied EncodeCaller was a no-op. Fall back to strings to
            // keep output JSON valid.
            final.AppendString(ent.Caller.String())
        }
    }
    if final.MessageKey != "" {
        final.addKey(enc.MessageKey)
        final.AppendString(ent.Message)
    }
    if enc.buf.Len() > 0 {
        final.addElementSeparator()
        final.buf.Write(enc.buf.Bytes())
    }
    addFields(final, fields)
    final.closeOpenNamespaces()
    if ent.Stack != "" && final.StacktraceKey != "" {
        final.AddString(final.StacktraceKey, ent.Stack)
    }
    final.buf.AppendByte('}')
    if final.LineEnding != "" {
        final.buf.AppendString(final.LineEnding)
    } else {
        final.buf.AppendString(DefaultLineEnding)
    }

    ret := final.buf
    putJSONEncoder(final) // return the encoder to the pool
    return ret, nil
}
           

From the code above we can see that, when the corresponding keys are configured, the JSON serialization order is: LevelKey, TimeKey, NameKey, CallerKey, MessageKey, then the fields (note: InitialFields are sorted in ASCII order), so the order in which these keys appear cannot be customized.
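
For example, with the default production EncoderConfig (LevelKey "level", TimeKey "ts", CallerKey "caller", MessageKey "msg"), a call such as logger.Info("request handled", zap.Int("status", 200)) produces a line roughly like the following (timestamp and caller values are illustrative):

{"level":"info","ts":1679900000.123,"caller":"app/main.go:42","msg":"request handled","status":200}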

c.out.Write

As mentioned earlier, CombineWriteSyncers wraps the writers first into a multiWriteSyncer and then into a lockedWriteSyncer, so a write goes through lockedWriteSyncer.Write first, then multiWriteSyncer.Write, and finally reaches the Write implementation of *os.File. That completes writing the log data.

func (s *lockedWriteSyncer) Write(bs []byte) (int, error) {
    s.Lock()
    n, err := s.ws.Write(bs)
    s.Unlock()
    return n, err
}

// See https://golang.org/src/io/multi.go
// When not all underlying syncers write the same number of bytes,
// the smallest number is returned even though Write() is called on
// all of them.
func (ws multiWriteSyncer) Write(p []byte) (int, error) {
    var writeErr error
    nWritten := 0
    for _, w := range ws {
        n, err := w.Write(p)
        writeErr = multierr.Append(writeErr, err)
        if nWritten == 0 && n != 0 {
            nWritten = n
        } else if n < nWritten {
            nWritten = n
        }
    }
    return nWritten, writeErr
}

// write writes len(b) bytes to the File.
// It returns the number of bytes written and an error, if any.
func (f *File) write(b []byte) (n int, err error) {
    n, err = f.pfd.Write(b)
    runtime.KeepAlive(f)
    return n, err
}
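
That chain can be exercised directly with zapcore's helpers. A tiny sketch, assuming "os" and "go.uber.org/zap/zapcore" are imported:

// The same wrapping CombineWriteSyncers applies, done by hand.
ws := zapcore.Lock(zapcore.NewMultiWriteSyncer(os.Stdout, os.Stderr))
// This single call goes lockedWriteSyncer.Write -> multiWriteSyncer.Write -> (*os.File).Write.
if _, err := ws.Write([]byte("hello zap\n")); err != nil {
    panic(err)
}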

           

Sync

Now let's look at the Sync call chain in detail. It ends in the system-level Fsync, which flushes buffered data to disk.

func (log *Logger) Sync() error {
    return log.core.Sync()
}

func (s *lockedWriteSyncer) Sync() error {
    s.Lock()
    err := s.ws.Sync()
    s.Unlock()
    return err
}


func (ws multiWriteSyncer) Sync() error {
    var err error
    for _, w := range ws {
        err = multierr.Append(err, w.Sync())
    }
    return err
}

// Sync commits the current contents of the file to stable storage.
// Typically, this means flushing the file system's in-memory copy
// of recently written data to disk.
func (f *File) Sync() error {
    if err := f.checkValid("sync"); err != nil {
        return err
    }
    if e := f.pfd.Fsync(); e != nil {
        return f.wrapErr("sync", e)
    }
    return nil
}
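
This is why a deferred Sync at process shutdown is the usual pattern. A small sketch, assuming logger is the *zap.Logger built earlier and "fmt"/"os" are imported; note that Sync on stdout or stderr may return an error on some platforms, which is commonly ignored or just reported:

defer func() {
    // Fans out via lockedWriteSyncer.Sync -> multiWriteSyncer.Sync -> (*os.File).Sync, i.e. Fsync.
    if err := logger.Sync(); err != nil {
        fmt.Fprintln(os.Stderr, "zap sync:", err)
    }
}()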
           

Summary

When the logger is created, it captures the files to write to, the output format, and related settings; when it is actually used, it checks the entry's level, formats the entry, and writes it to the files. Looking at the whole flow, zap is essentially a tool that wraps file I/O and log formatting. This is also how all similar logging libraries work; the differences lie only in how the wrapping is structured and where the emphasis is placed.