Replace the default log library with seelog

pull/21/merge
ouqiang 2017-04-03 15:27:19 +08:00
parent 6fc6a0d65d
commit 26d5c1cba9
49 changed files with 7562 additions and 47 deletions


@@ -26,10 +26,16 @@ var CmdWeb = cli.Command{
Value: DefaultPort,
Usage: "bind port number",
},
cli.StringFlag{
Name: "env,e",
Value: "dev",
Usage: "runtime environment, dev|test|prod",
},
},
}
func run(ctx *cli.Context) {
setEnvironment(ctx)
app.InitEnv()
m := macaron.Classic()
// register routes
@@ -64,7 +70,7 @@ func registerMiddleware(m *macaron.Macaron) {
// parse the port
func parsePort(ctx *cli.Context) int {
var port int
var port int = DefaultPort
if ctx.IsSet("port") {
port = ctx.Int("port")
}
@@ -74,3 +80,14 @@ func parsePort(ctx *cli.Context) int {
return port
}
func setEnvironment(ctx *cli.Context) {
var env string = ""
if ctx.IsSet("env") {
env = ctx.String("env")
}
if env == "prod" {
macaron.Env = macaron.PROD
}
}


@@ -3,10 +3,10 @@ package ansible
import (
"bytes"
"github.com/ouqiang/cron-scheduler/models"
"github.com/ouqiang/cron-scheduler/modules/utils"
"io/ioutil"
"strconv"
"sync"
"github.com/ouqiang/cron-scheduler/modules/logger"
)
// hostname
@@ -37,11 +37,11 @@ func (h *Hosts) Write() {
host := new(models.Host)
hostModels, err := host.List()
if err != nil {
utils.RecordLog(err)
logger.Error(err)
return
}
if len(hostModels) == 0 {
utils.RecordLog("hosts内容为空")
logger.Info("hosts内容为空")
return
}
buffer := bytes.Buffer{}


@@ -10,6 +10,7 @@ import (
"github.com/ouqiang/cron-scheduler/modules/utils"
"github.com/ouqiang/cron-scheduler/service"
"github.com/ouqiang/cron-scheduler/modules/setting"
"github.com/ouqiang/cron-scheduler/modules/logger"
)
var (
@@ -23,6 +24,7 @@ var (
)
func InitEnv() {
logger.InitLogger()
CheckEnv()
wd, err := os.Getwd()
if err != nil {
@@ -58,15 +60,11 @@ func IsInstalled() bool {
func CheckEnv() {
// ansible cannot be installed on Windows; Windows can only act as a managed node
if runtime.GOOS == "windows" {
panic("不支持在windows上运行")
logger.Fatal("不支持在windows上运行")
}
_, err := utils.ExecShell("ansible", "--version")
if err != nil {
panic(err)
}
_, err = utils.ExecShell("ansible-playbook", "--version")
if err != nil {
panic(err)
logger.Fatal(err)
}
}
@@ -74,7 +72,7 @@ func CheckEnv() {
func CreateInstallLock() error {
_, err := os.Create(ConfDir + "/install.lock")
if err != nil {
utils.RecordLog("创建安装锁文件失败")
logger.Error("创建安装锁文件失败")
}
return err
@@ -101,10 +99,10 @@ func checkDirExists(path ...string) {
for _, value := range path {
_, err := os.Stat(value)
if os.IsNotExist(err) {
panic(value + "目录不存在")
logger.Fatal(value + "目录不存在")
}
if os.IsPermission(err) {
panic(value + "目录无权限操作")
logger.Fatal(value + "目录无权限操作")
}
}
}
@@ -113,11 +111,11 @@ func checkDirExists(path ...string) {
func getDbConfig(configFile string) map[string]string {
config, err := setting.Read(configFile)
if err != nil {
panic(err)
logger.Fatal(err)
}
section := config.Section("db")
if err != nil {
panic(err)
logger.Fatal(err)
}
var db map[string]string = make(map[string]string)
db["user"] = section.Key("user").String()
@@ -130,4 +128,4 @@ func getDbConfig(configFile string) map[string]string {
db["engine"] = section.Key("engine").String()
return db
}
}

modules/logger/logger.go (new file, 97 lines)

@@ -0,0 +1,97 @@
package logger
import (
"github.com/cihub/seelog"
"gopkg.in/macaron.v1"
"fmt"
"os"
)
// log library
type Level int8
var logger seelog.LoggerInterface
const (
DEBUG = iota
INFO
WARN
ERROR
FATAL
)
func InitLogger() {
config := getLogConfig()
l, err := seelog.LoggerFromConfigAsString(config)
if err != nil {
panic(err)
}
logger = l
}
func Debug(v ...interface{}) {
write(DEBUG, v...)
}
func Info(v ...interface{}) {
write(INFO, v...)
}
func Warn(v ...interface{}) {
write(WARN, v...)
}
func Error(v ...interface{}) {
write(ERROR, v...)
}
// Fatal logs at seelog's critical level and then exits the process.
func Fatal(v ...interface{}) {
write(FATAL, v...)
}
// write forwards the message to the underlying seelog logger at the given
// level; the variadic arguments are re-expanded so they are not logged as a
// single slice.
func write(level Level, v ...interface{}) {
defer logger.Flush()
switch level {
case DEBUG:
logger.Debug(v...)
case INFO:
logger.Info(v...)
case WARN:
logger.Warn(v...)
case FATAL:
logger.Critical(v...)
os.Exit(1)
case ERROR:
logger.Error(v...)
}
}
func getLogConfig() string {
config := `
<seelog>
<outputs formatid="main">
%s
<filter levels="info,critical,error,warn">
<file path="log/cron.log" />
</filter>
</outputs>
<formats>
<format id="main" format="%%Date/%%Time [%%LEV] %%Msg%%n"/>
</formats>
</seelog>`
consoleConfig := ""
if macaron.Env == macaron.DEV {
consoleConfig =
`
<filter levels="info,debug,critical,warn,error">
<console />
</filter>
`
}
config = fmt.Sprintf(config, consoleConfig)
return config
}
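
For reference, a minimal usage sketch of the new logger package, pieced together from the calls added elsewhere in this commit; the standalone main function is illustrative only and not part of the diff:

```go
package main

import (
	"github.com/ouqiang/cron-scheduler/modules/logger"
)

func main() {
	// In the commit itself InitLogger is called from app.InitEnv().
	// macaron.Env defaults to DEV, so a console output is added on top of
	// the log/cron.log file writer; with --env prod only the file remains.
	logger.InitLogger()

	logger.Debug("console only in dev; the file filter skips debug")
	logger.Info("written to the console and to log/cron.log")
	logger.Error("same destinations as Info, at error level")
	// logger.Fatal(...) logs at seelog's critical level and calls os.Exit(1).
}
```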


@@ -1,6 +1,9 @@
package utils
import "encoding/json"
import (
"encoding/json"
"github.com/ouqiang/cron-scheduler/modules/logger"
)
// output in JSON format
@@ -32,7 +35,7 @@ func (j *Json) response(code int, message string, data interface{}) string {
result, err := json.Marshal(resp)
if err != nil {
RecordLog(err)
logger.Error(err)
}
return string(result)


@@ -3,7 +3,6 @@ package utils
import (
"crypto/md5"
"encoding/hex"
"log"
"math/rand"
"os/exec"
"time"
@@ -43,10 +42,4 @@ func RandNumber(max int) int {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
return r.Intn(max)
}
// log recording
// TODO: where to store logs (file, database or elasticsearch)? Write to the terminal for now
func RecordLog(v ...interface{}) {
log.Println(v)
}
}


@@ -7,6 +7,7 @@ import (
"github.com/ouqiang/cron-scheduler/modules/utils"
"gopkg.in/macaron.v1"
"strconv"
"github.com/ouqiang/cron-scheduler/modules/logger"
)
// system installation
@@ -38,17 +39,18 @@ func Show(ctx *macaron.Context) {
func Install(ctx *macaron.Context, form InstallForm) string {
json := utils.Json{}
if app.Installed {
logger.Warn("系统重复安装")
return json.Failure(utils.ResponseFailure, "系统已安装!")
}
err := testDbConnection(form)
if err != nil {
utils.RecordLog(err)
logger.Error(err)
return json.Failure(utils.ResponseFailure, "数据库连接失败")
}
// write the database configuration
err = writeConfig(form)
if err != nil {
utils.RecordLog(err)
logger.Error(err)
return json.Failure(utils.ResponseFailure, "数据库配置写入文件失败")
}
@@ -57,21 +59,21 @@ func Install(ctx *macaron.Context, form InstallForm) string {
migration := new(models.Migration)
err = migration.Exec(form.DbName)
if err != nil {
utils.RecordLog(err)
logger.Error(err)
return json.Failure(utils.ResponseFailure, "创建数据库表失败")
}
// create the admin account
err = createAdminUser(form)
if err != nil {
utils.RecordLog(err)
logger.Error(err)
return json.Failure(utils.ResponseFailure, "创建管理员账号失败")
}
// create the installation lock
err = app.CreateInstallLock()
if err != nil {
utils.RecordLog(err)
logger.Error(err)
return json.Failure(utils.ResponseFailure, "创建文件安装锁失败")
}
@@ -122,10 +124,13 @@ func testDbConnection(form InstallForm) error {
dbConfig["password"] = form.DbPassword
dbConfig["charset"] = "utf8"
db, err := models.CreateTmpDb(dbConfig)
if err == nil {
db.Close()
if err != nil {
return err
}
defer db.Close()
err = db.Ping()
return err
}


@@ -5,12 +5,12 @@ import (
"github.com/ouqiang/cron-scheduler/models"
"github.com/ouqiang/cron-scheduler/modules/ansible"
"github.com/ouqiang/cron-scheduler/modules/crontask"
"github.com/ouqiang/cron-scheduler/modules/utils"
"github.com/robfig/cron"
"io/ioutil"
"net/http"
"strconv"
"time"
"github.com/ouqiang/cron-scheduler/modules/logger"
)
type Task struct{}
@@ -20,11 +20,11 @@ func (task *Task) Initialize() {
taskModel := new(models.Task)
taskList, err := taskModel.ActiveList()
if err != nil {
utils.RecordLog("获取任务列表错误-", err.Error())
logger.Error("获取任务列表错误-", err.Error())
return
}
if len(taskList) == 0 {
utils.RecordLog("任务列表为空")
logger.Debug("任务列表为空")
return
}
for _, item := range taskList {
@@ -36,14 +36,14 @@ func (task *Task) Initialize() {
func (task *Task) Add(taskModel models.Task) {
taskFunc := createHandlerJob(taskModel)
if taskFunc == nil {
utils.RecordLog("添加任务#不存在的任务协议编号", taskModel.Protocol)
logger.Error("添加任务#不存在的任务协议编号", taskModel.Protocol)
return
}
// scheduled task
if taskModel.Type == models.Timing {
err := crontask.DefaultCronTask.AddOrReplace(strconv.Itoa(taskModel.Id), taskModel.Spec, taskFunc)
if err != nil {
utils.RecordLog(err)
logger.Error(err)
}
} else if taskModel.Type == models.Delay {
// delayed task
@@ -65,7 +65,7 @@ func (h *HTTPHandler) Run(taskModel models.Task) (result string, err error) {
}
req, err := http.NewRequest("POST", taskModel.Command, nil)
if err != nil {
utils.RecordLog("创建HTTP请求错误-", err.Error())
logger.Error("创建HTTP请求错误-", err.Error())
return
}
req.Header.Set("Content-type", "application/x-www-form-urlencoded")
@@ -78,12 +78,12 @@ func (h *HTTPHandler) Run(taskModel models.Task) (result string, err error) {
}
}()
if err != nil {
utils.RecordLog("HTTP请求错误-", err.Error())
logger.Error("HTTP请求错误-", err.Error())
return
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
utils.RecordLog("读取HTTP请求返回值失败-", err.Error())
logger.Error("读取HTTP请求返回值失败-", err.Error())
}
return string(body), err
@@ -160,14 +160,14 @@ func createHandlerJob(taskModel models.Task) cron.FuncJob {
taskFunc := func() {
taskLogId, err := createTaskLog(taskModel.Id)
if err != nil {
utils.RecordLog("写入任务日志失败-", err)
logger.Error("写入任务日志失败-", err)
return
}
// err != nil 执行失败
result, err := handler.Run(taskModel)
_, err = updateTaskLog(int(taskLogId), result, err)
if err != nil {
utils.RecordLog("更新任务日志失败-", err)
logger.Error("更新任务日志失败-", err)
}
}

vendor/github.com/cihub/seelog/LICENSE.txt (generated, vendored, new file, 24 lines)

@@ -0,0 +1,24 @@
Copyright (c) 2012, Cloud Instruments Co., Ltd. <info@cin.io>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Cloud Instruments Co., Ltd. nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/cihub/seelog/README.markdown (generated, vendored, new file, 116 lines)

@@ -0,0 +1,116 @@
Seelog
=======
Seelog is a powerful and easy-to-learn logging framework that provides functionality for flexible dispatching, filtering, and formatting log messages.
It is natively written in the [Go](http://golang.org/) programming language.
[![Build Status](https://drone.io/github.com/cihub/seelog/status.png)](https://drone.io/github.com/cihub/seelog/latest)
Features
------------------
* XML configuration, allowing logger parameters to be changed without recompilation
* Changing configurations on the fly without app restart
* Possibility to set different log configurations for different project files and functions
* Adjustable message formatting
* Simultaneous log output to multiple streams
* Choosing logger priority strategy to minimize performance hit
* Different output writers
* Console writer
* File writer
* Buffered writer (Chunk writer)
* Rolling log writer (Logging with rotation)
* SMTP writer
* Others... (See [Wiki](https://github.com/cihub/seelog/wiki))
* Log message wrappers (JSON, XML, etc.)
* Global variables and functions for easy usage in standalone apps
* Functions for flexible usage in libraries
Quick-start
-----------
```go
package main
import log "github.com/cihub/seelog"
func main() {
defer log.Flush()
log.Info("Hello from Seelog!")
}
```
Installation
------------
If you don't have the Go development environment installed, visit the
[Getting Started](http://golang.org/doc/install.html) document and follow the instructions. Once you're ready, execute the following command:
```
go get -u github.com/cihub/seelog
```
*IMPORTANT*: If you are not using the latest release version of Go, check out this [wiki page](https://github.com/cihub/seelog/wiki/Notes-on-'go-get')
Documentation
---------------
Seelog has github wiki pages, which contain detailed how-tos references: https://github.com/cihub/seelog/wiki
Examples
---------------
Seelog examples can be found here: [seelog-examples](https://github.com/cihub/seelog-examples)
Issues
---------------
Feel free to push issues that could make Seelog better: https://github.com/cihub/seelog/issues
Changelog
---------------
* **v2.6** : Config using code and custom formatters
* Configuration using code in addition to xml (All internal receiver/dispatcher/logger types are now exported).
* Custom formatters. Check [wiki](https://github.com/cihub/seelog/wiki/Custom-formatters)
* Bugfixes and internal improvements.
* **v2.5** : Interaction with other systems. Part 2: custom receivers
* Finished custom receivers feature. Check [wiki](https://github.com/cihub/seelog/wiki/custom-receivers)
* Added 'LoggerFromCustomReceiver'
* Added 'LoggerFromWriterWithMinLevelAndFormat'
* Added 'LoggerFromCustomReceiver'
* Added 'LoggerFromParamConfigAs...'
* **v2.4** : Interaction with other systems. Part 1: wrapping seelog
* Added configurable caller stack skip logic
* Added 'SetAdditionalStackDepth' to 'LoggerInterface'
* **v2.3** : Rethinking 'rolling' receiver
* Reimplemented 'rolling' receiver
* Added 'Max rolls' feature for 'rolling' receiver with type='date'
* Fixed 'rolling' receiver issue: renaming on Windows
* **v2.2** : go1.0 compatibility point [go1.0 tag]
* Fixed internal bugs
* Added 'ANSI n [;k]' format identifier: %EscN
* Made current release go1 compatible
* **v2.1** : Some new features
* Rolling receiver archiving option.
* Added format identifier: %Line
* Smtp: added paths to PEM files directories
* Added format identifier: %FuncShort
* Warn, Error and Critical methods now return an error
* **v2.0** : Second major release. BREAKING CHANGES.
* Support of binaries with stripped symbols
* Added log strategy: adaptive
* Critical message now forces Flush()
* Added predefined formats: xml-debug, xml-debug-short, xml, xml-short, json-debug, json-debug-short, json, json-short, debug, debug-short, fast
* Added receiver: conn (network connection writer)
* BREAKING CHANGE: added Tracef, Debugf, Infof, etc. to satisfy the print/printf principle
* Bug fixes
* **v1.0** : Initial release. Features:
* Xml config
* Changing configurations on the fly without app restart
* Constraints and exceptions
* Formatting
* Log strategies: sync, async loop, async timer
* Receivers: buffered, console, file, rolling, smtp

vendor/github.com/cihub/seelog/archive/archive.go (generated, vendored, new file, 198 lines)

@@ -0,0 +1,198 @@
package archive
import (
"archive/tar"
"archive/zip"
"fmt"
"io"
"io/ioutil"
"os"
"time"
"github.com/cihub/seelog/archive/gzip"
)
// Reader is the interface for reading files from an archive.
type Reader interface {
NextFile() (name string, err error)
io.Reader
}
// ReadCloser is the interface that groups Reader with the Close method.
type ReadCloser interface {
Reader
io.Closer
}
// Writer is the interface for writing files to an archived format.
type Writer interface {
NextFile(name string, fi os.FileInfo) error
io.Writer
}
// WriteCloser is the interface that groups Writer with the Close method.
type WriteCloser interface {
Writer
io.Closer
}
type nopCloser struct{ Reader }
func (nopCloser) Close() error { return nil }
// NopCloser returns a ReadCloser with a no-op Close method wrapping the
// provided Reader r.
func NopCloser(r Reader) ReadCloser {
return nopCloser{r}
}
// Copy copies from src to dest until either EOF is reached on src or an error
// occurs.
//
// When the archive format of src matches that of dst, Copy streams the files
// directly into dst. Otherwise, copy buffers the contents to disk to compute
// headers before writing to dst.
func Copy(dst Writer, src Reader) error {
switch src := src.(type) {
case tarReader:
if dst, ok := dst.(tarWriter); ok {
return copyTar(dst, src)
}
case zipReader:
if dst, ok := dst.(zipWriter); ok {
return copyZip(dst, src)
}
// Switch on concrete type because gzip has no special methods
case *gzip.Reader:
if dst, ok := dst.(*gzip.Writer); ok {
_, err := io.Copy(dst, src)
return err
}
}
return copyBuffer(dst, src)
}
func copyBuffer(dst Writer, src Reader) (err error) {
const defaultFileMode = 0666
buf, err := ioutil.TempFile("", "archive_copy_buffer")
if err != nil {
return err
}
defer os.Remove(buf.Name()) // Do not care about failure removing temp
defer buf.Close() // Do not care about failure closing temp
for {
// Handle the next file
name, err := src.NextFile()
switch err {
case io.EOF: // Done copying
return nil
default: // Failed to write: bail out
return err
case nil: // Proceed below
}
// Buffer the file
if _, err := io.Copy(buf, src); err != nil {
return fmt.Errorf("buffer to disk: %v", err)
}
// Seek to the start of the file for full file copy
if _, err := buf.Seek(0, os.SEEK_SET); err != nil {
return err
}
// Set desired file permissions
if err := os.Chmod(buf.Name(), defaultFileMode); err != nil {
return err
}
fi, err := buf.Stat()
if err != nil {
return err
}
// Write the buffered file
if err := dst.NextFile(name, fi); err != nil {
return err
}
if _, err := io.Copy(dst, buf); err != nil {
return fmt.Errorf("copy to dst: %v", err)
}
if err := buf.Truncate(0); err != nil {
return err
}
if _, err := buf.Seek(0, os.SEEK_SET); err != nil {
return err
}
}
}
type tarReader interface {
Next() (*tar.Header, error)
io.Reader
}
type tarWriter interface {
WriteHeader(hdr *tar.Header) error
io.Writer
}
type zipReader interface {
Files() []*zip.File
}
type zipWriter interface {
CreateHeader(fh *zip.FileHeader) (io.Writer, error)
}
func copyTar(w tarWriter, r tarReader) error {
for {
hdr, err := r.Next()
switch err {
case io.EOF:
return nil
default: // Handle error
return err
case nil: // Proceed below
}
info := hdr.FileInfo()
// Skip directories
if info.IsDir() {
continue
}
if err := w.WriteHeader(hdr); err != nil {
return err
}
if _, err := io.Copy(w, r); err != nil {
return err
}
}
}
func copyZip(zw zipWriter, r zipReader) error {
for _, f := range r.Files() {
if err := copyZipFile(zw, f); err != nil {
return err
}
}
return nil
}
func copyZipFile(zw zipWriter, f *zip.File) error {
rc, err := f.Open()
if err != nil {
return err
}
defer rc.Close() // Read-only
hdr := f.FileHeader
hdr.SetModTime(time.Now())
w, err := zw.CreateHeader(&hdr)
if err != nil {
return err
}
_, err = io.Copy(w, rc)
return err
}
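
As an aside, a minimal sketch of how the vendored archive helpers above fit together (not part of the commit; the tar and zip file names are hypothetical):

```go
package main

import (
	"log"
	"os"

	"github.com/cihub/seelog/archive"
	"github.com/cihub/seelog/archive/tar"
	"github.com/cihub/seelog/archive/zip"
)

func main() {
	// Repack logs.tar into logs.zip; because the source and destination
	// formats differ, Copy buffers each entry to a temp file first.
	src, err := os.Open("logs.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := os.Create("logs.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	zw := zip.NewWriter(dst)
	defer zw.Close() // flush the zip central directory before the file closes

	if err := archive.Copy(zw, tar.NewReader(src)); err != nil {
		log.Fatal(err)
	}
}
```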

vendor/github.com/cihub/seelog/archive/gzip/gzip.go (generated, vendored, new file, 64 lines)

@@ -0,0 +1,64 @@
// Package gzip implements reading and writing of gzip format compressed files.
// See the compress/gzip package for more details.
package gzip
import (
"compress/gzip"
"fmt"
"io"
"os"
)
// Reader is an io.Reader that can be read to retrieve uncompressed data from a
// gzip-format compressed file.
type Reader struct {
gzip.Reader
name string
isEOF bool
}
// NewReader creates a new Reader reading the given reader.
func NewReader(r io.Reader, name string) (*Reader, error) {
gr, err := gzip.NewReader(r)
if err != nil {
return nil, err
}
return &Reader{
Reader: *gr,
name: name,
}, nil
}
// NextFile returns the file name. Calls subsequent to the first call will
// return EOF.
func (r *Reader) NextFile() (name string, err error) {
if r.isEOF {
return "", io.EOF
}
r.isEOF = true
return r.name, nil
}
// Writer is an io.WriteCloser. Writes to a Writer are compressed and written to w.
type Writer struct {
gzip.Writer
name string
noMoreFiles bool
}
// NextFile never returns a next file, and should not be called more than once.
func (w *Writer) NextFile(name string, _ os.FileInfo) error {
if w.noMoreFiles {
return fmt.Errorf("gzip: only accepts one file: already received %q and now %q", w.name, name)
}
w.noMoreFiles = true
w.name = name
return nil
}
// NewWriter returns a new Writer. Writes to the returned writer are compressed
// and written to w.
func NewWriter(w io.Writer) *Writer {
return &Writer{Writer: *gzip.NewWriter(w)}
}

vendor/github.com/cihub/seelog/archive/tar/tar.go (generated, vendored, new file, 72 lines)

@@ -0,0 +1,72 @@
package tar
import (
"archive/tar"
"io"
"os"
)
// Reader provides sequential access to the contents of a tar archive.
type Reader struct {
tar.Reader
}
// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader {
return &Reader{Reader: *tar.NewReader(r)}
}
// NextFile advances to the next file in the tar archive.
func (r *Reader) NextFile() (name string, err error) {
hdr, err := r.Next()
if err != nil {
return "", err
}
return hdr.Name, nil
}
// Writer provides sequential writing of a tar archive in POSIX.1 format.
type Writer struct {
tar.Writer
closers []io.Closer
}
// NewWriter creates a new Writer writing to w.
func NewWriter(w io.Writer) *Writer {
return &Writer{Writer: *tar.NewWriter(w)}
}
// NewWriteMultiCloser creates a new Writer writing to w that also closes all
// closers in order on close.
func NewWriteMultiCloser(w io.WriteCloser, closers ...io.Closer) *Writer {
return &Writer{
Writer: *tar.NewWriter(w),
closers: closers,
}
}
// NextFile computes and writes a header and prepares to accept the file's
// contents.
func (w *Writer) NextFile(name string, fi os.FileInfo) error {
if name == "" {
name = fi.Name()
}
hdr, err := tar.FileInfoHeader(fi, name)
if err != nil {
return err
}
hdr.Name = name
return w.WriteHeader(hdr)
}
// Close closes the tar archive and all other closers, flushing any unwritten
// data to the underlying writer.
func (w *Writer) Close() error {
err := w.Writer.Close()
for _, c := range w.closers {
if cerr := c.Close(); cerr != nil && err == nil {
err = cerr
}
}
return err
}

vendor/github.com/cihub/seelog/archive/zip/zip.go (generated, vendored, new file, 89 lines)

@@ -0,0 +1,89 @@
package zip
import (
"archive/zip"
"io"
"os"
)
// Reader provides sequential access to the contents of a zip archive.
type Reader struct {
zip.Reader
unread []*zip.File
rc io.ReadCloser
}
// NewReader returns a new Reader reading from r, which is assumed to have the
// given size in bytes.
func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
zr, err := zip.NewReader(r, size)
if err != nil {
return nil, err
}
return &Reader{Reader: *zr}, nil
}
// NextFile advances to the next file in the zip archive.
func (r *Reader) NextFile() (name string, err error) {
// Initialize unread
if r.unread == nil {
r.unread = r.Files()[:]
}
// Close previous file
if r.rc != nil {
r.rc.Close() // Read-only
}
if len(r.unread) == 0 {
return "", io.EOF
}
// Open and return next unread
f := r.unread[0]
name, r.unread = f.Name, r.unread[1:]
r.rc, err = f.Open()
if err != nil {
return "", err
}
return name, nil
}
func (r *Reader) Read(p []byte) (n int, err error) {
return r.rc.Read(p)
}
// Files returns the full list of files in the zip archive.
func (r *Reader) Files() []*zip.File {
return r.File
}
// Writer provides sequential writing of a zip archive.
type Writer struct {
zip.Writer
w io.Writer
}
// NewWriter returns a new Writer writing to w.
func NewWriter(w io.Writer) *Writer {
return &Writer{Writer: *zip.NewWriter(w)}
}
// NextFile computes and writes a header and prepares to accept the file's
// contents.
func (w *Writer) NextFile(name string, fi os.FileInfo) error {
if name == "" {
name = fi.Name()
}
hdr, err := zip.FileInfoHeader(fi)
if err != nil {
return err
}
hdr.Name = name
w.w, err = w.CreateHeader(hdr)
return err
}
func (w *Writer) Write(p []byte) (n int, err error) {
return w.w.Write(p)
}


@@ -0,0 +1,129 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"math"
"time"
)
var (
adaptiveLoggerMaxInterval = time.Minute
adaptiveLoggerMaxCriticalMsgCount = uint32(1000)
)
// asyncAdaptiveLogger represents asynchronous adaptive logger which acts like
// an async timer logger, but its interval depends on the current message count
// in the queue.
//
// Interval = I, minInterval = m, maxInterval = M, criticalMsgCount = C, msgCount = c:
// I = m + (C - Min(c, C)) / C * (M - m)
type asyncAdaptiveLogger struct {
asyncLogger
minInterval time.Duration
criticalMsgCount uint32
maxInterval time.Duration
}
// NewAsyncAdaptiveLogger creates a new asynchronous adaptive logger
func NewAsyncAdaptiveLogger(
config *logConfig,
minInterval time.Duration,
maxInterval time.Duration,
criticalMsgCount uint32) (*asyncAdaptiveLogger, error) {
if minInterval <= 0 {
return nil, errors.New("async adaptive logger min interval should be > 0")
}
if maxInterval > adaptiveLoggerMaxInterval {
return nil, fmt.Errorf("async adaptive logger max interval should be <= %s",
adaptiveLoggerMaxInterval)
}
if criticalMsgCount <= 0 {
return nil, errors.New("async adaptive logger critical msg count should be > 0")
}
if criticalMsgCount > adaptiveLoggerMaxCriticalMsgCount {
return nil, fmt.Errorf("async adaptive logger critical msg count should be <= %s",
adaptiveLoggerMaxInterval)
}
asnAdaptiveLogger := new(asyncAdaptiveLogger)
asnAdaptiveLogger.asyncLogger = *newAsyncLogger(config)
asnAdaptiveLogger.minInterval = minInterval
asnAdaptiveLogger.maxInterval = maxInterval
asnAdaptiveLogger.criticalMsgCount = criticalMsgCount
go asnAdaptiveLogger.processQueue()
return asnAdaptiveLogger, nil
}
func (asnAdaptiveLogger *asyncAdaptiveLogger) processItem() (closed bool, itemCount int) {
asnAdaptiveLogger.queueHasElements.L.Lock()
defer asnAdaptiveLogger.queueHasElements.L.Unlock()
for asnAdaptiveLogger.msgQueue.Len() == 0 && !asnAdaptiveLogger.Closed() {
asnAdaptiveLogger.queueHasElements.Wait()
}
if asnAdaptiveLogger.Closed() {
return true, asnAdaptiveLogger.msgQueue.Len()
}
asnAdaptiveLogger.processQueueElement()
return false, asnAdaptiveLogger.msgQueue.Len() - 1
}
// I = m + (C - Min(c, C)) / C * (M - m) =>
// I = m + cDiff * mDiff,
// cDiff = (C - Min(c, C)) / C)
// mDiff = (M - m)
func (asnAdaptiveLogger *asyncAdaptiveLogger) calcAdaptiveInterval(msgCount int) time.Duration {
critCountF := float64(asnAdaptiveLogger.criticalMsgCount)
cDiff := (critCountF - math.Min(float64(msgCount), critCountF)) / critCountF
mDiff := float64(asnAdaptiveLogger.maxInterval - asnAdaptiveLogger.minInterval)
return asnAdaptiveLogger.minInterval + time.Duration(cDiff*mDiff)
}
func (asnAdaptiveLogger *asyncAdaptiveLogger) processQueue() {
for !asnAdaptiveLogger.Closed() {
closed, itemCount := asnAdaptiveLogger.processItem()
if closed {
break
}
interval := asnAdaptiveLogger.calcAdaptiveInterval(itemCount)
<-time.After(interval)
}
}
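
To make the adaptive-interval formula above concrete, here is a small self-contained illustration with hypothetical parameter values (not part of the vendored code):

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// Hypothetical values: m = 10ms, M = 1s, C = 1000 critical messages.
	m, M := 10*time.Millisecond, time.Second
	C := 1000.0

	for _, c := range []float64{0, 250, 1000} {
		// I = m + (C - Min(c, C)) / C * (M - m), as in calcAdaptiveInterval.
		cDiff := (C - math.Min(c, C)) / C
		interval := m + time.Duration(cDiff*float64(M-m))
		fmt.Printf("queued=%4.0f -> interval=%v\n", c, interval)
	}
	// Prints 1s for an empty queue, 752.5ms at 250 messages and 10ms at 1000,
	// i.e. the fuller the queue, the sooner the next processing pass runs.
}
```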

vendor/github.com/cihub/seelog/behavior_asynclogger.go (generated, vendored, new file, 142 lines)

@@ -0,0 +1,142 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"container/list"
"fmt"
"sync"
)
// MaxQueueSize is the critical number of messages in the queue that result in an immediate flush.
const (
MaxQueueSize = 10000
)
type msgQueueItem struct {
level LogLevel
context LogContextInterface
message fmt.Stringer
}
// asyncLogger represents common data for all asynchronous loggers
type asyncLogger struct {
commonLogger
msgQueue *list.List
queueHasElements *sync.Cond
}
// newAsyncLogger creates a new asynchronous logger
func newAsyncLogger(config *logConfig) *asyncLogger {
asnLogger := new(asyncLogger)
asnLogger.msgQueue = list.New()
asnLogger.queueHasElements = sync.NewCond(new(sync.Mutex))
asnLogger.commonLogger = *newCommonLogger(config, asnLogger)
return asnLogger
}
func (asnLogger *asyncLogger) innerLog(
level LogLevel,
context LogContextInterface,
message fmt.Stringer) {
asnLogger.addMsgToQueue(level, context, message)
}
func (asnLogger *asyncLogger) Close() {
asnLogger.m.Lock()
defer asnLogger.m.Unlock()
if !asnLogger.Closed() {
asnLogger.flushQueue(true)
asnLogger.config.RootDispatcher.Flush()
if err := asnLogger.config.RootDispatcher.Close(); err != nil {
reportInternalError(err)
}
asnLogger.closedM.Lock()
asnLogger.closed = true
asnLogger.closedM.Unlock()
asnLogger.queueHasElements.Broadcast()
}
}
func (asnLogger *asyncLogger) Flush() {
asnLogger.m.Lock()
defer asnLogger.m.Unlock()
if !asnLogger.Closed() {
asnLogger.flushQueue(true)
asnLogger.config.RootDispatcher.Flush()
}
}
func (asnLogger *asyncLogger) flushQueue(lockNeeded bool) {
if lockNeeded {
asnLogger.queueHasElements.L.Lock()
defer asnLogger.queueHasElements.L.Unlock()
}
for asnLogger.msgQueue.Len() > 0 {
asnLogger.processQueueElement()
}
}
func (asnLogger *asyncLogger) processQueueElement() {
if asnLogger.msgQueue.Len() > 0 {
backElement := asnLogger.msgQueue.Front()
msg, _ := backElement.Value.(msgQueueItem)
asnLogger.processLogMsg(msg.level, msg.message, msg.context)
asnLogger.msgQueue.Remove(backElement)
}
}
func (asnLogger *asyncLogger) addMsgToQueue(
level LogLevel,
context LogContextInterface,
message fmt.Stringer) {
if !asnLogger.Closed() {
asnLogger.queueHasElements.L.Lock()
defer asnLogger.queueHasElements.L.Unlock()
if asnLogger.msgQueue.Len() >= MaxQueueSize {
fmt.Printf("Seelog queue overflow: more than %v messages in the queue. Flushing.\n", MaxQueueSize)
asnLogger.flushQueue(false)
}
queueItem := msgQueueItem{level, context, message}
asnLogger.msgQueue.PushBack(queueItem)
asnLogger.queueHasElements.Broadcast()
} else {
err := fmt.Errorf("queue closed! Cannot process element: %d %#v", level, message)
reportInternalError(err)
}
}


@@ -0,0 +1,69 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
// asyncLoopLogger represents asynchronous logger which processes the log queue in
// a 'for' loop
type asyncLoopLogger struct {
asyncLogger
}
// NewAsyncLoopLogger creates a new asynchronous loop logger
func NewAsyncLoopLogger(config *logConfig) *asyncLoopLogger {
asnLoopLogger := new(asyncLoopLogger)
asnLoopLogger.asyncLogger = *newAsyncLogger(config)
go asnLoopLogger.processQueue()
return asnLoopLogger
}
func (asnLoopLogger *asyncLoopLogger) processItem() (closed bool) {
asnLoopLogger.queueHasElements.L.Lock()
defer asnLoopLogger.queueHasElements.L.Unlock()
for asnLoopLogger.msgQueue.Len() == 0 && !asnLoopLogger.Closed() {
asnLoopLogger.queueHasElements.Wait()
}
if asnLoopLogger.Closed() {
return true
}
asnLoopLogger.processQueueElement()
return false
}
func (asnLoopLogger *asyncLoopLogger) processQueue() {
for !asnLoopLogger.Closed() {
closed := asnLoopLogger.processItem()
if closed {
break
}
}
}


@@ -0,0 +1,82 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"time"
)
// asyncTimerLogger represents asynchronous logger which processes the log queue each
// 'duration' nanoseconds
type asyncTimerLogger struct {
asyncLogger
interval time.Duration
}
// NewAsyncTimerLogger creates a new asynchronous timer logger
func NewAsyncTimerLogger(config *logConfig, interval time.Duration) (*asyncTimerLogger, error) {
if interval <= 0 {
return nil, errors.New("async logger interval should be > 0")
}
asnTimerLogger := new(asyncTimerLogger)
asnTimerLogger.asyncLogger = *newAsyncLogger(config)
asnTimerLogger.interval = interval
go asnTimerLogger.processQueue()
return asnTimerLogger, nil
}
func (asnTimerLogger *asyncTimerLogger) processItem() (closed bool) {
asnTimerLogger.queueHasElements.L.Lock()
defer asnTimerLogger.queueHasElements.L.Unlock()
for asnTimerLogger.msgQueue.Len() == 0 && !asnTimerLogger.Closed() {
asnTimerLogger.queueHasElements.Wait()
}
if asnTimerLogger.Closed() {
return true
}
asnTimerLogger.processQueueElement()
return false
}
func (asnTimerLogger *asyncTimerLogger) processQueue() {
for !asnTimerLogger.Closed() {
closed := asnTimerLogger.processItem()
if closed {
break
}
<-time.After(asnTimerLogger.interval)
}
}

vendor/github.com/cihub/seelog/behavior_synclogger.go (generated, vendored, new file, 75 lines)

@@ -0,0 +1,75 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
)
// syncLogger performs logging in the same goroutine where 'Trace/Debug/...'
// func was called
type syncLogger struct {
commonLogger
}
// NewSyncLogger creates a new synchronous logger
func NewSyncLogger(config *logConfig) *syncLogger {
syncLogger := new(syncLogger)
syncLogger.commonLogger = *newCommonLogger(config, syncLogger)
return syncLogger
}
func (syncLogger *syncLogger) innerLog(
level LogLevel,
context LogContextInterface,
message fmt.Stringer) {
syncLogger.processLogMsg(level, message, context)
}
func (syncLogger *syncLogger) Close() {
syncLogger.m.Lock()
defer syncLogger.m.Unlock()
if !syncLogger.Closed() {
if err := syncLogger.config.RootDispatcher.Close(); err != nil {
reportInternalError(err)
}
syncLogger.closedM.Lock()
syncLogger.closed = true
syncLogger.closedM.Unlock()
}
}
func (syncLogger *syncLogger) Flush() {
syncLogger.m.Lock()
defer syncLogger.m.Unlock()
if !syncLogger.Closed() {
syncLogger.config.RootDispatcher.Flush()
}
}

vendor/github.com/cihub/seelog/cfg_config.go (generated, vendored, new file, 212 lines)

@@ -0,0 +1,212 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"os"
)
// LoggerFromConfigAsFile creates logger with config from file. File should contain valid seelog xml.
func LoggerFromConfigAsFile(fileName string) (LoggerInterface, error) {
file, err := os.Open(fileName)
if err != nil {
return nil, err
}
defer file.Close()
conf, err := configFromReader(file)
if err != nil {
return nil, err
}
return createLoggerFromFullConfig(conf)
}
// LoggerFromConfigAsBytes creates a logger with config from bytes stream. Bytes should contain valid seelog xml.
func LoggerFromConfigAsBytes(data []byte) (LoggerInterface, error) {
conf, err := configFromReader(bytes.NewBuffer(data))
if err != nil {
return nil, err
}
return createLoggerFromFullConfig(conf)
}
// LoggerFromConfigAsString creates a logger with config from a string. String should contain valid seelog xml.
func LoggerFromConfigAsString(data string) (LoggerInterface, error) {
return LoggerFromConfigAsBytes([]byte(data))
}
// LoggerFromParamConfigAsFile does the same as LoggerFromConfigAsFile, but includes special parser options.
// See 'CfgParseParams' comments.
func LoggerFromParamConfigAsFile(fileName string, parserParams *CfgParseParams) (LoggerInterface, error) {
file, err := os.Open(fileName)
if err != nil {
return nil, err
}
defer file.Close()
conf, err := configFromReaderWithConfig(file, parserParams)
if err != nil {
return nil, err
}
return createLoggerFromFullConfig(conf)
}
// LoggerFromParamConfigAsBytes does the same as LoggerFromConfigAsBytes, but includes special parser options.
// See 'CfgParseParams' comments.
func LoggerFromParamConfigAsBytes(data []byte, parserParams *CfgParseParams) (LoggerInterface, error) {
conf, err := configFromReaderWithConfig(bytes.NewBuffer(data), parserParams)
if err != nil {
return nil, err
}
return createLoggerFromFullConfig(conf)
}
// LoggerFromParamConfigAsString does the same as LoggerFromConfigAsString, but includes special parser options.
// See 'CfgParseParams' comments.
func LoggerFromParamConfigAsString(data string, parserParams *CfgParseParams) (LoggerInterface, error) {
return LoggerFromParamConfigAsBytes([]byte(data), parserParams)
}
// LoggerFromWriterWithMinLevel is shortcut for LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat)
func LoggerFromWriterWithMinLevel(output io.Writer, minLevel LogLevel) (LoggerInterface, error) {
return LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat)
}
// LoggerFromWriterWithMinLevelAndFormat creates a proxy logger that uses io.Writer as the
// receiver with minimal level = minLevel and with specified format.
//
// All messages with level more or equal to minLevel will be written to output and
// formatted using the default seelog format.
//
// Can be called for usage with non-Seelog systems
func LoggerFromWriterWithMinLevelAndFormat(output io.Writer, minLevel LogLevel, format string) (LoggerInterface, error) {
constraints, err := NewMinMaxConstraints(minLevel, CriticalLvl)
if err != nil {
return nil, err
}
formatter, err := NewFormatter(format)
if err != nil {
return nil, err
}
dispatcher, err := NewSplitDispatcher(formatter, []interface{}{output})
if err != nil {
return nil, err
}
conf, err := newFullLoggerConfig(constraints, make([]*LogLevelException, 0), dispatcher, syncloggerTypeFromString, nil, nil)
if err != nil {
return nil, err
}
return createLoggerFromFullConfig(conf)
}
// LoggerFromXMLDecoder creates a logger with config from an XML decoder starting from a specific node.
// It should contain valid seelog xml, except for root node name.
func LoggerFromXMLDecoder(xmlParser *xml.Decoder, rootNode xml.Token) (LoggerInterface, error) {
conf, err := configFromXMLDecoder(xmlParser, rootNode)
if err != nil {
return nil, err
}
return createLoggerFromFullConfig(conf)
}
// LoggerFromCustomReceiver creates a proxy logger that uses a CustomReceiver as the
// receiver.
//
// All messages will be sent to the specified custom receiver without additional
// formatting ('%Msg' format is used).
//
// Check CustomReceiver, RegisterReceiver for additional info.
//
// NOTE 1: CustomReceiver.AfterParse is only called when a receiver is instantiated
// by the config parser while parsing config. So, if you are not planning to use the
// same CustomReceiver for both proxying (via LoggerFromCustomReceiver call) and
// loading from config, just leave AfterParse implementation empty.
//
// NOTE 2: Unlike RegisterReceiver, LoggerFromCustomReceiver takes an already initialized
// instance that implements CustomReceiver. So, fill it with data and perform any initialization
// logic before calling this func and it won't be lost.
//
// So:
// * RegisterReceiver takes value just to get the reflect.Type from it and then
// instantiate it as many times as config is reloaded.
//
// * LoggerFromCustomReceiver takes value and uses it without modification and
// reinstantiation, directly passing it to the dispatcher tree.
func LoggerFromCustomReceiver(receiver CustomReceiver) (LoggerInterface, error) {
constraints, err := NewMinMaxConstraints(TraceLvl, CriticalLvl)
if err != nil {
return nil, err
}
output, err := NewCustomReceiverDispatcherByValue(msgonlyformatter, receiver, "user-proxy", CustomReceiverInitArgs{})
if err != nil {
return nil, err
}
dispatcher, err := NewSplitDispatcher(msgonlyformatter, []interface{}{output})
if err != nil {
return nil, err
}
conf, err := newFullLoggerConfig(constraints, make([]*LogLevelException, 0), dispatcher, syncloggerTypeFromString, nil, nil)
if err != nil {
return nil, err
}
return createLoggerFromFullConfig(conf)
}
func CloneLogger(logger LoggerInterface) (LoggerInterface, error) {
switch logger := logger.(type) {
default:
return nil, fmt.Errorf("unexpected type %T", logger)
case *asyncAdaptiveLogger:
clone, err := NewAsyncAdaptiveLogger(logger.commonLogger.config, logger.minInterval, logger.maxInterval, logger.criticalMsgCount)
if err != nil {
return nil, err
}
return clone, nil
case *asyncLoopLogger:
return NewAsyncLoopLogger(logger.commonLogger.config), nil
case *asyncTimerLogger:
clone, err := NewAsyncTimerLogger(logger.commonLogger.config, logger.interval)
if err != nil {
return nil, err
}
return clone, nil
case *syncLogger:
return NewSyncLogger(logger.commonLogger.config), nil
}
}
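
For orientation, a minimal standalone sketch (an assumption, not code from this project) using the LoggerFromWriterWithMinLevelAndFormat helper defined above together with seelog's package-level ReplaceLogger:

```go
package main

import (
	"os"

	log "github.com/cihub/seelog"
)

func main() {
	// Route everything at Warn level and above to stderr with a custom format.
	logger, err := log.LoggerFromWriterWithMinLevelAndFormat(
		os.Stderr, log.WarnLvl, "%Time [%LEV] %Msg%n")
	if err != nil {
		panic(err)
	}
	defer log.Flush()

	// Install it as seelog's package-level logger.
	if err := log.ReplaceLogger(logger); err != nil {
		panic(err)
	}

	log.Debug("dropped: below the minimum level")
	log.Warn("written to stderr")
}
```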

vendor/github.com/cihub/seelog/cfg_errors.go (generated, vendored, new file, 61 lines)

@@ -0,0 +1,61 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
)
var (
errNodeMustHaveChildren = errors.New("node must have children")
errNodeCannotHaveChildren = errors.New("node cannot have children")
)
type unexpectedChildElementError struct {
baseError
}
func newUnexpectedChildElementError(msg string) *unexpectedChildElementError {
custmsg := "Unexpected child element: " + msg
return &unexpectedChildElementError{baseError{message: custmsg}}
}
type missingArgumentError struct {
baseError
}
func newMissingArgumentError(nodeName, attrName string) *missingArgumentError {
custmsg := "Output '" + nodeName + "' has no '" + attrName + "' attribute"
return &missingArgumentError{baseError{message: custmsg}}
}
type unexpectedAttributeError struct {
baseError
}
func newUnexpectedAttributeError(nodeName, attr string) *unexpectedAttributeError {
custmsg := nodeName + " has unexpected attribute: " + attr
return &unexpectedAttributeError{baseError{message: custmsg}}
}

vendor/github.com/cihub/seelog/cfg_logconfig.go (generated, vendored, new file, 141 lines)

@@ -0,0 +1,141 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
)
type loggerTypeFromString uint8
const (
syncloggerTypeFromString = iota
asyncLooploggerTypeFromString
asyncTimerloggerTypeFromString
adaptiveLoggerTypeFromString
defaultloggerTypeFromString = asyncLooploggerTypeFromString
)
const (
syncloggerTypeFromStringStr = "sync"
asyncloggerTypeFromStringStr = "asyncloop"
asyncTimerloggerTypeFromStringStr = "asynctimer"
adaptiveLoggerTypeFromStringStr = "adaptive"
)
// asyncTimerLoggerData represents specific data for async timer logger
type asyncTimerLoggerData struct {
AsyncInterval uint32
}
// adaptiveLoggerData represents specific data for adaptive timer logger
type adaptiveLoggerData struct {
MinInterval uint32
MaxInterval uint32
CriticalMsgCount uint32
}
var loggerTypeToStringRepresentations = map[loggerTypeFromString]string{
syncloggerTypeFromString: syncloggerTypeFromStringStr,
asyncLooploggerTypeFromString: asyncloggerTypeFromStringStr,
asyncTimerloggerTypeFromString: asyncTimerloggerTypeFromStringStr,
adaptiveLoggerTypeFromString: adaptiveLoggerTypeFromStringStr,
}
// getLoggerTypeFromString parses a string and returns a corresponding logger type, if successful.
func getLoggerTypeFromString(logTypeString string) (level loggerTypeFromString, found bool) {
for logType, logTypeStr := range loggerTypeToStringRepresentations {
if logTypeStr == logTypeString {
return logType, true
}
}
return 0, false
}
// logConfig stores logging configuration. Contains messages dispatcher, allowed log level rules
// (general constraints and exceptions)
type logConfig struct {
Constraints logLevelConstraints // General log level rules (>min and <max, or set of allowed levels)
Exceptions []*LogLevelException // Exceptions to general rules for specific files or funcs
RootDispatcher dispatcherInterface // Root of output tree
}
func NewLoggerConfig(c logLevelConstraints, e []*LogLevelException, d dispatcherInterface) *logConfig {
return &logConfig{c, e, d}
}
// configForParsing is used when parsing config from file: logger type is deduced from string, params
// need to be converted from attributes to values and passed to specific logger constructor. Also,
// custom registered receivers and other parse params are used in this case.
type configForParsing struct {
logConfig
LogType loggerTypeFromString
LoggerData interface{}
Params *CfgParseParams // Check cfg_parser: CfgParseParams
}
func newFullLoggerConfig(
constraints logLevelConstraints,
exceptions []*LogLevelException,
rootDispatcher dispatcherInterface,
logType loggerTypeFromString,
logData interface{},
cfgParams *CfgParseParams) (*configForParsing, error) {
if constraints == nil {
return nil, errors.New("constraints can not be nil")
}
if rootDispatcher == nil {
return nil, errors.New("rootDispatcher can not be nil")
}
config := new(configForParsing)
config.Constraints = constraints
config.Exceptions = exceptions
config.RootDispatcher = rootDispatcher
config.LogType = logType
config.LoggerData = logData
config.Params = cfgParams
return config, nil
}
// IsAllowed returns true if logging with specified log level is allowed in current context.
// If any of exception patterns match current context, then exception constraints are applied. Otherwise,
// the general constraints are used.
func (config *logConfig) IsAllowed(level LogLevel, context LogContextInterface) bool {
allowed := config.Constraints.IsAllowed(level) // General rule
// Exceptions:
if context.IsValid() {
for _, exception := range config.Exceptions {
if exception.MatchesContext(context) {
return exception.IsAllowed(level)
}
}
}
return allowed
}

vendor/github.com/cihub/seelog/cfg_parser.go (generated, vendored, new file, 1269 lines; diff suppressed because it is too large)

vendor/github.com/cihub/seelog/common_closer.go (generated, vendored, new file, 25 lines)

@@ -0,0 +1,25 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog

vendor/github.com/cihub/seelog/common_constraints.go (generated, vendored, new file, 162 lines)

@@ -0,0 +1,162 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"strings"
)
// Represents constraints which form a general rule for log levels selection
type logLevelConstraints interface {
IsAllowed(level LogLevel) bool
}
// A minMaxConstraints represents constraints which use minimal and maximal allowed log levels.
type minMaxConstraints struct {
min LogLevel
max LogLevel
}
// NewMinMaxConstraints creates a new minMaxConstraints struct with the specified min and max levels.
func NewMinMaxConstraints(min LogLevel, max LogLevel) (*minMaxConstraints, error) {
if min > max {
return nil, fmt.Errorf("min level can't be greater than max. Got min: %d, max: %d", min, max)
}
if min < TraceLvl || min > CriticalLvl {
return nil, fmt.Errorf("min level can't be less than Trace or greater than Critical. Got min: %d", min)
}
if max < TraceLvl || max > CriticalLvl {
return nil, fmt.Errorf("max level can't be less than Trace or greater than Critical. Got max: %d", max)
}
return &minMaxConstraints{min, max}, nil
}
// IsAllowed returns true, if log level is in [min, max] range (inclusive).
func (minMaxConstr *minMaxConstraints) IsAllowed(level LogLevel) bool {
return level >= minMaxConstr.min && level <= minMaxConstr.max
}
func (minMaxConstr *minMaxConstraints) String() string {
return fmt.Sprintf("Min: %s. Max: %s", minMaxConstr.min, minMaxConstr.max)
}
//=======================================================
// A listConstraints represents constraints which use allowed log levels list.
type listConstraints struct {
allowedLevels map[LogLevel]bool
}
// NewListConstraints creates a new listConstraints struct with the specified allowed levels.
func NewListConstraints(allowList []LogLevel) (*listConstraints, error) {
if allowList == nil {
return nil, errors.New("list can't be nil")
}
allowLevels, err := createMapFromList(allowList)
if err != nil {
return nil, err
}
err = validateOffLevel(allowLevels)
if err != nil {
return nil, err
}
return &listConstraints{allowLevels}, nil
}
func (listConstr *listConstraints) String() string {
allowedList := "List: "
listLevel := make([]string, len(listConstr.allowedLevels))
var logLevel LogLevel
i := 0
for logLevel = TraceLvl; logLevel <= Off; logLevel++ {
if listConstr.allowedLevels[logLevel] {
listLevel[i] = logLevel.String()
i++
}
}
allowedList += strings.Join(listLevel, ",")
return allowedList
}
func createMapFromList(allowedList []LogLevel) (map[LogLevel]bool, error) {
allowedLevels := make(map[LogLevel]bool, 0)
for _, level := range allowedList {
if level < TraceLvl || level > Off {
return nil, fmt.Errorf("level can't be less than Trace or greater than Critical. Got level: %d", level)
}
allowedLevels[level] = true
}
return allowedLevels, nil
}
func validateOffLevel(allowedLevels map[LogLevel]bool) error {
if _, ok := allowedLevels[Off]; ok && len(allowedLevels) > 1 {
return errors.New("logLevel Off cant be mixed with other levels")
}
return nil
}
// IsAllowed returns true, if log level is in allowed log levels list.
// If the list contains only the Off level, IsAllowed will always return false for any input value.
func (listConstr *listConstraints) IsAllowed(level LogLevel) bool {
for l := range listConstr.allowedLevels {
if l == level && level != Off {
return true
}
}
return false
}
// AllowedLevels returns allowed levels configuration as a map.
func (listConstr *listConstraints) AllowedLevels() map[LogLevel]bool {
return listConstr.allowedLevels
}
//=======================================================
type offConstraints struct {
}
func NewOffConstraints() (*offConstraints, error) {
return &offConstraints{}, nil
}
func (offConstr *offConstraints) IsAllowed(level LogLevel) bool {
return false
}
func (offConstr *offConstraints) String() string {
return "Off constraint"
}
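
To make the difference between the two constraint kinds concrete, here is a minimal sketch in the style of the doc.go examples later in this diff; only the exported constructors above are used:

    package main

    import (
        "fmt"

        log "github.com/cihub/seelog"
    )

    func main() {
        minMax, _ := log.NewMinMaxConstraints(log.InfoLvl, log.ErrorLvl)
        fmt.Println(minMax.IsAllowed(log.DebugLvl)) // false: below the minimum
        fmt.Println(minMax.IsAllowed(log.WarnLvl))  // true: inside [Info, Error]

        list, _ := log.NewListConstraints([]log.LogLevel{log.InfoLvl, log.CriticalLvl})
        fmt.Println(list.IsAllowed(log.InfoLvl))  // true: explicitly listed
        fmt.Println(list.IsAllowed(log.ErrorLvl)) // false: not in the allowed list
    }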

234 vendor/github.com/cihub/seelog/common_context.go generated vendored Normal file

@@ -0,0 +1,234 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
)
var (
workingDir = "/"
stackCache map[uintptr]*logContext
stackCacheLock sync.RWMutex
)
func init() {
wd, err := os.Getwd()
if err == nil {
workingDir = filepath.ToSlash(wd) + "/"
}
stackCache = make(map[uintptr]*logContext)
}
// Represents runtime caller context.
type LogContextInterface interface {
// Caller's function name.
Func() string
// Caller's line number.
Line() int
// Caller's file short path (in slashed form).
ShortPath() string
// Caller's file full path (in slashed form).
FullPath() string
// Caller's file name (without path).
FileName() string
// True if the context is correct and may be used.
// If false, then an error in context evaluation occurred and
// all its other data may be corrupted.
IsValid() bool
// Time when log function was called.
CallTime() time.Time
// Custom context that can be set by calling logger.SetContext
CustomContext() interface{}
}
// Returns context of the caller
func currentContext(custom interface{}) (LogContextInterface, error) {
return specifyContext(1, custom)
}
func extractCallerInfo(skip int) (*logContext, error) {
var stack [1]uintptr
if runtime.Callers(skip+1, stack[:]) != 1 {
return nil, errors.New("error during runtime.Callers")
}
pc := stack[0]
// do we have a cache entry?
stackCacheLock.RLock()
ctx, ok := stackCache[pc]
stackCacheLock.RUnlock()
if ok {
return ctx, nil
}
// look up the details of the given caller
funcInfo := runtime.FuncForPC(pc)
if funcInfo == nil {
return nil, errors.New("error during runtime.FuncForPC")
}
var shortPath string
fullPath, line := funcInfo.FileLine(pc)
if strings.HasPrefix(fullPath, workingDir) {
shortPath = fullPath[len(workingDir):]
} else {
shortPath = fullPath
}
funcName := funcInfo.Name()
if strings.HasPrefix(funcName, workingDir) {
funcName = funcName[len(workingDir):]
}
ctx = &logContext{
funcName: funcName,
line: line,
shortPath: shortPath,
fullPath: fullPath,
fileName: filepath.Base(fullPath),
}
// save the details in the cache; note that it's possible we might
// have written an entry into the map in between the test above and
// this section, but the behaviour is still correct
stackCacheLock.Lock()
stackCache[pc] = ctx
stackCacheLock.Unlock()
return ctx, nil
}
// Returns the context of the function that is "skip" stack frames above the caller.
// If skip == 0 then behaves like currentContext
// Context is returned in any situation, even if error occurs. But, if an error
// occurs, the returned context is an error context, which contains no paths
// or names, but states that they can't be extracted.
func specifyContext(skip int, custom interface{}) (LogContextInterface, error) {
callTime := time.Now()
if skip < 0 {
err := fmt.Errorf("can not skip negative stack frames")
return &errorContext{callTime, err}, err
}
caller, err := extractCallerInfo(skip + 2)
if err != nil {
return &errorContext{callTime, err}, err
}
ctx := new(logContext)
*ctx = *caller
ctx.callTime = callTime
ctx.custom = custom
return ctx, nil
}
// Represents a normal runtime caller context.
type logContext struct {
funcName string
line int
shortPath string
fullPath string
fileName string
callTime time.Time
custom interface{}
}
func (context *logContext) IsValid() bool {
return true
}
func (context *logContext) Func() string {
return context.funcName
}
func (context *logContext) Line() int {
return context.line
}
func (context *logContext) ShortPath() string {
return context.shortPath
}
func (context *logContext) FullPath() string {
return context.fullPath
}
func (context *logContext) FileName() string {
return context.fileName
}
func (context *logContext) CallTime() time.Time {
return context.callTime
}
func (context *logContext) CustomContext() interface{} {
return context.custom
}
// Represents an error context
type errorContext struct {
errorTime time.Time
err error
}
func (errContext *errorContext) getErrorText(prefix string) string {
return fmt.Sprintf("%s() error: %s", prefix, errContext.err)
}
func (errContext *errorContext) IsValid() bool {
return false
}
func (errContext *errorContext) Line() int {
return -1
}
func (errContext *errorContext) Func() string {
return errContext.getErrorText("Func")
}
func (errContext *errorContext) ShortPath() string {
return errContext.getErrorText("ShortPath")
}
func (errContext *errorContext) FullPath() string {
return errContext.getErrorText("FullPath")
}
func (errContext *errorContext) FileName() string {
return errContext.getErrorText("FileName")
}
func (errContext *errorContext) CallTime() time.Time {
return errContext.errorTime
}
func (errContext *errorContext) CustomContext() interface{} {
return nil
}
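
Because currentContext and specifyContext are unexported, they are only reachable from inside the package; a small in-package fragment showing what a valid context exposes (output is illustrative):

    ctx, err := currentContext(nil)
    if err == nil && ctx.IsValid() {
        // Prints something like: common_context.go:42 in seelog.someFunc at 15:04:05
        fmt.Printf("%s:%d in %s at %s\n",
            ctx.FileName(), ctx.Line(), ctx.Func(), ctx.CallTime().Format(TimeFormat))
    }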

194 vendor/github.com/cihub/seelog/common_exception.go generated vendored Normal file

@@ -0,0 +1,194 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"regexp"
"strings"
)
// Used in rules creation to validate input file and func filters
var (
fileFormatValidator = regexp.MustCompile(`[a-zA-Z0-9\\/ _\*\.]*`)
funcFormatValidator = regexp.MustCompile(`[a-zA-Z0-9_\*\.]*`)
)
// LogLevelException represents an exceptional case used when you need some specific files or funcs to
// override general constraints and to use their own.
type LogLevelException struct {
funcPatternParts []string
filePatternParts []string
funcPattern string
filePattern string
constraints logLevelConstraints
}
// NewLogLevelException creates a new exception.
func NewLogLevelException(funcPattern string, filePattern string, constraints logLevelConstraints) (*LogLevelException, error) {
if constraints == nil {
return nil, errors.New("constraints can not be nil")
}
exception := new(LogLevelException)
err := exception.initFuncPatternParts(funcPattern)
if err != nil {
return nil, err
}
exception.funcPattern = strings.Join(exception.funcPatternParts, "")
err = exception.initFilePatternParts(filePattern)
if err != nil {
return nil, err
}
exception.filePattern = strings.Join(exception.filePatternParts, "")
exception.constraints = constraints
return exception, nil
}
// MatchesContext returns true if context matches the patterns of this LogLevelException
func (logLevelEx *LogLevelException) MatchesContext(context LogContextInterface) bool {
return logLevelEx.match(context.Func(), context.FullPath())
}
// IsAllowed returns true if log level is allowed according to the constraints of this LogLevelException
func (logLevelEx *LogLevelException) IsAllowed(level LogLevel) bool {
return logLevelEx.constraints.IsAllowed(level)
}
// FuncPattern returns the function pattern of an exception
func (logLevelEx *LogLevelException) FuncPattern() string {
return logLevelEx.funcPattern
}
// FilePattern returns the file pattern of an exception
func (logLevelEx *LogLevelException) FilePattern() string {
return logLevelEx.filePattern
}
// initFuncPatternParts checks whether the func filter has a correct format and splits funcPattern on parts
func (logLevelEx *LogLevelException) initFuncPatternParts(funcPattern string) (err error) {
if funcFormatValidator.FindString(funcPattern) != funcPattern {
return errors.New("func path \"" + funcPattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 _ * . allowed)")
}
logLevelEx.funcPatternParts = splitPattern(funcPattern)
return nil
}
// Checks whether the file filter has a correct format and splits file patterns using splitPattern.
func (logLevelEx *LogLevelException) initFilePatternParts(filePattern string) (err error) {
if fileFormatValidator.FindString(filePattern) != filePattern {
return errors.New("file path \"" + filePattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 \\ / _ * . allowed)")
}
logLevelEx.filePatternParts = splitPattern(filePattern)
return err
}
func (logLevelEx *LogLevelException) match(funcPath string, filePath string) bool {
if !stringMatchesPattern(logLevelEx.funcPatternParts, funcPath) {
return false
}
return stringMatchesPattern(logLevelEx.filePatternParts, filePath)
}
func (logLevelEx *LogLevelException) String() string {
str := fmt.Sprintf("Func: %s File: %s", logLevelEx.funcPattern, logLevelEx.filePattern)
if logLevelEx.constraints != nil {
str += fmt.Sprintf("Constr: %s", logLevelEx.constraints)
} else {
str += "nil"
}
return str
}
// splitPattern splits pattern into strings and asterisks. Example: "ab*cde**f" -> ["ab", "*", "cde", "*", "f"]
func splitPattern(pattern string) []string {
var patternParts []string
var lastChar rune
for _, char := range pattern {
if char == '*' {
if lastChar != '*' {
patternParts = append(patternParts, "*")
}
} else {
if len(patternParts) != 0 && lastChar != '*' {
patternParts[len(patternParts)-1] += string(char)
} else {
patternParts = append(patternParts, string(char))
}
}
lastChar = char
}
return patternParts
}
// stringMatchesPattern checks whether testString matches pattern with asterisks.
// Standard regexp functionality is not used here because of performance issues.
func stringMatchesPattern(patternparts []string, testString string) bool {
if len(patternparts) == 0 {
return len(testString) == 0
}
part := patternparts[0]
if part != "*" {
index := strings.Index(testString, part)
if index == 0 {
return stringMatchesPattern(patternparts[1:], testString[len(part):])
}
} else {
if len(patternparts) == 1 {
return true
}
newTestString := testString
part = patternparts[1]
for {
index := strings.Index(newTestString, part)
if index == -1 {
break
}
newTestString = newTestString[index+len(part):]
result := stringMatchesPattern(patternparts[2:], newTestString)
if result {
return true
}
}
}
return false
}
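
The pattern helpers are easiest to follow with concrete inputs (an in-package sketch; the *_test.go pattern is only an example):

    parts := splitPattern("*_test.go") // ["*", "_test.go"]
    fmt.Println(stringMatchesPattern(parts, "pkg/foo_test.go")) // true
    fmt.Println(stringMatchesPattern(parts, "pkg/foo.go"))      // false

    errOnly, _ := NewMinMaxConstraints(ErrorLvl, CriticalLvl)
    ex, _ := NewLogLevelException("*", "*_test.go", errOnly)
    fmt.Println(ex.IsAllowed(InfoLvl)) // false: this exception only allows Error and Critical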

31 vendor/github.com/cihub/seelog/common_flusher.go generated vendored Normal file

@@ -0,0 +1,31 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
// flusherInterface represents all objects that have to do cleanup
// at certain moments of time (e.g. before app shutdown to avoid data loss)
type flusherInterface interface {
Flush()
}

81 vendor/github.com/cihub/seelog/common_loglevel.go generated vendored Normal file

@@ -0,0 +1,81 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
// Log level type
type LogLevel uint8
// Log levels
const (
TraceLvl = iota
DebugLvl
InfoLvl
WarnLvl
ErrorLvl
CriticalLvl
Off
)
// Log level string representations (used in configuration files)
const (
TraceStr = "trace"
DebugStr = "debug"
InfoStr = "info"
WarnStr = "warn"
ErrorStr = "error"
CriticalStr = "critical"
OffStr = "off"
)
var levelToStringRepresentations = map[LogLevel]string{
TraceLvl: TraceStr,
DebugLvl: DebugStr,
InfoLvl: InfoStr,
WarnLvl: WarnStr,
ErrorLvl: ErrorStr,
CriticalLvl: CriticalStr,
Off: OffStr,
}
// LogLevelFromString parses a string and returns a corresponding log level, if successful.
func LogLevelFromString(levelStr string) (level LogLevel, found bool) {
for lvl, lvlStr := range levelToStringRepresentations {
if lvlStr == levelStr {
return lvl, true
}
}
return 0, false
}
// LogLevelToString returns seelog string representation for a specified level. Returns "" for invalid log levels.
func (level LogLevel) String() string {
levelStr, ok := levelToStringRepresentations[level]
if ok {
return levelStr
}
return ""
}
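
A quick round trip between the string names used in config files and the LogLevel values (minimal sketch):

    package main

    import (
        "fmt"

        log "github.com/cihub/seelog"
    )

    func main() {
        if lvl, ok := log.LogLevelFromString("warn"); ok {
            fmt.Println(lvl)                // "warn", via LogLevel.String
            fmt.Println(lvl >= log.InfoLvl) // true: Warn sorts above Info
        }
        _, ok := log.LogLevelFromString("verbose")
        fmt.Println(ok) // false: "verbose" is not a seelog level name
    }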

242 vendor/github.com/cihub/seelog/dispatch_custom.go generated vendored Normal file

@@ -0,0 +1,242 @@
// Copyright (c) 2013 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"reflect"
"sort"
)
var registeredReceivers = make(map[string]reflect.Type)
// RegisterReceiver records a custom receiver type, identified by a value
// of that type (second argument), under the specified name. Registered
// names can be used in the "name" attribute of <custom> config items.
//
// RegisterReceiver takes the type of the receiver argument, without taking
// the value into account. So do NOT put any data into the second argument
// and only call it like:
// RegisterReceiver("somename", &MyReceiverType{})
//
// After that, when a '<custom>' config tag with this name is used,
// a receiver of the specified type would be instantiated. Check
// CustomReceiver comments for interface details.
//
// NOTE 1: RegisterReceiver fails if you attempt to register different types
// with the same name.
//
// NOTE 2: RegisterReceiver registers those receivers that must be used in
// the configuration files (<custom> items). Basically it is just the way
// you tell the seelog config parser what it should do when it meets a
// <custom> tag with a specific name and data attributes.
//
// But if you are only using seelog as a proxy to an already instantiated
// CustomReceiver (via LoggerFromCustomReceiver func), you should not call RegisterReceiver.
func RegisterReceiver(name string, receiver CustomReceiver) {
newType := reflect.TypeOf(reflect.ValueOf(receiver).Elem().Interface())
if t, ok := registeredReceivers[name]; ok && t != newType {
panic(fmt.Sprintf("duplicate types for %s: %s != %s", name, t, newType))
}
registeredReceivers[name] = newType
}
func customReceiverByName(name string) (creceiver CustomReceiver, err error) {
rt, ok := registeredReceivers[name]
if !ok {
return nil, fmt.Errorf("custom receiver name not registered: '%s'", name)
}
v, ok := reflect.New(rt).Interface().(CustomReceiver)
if !ok {
return nil, fmt.Errorf("cannot instantiate receiver with name='%s'", name)
}
return v, nil
}
// CustomReceiverInitArgs represent arguments passed to the CustomReceiver.Init
// func when custom receiver is being initialized.
type CustomReceiverInitArgs struct {
// XmlCustomAttrs represent '<custom>' xml config item attributes that
// start with "data-". Map keys will be the attribute names without the "data-".
// Map values will be those attribute values.
//
// E.g. if you have a '<custom name="somename" data-attr1="a1" data-attr2="a2"/>'
// you will get map with 2 key-value pairs: "attr1"->"a1", "attr2"->"a2"
//
// Note that in custom items you can only use allowed attributes, like "name" and
// your custom attributes, starting with "data-". Any other will lead to a
// parsing error.
XmlCustomAttrs map[string]string
}
// CustomReceiver is the interface that external custom seelog message receivers
// must implement in order to be able to process seelog messages. Those receivers
// are set in the xml config file using the <custom> tag. Check receivers reference
// wiki section on that.
//
// Use seelog.RegisterReceiver on the receiver type before using it.
type CustomReceiver interface {
// ReceiveMessage is called when the custom receiver gets seelog message from
// a parent dispatcher.
//
// Message, level and context args represent all data that was included in the seelog
// message at the time it was logged.
//
// The formatting is already applied to the message and depends on the config
// like with any other receiver.
//
// If you would like to inform seelog of an error that happened during the handling of
// the message, return a non-nil error. This way you'll end up seeing your error like
// any other internal seelog error.
ReceiveMessage(message string, level LogLevel, context LogContextInterface) error
// AfterParse is called immediately after your custom receiver is instantiated by
// the xml config parser. So, if you need to do any startup logic after config parsing,
// like opening file or allocating any resources after the receiver is instantiated, do it here.
//
// If this func returns a non-nil error, then the loading procedure will fail. E.g.
// if you are loading a seelog xml config, the parser would not finish the loading
// procedure and inform about an error like with any other config error.
//
// If your custom logger needs some configuration, you can use custom attributes in
// your config. Check CustomReceiverInitArgs.XmlCustomAttrs comments.
//
// IMPORTANT: This func is NOT called when the LoggerFromCustomReceiver func is used
// to create seelog proxy logger using the custom receiver. This func is only called when
// receiver is instantiated from a config.
AfterParse(initArgs CustomReceiverInitArgs) error
// Flush is called when the custom receiver gets a 'flush' directive from a
// parent receiver. If custom receiver implements some kind of buffering or
// queuing, then the appropriate reaction on a flush message is synchronous
// flushing of all those queues/buffers. If custom receiver doesn't have
// such mechanisms, then flush implementation may be left empty.
Flush()
// Close is called when the custom receiver gets a 'close' directive from a
// parent receiver. This happens when a top-level seelog dispatcher is sending
// 'close' to all child nodes and it means that current seelog logger is being closed.
// If you need to do any cleanup after your custom receiver is done, you should do
// it here.
Close() error
}
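
For illustration, a minimal custom receiver could look like the following; the names stdoutReceiver and "stdoutx" are hypothetical placeholders, and only the CustomReceiver interface above, RegisterReceiver, and LoggerFromConfigAsString (listed in doc.go later in this diff) are assumed:

    package main

    import (
        "os"

        log "github.com/cihub/seelog"
    )

    // stdoutReceiver forwards every already-formatted message to stdout.
    type stdoutReceiver struct{}

    func (r *stdoutReceiver) ReceiveMessage(msg string, lvl log.LogLevel, ctx log.LogContextInterface) error {
        _, err := os.Stdout.WriteString(msg)
        return err
    }

    func (r *stdoutReceiver) AfterParse(args log.CustomReceiverInitArgs) error { return nil }
    func (r *stdoutReceiver) Flush()                                           {}
    func (r *stdoutReceiver) Close() error                                     { return nil }

    func main() {
        // Register first, so the parser knows what <custom name="stdoutx"/> means.
        log.RegisterReceiver("stdoutx", &stdoutReceiver{})
        logger, err := log.LoggerFromConfigAsString(`
    <seelog>
        <outputs formatid="plain"><custom name="stdoutx"/></outputs>
        <formats><format id="plain" format="%Msg%n"/></formats>
    </seelog>`)
        if err != nil {
            panic(err)
        }
        defer logger.Flush()
        logger.Info("hello through the custom receiver")
    }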
type customReceiverDispatcher struct {
formatter *formatter
innerReceiver CustomReceiver
customReceiverName string
usedArgs CustomReceiverInitArgs
}
// NewCustomReceiverDispatcher creates a customReceiverDispatcher which dispatches data to a specific receiver created
// using a <custom> tag in the config file.
func NewCustomReceiverDispatcher(formatter *formatter, customReceiverName string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) {
if formatter == nil {
return nil, errors.New("formatter cannot be nil")
}
if len(customReceiverName) == 0 {
return nil, errors.New("custom receiver name cannot be empty")
}
creceiver, err := customReceiverByName(customReceiverName)
if err != nil {
return nil, err
}
err = creceiver.AfterParse(cArgs)
if err != nil {
return nil, err
}
disp := &customReceiverDispatcher{formatter, creceiver, customReceiverName, cArgs}
return disp, nil
}
// NewCustomReceiverDispatcherByValue is basically the same as NewCustomReceiverDispatcher, but using
// a specific CustomReceiver value instead of instantiating a new one by type.
func NewCustomReceiverDispatcherByValue(formatter *formatter, customReceiver CustomReceiver, name string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) {
if formatter == nil {
return nil, errors.New("formatter cannot be nil")
}
if customReceiver == nil {
return nil, errors.New("customReceiver cannot be nil")
}
disp := &customReceiverDispatcher{formatter, customReceiver, name, cArgs}
return disp, nil
}
// CustomReceiver implementation. Check CustomReceiver comments.
func (disp *customReceiverDispatcher) Dispatch(
message string,
level LogLevel,
context LogContextInterface,
errorFunc func(err error)) {
defer func() {
if err := recover(); err != nil {
errorFunc(fmt.Errorf("panic in custom receiver '%s'.Dispatch: %s", reflect.TypeOf(disp.innerReceiver), err))
}
}()
err := disp.innerReceiver.ReceiveMessage(disp.formatter.Format(message, level, context), level, context)
if err != nil {
errorFunc(err)
}
}
// CustomReceiver implementation. Check CustomReceiver comments.
func (disp *customReceiverDispatcher) Flush() {
disp.innerReceiver.Flush()
}
// CustomReceiver implementation. Check CustomReceiver comments.
func (disp *customReceiverDispatcher) Close() error {
disp.innerReceiver.Flush()
err := disp.innerReceiver.Close()
if err != nil {
return err
}
return nil
}
func (disp *customReceiverDispatcher) String() string {
datas := ""
skeys := make([]string, 0, len(disp.usedArgs.XmlCustomAttrs))
for i := range disp.usedArgs.XmlCustomAttrs {
skeys = append(skeys, i)
}
sort.Strings(skeys)
for _, key := range skeys {
datas += fmt.Sprintf("<%s, %s> ", key, disp.usedArgs.XmlCustomAttrs[key])
}
str := fmt.Sprintf("Custom receiver %s [fmt='%s'],[data='%s'],[inner='%s']\n",
disp.customReceiverName, disp.formatter.String(), datas, disp.innerReceiver)
return str
}

189 vendor/github.com/cihub/seelog/dispatch_dispatcher.go generated vendored Normal file

@@ -0,0 +1,189 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"io"
)
// A dispatcherInterface is used to dispatch message to all underlying receivers.
// Dispatch logic depends on given context and log level. Any errors are reported using errorFunc.
// Also, as underlying receivers may have a state, a dispatcher has Flush and Close methods which perform
// an immediate cleanup of all data that is stored in the receivers
type dispatcherInterface interface {
flusherInterface
io.Closer
Dispatch(message string, level LogLevel, context LogContextInterface, errorFunc func(err error))
}
type dispatcher struct {
formatter *formatter
writers []*formattedWriter
dispatchers []dispatcherInterface
}
// Creates a dispatcher which dispatches data to a list of receivers.
// Each receiver should be either a Dispatcher or io.Writer, otherwise an error will be returned
func createDispatcher(formatter *formatter, receivers []interface{}) (*dispatcher, error) {
if formatter == nil {
return nil, errors.New("formatter cannot be nil")
}
if receivers == nil || len(receivers) == 0 {
return nil, errors.New("receivers cannot be nil or empty")
}
disp := &dispatcher{formatter, make([]*formattedWriter, 0), make([]dispatcherInterface, 0)}
for _, receiver := range receivers {
writer, ok := receiver.(*formattedWriter)
if ok {
disp.writers = append(disp.writers, writer)
continue
}
ioWriter, ok := receiver.(io.Writer)
if ok {
writer, err := NewFormattedWriter(ioWriter, disp.formatter)
if err != nil {
return nil, err
}
disp.writers = append(disp.writers, writer)
continue
}
dispInterface, ok := receiver.(dispatcherInterface)
if ok {
disp.dispatchers = append(disp.dispatchers, dispInterface)
continue
}
return nil, errors.New("method can receive either io.Writer or dispatcherInterface")
}
return disp, nil
}
func (disp *dispatcher) Dispatch(
message string,
level LogLevel,
context LogContextInterface,
errorFunc func(err error)) {
for _, writer := range disp.writers {
err := writer.Write(message, level, context)
if err != nil {
errorFunc(err)
}
}
for _, dispInterface := range disp.dispatchers {
dispInterface.Dispatch(message, level, context, errorFunc)
}
}
// Flush goes through all underlying writers which implement flusherInterface interface
// and flushes them. Recursively performs the same action for underlying dispatchers
func (disp *dispatcher) Flush() {
for _, disp := range disp.Dispatchers() {
disp.Flush()
}
for _, formatWriter := range disp.Writers() {
flusher, ok := formatWriter.Writer().(flusherInterface)
if ok {
flusher.Flush()
}
}
}
// Close goes through all underlying writers which implement io.Closer interface
// and closes them. Recursively performs the same action for underlying dispatchers
// Before closing, writers are flushed to prevent loss of any buffered data, so
// a call to Flush() func before Close() is not necessary
func (disp *dispatcher) Close() error {
for _, disp := range disp.Dispatchers() {
disp.Flush()
err := disp.Close()
if err != nil {
return err
}
}
for _, formatWriter := range disp.Writers() {
flusher, ok := formatWriter.Writer().(flusherInterface)
if ok {
flusher.Flush()
}
closer, ok := formatWriter.Writer().(io.Closer)
if ok {
err := closer.Close()
if err != nil {
return err
}
}
}
return nil
}
func (disp *dispatcher) Writers() []*formattedWriter {
return disp.writers
}
func (disp *dispatcher) Dispatchers() []dispatcherInterface {
return disp.dispatchers
}
func (disp *dispatcher) String() string {
str := "formatter: " + disp.formatter.String() + "\n"
str += " ->Dispatchers:"
if len(disp.dispatchers) == 0 {
str += "none\n"
} else {
str += "\n"
for _, disp := range disp.dispatchers {
str += fmt.Sprintf(" ->%s", disp)
}
}
str += " ->Writers:"
if len(disp.writers) == 0 {
str += "none\n"
} else {
str += "\n"
for _, writer := range disp.writers {
str += fmt.Sprintf(" ->%s\n", writer)
}
}
return str
}

66 vendor/github.com/cihub/seelog/dispatch_filterdispatcher.go generated vendored Normal file

@@ -0,0 +1,66 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
)
// A filterDispatcher writes the given message to underlying receivers only if message log level
// is in the allowed list.
type filterDispatcher struct {
*dispatcher
allowList map[LogLevel]bool
}
// NewFilterDispatcher creates a new filterDispatcher using a list of allowed levels.
func NewFilterDispatcher(formatter *formatter, receivers []interface{}, allowList ...LogLevel) (*filterDispatcher, error) {
disp, err := createDispatcher(formatter, receivers)
if err != nil {
return nil, err
}
allows := make(map[LogLevel]bool)
for _, allowLevel := range allowList {
allows[allowLevel] = true
}
return &filterDispatcher{disp, allows}, nil
}
func (filter *filterDispatcher) Dispatch(
message string,
level LogLevel,
context LogContextInterface,
errorFunc func(err error)) {
isAllowed, ok := filter.allowList[level]
if ok && isAllowed {
filter.dispatcher.Dispatch(message, level, context, errorFunc)
}
}
func (filter *filterDispatcher) String() string {
return fmt.Sprintf("filterDispatcher ->\n%s", filter.dispatcher)
}
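
Normally a filter is declared in the xml config via the <filter levels="..."> tag (see doc.go later in this diff), but as a hedged sketch of wiring one up in code with existing exported constructors (NewConsoleWriter and NewAsyncLoopLogger come from other files of this vendored package):

    package main

    import log "github.com/cihub/seelog"

    func main() {
        cw, _ := log.NewConsoleWriter()
        fmtr, _ := log.NewFormatter("[%LEV] %Msg%n")
        // Only Info, Error and Critical messages pass through to the console writer.
        filter, _ := log.NewFilterDispatcher(fmtr, []interface{}{cw}, log.InfoLvl, log.ErrorLvl, log.CriticalLvl)

        constraints, _ := log.NewMinMaxConstraints(log.TraceLvl, log.CriticalLvl)
        logger := log.NewAsyncLoopLogger(log.NewLoggerConfig(constraints, nil, filter))
        defer logger.Flush()

        logger.Error("reaches the console")
        logger.Debug("silently dropped by the filter")
    }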

47 vendor/github.com/cihub/seelog/dispatch_splitdispatcher.go generated vendored Normal file

@@ -0,0 +1,47 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
)
// A splitDispatcher just writes the given message to underlying receivers. (Splits the message stream.)
type splitDispatcher struct {
*dispatcher
}
func NewSplitDispatcher(formatter *formatter, receivers []interface{}) (*splitDispatcher, error) {
disp, err := createDispatcher(formatter, receivers)
if err != nil {
return nil, err
}
return &splitDispatcher{disp}, nil
}
func (splitter *splitDispatcher) String() string {
return fmt.Sprintf("splitDispatcher ->\n%s", splitter.dispatcher.String())
}

175 vendor/github.com/cihub/seelog/doc.go generated vendored Normal file

@@ -0,0 +1,175 @@
// Copyright (c) 2014 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package seelog implements logging functionality with flexible dispatching, filtering, and formatting.
Creation
To create a logger, use one of the following constructors:
func LoggerFromConfigAsBytes
func LoggerFromConfigAsFile
func LoggerFromConfigAsString
func LoggerFromWriterWithMinLevel
func LoggerFromWriterWithMinLevelAndFormat
func LoggerFromCustomReceiver (check https://github.com/cihub/seelog/wiki/Custom-receivers)
Example:
import log "github.com/cihub/seelog"
func main() {
logger, err := log.LoggerFromConfigAsFile("seelog.xml")
if err != nil {
panic(err)
}
defer logger.Flush()
... use logger ...
}
The "defer" line is important because if you are using asynchronous logger behavior, without this line you may end up losing some
messages when you close your application because they are processed in another non-blocking goroutine. To avoid that you
explicitly defer flushing all messages before closing.
Usage
Logger created using one of the LoggerFrom* funcs can be used directly by calling one of the main log funcs.
Example:
import log "github.com/cihub/seelog"
func main() {
logger, err := log.LoggerFromConfigAsFile("seelog.xml")
if err != nil {
panic(err)
}
defer logger.Flush()
logger.Trace("test")
logger.Debugf("var = %s", "abc")
}
Having loggers as variables is convenient if you are writing your own package with internal logging or if you have
several loggers with different options.
But for most standalone apps it is more convenient to use package level funcs and vars. There is a package level
var 'Current' made for it. You can replace it with another logger using 'ReplaceLogger' and then use package level funcs:
import log "github.com/cihub/seelog"
func main() {
logger, err := log.LoggerFromConfigAsFile("seelog.xml")
if err != nil {
panic(err)
}
log.ReplaceLogger(logger)
defer log.Flush()
log.Trace("test")
log.Debugf("var = %s", "abc")
}
Last lines
log.Trace("test")
log.Debugf("var = %s", "abc")
do the same as
log.Current.Trace("test")
log.Current.Debugf("var = %s", "abc")
In this example the 'Current' logger was replaced using a 'ReplaceLogger' call and became equal to 'logger' variable created from config.
This way you are able to use package level funcs instead of passing the logger variable.
Configuration
The main point of seelog is to configure the logger via config files rather than in code.
The configuration is read by LoggerFrom* funcs. These funcs read xml configuration from different sources and try
to create a logger using it.
All the configuration features are covered in detail in the official wiki: https://github.com/cihub/seelog/wiki.
There are many sections covering different aspects of seelog, but the most important for understanding configs are:
https://github.com/cihub/seelog/wiki/Constraints-and-exceptions
https://github.com/cihub/seelog/wiki/Dispatchers-and-receivers
https://github.com/cihub/seelog/wiki/Formatting
https://github.com/cihub/seelog/wiki/Logger-types
After you understand these concepts, check the 'Reference' section on the main wiki page to get the up-to-date
list of dispatchers, receivers, formats, and logger types.
Here is an example config with all these features:
<seelog type="adaptive" mininterval="2000000" maxinterval="100000000" critmsgcount="500" minlevel="debug">
<exceptions>
<exception filepattern="test*" minlevel="error"/>
</exceptions>
<outputs formatid="all">
<file path="all.log"/>
<filter levels="info">
<console formatid="fmtinfo"/>
</filter>
<filter levels="error,critical" formatid="fmterror">
<console/>
<file path="errors.log"/>
</filter>
</outputs>
<formats>
<format id="fmtinfo" format="[%Level] [%Time] %Msg%n"/>
<format id="fmterror" format="[%LEVEL] [%Time] [%FuncShort @ %File.%Line] %Msg%n"/>
<format id="all" format="[%Level] [%Time] [@ %File.%Line] %Msg%n"/>
<format id="criticalemail" format="Critical error on our server!\n %Time %Date %RelFile %Func %Msg \nSent by Seelog"/>
</formats>
</seelog>
This config represents a logger with adaptive timeout between log messages (check logger types reference) which
logs to console, all.log, and errors.log depending on the log level. Its output formats also depend on log level. This logger will only
use log level 'debug' and higher (minlevel is set) for all files with names that don't start with 'test'. For files starting with 'test'
this logger prohibits all levels below 'error'.
Configuration using code
Although configuration using code is not recommended, it is sometimes needed and it is possible to do with seelog. Basically, what
you need to do to get started is to create constraints, exceptions and a dispatcher tree (same as with config). Most of the New*
functions in this package are used to provide such capabilities.
Here is an example of configuration in code, that demonstrates an async loop logger that logs to a simple split dispatcher with
a console receiver using a specified format and is filtered using top-level min-max constraints and one exception for
the 'main.go' file. So, this is basically a demonstration of configuration of most of the features:
package main
import log "github.com/cihub/seelog"
func main() {
defer log.Flush()
log.Info("Hello from Seelog!")
consoleWriter, _ := log.NewConsoleWriter()
formatter, _ := log.NewFormatter("%Level %Msg %File%n")
root, _ := log.NewSplitDispatcher(formatter, []interface{}{consoleWriter})
constraints, _ := log.NewMinMaxConstraints(log.TraceLvl, log.CriticalLvl)
specificConstraints, _ := log.NewListConstraints([]log.LogLevel{log.InfoLvl, log.ErrorLvl})
ex, _ := log.NewLogLevelException("*", "*main.go", specificConstraints)
exceptions := []*log.LogLevelException{ex}
logger := log.NewAsyncLoopLogger(log.NewLoggerConfig(constraints, exceptions, root))
log.ReplaceLogger(logger)
log.Trace("This should not be seen")
log.Debug("This should not be seen")
log.Info("Test")
log.Error("Test2")
}
Examples
To learn seelog features faster you should check the examples package: https://github.com/cihub/seelog-examples
It contains many example configs and usecases.
*/
package seelog

466 vendor/github.com/cihub/seelog/format.go generated vendored Normal file

@@ -0,0 +1,466 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"bytes"
"errors"
"fmt"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
// FormatterSymbol is a special symbol used in config files to mark special format aliases.
const (
FormatterSymbol = '%'
)
const (
formatterParameterStart = '('
formatterParameterEnd = ')'
)
// Time and date formats used for %Date and %Time aliases.
const (
DateDefaultFormat = "2006-01-02"
TimeFormat = "15:04:05"
)
var DefaultMsgFormat = "%Ns [%Level] %Msg%n"
var (
DefaultFormatter *formatter
msgonlyformatter *formatter
)
func init() {
var err error
if DefaultFormatter, err = NewFormatter(DefaultMsgFormat); err != nil {
reportInternalError(fmt.Errorf("error during creating DefaultFormatter: %s", err))
}
if msgonlyformatter, err = NewFormatter("%Msg"); err != nil {
reportInternalError(fmt.Errorf("error during creating msgonlyformatter: %s", err))
}
}
// FormatterFunc represents one formatter object that starts with '%' sign in the 'format' attribute
// of the 'format' config item. These special symbols are replaced with context values or special
// strings when message is written to byte receiver.
//
// Check https://github.com/cihub/seelog/wiki/Formatting for details.
// Full list (with descriptions) of formatters: https://github.com/cihub/seelog/wiki/Format-reference
//
// FormatterFunc takes raw log message, level, log context and returns a string, number (of any type) or any object
// that can be evaluated as string.
type FormatterFunc func(message string, level LogLevel, context LogContextInterface) interface{}
// FormatterFuncCreator is a factory of FormatterFunc objects. It is used to generate parameterized
// formatters (such as %Date or %EscM) and custom user formatters.
type FormatterFuncCreator func(param string) FormatterFunc
var formatterFuncs = map[string]FormatterFunc{
"Level": formatterLevel,
"Lev": formatterLev,
"LEVEL": formatterLEVEL,
"LEV": formatterLEV,
"l": formatterl,
"Msg": formatterMsg,
"FullPath": formatterFullPath,
"File": formatterFile,
"RelFile": formatterRelFile,
"Func": FormatterFunction,
"FuncShort": FormatterFunctionShort,
"Line": formatterLine,
"Time": formatterTime,
"UTCTime": formatterUTCTime,
"Ns": formatterNs,
"UTCNs": formatterUTCNs,
"r": formatterr,
"n": formattern,
"t": formattert,
}
var formatterFuncsParameterized = map[string]FormatterFuncCreator{
"Date": createDateTimeFormatterFunc,
"UTCDate": createUTCDateTimeFormatterFunc,
"EscM": createANSIEscapeFunc,
}
func errorAliasReserved(name string) error {
return fmt.Errorf("cannot use '%s' as custom formatter name. Name is reserved", name)
}
// RegisterCustomFormatter registers a new custom formatter factory with a given name. If returned error is nil,
// then this name (prepended by '%' symbol) can be used in 'format' attributes in configuration and
// it will be treated like the standard parameterized formatter identifiers.
//
// RegisterCustomFormatter needs to be called before creating a logger for it to take effect. The general recommendation
// is to call it once in 'init' func of your application or any initializer func.
//
// For usage examples, check https://github.com/cihub/seelog/wiki/Custom-formatters.
//
// Name must only consist of letters (unicode.IsLetter).
//
// Name must not be one of the already registered standard formatter names
// (https://github.com/cihub/seelog/wiki/Format-reference) and previously registered
// custom format names. To avoid any potential name conflicts (in future releases), it is recommended
// to start your custom formatter name with a namespace (e.g. 'MyCompanySomething') or a 'Custom' keyword.
func RegisterCustomFormatter(name string, creator FormatterFuncCreator) error {
if _, ok := formatterFuncs[name]; ok {
return errorAliasReserved(name)
}
if _, ok := formatterFuncsParameterized[name]; ok {
return errorAliasReserved(name)
}
formatterFuncsParameterized[name] = creator
return nil
}
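
For example, a trivial parameterless custom formatter might be registered like this; the "AppTag" name and the returned value are hypothetical placeholders, and LoggerFromWriterWithMinLevelAndFormat is one of the constructors listed in doc.go:

    package main

    import (
        "os"

        log "github.com/cihub/seelog"
    )

    func main() {
        // Register before any logger is created; afterwards "%AppTag" is usable in format strings.
        log.RegisterCustomFormatter("AppTag", func(param string) log.FormatterFunc {
            return func(message string, level log.LogLevel, context log.LogContextInterface) interface{} {
                return "myapp" // hypothetical tag inserted into every line
            }
        })
        logger, _ := log.LoggerFromWriterWithMinLevelAndFormat(os.Stdout, log.InfoLvl, "%AppTag [%Level] %Msg%n")
        defer logger.Flush()
        logger.Info("custom formatter in action")
    }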
// formatter is used to write messages in a specific format, inserting such additional data
// as log level, date/time, etc.
type formatter struct {
fmtStringOriginal string
fmtString string
formatterFuncs []FormatterFunc
}
// NewFormatter creates a new formatter using a format string
func NewFormatter(formatString string) (*formatter, error) {
fmtr := new(formatter)
fmtr.fmtStringOriginal = formatString
if err := buildFormatterFuncs(fmtr); err != nil {
return nil, err
}
return fmtr, nil
}
func buildFormatterFuncs(formatter *formatter) error {
var (
fsbuf = new(bytes.Buffer)
fsolm1 = len(formatter.fmtStringOriginal) - 1
)
for i := 0; i <= fsolm1; i++ {
if char := formatter.fmtStringOriginal[i]; char != FormatterSymbol {
fsbuf.WriteByte(char)
continue
}
// Check if the index is at the end of the string.
if i == fsolm1 {
return fmt.Errorf("format error: %c cannot be last symbol", FormatterSymbol)
}
// Check if the formatter symbol is doubled and skip it as nonmatching.
if formatter.fmtStringOriginal[i+1] == FormatterSymbol {
fsbuf.WriteRune(FormatterSymbol)
i++
continue
}
function, ni, err := formatter.extractFormatterFunc(i + 1)
if err != nil {
return err
}
// Append formatting string "%v".
fsbuf.Write([]byte{37, 118})
i = ni
formatter.formatterFuncs = append(formatter.formatterFuncs, function)
}
formatter.fmtString = fsbuf.String()
return nil
}
func (formatter *formatter) extractFormatterFunc(index int) (FormatterFunc, int, error) {
letterSequence := formatter.extractLetterSequence(index)
if len(letterSequence) == 0 {
return nil, 0, fmt.Errorf("format error: lack of formatter after %c at %d", FormatterSymbol, index)
}
function, formatterLength, ok := formatter.findFormatterFunc(letterSequence)
if ok {
return function, index + formatterLength - 1, nil
}
function, formatterLength, ok, err := formatter.findFormatterFuncParametrized(letterSequence, index)
if err != nil {
return nil, 0, err
}
if ok {
return function, index + formatterLength - 1, nil
}
return nil, 0, errors.New("format error: unrecognized formatter at " + strconv.Itoa(index) + ": " + letterSequence)
}
func (formatter *formatter) extractLetterSequence(index int) string {
letters := ""
bytesToParse := []byte(formatter.fmtStringOriginal[index:])
runeCount := utf8.RuneCount(bytesToParse)
for i := 0; i < runeCount; i++ {
rune, runeSize := utf8.DecodeRune(bytesToParse)
bytesToParse = bytesToParse[runeSize:]
if unicode.IsLetter(rune) {
letters += string(rune)
} else {
break
}
}
return letters
}
func (formatter *formatter) findFormatterFunc(letters string) (FormatterFunc, int, bool) {
currentVerb := letters
for i := 0; i < len(letters); i++ {
function, ok := formatterFuncs[currentVerb]
if ok {
return function, len(currentVerb), ok
}
currentVerb = currentVerb[:len(currentVerb)-1]
}
return nil, 0, false
}
func (formatter *formatter) findFormatterFuncParametrized(letters string, lettersStartIndex int) (FormatterFunc, int, bool, error) {
currentVerb := letters
for i := 0; i < len(letters); i++ {
functionCreator, ok := formatterFuncsParameterized[currentVerb]
if ok {
parameter := ""
parameterLen := 0
isVerbEqualsLetters := i == 0 // if not, then letter goes after formatter, and formatter is parameterless
if isVerbEqualsLetters {
userParameter := ""
var err error
userParameter, parameterLen, ok, err = formatter.findparameter(lettersStartIndex + len(currentVerb))
if ok {
parameter = userParameter
} else if err != nil {
return nil, 0, false, err
}
}
return functionCreator(parameter), len(currentVerb) + parameterLen, true, nil
}
currentVerb = currentVerb[:len(currentVerb)-1]
}
return nil, 0, false, nil
}
func (formatter *formatter) findparameter(startIndex int) (string, int, bool, error) {
if len(formatter.fmtStringOriginal) == startIndex || formatter.fmtStringOriginal[startIndex] != formatterParameterStart {
return "", 0, false, nil
}
endIndex := strings.Index(formatter.fmtStringOriginal[startIndex:], string(formatterParameterEnd))
if endIndex == -1 {
return "", 0, false, fmt.Errorf("Unmatched parenthesis or invalid parameter at %d: %s",
startIndex, formatter.fmtStringOriginal[startIndex:])
}
endIndex += startIndex
length := endIndex - startIndex + 1
return formatter.fmtStringOriginal[startIndex+1 : endIndex], length, true, nil
}
// Format processes a message with special formatters, log level, and context. Returns formatted string
// with all formatter identifiers changed to appropriate values.
func (formatter *formatter) Format(message string, level LogLevel, context LogContextInterface) string {
if len(formatter.formatterFuncs) == 0 {
return formatter.fmtString
}
params := make([]interface{}, len(formatter.formatterFuncs))
for i, function := range formatter.formatterFuncs {
params[i] = function(message, level, context)
}
return fmt.Sprintf(formatter.fmtString, params...)
}
func (formatter *formatter) String() string {
return formatter.fmtStringOriginal
}
//=====================================================
const (
wrongLogLevel = "WRONG_LOGLEVEL"
wrongEscapeCode = "WRONG_ESCAPE"
)
var levelToString = map[LogLevel]string{
TraceLvl: "Trace",
DebugLvl: "Debug",
InfoLvl: "Info",
WarnLvl: "Warn",
ErrorLvl: "Error",
CriticalLvl: "Critical",
Off: "Off",
}
var levelToShortString = map[LogLevel]string{
TraceLvl: "Trc",
DebugLvl: "Dbg",
InfoLvl: "Inf",
WarnLvl: "Wrn",
ErrorLvl: "Err",
CriticalLvl: "Crt",
Off: "Off",
}
var levelToShortestString = map[LogLevel]string{
TraceLvl: "t",
DebugLvl: "d",
InfoLvl: "i",
WarnLvl: "w",
ErrorLvl: "e",
CriticalLvl: "c",
Off: "o",
}
func formatterLevel(message string, level LogLevel, context LogContextInterface) interface{} {
levelStr, ok := levelToString[level]
if !ok {
return wrongLogLevel
}
return levelStr
}
func formatterLev(message string, level LogLevel, context LogContextInterface) interface{} {
levelStr, ok := levelToShortString[level]
if !ok {
return wrongLogLevel
}
return levelStr
}
func formatterLEVEL(message string, level LogLevel, context LogContextInterface) interface{} {
return strings.ToTitle(formatterLevel(message, level, context).(string))
}
func formatterLEV(message string, level LogLevel, context LogContextInterface) interface{} {
return strings.ToTitle(formatterLev(message, level, context).(string))
}
func formatterl(message string, level LogLevel, context LogContextInterface) interface{} {
levelStr, ok := levelToShortestString[level]
if !ok {
return wrongLogLevel
}
return levelStr
}
func formatterMsg(message string, level LogLevel, context LogContextInterface) interface{} {
return message
}
func formatterFullPath(message string, level LogLevel, context LogContextInterface) interface{} {
return context.FullPath()
}
func formatterFile(message string, level LogLevel, context LogContextInterface) interface{} {
return context.FileName()
}
func formatterRelFile(message string, level LogLevel, context LogContextInterface) interface{} {
return context.ShortPath()
}
func FormatterFunction(message string, level LogLevel, context LogContextInterface) interface{} {
return context.Func()
}
func FormatterFunctionShort(message string, level LogLevel, context LogContextInterface) interface{} {
f := context.Func()
spl := strings.Split(f, ".")
return spl[len(spl)-1]
}
func formatterLine(message string, level LogLevel, context LogContextInterface) interface{} {
return context.Line()
}
func formatterTime(message string, level LogLevel, context LogContextInterface) interface{} {
return context.CallTime().Format(TimeFormat)
}
func formatterUTCTime(message string, level LogLevel, context LogContextInterface) interface{} {
return context.CallTime().UTC().Format(TimeFormat)
}
func formatterNs(message string, level LogLevel, context LogContextInterface) interface{} {
return context.CallTime().UnixNano()
}
func formatterUTCNs(message string, level LogLevel, context LogContextInterface) interface{} {
return context.CallTime().UTC().UnixNano()
}
func formatterr(message string, level LogLevel, context LogContextInterface) interface{} {
return "\r"
}
func formattern(message string, level LogLevel, context LogContextInterface) interface{} {
return "\n"
}
func formattert(message string, level LogLevel, context LogContextInterface) interface{} {
return "\t"
}
func createDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc {
format := dateTimeFormat
if format == "" {
format = DateDefaultFormat
}
return func(message string, level LogLevel, context LogContextInterface) interface{} {
return context.CallTime().Format(format)
}
}
func createUTCDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc {
format := dateTimeFormat
if format == "" {
format = DateDefaultFormat
}
return func(message string, level LogLevel, context LogContextInterface) interface{} {
return context.CallTime().UTC().Format(format)
}
}
func createANSIEscapeFunc(escapeCodeString string) FormatterFunc {
return func(message string, level LogLevel, context LogContextInterface) interface{} {
if len(escapeCodeString) == 0 {
return wrongEscapeCode
}
return fmt.Sprintf("%c[%sm", 0x1B, escapeCodeString)
}
}
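As a hedged, usage-level illustration of the formatter machinery above (not part of the vendored file): the custom formatter name, the hostname logic, and the config elements and format verbs (%Date, %LEV, %Msg, %n, <outputs>/<formats>) are assumptions taken from the seelog format reference linked in the comments; the FormatterFuncCreator and FormatterFunc signatures follow the lookup code in this file.

package main

import (
	"os"

	log "github.com/cihub/seelog"
)

// Hypothetical custom formatter: emits the local hostname and ignores its
// parameter. Registered names become available as %CustomHost in format strings.
func registerHostFormatter() error {
	return log.RegisterCustomFormatter("CustomHost", func(param string) log.FormatterFunc {
		return func(message string, level log.LogLevel, context log.LogContextInterface) interface{} {
			host, err := os.Hostname()
			if err != nil {
				return "unknown-host"
			}
			return host
		}
	})
}

// Assumed config shape per the seelog format reference; %Date(...) is a
// parameterized verb handled by createDateTimeFormatterFunc above, and the
// (parameter) syntax is parsed by findparameter.
const cfg = `
<seelog>
    <outputs formatid="main"><console/></outputs>
    <formats>
        <format id="main" format="%Date(2006-01-02 15:04:05) %CustomHost [%LEV] %Msg%n"/>
    </formats>
</seelog>`

func main() {
	if err := registerHostFormatter(); err != nil {
		panic(err)
	}
	logger, err := log.LoggerFromConfigAsString(cfg)
	if err != nil {
		panic(err)
	}
	defer logger.Flush()
	logger.Info("formatted with a parameterized date verb and a custom formatter")
}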

10
vendor/github.com/cihub/seelog/internals_baseerror.go generated vendored Normal file
View File

@@ -0,0 +1,10 @@
package seelog
// Base struct for custom errors.
type baseError struct {
message string
}
func (be baseError) Error() string {
return be.message
}

320
vendor/github.com/cihub/seelog/internals_fsutils.go generated vendored Normal file
View File

@@ -0,0 +1,320 @@
package seelog
import (
"fmt"
"io"
"os"
"path/filepath"
"sync"
)
// File and directory permissions.
const (
defaultFilePermissions = 0666
defaultDirectoryPermissions = 0767
)
const (
// Max number of directories that can be read asynchronously.
maxDirNumberReadAsync = 1000
)
type cannotOpenFileError struct {
baseError
}
func newCannotOpenFileError(fname string) *cannotOpenFileError {
return &cannotOpenFileError{baseError{message: "Cannot open file: " + fname}}
}
type notDirectoryError struct {
baseError
}
func newNotDirectoryError(dname string) *notDirectoryError {
return &notDirectoryError{baseError{message: dname + " is not directory"}}
}
// fileFilter is a filtering criteria function for '*os.File'.
// Must return 'false' to set aside the given file.
type fileFilter func(os.FileInfo, *os.File) bool
// filePathFilter is a filtering criteria function for a file path.
// Must return 'false' to set aside the given path.
type filePathFilter func(filePath string) bool
// getSubdirNames returns the names of the directories found in
// the directory located at dirPath.
func getSubdirNames(dirPath string) ([]string, error) {
fi, err := os.Stat(dirPath)
if err != nil {
return nil, err
}
if !fi.IsDir() {
return nil, newNotDirectoryError(dirPath)
}
dd, err := os.Open(dirPath)
// Cannot open file.
if err != nil {
if dd != nil {
dd.Close()
}
return nil, err
}
defer dd.Close()
// TODO: Improve performance by buffering reading.
allEntities, err := dd.Readdir(-1)
if err != nil {
return nil, err
}
subDirs := []string{}
for _, entity := range allEntities {
if entity.IsDir() {
subDirs = append(subDirs, entity.Name())
}
}
return subDirs, nil
}
// getAllSubdirAbsPaths recursively visits all the subdirectories
// starting from the given directory and returns their absolute paths.
func getAllSubdirAbsPaths(dirPath string) (res []string, err error) {
dps, err := getSubdirAbsPaths(dirPath)
if err != nil {
res = []string{}
return
}
res = append(res, dps...)
for _, dp := range dps {
sdps, err := getAllSubdirAbsPaths(dp)
if err != nil {
return []string{}, err
}
res = append(res, sdps...)
}
return
}
// getSubdirAbsPaths supplies absolute paths for all subdirectories in a given directory.
// Input: (I1) dirPath - absolute path of the directory in question.
// Out: (O1) - slice of subdir absolute paths; (O2) - error of the operation.
// Remark: If error (O2) is non-nil then (O1) is nil and vice versa.
func getSubdirAbsPaths(dirPath string) ([]string, error) {
sdns, err := getSubdirNames(dirPath)
if err != nil {
return nil, err
}
rsdns := []string{}
for _, sdn := range sdns {
rsdns = append(rsdns, filepath.Join(dirPath, sdn))
}
return rsdns, nil
}
// getOpenFilesInDir supplies a slice of os.File pointers to files located in the directory.
// Remark: Ignores files for which fileFilter returns false
func getOpenFilesInDir(dirPath string, fFilter fileFilter) ([]*os.File, error) {
dfi, err := os.Open(dirPath)
if err != nil {
return nil, newCannotOpenFileError("Cannot open directory " + dirPath)
}
defer dfi.Close()
// Size of read buffer (i.e. chunk of items read at a time).
rbs := 64
resFiles := []*os.File{}
L:
for {
// Read directory entries in reasonably sized chunks
// to prevent overflows on a big number of files.
fis, e := dfi.Readdir(rbs)
switch e {
// It's OK.
case nil:
// Do nothing, just continue cycle.
case io.EOF:
break L
// Something went wrong.
default:
return nil, e
}
// THINK: Maybe, use async running.
for _, fi := range fis {
// NB: On Linux this could be a problem as
// there are lots of file types available.
if !fi.IsDir() {
f, e := os.Open(filepath.Join(dirPath, fi.Name()))
if e != nil {
if f != nil {
f.Close()
}
// THINK: Add nil as indicator that a problem occurred.
resFiles = append(resFiles, nil)
continue
}
// Check filter condition.
if fFilter != nil && !fFilter(fi, f) {
continue
}
resFiles = append(resFiles, f)
}
}
}
return resFiles, nil
}
func isRegular(m os.FileMode) bool {
return m&os.ModeType == 0
}
// getDirFilePaths returns the full paths of the files located in the directory.
// Remark: Ignores files for which fpFilter returns false.
func getDirFilePaths(dirPath string, fpFilter filePathFilter, pathIsName bool) ([]string, error) {
dfi, err := os.Open(dirPath)
if err != nil {
return nil, newCannotOpenFileError("Cannot open directory " + dirPath)
}
defer dfi.Close()
var absDirPath string
if !filepath.IsAbs(dirPath) {
absDirPath, err = filepath.Abs(dirPath)
if err != nil {
return nil, fmt.Errorf("cannot get absolute path of directory: %s", err.Error())
}
} else {
absDirPath = dirPath
}
// TODO: check if dirPath is really directory.
// Size of read buffer (i.e. chunk of items read at a time).
rbs := 2 << 5
filePaths := []string{}
var fp string
L:
for {
// Read directory entries in reasonably sized chunks
// to prevent overflows on a big number of files.
fis, e := dfi.Readdir(rbs)
switch e {
// It's OK.
case nil:
// Do nothing, just continue cycle.
case io.EOF:
break L
// Indicate that something went wrong.
default:
return nil, e
}
// THINK: Maybe, use async running.
for _, fi := range fis {
// NB: Should work on every Windows and non-Windows OS.
if isRegular(fi.Mode()) {
if pathIsName {
fp = fi.Name()
} else {
// Build full path of a file.
fp = filepath.Join(absDirPath, fi.Name())
}
// Check filter condition.
if fpFilter != nil && !fpFilter(fp) {
continue
}
filePaths = append(filePaths, fp)
}
}
}
return filePaths, nil
}
// getOpenFilesByDirectoryAsync reads the directories in 'dirPaths' asynchronously and inserts pairs
// into the map 'filesInDirMap': key - directory name, value - slice of *os.File.
func getOpenFilesByDirectoryAsync(
dirPaths []string,
fFilter fileFilter,
filesInDirMap map[string][]*os.File,
) error {
n := len(dirPaths)
if n > maxDirNumberReadAsync {
return fmt.Errorf("number of input directories to be read exceeded max value %d", maxDirNumberReadAsync)
}
type filesInDirResult struct {
DirName string
Files []*os.File
Error error
}
dirFilesChan := make(chan *filesInDirResult, n)
var wg sync.WaitGroup
// Register n goroutines which are going to do work.
wg.Add(n)
for i := 0; i < n; i++ {
// Launch asynchronously the piece of work.
go func(dirPath string) {
fs, e := getOpenFilesInDir(dirPath, fFilter)
dirFilesChan <- &filesInDirResult{filepath.Base(dirPath), fs, e}
// Mark the current goroutine as finished (work is done).
wg.Done()
}(dirPaths[i])
}
// Wait for all goroutines to finish their work.
wg.Wait()
// Close the results channel so the for-range below can drain
// the buffered values without blocking and then terminate.
close(dirFilesChan)
for fidr := range dirFilesChan {
if fidr.Error == nil {
// THINK: What will happen if the key is already present?
filesInDirMap[fidr.DirName] = fidr.Files
} else {
return fidr.Error
}
}
return nil
}
// fileExists returns whether a given file exists,
// plus an error if an unclassified failure occurs.
func fileExists(path string) (bool, error) {
_, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
return true, nil
}
// createDirectory creates a directory with the given name,
// creating all parent directories if necessary.
func createDirectory(dirPath string) error {
var dPath string
var err error
if !filepath.IsAbs(dirPath) {
dPath, err = filepath.Abs(dirPath)
if err != nil {
return err
}
} else {
dPath = dirPath
}
exists, err := fileExists(dPath)
if err != nil {
return err
}
if exists {
return nil
}
return os.MkdirAll(dPath, os.ModeDir)
}
// tryRemoveFile attempts to remove the file,
// ignoring the error when the file does not exist.
func tryRemoveFile(filePath string) (err error) {
err = os.Remove(filePath)
if os.IsNotExist(err) {
err = nil
return
}
return
}
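getOpenFilesByDirectoryAsync above follows a standard fan-out: one goroutine per directory, results sent over a channel buffered to the number of producers, and a WaitGroup that gates closing the channel. A minimal, self-contained sketch of the same pattern with a stand-in unit of work:

package main

import (
	"fmt"
	"strings"
	"sync"
)

type result struct {
	Name  string
	Value string
	Err   error
}

func main() {
	inputs := []string{"a", "b", "c"}         // stand-ins for directory paths
	results := make(chan result, len(inputs)) // buffered so senders never block
	var wg sync.WaitGroup
	wg.Add(len(inputs))
	for _, in := range inputs {
		go func(in string) {
			defer wg.Done()
			// Stand-in unit of work; getOpenFilesInDir plays this role above.
			results <- result{Name: in, Value: strings.ToUpper(in)}
		}(in)
	}
	wg.Wait()
	close(results) // lets the range below drain the buffer and stop
	for r := range results {
		if r.Err != nil {
			fmt.Println("error:", r.Err)
			continue
		}
		fmt.Println(r.Name, "->", r.Value)
	}
}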

175
vendor/github.com/cihub/seelog/internals_xmlnode.go generated vendored Normal file
View File

@@ -0,0 +1,175 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"encoding/xml"
"errors"
"fmt"
"io"
"strings"
)
type xmlNode struct {
name string
attributes map[string]string
children []*xmlNode
value string
}
func newNode() *xmlNode {
node := new(xmlNode)
node.children = make([]*xmlNode, 0)
node.attributes = make(map[string]string)
return node
}
func (node *xmlNode) String() string {
str := fmt.Sprintf("<%s", node.name)
for attrName, attrVal := range node.attributes {
str += fmt.Sprintf(" %s=\"%s\"", attrName, attrVal)
}
str += ">"
str += node.value
if len(node.children) != 0 {
for _, child := range node.children {
str += fmt.Sprintf("%s", child)
}
}
str += fmt.Sprintf("</%s>", node.name)
return str
}
func (node *xmlNode) unmarshal(startEl xml.StartElement) error {
node.name = startEl.Name.Local
for _, v := range startEl.Attr {
_, alreadyExists := node.attributes[v.Name.Local]
if alreadyExists {
return errors.New("tag '" + node.name + "' has duplicated attribute: '" + v.Name.Local + "'")
}
node.attributes[v.Name.Local] = v.Value
}
return nil
}
func (node *xmlNode) add(child *xmlNode) {
if node.children == nil {
node.children = make([]*xmlNode, 0)
}
node.children = append(node.children, child)
}
func (node *xmlNode) hasChildren() bool {
return node.children != nil && len(node.children) > 0
}
//=============================================
func unmarshalConfig(reader io.Reader) (*xmlNode, error) {
xmlParser := xml.NewDecoder(reader)
config, err := unmarshalNode(xmlParser, nil)
if err != nil {
return nil, err
}
if config == nil {
return nil, errors.New("xml has no content")
}
nextConfigEntry, err := unmarshalNode(xmlParser, nil)
if nextConfigEntry != nil {
return nil, errors.New("xml contains more than one root element")
}
return config, nil
}
func unmarshalNode(xmlParser *xml.Decoder, curToken xml.Token) (node *xmlNode, err error) {
firstLoop := true
for {
var tok xml.Token
if firstLoop && curToken != nil {
tok = curToken
firstLoop = false
} else {
tok, err = getNextToken(xmlParser)
if err != nil || tok == nil {
return
}
}
switch tt := tok.(type) {
case xml.SyntaxError:
err = errors.New(tt.Error())
return
case xml.CharData:
value := strings.TrimSpace(string([]byte(tt)))
if node != nil {
node.value += value
}
case xml.StartElement:
if node == nil {
node = newNode()
err := node.unmarshal(tt)
if err != nil {
return nil, err
}
} else {
childNode, childErr := unmarshalNode(xmlParser, tok)
if childErr != nil {
return nil, childErr
}
if childNode != nil {
node.add(childNode)
} else {
return
}
}
case xml.EndElement:
return
}
}
}
func getNextToken(xmlParser *xml.Decoder) (tok xml.Token, err error) {
if tok, err = xmlParser.Token(); err != nil {
if err == io.EOF {
err = nil
return
}
return
}
return
}
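unmarshalConfig above insists on exactly one root element and accumulates character data into node.value. A small sketch of how the single-root rule surfaces through the exported LoggerFromConfigAsString constructor (not shown in this hunk), which feeds this parser:

package main

import (
	"fmt"

	log "github.com/cihub/seelog"
)

func main() {
	// Valid: exactly one root element, as unmarshalConfig requires.
	if _, err := log.LoggerFromConfigAsString(`<seelog levels="info"/>`); err != nil {
		fmt.Println("unexpected:", err)
	}
	// Invalid: two root elements; unmarshalConfig rejects this with
	// "xml contains more than one root element".
	if _, err := log.LoggerFromConfigAsString(`<seelog/><seelog/>`); err != nil {
		fmt.Println("rejected as expected:", err)
	}
}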

307
vendor/github.com/cihub/seelog/log.go generated vendored Normal file
View File

@@ -0,0 +1,307 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"sync"
"time"
)
const (
staticFuncCallDepth = 3 // See 'commonLogger.log' method comments
loggerFuncCallDepth = 3
)
// Current is the logger used in all package level convenience funcs like 'Trace', 'Debug', 'Flush', etc.
var Current LoggerInterface
// Default logger that is created from an empty config: "<seelog/>". It is not closed by a ReplaceLogger call.
var Default LoggerInterface
// Disabled logger that doesn't produce any output in any circumstances. It is neither closed nor flushed by a ReplaceLogger call.
var Disabled LoggerInterface
var pkgOperationsMutex *sync.Mutex
func init() {
pkgOperationsMutex = new(sync.Mutex)
var err error
if Default == nil {
Default, err = LoggerFromConfigAsBytes([]byte("<seelog />"))
}
if Disabled == nil {
Disabled, err = LoggerFromConfigAsBytes([]byte("<seelog levels=\"off\"/>"))
}
if err != nil {
panic(fmt.Sprintf("Seelog couldn't start. Error: %s", err.Error()))
}
Current = Default
}
func createLoggerFromFullConfig(config *configForParsing) (LoggerInterface, error) {
if config.LogType == syncloggerTypeFromString {
return NewSyncLogger(&config.logConfig), nil
} else if config.LogType == asyncLooploggerTypeFromString {
return NewAsyncLoopLogger(&config.logConfig), nil
} else if config.LogType == asyncTimerloggerTypeFromString {
logData := config.LoggerData
if logData == nil {
return nil, errors.New("async timer data not set")
}
asyncInt, ok := logData.(asyncTimerLoggerData)
if !ok {
return nil, errors.New("invalid async timer data")
}
logger, err := NewAsyncTimerLogger(&config.logConfig, time.Duration(asyncInt.AsyncInterval))
if err != nil {
return nil, err
}
return logger, nil
} else if config.LogType == adaptiveLoggerTypeFromString {
logData := config.LoggerData
if logData == nil {
return nil, errors.New("adaptive logger parameters not set")
}
adaptData, ok := logData.(adaptiveLoggerData)
if !ok {
return nil, errors.New("invalid adaptive logger parameters")
}
logger, err := NewAsyncAdaptiveLogger(
&config.logConfig,
time.Duration(adaptData.MinInterval),
time.Duration(adaptData.MaxInterval),
adaptData.CriticalMsgCount,
)
if err != nil {
return nil, err
}
return logger, nil
}
return nil, errors.New("invalid config log type/data")
}
// UseLogger sets the 'Current' package level logger variable to the specified value.
// This variable is used in all Trace/Debug/... package level convenience funcs.
//
// Example:
//
// after calling
// seelog.UseLogger(somelogger)
// the following:
// seelog.Debug("abc")
// will be equal to
// somelogger.Debug("abc")
//
// IMPORTANT: UseLogger does NOT close the previous logger (it only flushes it). So if
// you constantly use it to replace loggers and don't close them elsewhere, you'll
// end up with memory leaks.
//
// To safely replace loggers, use ReplaceLogger.
func UseLogger(logger LoggerInterface) error {
if logger == nil {
return errors.New("logger can not be nil")
}
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
oldLogger := Current
Current = logger
if oldLogger != nil {
oldLogger.Flush()
}
return nil
}
// ReplaceLogger acts as UseLogger but the logger that was previously
// used is disposed (except Default and Disabled loggers).
//
// Example:
// import log "github.com/cihub/seelog"
//
// func main() {
// logger, err := log.LoggerFromConfigAsFile("seelog.xml")
//
// if err != nil {
// panic(err)
// }
//
// log.ReplaceLogger(logger)
// defer log.Flush()
//
// log.Trace("test")
// log.Debugf("var = %s", "abc")
// }
func ReplaceLogger(logger LoggerInterface) error {
if logger == nil {
return errors.New("logger can not be nil")
}
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
defer func() {
if err := recover(); err != nil {
reportInternalError(fmt.Errorf("recovered from panic during ReplaceLogger: %s", err))
}
}()
if Current == Default {
Current.Flush()
} else if Current != nil && !Current.Closed() && Current != Disabled {
Current.Flush()
Current.Close()
}
Current = logger
return nil
}
// Tracef formats message according to format specifier
// and writes to default logger with log level = Trace.
func Tracef(format string, params ...interface{}) {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
Current.traceWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params))
}
// Debugf formats message according to format specifier
// and writes to default logger with log level = Debug.
func Debugf(format string, params ...interface{}) {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
Current.debugWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params))
}
// Infof formats message according to format specifier
// and writes to default logger with log level = Info.
func Infof(format string, params ...interface{}) {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
Current.infoWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params))
}
// Warnf formats message according to format specifier and writes to default logger with log level = Warn
func Warnf(format string, params ...interface{}) error {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
message := newLogFormattedMessage(format, params)
Current.warnWithCallDepth(staticFuncCallDepth, message)
return errors.New(message.String())
}
// Errorf formats message according to format specifier and writes to default logger with log level = Error
func Errorf(format string, params ...interface{}) error {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
message := newLogFormattedMessage(format, params)
Current.errorWithCallDepth(staticFuncCallDepth, message)
return errors.New(message.String())
}
// Criticalf formats message according to format specifier and writes to default logger with log level = Critical
func Criticalf(format string, params ...interface{}) error {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
message := newLogFormattedMessage(format, params)
Current.criticalWithCallDepth(staticFuncCallDepth, message)
return errors.New(message.String())
}
// Trace formats message using the default formats for its operands and writes to default logger with log level = Trace
func Trace(v ...interface{}) {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
Current.traceWithCallDepth(staticFuncCallDepth, newLogMessage(v))
}
// Debug formats message using the default formats for its operands and writes to default logger with log level = Debug
func Debug(v ...interface{}) {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
Current.debugWithCallDepth(staticFuncCallDepth, newLogMessage(v))
}
// Info formats message using the default formats for its operands and writes to default logger with log level = Info
func Info(v ...interface{}) {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
Current.infoWithCallDepth(staticFuncCallDepth, newLogMessage(v))
}
// Warn formats message using the default formats for its operands and writes to default logger with log level = Warn
func Warn(v ...interface{}) error {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
message := newLogMessage(v)
Current.warnWithCallDepth(staticFuncCallDepth, message)
return errors.New(message.String())
}
// Error formats message using the default formats for its operands and writes to default logger with log level = Error
func Error(v ...interface{}) error {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
message := newLogMessage(v)
Current.errorWithCallDepth(staticFuncCallDepth, message)
return errors.New(message.String())
}
// Critical formats message using the default formats for its operands and writes to default logger with log level = Critical
func Critical(v ...interface{}) error {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
message := newLogMessage(v)
Current.criticalWithCallDepth(staticFuncCallDepth, message)
return errors.New(message.String())
}
// Flush immediately processes all currently queued messages and all currently buffered messages.
// It is a blocking call which returns only after the queue is empty and all the buffers are empty.
//
// If Flush is called for a synchronous logger (type='sync'), it only flushes buffers
// (e.g. '<buffered>' receivers), because there is no queue.
//
// Call this method when your app is about to shut down so that no log messages are lost.
func Flush() {
pkgOperationsMutex.Lock()
defer pkgOperationsMutex.Unlock()
Current.Flush()
}
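Tying the package-level API together, a minimal caller sketch that mirrors the ReplaceLogger example embedded in the comments above; the config file name is hypothetical:

package main

import log "github.com/cihub/seelog"

func main() {
	logger, err := log.LoggerFromConfigAsFile("seelog.xml") // hypothetical config path
	if err != nil {
		panic(err)
	}
	if err := log.ReplaceLogger(logger); err != nil {
		panic(err)
	}
	defer log.Flush() // drain queues and buffers before exit

	log.Tracef("var = %s", "abc")
	log.Info("started")
	// Warn/Error/Critical additionally hand the formatted message back as an
	// error value that callers can propagate.
	if warnErr := log.Warn("running with default settings"); warnErr != nil {
		_ = warnErr // e.g. return it to the caller
	}
}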

370
vendor/github.com/cihub/seelog/logger.go generated vendored Normal file
View File

@@ -0,0 +1,370 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"os"
"sync"
)
func reportInternalError(err error) {
fmt.Fprintf(os.Stderr, "seelog internal error: %s\n", err)
}
// LoggerInterface represents structs capable of logging Seelog messages
type LoggerInterface interface {
// Tracef formats message according to format specifier
// and writes to log with level = Trace.
Tracef(format string, params ...interface{})
// Debugf formats message according to format specifier
// and writes to log with level = Debug.
Debugf(format string, params ...interface{})
// Infof formats message according to format specifier
// and writes to log with level = Info.
Infof(format string, params ...interface{})
// Warnf formats message according to format specifier
// and writes to log with level = Warn.
Warnf(format string, params ...interface{}) error
// Errorf formats message according to format specifier
// and writes to log with level = Error.
Errorf(format string, params ...interface{}) error
// Criticalf formats message according to format specifier
// and writes to log with level = Critical.
Criticalf(format string, params ...interface{}) error
// Trace formats message using the default formats for its operands
// and writes to log with level = Trace
Trace(v ...interface{})
// Debug formats message using the default formats for its operands
// and writes to log with level = Debug
Debug(v ...interface{})
// Info formats message using the default formats for its operands
// and writes to log with level = Info
Info(v ...interface{})
// Warn formats message using the default formats for its operands
// and writes to log with level = Warn
Warn(v ...interface{}) error
// Error formats message using the default formats for its operands
// and writes to log with level = Error
Error(v ...interface{}) error
// Critical formats message using the default formats for its operands
// and writes to log with level = Critical
Critical(v ...interface{}) error
traceWithCallDepth(callDepth int, message fmt.Stringer)
debugWithCallDepth(callDepth int, message fmt.Stringer)
infoWithCallDepth(callDepth int, message fmt.Stringer)
warnWithCallDepth(callDepth int, message fmt.Stringer)
errorWithCallDepth(callDepth int, message fmt.Stringer)
criticalWithCallDepth(callDepth int, message fmt.Stringer)
// Close flushes all the messages in the logger and closes it. It cannot be used after this operation.
Close()
// Flush flushes all the messages in the logger.
Flush()
// Closed returns true if the logger was previously closed.
Closed() bool
// SetAdditionalStackDepth sets the additional number of frames to skip by runtime.Caller
// when getting function information needed to print seelog format identifiers such as %Func or %File.
//
// This func may be used when you wrap seelog funcs and want to print caller info of you own
// wrappers instead of seelog func callers. In this case you should set depth = 1. If you then
// wrap your wrapper, you should set depth = 2, etc.
//
// NOTE: Incorrect depth value may lead to errors in runtime.Caller evaluation or incorrect
// function/file names in log files. Do not use it if you are not going to wrap seelog funcs.
// You may reset the value to default using a SetAdditionalStackDepth(0) call.
SetAdditionalStackDepth(depth int) error
// Sets logger context that can be used in formatter funcs and custom receivers
SetContext(context interface{})
}
// innerLoggerInterface is an internal logging interface
type innerLoggerInterface interface {
innerLog(level LogLevel, context LogContextInterface, message fmt.Stringer)
Flush()
}
// [file path][func name][level] -> [allowed]
type allowedContextCache map[string]map[string]map[LogLevel]bool
// commonLogger contains all common data needed for logging and contains methods used to log messages.
type commonLogger struct {
config *logConfig // Config used for logging
contextCache allowedContextCache // Caches whether log is enabled for specific "full path-func name-level" sets
closed bool // 'true' when all writers are closed, all data is flushed, logger is unusable. Must be accessed while holding closedM
closedM sync.RWMutex
m sync.Mutex // Mutex for main operations
unusedLevels []bool
innerLogger innerLoggerInterface
addStackDepth int // Additional stack depth needed for correct seelog caller context detection
customContext interface{}
}
func newCommonLogger(config *logConfig, internalLogger innerLoggerInterface) *commonLogger {
cLogger := new(commonLogger)
cLogger.config = config
cLogger.contextCache = make(allowedContextCache)
cLogger.unusedLevels = make([]bool, Off)
cLogger.fillUnusedLevels()
cLogger.innerLogger = internalLogger
return cLogger
}
func (cLogger *commonLogger) SetAdditionalStackDepth(depth int) error {
if depth < 0 {
return fmt.Errorf("negative depth: %d", depth)
}
cLogger.m.Lock()
cLogger.addStackDepth = depth
cLogger.m.Unlock()
return nil
}
func (cLogger *commonLogger) Tracef(format string, params ...interface{}) {
cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params))
}
func (cLogger *commonLogger) Debugf(format string, params ...interface{}) {
cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params))
}
func (cLogger *commonLogger) Infof(format string, params ...interface{}) {
cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params))
}
func (cLogger *commonLogger) Warnf(format string, params ...interface{}) error {
message := newLogFormattedMessage(format, params)
cLogger.warnWithCallDepth(loggerFuncCallDepth, message)
return errors.New(message.String())
}
func (cLogger *commonLogger) Errorf(format string, params ...interface{}) error {
message := newLogFormattedMessage(format, params)
cLogger.errorWithCallDepth(loggerFuncCallDepth, message)
return errors.New(message.String())
}
func (cLogger *commonLogger) Criticalf(format string, params ...interface{}) error {
message := newLogFormattedMessage(format, params)
cLogger.criticalWithCallDepth(loggerFuncCallDepth, message)
return errors.New(message.String())
}
func (cLogger *commonLogger) Trace(v ...interface{}) {
cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogMessage(v))
}
func (cLogger *commonLogger) Debug(v ...interface{}) {
cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogMessage(v))
}
func (cLogger *commonLogger) Info(v ...interface{}) {
cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogMessage(v))
}
func (cLogger *commonLogger) Warn(v ...interface{}) error {
message := newLogMessage(v)
cLogger.warnWithCallDepth(loggerFuncCallDepth, message)
return errors.New(message.String())
}
func (cLogger *commonLogger) Error(v ...interface{}) error {
message := newLogMessage(v)
cLogger.errorWithCallDepth(loggerFuncCallDepth, message)
return errors.New(message.String())
}
func (cLogger *commonLogger) Critical(v ...interface{}) error {
message := newLogMessage(v)
cLogger.criticalWithCallDepth(loggerFuncCallDepth, message)
return errors.New(message.String())
}
func (cLogger *commonLogger) SetContext(c interface{}) {
cLogger.customContext = c
}
func (cLogger *commonLogger) traceWithCallDepth(callDepth int, message fmt.Stringer) {
cLogger.log(TraceLvl, message, callDepth)
}
func (cLogger *commonLogger) debugWithCallDepth(callDepth int, message fmt.Stringer) {
cLogger.log(DebugLvl, message, callDepth)
}
func (cLogger *commonLogger) infoWithCallDepth(callDepth int, message fmt.Stringer) {
cLogger.log(InfoLvl, message, callDepth)
}
func (cLogger *commonLogger) warnWithCallDepth(callDepth int, message fmt.Stringer) {
cLogger.log(WarnLvl, message, callDepth)
}
func (cLogger *commonLogger) errorWithCallDepth(callDepth int, message fmt.Stringer) {
cLogger.log(ErrorLvl, message, callDepth)
}
func (cLogger *commonLogger) criticalWithCallDepth(callDepth int, message fmt.Stringer) {
cLogger.log(CriticalLvl, message, callDepth)
cLogger.innerLogger.Flush()
}
func (cLogger *commonLogger) Closed() bool {
cLogger.closedM.RLock()
defer cLogger.closedM.RUnlock()
return cLogger.closed
}
func (cLogger *commonLogger) fillUnusedLevels() {
for i := 0; i < len(cLogger.unusedLevels); i++ {
cLogger.unusedLevels[i] = true
}
cLogger.fillUnusedLevelsByContraint(cLogger.config.Constraints)
for _, exception := range cLogger.config.Exceptions {
cLogger.fillUnusedLevelsByContraint(exception)
}
}
func (cLogger *commonLogger) fillUnusedLevelsByContraint(constraint logLevelConstraints) {
for i := 0; i < len(cLogger.unusedLevels); i++ {
if constraint.IsAllowed(LogLevel(i)) {
cLogger.unusedLevels[i] = false
}
}
}
// stackCallDepth is used to indicate the call depth of 'log' func.
// This depth level is used in the runtime.Caller(...) call. See
// common_context.go -> specifyContext, extractCallerInfo for details.
func (cLogger *commonLogger) log(level LogLevel, message fmt.Stringer, stackCallDepth int) {
if cLogger.unusedLevels[level] {
return
}
cLogger.m.Lock()
defer cLogger.m.Unlock()
if cLogger.Closed() {
return
}
context, _ := specifyContext(stackCallDepth+cLogger.addStackDepth, cLogger.customContext)
// Context errors are not reported because there are situations
// in which context errors are normal Seelog usage cases. For
// example in executables with stripped symbols.
// Error contexts are returned instead. See common_context.go.
/*if err != nil {
reportInternalError(err)
return
}*/
cLogger.innerLogger.innerLog(level, context, message)
}
func (cLogger *commonLogger) processLogMsg(level LogLevel, message fmt.Stringer, context LogContextInterface) {
defer func() {
if err := recover(); err != nil {
reportInternalError(fmt.Errorf("recovered from panic during message processing: %s", err))
}
}()
if cLogger.config.IsAllowed(level, context) {
cLogger.config.RootDispatcher.Dispatch(message.String(), level, context, reportInternalError)
}
}
func (cLogger *commonLogger) isAllowed(level LogLevel, context LogContextInterface) bool {
funcMap, ok := cLogger.contextCache[context.FullPath()]
if !ok {
funcMap = make(map[string]map[LogLevel]bool, 0)
cLogger.contextCache[context.FullPath()] = funcMap
}
levelMap, ok := funcMap[context.Func()]
if !ok {
levelMap = make(map[LogLevel]bool, 0)
funcMap[context.Func()] = levelMap
}
isAllowValue, ok := levelMap[level]
if !ok {
isAllowValue = cLogger.config.IsAllowed(level, context)
levelMap[level] = isAllowValue
}
return isAllowValue
}
type logMessage struct {
params []interface{}
}
type logFormattedMessage struct {
format string
params []interface{}
}
func newLogMessage(params []interface{}) fmt.Stringer {
message := new(logMessage)
message.params = params
return message
}
func newLogFormattedMessage(format string, params []interface{}) *logFormattedMessage {
message := new(logFormattedMessage)
message.params = params
message.format = format
return message
}
func (message *logMessage) String() string {
return fmt.Sprint(message.params...)
}
func (message *logFormattedMessage) String() string {
return fmt.Sprintf(message.format, message.params...)
}
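A sketch of the wrapper scenario that SetAdditionalStackDepth is documented for above: one wrapping layer between callers and seelog, so one extra stack frame is skipped when caller info is resolved. The wrapper type is illustrative; the empty `<seelog/>` config matches the default used in log.go above.

package main

import log "github.com/cihub/seelog"

type appLogger struct {
	inner log.LoggerInterface
}

func newAppLogger() (*appLogger, error) {
	inner, err := log.LoggerFromConfigAsString(`<seelog/>`)
	if err != nil {
		return nil, err
	}
	// One wrapper layer sits between the caller and seelog, so ask seelog to
	// skip one extra frame when resolving %File/%Func/%Line caller info.
	if err := inner.SetAdditionalStackDepth(1); err != nil {
		return nil, err
	}
	return &appLogger{inner: inner}, nil
}

func (l *appLogger) Infof(format string, args ...interface{}) {
	l.inner.Infof(format, args...)
}

func main() {
	l, err := newAppLogger()
	if err != nil {
		panic(err)
	}
	defer l.inner.Flush()
	l.Infof("caller info resolves to main, not to appLogger.Infof")
}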

View File

@@ -0,0 +1,161 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"bufio"
"errors"
"fmt"
"io"
"sync"
"time"
)
// bufferedWriter stores data in memory and flushes it every flushPeriod or when buffer is full
type bufferedWriter struct {
flushPeriod time.Duration // interval between automatic flushes of the buffer
bufferMutex *sync.Mutex // mutex for buffer operations synchronization
innerWriter io.Writer // inner writer
buffer *bufio.Writer // buffered wrapper for inner writer
bufferSize int // max size of data chunk in bytes
}
// NewBufferedWriter creates a new buffered writer struct.
// bufferSize -- size of the memory buffer in bytes
// flushPeriod -- period at which the memory buffer is flushed, in milliseconds; 0 turns periodic flushing off
func NewBufferedWriter(innerWriter io.Writer, bufferSize int, flushPeriod time.Duration) (*bufferedWriter, error) {
if innerWriter == nil {
return nil, errors.New("argument is nil: innerWriter")
}
if flushPeriod < 0 {
return nil, fmt.Errorf("flushPeriod can not be less than 0. Got: %d", flushPeriod)
}
if bufferSize <= 0 {
return nil, fmt.Errorf("bufferSize can not be less or equal to 0. Got: %d", bufferSize)
}
buffer := bufio.NewWriterSize(innerWriter, bufferSize)
/*if err != nil {
return nil, err
}*/
newWriter := new(bufferedWriter)
newWriter.innerWriter = innerWriter
newWriter.buffer = buffer
newWriter.bufferSize = bufferSize
newWriter.flushPeriod = flushPeriod * 1e6
newWriter.bufferMutex = new(sync.Mutex)
if flushPeriod != 0 {
go newWriter.flushPeriodically()
}
return newWriter, nil
}
func (bufWriter *bufferedWriter) writeBigChunk(bytes []byte) (n int, err error) {
bufferedLen := bufWriter.buffer.Buffered()
n, err = bufWriter.flushInner()
if err != nil {
return
}
written, writeErr := bufWriter.innerWriter.Write(bytes)
return bufferedLen + written, writeErr
}
// Write appends the data to the buffer, flushing to the inner writer first when the chunk would not fit.
func (bufWriter *bufferedWriter) Write(bytes []byte) (n int, err error) {
bufWriter.bufferMutex.Lock()
defer bufWriter.bufferMutex.Unlock()
bytesLen := len(bytes)
if bytesLen > bufWriter.bufferSize {
return bufWriter.writeBigChunk(bytes)
}
if bytesLen > bufWriter.buffer.Available() {
n, err = bufWriter.flushInner()
if err != nil {
return
}
}
bufWriter.buffer.Write(bytes)
return len(bytes), nil
}
func (bufWriter *bufferedWriter) Close() error {
closer, ok := bufWriter.innerWriter.(io.Closer)
if ok {
return closer.Close()
}
return nil
}
func (bufWriter *bufferedWriter) Flush() {
bufWriter.bufferMutex.Lock()
defer bufWriter.bufferMutex.Unlock()
bufWriter.flushInner()
}
func (bufWriter *bufferedWriter) flushInner() (n int, err error) {
bufferedLen := bufWriter.buffer.Buffered()
flushErr := bufWriter.buffer.Flush()
return bufWriter.buffer.Buffered() - bufferedLen, flushErr
}
func (bufWriter *bufferedWriter) flushBuffer() {
bufWriter.bufferMutex.Lock()
defer bufWriter.bufferMutex.Unlock()
bufWriter.buffer.Flush()
}
func (bufWriter *bufferedWriter) flushPeriodically() {
if bufWriter.flushPeriod > 0 {
ticker := time.NewTicker(bufWriter.flushPeriod)
for {
<-ticker.C
bufWriter.flushBuffer()
}
}
}
func (bufWriter *bufferedWriter) String() string {
return fmt.Sprintf("bufferedWriter size: %d, flushPeriod: %d", bufWriter.bufferSize, bufWriter.flushPeriod)
}
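A construction-level sketch of the buffered writer used directly (seelog normally wires it up from a '<buffered>' receiver, as mentioned in log.go above); the log file path is hypothetical:

package main

import (
	"os"

	log "github.com/cihub/seelog"
)

func main() {
	f, err := os.OpenFile("app.log", os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) // hypothetical path
	if err != nil {
		panic(err)
	}

	// 8 KiB buffer; the background goroutine flushes it every 500 ms
	// (the constructor converts milliseconds to a time.Duration internally).
	bw, err := log.NewBufferedWriter(f, 8*1024, 500)
	if err != nil {
		panic(err)
	}
	if _, err := bw.Write([]byte("buffered line\n")); err != nil {
		panic(err)
	}
	bw.Flush() // Close does not flush, so flush explicitly before closing
	if err := bw.Close(); err != nil {
		panic(err)
	}
}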

144
vendor/github.com/cihub/seelog/writers_connwriter.go generated vendored Normal file
View File

@@ -0,0 +1,144 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"crypto/tls"
"fmt"
"io"
"net"
)
// connWriter is used to write to a stream-oriented network connection.
type connWriter struct {
innerWriter io.WriteCloser
reconnectOnMsg bool
reconnect bool
net string
addr string
useTLS bool
configTLS *tls.Config
}
// NewConnWriter creates a writer that writes to the address addr on the network netName.
// The connection is reopened on each write if reconnectOnMsg = true.
func NewConnWriter(netName string, addr string, reconnectOnMsg bool) *connWriter {
newWriter := new(connWriter)
newWriter.net = netName
newWriter.addr = addr
newWriter.reconnectOnMsg = reconnectOnMsg
return newWriter
}
// Creates a writer that uses SSL/TLS
func newTLSWriter(netName string, addr string, reconnectOnMsg bool, config *tls.Config) *connWriter {
newWriter := new(connWriter)
newWriter.net = netName
newWriter.addr = addr
newWriter.reconnectOnMsg = reconnectOnMsg
newWriter.useTLS = true
newWriter.configTLS = config
return newWriter
}
func (connWriter *connWriter) Close() error {
if connWriter.innerWriter == nil {
return nil
}
return connWriter.innerWriter.Close()
}
func (connWriter *connWriter) Write(bytes []byte) (n int, err error) {
if connWriter.neededConnectOnMsg() {
err = connWriter.connect()
if err != nil {
return 0, err
}
}
if connWriter.reconnectOnMsg {
defer connWriter.innerWriter.Close()
}
n, err = connWriter.innerWriter.Write(bytes)
if err != nil {
connWriter.reconnect = true
}
return
}
func (connWriter *connWriter) String() string {
return fmt.Sprintf("Conn writer: [%s, %s, %v]", connWriter.net, connWriter.addr, connWriter.reconnectOnMsg)
}
func (connWriter *connWriter) connect() error {
if connWriter.innerWriter != nil {
connWriter.innerWriter.Close()
connWriter.innerWriter = nil
}
if connWriter.useTLS {
conn, err := tls.Dial(connWriter.net, connWriter.addr, connWriter.configTLS)
if err != nil {
return err
}
connWriter.innerWriter = conn
return nil
}
conn, err := net.Dial(connWriter.net, connWriter.addr)
if err != nil {
return err
}
tcpConn, ok := conn.(*net.TCPConn)
if ok {
tcpConn.SetKeepAlive(true)
}
connWriter.innerWriter = conn
return nil
}
func (connWriter *connWriter) neededConnectOnMsg() bool {
if connWriter.reconnect {
connWriter.reconnect = false
return true
}
if connWriter.innerWriter == nil {
return true
}
return connWriter.reconnectOnMsg
}
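A sketch of the connection writer used directly; the TCP endpoint is hypothetical and something must be listening there for the write to succeed. The connection is dialed lazily on the first Write, and a failed write flags the writer to reconnect on the next attempt:

package main

import log "github.com/cihub/seelog"

func main() {
	// Hypothetical endpoint; nothing is dialed until the first Write.
	w := log.NewConnWriter("tcp", "127.0.0.1:5514", false)
	defer w.Close()

	if _, err := w.Write([]byte("hello over tcp\n")); err != nil {
		// The writer marks itself for reconnection on the next attempt.
		panic(err)
	}
}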

View File

@@ -0,0 +1,47 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import "fmt"
// consoleWriter is used to write to console
type consoleWriter struct {
}
// Creates a new console writer. Returns error, if the console writer couldn't be created.
func NewConsoleWriter() (writer *consoleWriter, err error) {
newWriter := new(consoleWriter)
return newWriter, nil
}
// Write prints the given bytes to standard output.
func (console *consoleWriter) Write(bytes []byte) (int, error) {
return fmt.Print(string(bytes))
}
func (console *consoleWriter) String() string {
return "Console writer"
}

92
vendor/github.com/cihub/seelog/writers_filewriter.go generated vendored Normal file
View File

@@ -0,0 +1,92 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
"io"
"os"
"path/filepath"
)
// fileWriter is used to write to a file.
type fileWriter struct {
innerWriter io.WriteCloser
fileName string
}
// NewFileWriter creates a writer for the given file name. The file itself is created lazily on the first Write.
func NewFileWriter(fileName string) (writer *fileWriter, err error) {
newWriter := new(fileWriter)
newWriter.fileName = fileName
return newWriter, nil
}
func (fw *fileWriter) Close() error {
if fw.innerWriter != nil {
err := fw.innerWriter.Close()
if err != nil {
return err
}
fw.innerWriter = nil
}
return nil
}
// Creates the folder and file on the first Write call.
func (fw *fileWriter) Write(bytes []byte) (n int, err error) {
if fw.innerWriter == nil {
if err := fw.createFile(); err != nil {
return 0, err
}
}
return fw.innerWriter.Write(bytes)
}
func (fw *fileWriter) createFile() error {
folder, _ := filepath.Split(fw.fileName)
var err error
if len(folder) != 0 {
err = os.MkdirAll(folder, defaultDirectoryPermissions)
if err != nil {
return err
}
}
// Open the file for appending, creating it if it does not exist.
fw.innerWriter, err = os.OpenFile(fw.fileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, defaultFilePermissions)
if err != nil {
return err
}
return nil
}
func (fw *fileWriter) String() string {
return fmt.Sprintf("File writer: %s", fw.fileName)
}
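A sketch of the lazy file writer above; the path is hypothetical. Nothing is created by NewFileWriter itself -- the folder and file appear on the first Write:

package main

import log "github.com/cihub/seelog"

func main() {
	fw, err := log.NewFileWriter("logs/app.log") // hypothetical path; nothing is created yet
	if err != nil {
		panic(err)
	}
	defer fw.Close()

	// The first Write triggers createFile: the "logs" folder and the file
	// are created, then the bytes are appended.
	if _, err := fw.Write([]byte("first line\n")); err != nil {
		panic(err)
	}
}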

View File

@@ -0,0 +1,62 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"io"
)
type formattedWriter struct {
writer io.Writer
formatter *formatter
}
func NewFormattedWriter(writer io.Writer, formatter *formatter) (*formattedWriter, error) {
if formatter == nil {
return nil, errors.New("formatter can not be nil")
}
return &formattedWriter{writer, formatter}, nil
}
func (formattedWriter *formattedWriter) Write(message string, level LogLevel, context LogContextInterface) error {
str := formattedWriter.formatter.Format(message, level, context)
_, err := formattedWriter.writer.Write([]byte(str))
return err
}
func (formattedWriter *formattedWriter) String() string {
return fmt.Sprintf("writer: %s, format: %s", formattedWriter.writer, formattedWriter.formatter)
}
func (formattedWriter *formattedWriter) Writer() io.Writer {
return formattedWriter.writer
}
func (formattedWriter *formattedWriter) Format() *formatter {
return formattedWriter.formatter
}
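Finally, a sketch wiring a formatter to a plain io.Writer through NewFormattedWriter. The format verbs are assumptions as noted earlier; Write is not called because it needs a LogContextInterface, which the logger's dispatcher supplies internally:

package main

import (
	"os"

	log "github.com/cihub/seelog"
)

func main() {
	fmtr, err := log.NewFormatter("[%LEV] %Msg%n") // verb names assumed from the format reference
	if err != nil {
		panic(err)
	}
	fw, err := log.NewFormattedWriter(os.Stdout, fmtr)
	if err != nil {
		panic(err)
	}
	// The dispatcher calls fw.Write(message, level, context) internally; this
	// sketch only demonstrates that the pieces compose.
	_ = fw
}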

View File

@@ -0,0 +1,763 @@
// Copyright (c) 2013 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/cihub/seelog/archive"
"github.com/cihub/seelog/archive/gzip"
"github.com/cihub/seelog/archive/tar"
"github.com/cihub/seelog/archive/zip"
)
// Common constants
const (
rollingLogHistoryDelimiter = "."
)
// Types of the rolling writer: roll by date, by time, etc.
type rollingType uint8
const (
rollingTypeSize = iota
rollingTypeTime
)
// Types of the rolled file naming mode: prefix, postfix, etc.
type rollingNameMode uint8
const (
rollingNameModePostfix = iota
rollingNameModePrefix
)
var rollingNameModesStringRepresentation = map[rollingNameMode]string{
rollingNameModePostfix: "postfix",
rollingNameModePrefix: "prefix",
}
func rollingNameModeFromString(rollingNameStr string) (rollingNameMode, bool) {
for tp, tpStr := range rollingNameModesStringRepresentation {
if tpStr == rollingNameStr {
return tp, true
}
}
return 0, false
}
var rollingTypesStringRepresentation = map[rollingType]string{
rollingTypeSize: "size",
rollingTypeTime: "date",
}
func rollingTypeFromString(rollingTypeStr string) (rollingType, bool) {
for tp, tpStr := range rollingTypesStringRepresentation {
if tpStr == rollingTypeStr {
return tp, true
}
}
return 0, false
}
// Archival type for old logs.
type rollingArchiveType uint8
const (
rollingArchiveNone = iota
rollingArchiveZip
rollingArchiveGzip
)
var rollingArchiveTypesStringRepresentation = map[rollingArchiveType]string{
rollingArchiveNone: "none",
rollingArchiveZip: "zip",
rollingArchiveGzip: "gzip",
}
type archiver func(f *os.File, exploded bool) archive.WriteCloser
type unarchiver func(f *os.File) (archive.ReadCloser, error)
type compressionType struct {
extension string
handleMultipleEntries bool
archiver archiver
unarchiver unarchiver
}
var compressionTypes = map[rollingArchiveType]compressionType{
rollingArchiveZip: {
extension: ".zip",
handleMultipleEntries: true,
archiver: func(f *os.File, _ bool) archive.WriteCloser {
return zip.NewWriter(f)
},
unarchiver: func(f *os.File) (archive.ReadCloser, error) {
fi, err := f.Stat()
if err != nil {
return nil, err
}
r, err := zip.NewReader(f, fi.Size())
if err != nil {
return nil, err
}
return archive.NopCloser(r), nil
},
},
rollingArchiveGzip: {
extension: ".gz",
handleMultipleEntries: false,
archiver: func(f *os.File, exploded bool) archive.WriteCloser {
gw := gzip.NewWriter(f)
if exploded {
return gw
}
return tar.NewWriteMultiCloser(gw, gw)
},
unarchiver: func(f *os.File) (archive.ReadCloser, error) {
gr, err := gzip.NewReader(f, f.Name())
if err != nil {
return nil, err
}
// Determine if the gzip is a tar
tr := tar.NewReader(gr)
_, err = tr.Next()
isTar := err == nil
// Reset to beginning of file
if _, err := f.Seek(0, os.SEEK_SET); err != nil {
return nil, err
}
gr.Reset(f)
if isTar {
return archive.NopCloser(tar.NewReader(gr)), nil
}
return gr, nil
},
},
}
func (compressionType *compressionType) rollingArchiveTypeName(name string, exploded bool) string {
if !compressionType.handleMultipleEntries && !exploded {
return name + ".tar" + compressionType.extension
} else {
return name + compressionType.extension
}
}
func rollingArchiveTypeFromString(rollingArchiveTypeStr string) (rollingArchiveType, bool) {
for tp, tpStr := range rollingArchiveTypesStringRepresentation {
if tpStr == rollingArchiveTypeStr {
return tp, true
}
}
return 0, false
}
// Default names for different archive types
var rollingArchiveDefaultExplodedName = "old"
func rollingArchiveTypeDefaultName(archiveType rollingArchiveType, exploded bool) (string, error) {
compressionType, ok := compressionTypes[archiveType]
if !ok {
return "", fmt.Errorf("cannot get default filename for archive type = %v", archiveType)
}
return compressionType.rollingArchiveTypeName("log", exploded), nil
}
// rollerVirtual is an interface that represents all virtual funcs that are
// called in different rolling writer subtypes.
type rollerVirtual interface {
needsToRoll() bool // Returns true if needs to switch to another file.
isFileRollNameValid(rname string) bool // Returns true if logger roll file name (postfix/prefix/etc.) is ok.
sortFileRollNamesAsc(fs []string) ([]string, error) // Sorts logger roll file names in ascending order of their creation by logger.
// getNewHistoryRollFileName is called whenever we are about to roll the
// current log file. It returns the name the current log file should be
// rolled to.
getNewHistoryRollFileName(otherHistoryFiles []string) string
getCurrentFileName() string
}
// rollingFileWriter writes received messages to a file until a time interval passes
// or the file exceeds a specified limit. After that the current log file is renamed
// and the writer starts to log into a new file. You can set a limit on the number of such
// renamed files, if you want, and then the rolling writer deletes older ones when
// the file count exceeds the specified limit.
type rollingFileWriter struct {
fileName string // log file name
currentDirPath string
currentFile *os.File
currentName string
currentFileSize int64
rollingType rollingType // Rolling mode (Files roll by size/date/...)
archiveType rollingArchiveType
archivePath string
archiveExploded bool
fullName bool
maxRolls int
nameMode rollingNameMode
self rollerVirtual // Used for virtual calls
rollLock sync.Mutex
}
func newRollingFileWriter(fpath string, rtype rollingType, atype rollingArchiveType, apath string, maxr int, namemode rollingNameMode,
archiveExploded bool, fullName bool) (*rollingFileWriter, error) {
rw := new(rollingFileWriter)
rw.currentDirPath, rw.fileName = filepath.Split(fpath)
if len(rw.currentDirPath) == 0 {
rw.currentDirPath = "."
}
rw.rollingType = rtype
rw.archiveType = atype
rw.archivePath = apath
rw.nameMode = namemode
rw.maxRolls = maxr
rw.archiveExploded = archiveExploded
rw.fullName = fullName
return rw, nil
}
func (rw *rollingFileWriter) hasRollName(file string) bool {
switch rw.nameMode {
case rollingNameModePostfix:
rname := rw.fileName + rollingLogHistoryDelimiter
return strings.HasPrefix(file, rname)
case rollingNameModePrefix:
rname := rollingLogHistoryDelimiter + rw.fileName
return strings.HasSuffix(file, rname)
}
return false
}
func (rw *rollingFileWriter) createFullFileName(originalName, rollname string) string {
switch rw.nameMode {
case rollingNameModePostfix:
return originalName + rollingLogHistoryDelimiter + rollname
case rollingNameModePrefix:
return rollname + rollingLogHistoryDelimiter + originalName
}
return ""
}
func (rw *rollingFileWriter) getSortedLogHistory() ([]string, error) {
files, err := getDirFilePaths(rw.currentDirPath, nil, true)
if err != nil {
return nil, err
}
var validRollNames []string
for _, file := range files {
if rw.hasRollName(file) {
rname := rw.getFileRollName(file)
if rw.self.isFileRollNameValid(rname) {
validRollNames = append(validRollNames, rname)
}
}
}
sortedTails, err := rw.self.sortFileRollNamesAsc(validRollNames)
if err != nil {
return nil, err
}
validSortedFiles := make([]string, len(sortedTails))
for i, v := range sortedTails {
validSortedFiles[i] = rw.createFullFileName(rw.fileName, v)
}
return validSortedFiles, nil
}
func (rw *rollingFileWriter) createFileAndFolderIfNeeded(first bool) error {
var err error
if len(rw.currentDirPath) != 0 {
err = os.MkdirAll(rw.currentDirPath, defaultDirectoryPermissions)
if err != nil {
return err
}
}
rw.currentName = rw.self.getCurrentFileName()
filePath := filepath.Join(rw.currentDirPath, rw.currentName)
// This will either open the existing file (without truncating it) or
// create if necessary. Append mode avoids any race conditions.
rw.currentFile, err = os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, defaultFilePermissions)
if err != nil {
return err
}
stat, err := rw.currentFile.Stat()
if err != nil {
rw.currentFile.Close()
rw.currentFile = nil
return err
}
rw.currentFileSize = stat.Size()
return nil
}
func (rw *rollingFileWriter) archiveExplodedLogs(logFilename string, compressionType compressionType) (err error) {
closeWithError := func(c io.Closer) {
if cerr := c.Close(); cerr != nil && err == nil {
err = cerr
}
}
rollPath := filepath.Join(rw.currentDirPath, logFilename)
src, err := os.Open(rollPath)
if err != nil {
return err
}
defer src.Close() // Read-only
// Buffer to a temporary file on the same partition
// Note: archivePath is a path to a directory when handling exploded logs
dst, err := rw.tempArchiveFile(rw.archivePath)
if err != nil {
return err
}
defer func() {
closeWithError(dst)
if err != nil {
os.Remove(dst.Name()) // Can't do anything when we fail to remove temp file
return
}
// Finalize archive by swapping the buffered archive into place
err = os.Rename(dst.Name(), filepath.Join(rw.archivePath,
compressionType.rollingArchiveTypeName(logFilename, true)))
}()
// archive entry
w := compressionType.archiver(dst, true)
defer closeWithError(w)
fi, err := src.Stat()
if err != nil {
return err
}
if err := w.NextFile(logFilename, fi); err != nil {
return err
}
_, err = io.Copy(w, src)
return err
}
func (rw *rollingFileWriter) archiveUnexplodedLogs(compressionType compressionType, rollsToDelete int, history []string) (err error) {
closeWithError := func(c io.Closer) {
if cerr := c.Close(); cerr != nil && err == nil {
err = cerr
}
}
// Buffer to a temporary file on the same partition
// Note: archivePath is a path to a file when handling unexploded logs
dst, err := rw.tempArchiveFile(filepath.Dir(rw.archivePath))
if err != nil {
return err
}
defer func() {
closeWithError(dst)
if err != nil {
os.Remove(dst.Name()) // Can't do anything when we fail to remove temp file
return
}
// Finalize archive by moving the buffered archive into place
err = os.Rename(dst.Name(), rw.archivePath)
}()
w := compressionType.archiver(dst, false)
defer closeWithError(w)
src, err := os.Open(rw.archivePath)
switch {
// Archive exists
case err == nil:
defer src.Close() // Read-only
r, err := compressionType.unarchiver(src)
if err != nil {
return err
}
defer r.Close() // Read-only
if err := archive.Copy(w, r); err != nil {
return err
}
// Failed to stat
case !os.IsNotExist(err):
return err
}
// Add new files to the archive
for i := 0; i < rollsToDelete; i++ {
rollPath := filepath.Join(rw.currentDirPath, history[i])
src, err := os.Open(rollPath)
if err != nil {
return err
}
defer src.Close() // Read-only
fi, err := src.Stat()
if err != nil {
return err
}
if err := w.NextFile(src.Name(), fi); err != nil {
return err
}
if _, err := io.Copy(w, src); err != nil {
return err
}
}
return nil
}
func (rw *rollingFileWriter) deleteOldRolls(history []string) error {
if rw.maxRolls <= 0 {
return nil
}
rollsToDelete := len(history) - rw.maxRolls
if rollsToDelete <= 0 {
return nil
}
if rw.archiveType != rollingArchiveNone {
if rw.archiveExploded {
os.MkdirAll(rw.archivePath, defaultDirectoryPermissions)
// Archive logs
for i := 0; i < rollsToDelete; i++ {
rw.archiveExplodedLogs(history[i], compressionTypes[rw.archiveType])
}
} else {
os.MkdirAll(filepath.Dir(rw.archivePath), defaultDirectoryPermissions)
rw.archiveUnexplodedLogs(compressionTypes[rw.archiveType], rollsToDelete, history)
}
}
var err error
// In all cases (archive files or not) the files should be deleted.
for i := 0; i < rollsToDelete; i++ {
// Try best to delete files without breaking the loop.
if err = tryRemoveFile(filepath.Join(rw.currentDirPath, history[i])); err != nil {
reportInternalError(err)
}
}
return nil
}
func (rw *rollingFileWriter) getFileRollName(fileName string) string {
switch rw.nameMode {
case rollingNameModePostfix:
return fileName[len(rw.fileName+rollingLogHistoryDelimiter):]
case rollingNameModePrefix:
return fileName[:len(fileName)-len(rw.fileName+rollingLogHistoryDelimiter)]
}
return ""
}
func (rw *rollingFileWriter) roll() error {
// First, close current file.
err := rw.currentFile.Close()
if err != nil {
return err
}
rw.currentFile = nil
// Current history of all previous log files.
// For file roller it may be like this:
// * ...
// * file.log.4
// * file.log.5
// * file.log.6
//
// For date roller it may look like this:
// * ...
// * file.log.11.Aug.13
// * file.log.15.Aug.13
// * file.log.16.Aug.13
// Sorted log history does NOT include current file.
history, err := rw.getSortedLogHistory()
if err != nil {
return err
}
// Renames current file to create a new roll history entry
// For file roller it may be like this:
// * ...
// * file.log.4
// * file.log.5
// * file.log.6
// n file.log.7 <---- RENAMED (from file.log)
newHistoryName := rw.createFullFileName(rw.fileName,
rw.self.getNewHistoryRollFileName(history))
err = os.Rename(filepath.Join(rw.currentDirPath, rw.currentName), filepath.Join(rw.currentDirPath, newHistoryName))
if err != nil {
return err
}
// Finally, add the newly added history file to the history archive
// and, if after that the archive exceeds the allowed max limit, older rolls
// must be removed/archived.
history = append(history, newHistoryName)
if len(history) > rw.maxRolls {
err = rw.deleteOldRolls(history)
if err != nil {
return err
}
}
return nil
}
func (rw *rollingFileWriter) Write(bytes []byte) (n int, err error) {
rw.rollLock.Lock()
defer rw.rollLock.Unlock()
if rw.self.needsToRoll() {
if err := rw.roll(); err != nil {
return 0, err
}
}
if rw.currentFile == nil {
err := rw.createFileAndFolderIfNeeded(true)
if err != nil {
return 0, err
}
}
n, err = rw.currentFile.Write(bytes)
rw.currentFileSize += int64(n)
return n, err
}
func (rw *rollingFileWriter) Close() error {
if rw.currentFile != nil {
e := rw.currentFile.Close()
if e != nil {
return e
}
rw.currentFile = nil
}
return nil
}
func (rw *rollingFileWriter) tempArchiveFile(archiveDir string) (*os.File, error) {
tmp := filepath.Join(archiveDir, ".seelog_tmp")
if err := os.MkdirAll(tmp, defaultDirectoryPermissions); err != nil {
return nil, err
}
return ioutil.TempFile(tmp, "archived_logs")
}
// =============================================================================================
// Different types of rolling writers
// =============================================================================================
// --------------------------------------------------
// Rolling writer by SIZE
// --------------------------------------------------
// rollingFileWriterSize performs roll when file exceeds a specified limit.
type rollingFileWriterSize struct {
*rollingFileWriter
maxFileSize int64
}
func NewRollingFileWriterSize(fpath string, atype rollingArchiveType, apath string, maxSize int64, maxRolls int, namemode rollingNameMode, archiveExploded bool) (*rollingFileWriterSize, error) {
rw, err := newRollingFileWriter(fpath, rollingTypeSize, atype, apath, maxRolls, namemode, archiveExploded, false)
if err != nil {
return nil, err
}
rws := &rollingFileWriterSize{rw, maxSize}
rws.self = rws
return rws, nil
}
func (rws *rollingFileWriterSize) needsToRoll() bool {
return rws.currentFileSize >= rws.maxFileSize
}
func (rws *rollingFileWriterSize) isFileRollNameValid(rname string) bool {
if len(rname) == 0 {
return false
}
_, err := strconv.Atoi(rname)
return err == nil
}
type rollSizeFileTailsSlice []string
func (p rollSizeFileTailsSlice) Len() int {
return len(p)
}
func (p rollSizeFileTailsSlice) Less(i, j int) bool {
v1, _ := strconv.Atoi(p[i])
v2, _ := strconv.Atoi(p[j])
return v1 < v2
}
func (p rollSizeFileTailsSlice) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
func (rws *rollingFileWriterSize) sortFileRollNamesAsc(fs []string) ([]string, error) {
ss := rollSizeFileTailsSlice(fs)
sort.Sort(ss)
return ss, nil
}
func (rws *rollingFileWriterSize) getNewHistoryRollFileName(otherLogFiles []string) string {
v := 0
if len(otherLogFiles) != 0 {
latest := otherLogFiles[len(otherLogFiles)-1]
v, _ = strconv.Atoi(rws.getFileRollName(latest))
}
return fmt.Sprintf("%d", v+1)
}
func (rws *rollingFileWriterSize) getCurrentFileName() string {
return rws.fileName
}
func (rws *rollingFileWriterSize) String() string {
return fmt.Sprintf("Rolling file writer (By SIZE): filename: %s, archive: %s, archivefile: %s, maxFileSize: %v, maxRolls: %v",
rws.fileName,
rollingArchiveTypesStringRepresentation[rws.archiveType],
rws.archivePath,
rws.maxFileSize,
rws.maxRolls)
}
// --------------------------------------------------
// Rolling writer by TIME
// --------------------------------------------------
// rollingFileWriterTime performs roll when a specified time interval has passed.
type rollingFileWriterTime struct {
*rollingFileWriter
timePattern string
currentTimeFileName string
}
func NewRollingFileWriterTime(fpath string, atype rollingArchiveType, apath string, maxr int,
timePattern string, namemode rollingNameMode, archiveExploded bool, fullName bool) (*rollingFileWriterTime, error) {
rw, err := newRollingFileWriter(fpath, rollingTypeTime, atype, apath, maxr, namemode, archiveExploded, fullName)
if err != nil {
return nil, err
}
rws := &rollingFileWriterTime{rw, timePattern, ""}
rws.self = rws
return rws, nil
}
func (rwt *rollingFileWriterTime) needsToRoll() bool {
newName := time.Now().Format(rwt.timePattern)
if rwt.currentTimeFileName == "" {
// first run; capture the current name
rwt.currentTimeFileName = newName
return false
}
return newName != rwt.currentTimeFileName
}
func (rwt *rollingFileWriterTime) isFileRollNameValid(rname string) bool {
if len(rname) == 0 {
return false
}
_, err := time.ParseInLocation(rwt.timePattern, rname, time.Local)
return err == nil
}
type rollTimeFileTailsSlice struct {
data []string
pattern string
}
func (p rollTimeFileTailsSlice) Len() int {
return len(p.data)
}
func (p rollTimeFileTailsSlice) Less(i, j int) bool {
t1, _ := time.ParseInLocation(p.pattern, p.data[i], time.Local)
t2, _ := time.ParseInLocation(p.pattern, p.data[j], time.Local)
return t1.Before(t2)
}
func (p rollTimeFileTailsSlice) Swap(i, j int) {
p.data[i], p.data[j] = p.data[j], p.data[i]
}
func (rwt *rollingFileWriterTime) sortFileRollNamesAsc(fs []string) ([]string, error) {
ss := rollTimeFileTailsSlice{data: fs, pattern: rwt.timePattern}
sort.Sort(ss)
return ss.data, nil
}
func (rwt *rollingFileWriterTime) getNewHistoryRollFileName(_ []string) string {
newFileName := rwt.currentTimeFileName
rwt.currentTimeFileName = time.Now().Format(rwt.timePattern)
return newFileName
}
func (rwt *rollingFileWriterTime) getCurrentFileName() string {
if rwt.fullName {
return rwt.createFullFileName(rwt.fileName, time.Now().Format(rwt.timePattern))
}
return rwt.fileName
}
func (rwt *rollingFileWriterTime) String() string {
return fmt.Sprintf("Rolling file writer (By TIME): filename: %s, archive: %s, archivefile: %s, pattern: %s, maxRolls: %v",
rwt.fileName,
rollingArchiveTypesStringRepresentation[rwt.archiveType],
rwt.archivePath,
rwt.timePattern,
rwt.maxRolls)
}
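
The rolling writers above are usually selected through seelog's <rollingfile> element. A minimal sketch covering both modes implemented in this file (type="size" and type="date"); file names, sizes and roll counts are placeholders.

package main

import "github.com/cihub/seelog"

func main() {
	// type="size": rolls once the file reaches maxsize bytes, keeping maxrolls backups.
	// type="date": rolls whenever the formatted datepattern changes (daily here).
	config := `
<seelog>
    <outputs formatid="roll">
        <rollingfile type="size" filename="logs/by_size.log" maxsize="10485760" maxrolls="5"/>
        <rollingfile type="date" filename="logs/by_date.log" datepattern="2006-01-02"/>
    </outputs>
    <formats>
        <format id="roll" format="%Date %Time [%LEVEL] %Msg%n"/>
    </formats>
</seelog>`

	log, err := seelog.LoggerFromConfigAsString(config)
	if err != nil {
		panic(err)
	}
	defer log.Flush()

	log.Info("rolling by size and by date")
}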

214
vendor/github.com/cihub/seelog/writers_smtpwriter.go generated vendored Normal file

@ -0,0 +1,214 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"net/smtp"
"path/filepath"
"strings"
)
const (
// Default subject phrase for sending emails.
DefaultSubjectPhrase = "Diagnostic message from server: "
// Message subject pattern composed according to RFC 5321.
rfc5321SubjectPattern = "From: %s <%s>\nSubject: %s\n\n"
)
// smtpWriter is used to send emails via given SMTP-server.
type smtpWriter struct {
auth smtp.Auth
hostName string
hostPort string
hostNameWithPort string
senderAddress string
senderName string
recipientAddresses []string
caCertDirPaths []string
mailHeaders []string
subject string
}
// NewSMTPWriter returns a new SMTP-writer.
func NewSMTPWriter(sa, sn string, ras []string, hn, hp, un, pwd string, cacdps []string, subj string, headers []string) *smtpWriter {
return &smtpWriter{
auth: smtp.PlainAuth("", un, pwd, hn),
hostName: hn,
hostPort: hp,
hostNameWithPort: fmt.Sprintf("%s:%s", hn, hp),
senderAddress: sa,
senderName: sn,
recipientAddresses: ras,
caCertDirPaths: cacdps,
subject: subj,
mailHeaders: headers,
}
}
func prepareMessage(senderAddr, senderName, subject string, body []byte, headers []string) []byte {
headerLines := fmt.Sprintf(rfc5321SubjectPattern, senderName, senderAddr, subject)
// Build header lines if configured.
if headers != nil && len(headers) > 0 {
headerLines += strings.Join(headers, "\n")
headerLines += "\n"
}
return append([]byte(headerLines), body...)
}
// getTLSConfig gets paths of PEM files with certificates,
// host server name and tries to create an appropriate TLS.Config.
func getTLSConfig(pemFileDirPaths []string, hostName string) (config *tls.Config, err error) {
if pemFileDirPaths == nil || len(pemFileDirPaths) == 0 {
err = errors.New("invalid PEM file paths")
return
}
pemEncodedContent := []byte{}
var (
e error
bytes []byte
)
// Create a file-filter-by-extension, set aside non-pem files.
pemFilePathFilter := func(fp string) bool {
if filepath.Ext(fp) == ".pem" {
return true
}
return false
}
for _, pemFileDirPath := range pemFileDirPaths {
pemFilePaths, err := getDirFilePaths(pemFileDirPath, pemFilePathFilter, false)
if err != nil {
return nil, err
}
// Put together all the PEM files to decode them as a whole byte slice.
for _, pfp := range pemFilePaths {
if bytes, e = ioutil.ReadFile(pfp); e == nil {
pemEncodedContent = append(pemEncodedContent, bytes...)
} else {
return nil, fmt.Errorf("cannot read file: %s: %s", pfp, e.Error())
}
}
}
config = &tls.Config{RootCAs: x509.NewCertPool(), ServerName: hostName}
isAppended := config.RootCAs.AppendCertsFromPEM(pemEncodedContent)
if !isAppended {
// Extract this into a separate error.
err = errors.New("invalid PEM content")
return
}
return
}
// SendMail accepts TLS configuration, connects to the server at addr,
// switches to TLS if possible, authenticates with mechanism a if possible,
// and then sends an email from address from, to addresses to, with message msg.
func sendMailWithTLSConfig(config *tls.Config, addr string, a smtp.Auth, from string, to []string, msg []byte) error {
c, err := smtp.Dial(addr)
if err != nil {
return err
}
// Check if the server supports STARTTLS extension.
if ok, _ := c.Extension("STARTTLS"); ok {
if err = c.StartTLS(config); err != nil {
return err
}
}
// Check if the server supports AUTH extension and use given smtp.Auth.
if a != nil {
if isSupported, _ := c.Extension("AUTH"); isSupported {
if err = c.Auth(a); err != nil {
return err
}
}
}
// Portion of code from the official smtp.SendMail function,
// see http://golang.org/src/pkg/net/smtp/smtp.go.
if err = c.Mail(from); err != nil {
return err
}
for _, addr := range to {
if err = c.Rcpt(addr); err != nil {
return err
}
}
w, err := c.Data()
if err != nil {
return err
}
_, err = w.Write(msg)
if err != nil {
return err
}
err = w.Close()
if err != nil {
return err
}
return c.Quit()
}
// Write pushes a text message properly composed according to RFC 5321
// to a post server, which sends it to the recipients.
func (smtpw *smtpWriter) Write(data []byte) (int, error) {
var err error
if smtpw.caCertDirPaths == nil {
err = smtp.SendMail(
smtpw.hostNameWithPort,
smtpw.auth,
smtpw.senderAddress,
smtpw.recipientAddresses,
prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders),
)
} else {
config, e := getTLSConfig(smtpw.caCertDirPaths, smtpw.hostName)
if e != nil {
return 0, e
}
err = sendMailWithTLSConfig(
config,
smtpw.hostNameWithPort,
smtpw.auth,
smtpw.senderAddress,
smtpw.recipientAddresses,
prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders),
)
}
if err != nil {
return 0, err
}
return len(data), nil
}
// Close closes down SMTP-connection.
func (smtpw *smtpWriter) Close() error {
// Do nothing as Write method opens and closes connection automatically.
return nil
}
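
smtpWriter is reached through seelog's <smtp> output. A minimal sketch, assuming the documented <filter>/<smtp>/<recipient> config elements; every host, address and credential below is a placeholder.

package main

import "github.com/cihub/seelog"

func main() {
	// The <filter> restricts mail to serious problems only.
	config := `
<seelog>
    <outputs>
        <filter levels="error,critical">
            <smtp senderaddress="cron@example.com" sendername="cron-scheduler"
                  hostname="smtp.example.com" hostport="587"
                  username="cron@example.com" password="secret">
                <recipient address="ops@example.com"/>
            </smtp>
        </filter>
    </outputs>
</seelog>`

	log, err := seelog.LoggerFromConfigAsString(config)
	if err != nil {
		panic(err)
	}
	defer log.Flush()

	log.Error("this message would be emailed to the recipients")
}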

50
vendor/github.com/donnie4w/go-logger/readme.txt generated vendored Normal file

@ -0,0 +1,50 @@
go-logger is a logging library for Go, implemented as a wrapper around Go's built-in log package.
Its usage is similar to the Java logging toolkit log4j.
There are five logging methods: Debug, Info, Warn, Error, Fatal, ordered from the lowest level to the highest.
The log level is set with logger.SetLevel(); for example, after logger.SetLevel(logger.WARN),
logger.Debug(...) and logger.Info(...) messages will not be printed, while
logger.Warn(...), logger.Error(...) and logger.Fatal(...) messages will be.
SetLevel() accepts seven values: ALL, DEBUG, INFO, WARN, ERROR, FATAL, OFF,
where ALL prints every logging call and OFF prints none.
Log files can be rotated in two ways: 1. by date; 2. by file size.
When rotating by date, one backup file is kept per day, suffixed with .yyyy-MM-dd;
the previous day's backup file is created once midnight has passed.
Rotating by size takes three parameters: 1. the file size, 2. the size unit, 3. the number of files.
When the file grows beyond the configured limit, a backup file is created, suffixed with an increasing sequence number.
When the number of files reaches the configured limit, newly created log files overwrite the earlier backup files of the same name.
Example:
//whether to also print to the console, defaults to true
logger.SetConsole(true)
//rotate the log file by file size
//1st parameter: directory for the log files
//2nd parameter: log file name
//3rd parameter: maximum number of backup files
//4th parameter: backup file size
//5th parameter: unit of the file size: KB, MB, GB, TB
//logger.SetRollingFile("d:/logtest", "test.log", 10, 5, logger.KB)
//rotate the log file by date
//1st parameter: directory for the log files
//2nd parameter: log file name
logger.SetRollingDaily("d:/logtest", "test.log")
//set the log level: ALL, DEBUG, INFO, WARN, ERROR, FATAL, OFF, from lowest to highest
//common practice is DEBUG while testing and INFO or above in production
logger.SetLevel(logger.DEBUG)
Printing log messages:
func log(i int) {
logger.Debug("Debug>>>>>>>>>>>>>>>>>>>>>>" + strconv.Itoa(i))
logger.Info("Info>>>>>>>>>>>>>>>>>>>>>>>>>" + strconv.Itoa(i))
logger.Warn("Warn>>>>>>>>>>>>>>>>>>>>>>>>>" + strconv.Itoa(i))
logger.Error("Error>>>>>>>>>>>>>>>>>>>>>>>>>" + strconv.Itoa(i))
logger.Fatal("Fatal>>>>>>>>>>>>>>>>>>>>>>>>>" + strconv.Itoa(i))
}
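
The readme snippets above, assembled into one runnable sketch. The import path is assumed from this commit's vendor.json entry (github.com/donnie4w/go-logger); directory and file names are placeholders.

package main

import (
	"strconv"

	logger "github.com/donnie4w/go-logger" // import path assumed from vendor.json
)

func main() {
	logger.SetConsole(true)                      // also print to the console
	logger.SetRollingDaily("./logs", "test.log") // one backup per day, suffixed .yyyy-MM-dd
	logger.SetLevel(logger.DEBUG)                // DEBUG while testing, INFO or above in production

	for i := 0; i < 3; i++ {
		logger.Debug("Debug>>>" + strconv.Itoa(i))
		logger.Info("Info>>>" + strconv.Itoa(i))
		logger.Warn("Warn>>>" + strconv.Itoa(i))
	}
}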

40
vendor/vendor.json vendored

@ -8,6 +8,42 @@
"revision": "0db4a625e949e956314d7d1adea9bf82384cc10c",
"revisionTime": "2017-02-13T07:20:14Z"
},
{
"checksumSHA1": "1bK29RcAjCAMCYS2HS3O18w4m8k=",
"path": "github.com/cihub/seelog",
"revision": "f561c5e57575bb1e0a2167028b7339b3a8d16fb4",
"revisionTime": "2017-01-30T13:45:32Z"
},
{
"checksumSHA1": "iMU6SM/jawaCOJnyvR5/w8C/C1w=",
"path": "github.com/cihub/seelog/archive",
"revision": "f561c5e57575bb1e0a2167028b7339b3a8d16fb4",
"revisionTime": "2017-01-30T13:45:32Z"
},
{
"checksumSHA1": "oSd7EVei192qS3uYyCAMx9zV3Y8=",
"path": "github.com/cihub/seelog/archive/gzip",
"revision": "f561c5e57575bb1e0a2167028b7339b3a8d16fb4",
"revisionTime": "2017-01-30T13:45:32Z"
},
{
"checksumSHA1": "emesF9mCsIQC59VUE2lvjZYpAHU=",
"path": "github.com/cihub/seelog/archive/tar",
"revision": "f561c5e57575bb1e0a2167028b7339b3a8d16fb4",
"revisionTime": "2017-01-30T13:45:32Z"
},
{
"checksumSHA1": "TXLSkZsRlMifg3mg9+6QgEZJ6m4=",
"path": "github.com/cihub/seelog/archive/zip",
"revision": "f561c5e57575bb1e0a2167028b7339b3a8d16fb4",
"revisionTime": "2017-01-30T13:45:32Z"
},
{
"checksumSHA1": "foY4H6dyBE6AmY0uTZ2rfH+34Q4=",
"path": "github.com/donnie4w/go-logger",
"revision": "34aa9bcfff225db756df097fc49ffd35b68412bb",
"revisionTime": "2016-05-29T15:11:07Z"
},
{
"checksumSHA1": "OkqfwXeTVoiIxNMDA7HKvmrCDw8=",
"path": "github.com/go-macaron/binding",
@ -60,10 +96,6 @@
"revision": "e8fbd41c16b9c0468dbae8db2fe0161a21265b8a",
"revisionTime": "2017-02-21T11:08:50Z"
},
{
"path": "github.com/go-yaml/yaml",
"revision": ""
},
{
"checksumSHA1": "iKPMvbAueGfdyHcWCgzwKzm8WVo=",
"path": "github.com/klauspost/cpuid",