feat: 实现按天分割、压缩日志

pull/71/head
zhengkunwang223 2022-12-08 21:27:49 +08:00 committed by zhengkunwang223
parent b04c2385a1
commit 29c6564223
6 changed files with 382 additions and 26 deletions

View File

@ -5,14 +5,6 @@ system:
data_dir: /opt/1Panel/data
app_oss: "https://1panel.oss-cn-hangzhou.aliyuncs.com/apps.json"
mysql:
path: localhost
port: 3306
db_name: 1Panel
username: root
password: KubeOperator123@mysql
max_idle_conns: 10
max_open_conns: 100
sqlite:
path: /opt/1Panel/data/db
@ -24,9 +16,7 @@ log:
path: /opt/1Panel/log
log_name: 1Panel
log_suffix: .log
log_size: 50 #日志文件大小,单位是 MB
log_backup: 10 #最大过期日志保留个数
log_data: 7 #保留过期文件最大时间,单位 天
log_backup: 10 #最大日志保留个数
cache:
path: /opt/1Panel/data/cache

View File

@ -6,7 +6,5 @@ type LogConfig struct {
Path string `mapstructure:"path"`
LogName string `mapstructure:"log_name"`
LogSuffix string `mapstructure:"log_suffix"`
LogSize int `mapstructure:"log_size"`
LogBackup int `mapstructure:"log_backup"`
LogData int `mapstructure:"log_data"`
}

View File

@ -2,14 +2,13 @@ package log
import (
"fmt"
"path"
"github.com/1Panel-dev/1Panel/backend/log"
"strings"
"time"
"github.com/1Panel-dev/1Panel/backend/configs"
"github.com/1Panel-dev/1Panel/backend/global"
"github.com/natefinch/lumberjack"
"github.com/sirupsen/logrus"
)
@ -17,24 +16,27 @@ func Init() {
l := logrus.New()
setOutput(l, global.CONF.LogConfig)
global.LOG = l
global.LOG.Info("init success")
}
func setOutput(log *logrus.Logger, config configs.LogConfig) {
filePath := path.Join(config.Path, config.LogName+config.LogSuffix)
logPrint := &lumberjack.Logger{
Filename: filePath,
MaxSize: config.LogSize,
MaxBackups: config.LogBackup,
MaxAge: config.LogData,
Compress: true,
func setOutput(logger *logrus.Logger, config configs.LogConfig) {
writer, err := log.NewWriterFromConfig(&log.Config{
LogPath: config.Path,
FileName: config.LogName,
TimeTagFormat: "2006-01-02-15-04-05",
MaxRemain: config.LogBackup,
})
if err != nil {
panic(err)
}
level, err := logrus.ParseLevel(config.Level)
if err != nil {
panic(err)
}
log.SetOutput(logPrint)
log.SetLevel(level)
log.SetFormatter(new(MineFormatter))
logger.SetOutput(writer)
logger.SetLevel(level)
logger.SetFormatter(new(MineFormatter))
}
type MineFormatter struct{}

43
backend/log/config.go Normal file
View File

@ -0,0 +1,43 @@
package log
import (
"errors"
"io"
"os"
"path"
)
// Shared declarations for the rolling log writer.

const (
	// RollingTimePattern is the cron spec that triggers rotation:
	// every day at midnight ("minute hour dom month dow").
	RollingTimePattern = "0 0 * * *"
)

var (
	// BufferSize is the size of each pooled write buffer (1 MiB).
	BufferSize = 0x100000
	// DefaultFileMode and DefaultFileFlag are used when (re)opening log files.
	DefaultFileMode = os.FileMode(0644)
	DefaultFileFlag = os.O_RDWR | os.O_CREATE | os.O_APPEND
	// ErrInvalidArgument reports a missing LogPath or FileName.
	ErrInvalidArgument = errors.New("error argument invalid")
	// QueueSize bounds the asynchronous write queue and the error channel.
	QueueSize = 1024
	// ErrClosed reports a write attempted after Close.
	ErrClosed = errors.New("error write on close")
)

// Config describes where the log lives and how rotation behaves.
type Config struct {
	TimeTagFormat string // time layout stamped into rotated file names
	LogPath       string // directory holding the log files
	FileName      string // base name without extension
	MaxRemain     int    // max rotated files kept on disk; <=0 keeps all
}

// Manager schedules rotation and announces rotated-file names.
type Manager interface {
	Fire() chan string
	Close()
}

// RollingWriter is an io.Writer whose backing file rotates over time.
type RollingWriter interface {
	io.Writer
	Close() error
}

// FilePath returns the active (un-rotated) log file path: LogPath/FileName.log.
func FilePath(c *Config) string {
	return path.Join(c.LogPath, c.FileName) + ".log"
}

54
backend/log/manager.go Normal file
View File

@ -0,0 +1,54 @@
package log
import (
"github.com/robfig/cron/v3"
"path"
"sync"
"time"
)
// manager schedules log rotation via cron and tells the writer, through
// the fire channel, which file name the finished segment should take.
type manager struct {
	thresholdSize int64 // NOTE(review): never read in this file — size-based rolling appears unimplemented; confirm before removing
	startAt       time.Time // start time of the current log segment
	fire          chan string // emits the rotated-file name when rotation is due
	cr            *cron.Cron // scheduler driving RollingTimePattern
	context       chan int // closed by Close to signal shutdown
	wg            sync.WaitGroup
	lock          sync.Mutex // guards startAt
}
// Fire returns the channel on which rotated-file names are announced.
func (m *manager) Fire() chan string {
	return m.fire
}
// Close signals shutdown by closing the context channel and stops the
// rotation cron. Calling it a second time panics (double close of context);
// AsynchronousWriter.Close swallows that panic deliberately.
func (m *manager) Close() {
	close(m.context)
	m.cr.Stop()
}
// NewManager starts a cron job that, on each RollingTimePattern tick,
// computes the next rotated file name and sends it on the fire channel.
// The channel is unbuffered, so each rotation tick blocks inside cron
// until the writer goroutine receives the name.
func NewManager(c *Config) (Manager, error) {
	m := &manager{
		startAt: time.Now(),
		cr:      cron.New(),
		fire:    make(chan string),
		context: make(chan int),
		wg:      sync.WaitGroup{},
	}
	if _, err := m.cr.AddFunc(RollingTimePattern, func() {
		m.fire <- m.GenLogFileName(c)
	}); err != nil {
		return nil, err
	}
	m.cr.Start()
	return m, nil
}
// GenLogFileName returns the rotated name for the segment that started at
// m.startAt — LogPath/FileName-<timetag>.log — and resets startAt to now so
// the next segment is stamped from this moment.
func (m *manager) GenLogFileName(c *Config) (filename string) {
	m.lock.Lock()
	// defer guarantees the mutex is released even if Format/Join panics
	// (the original unlocked manually before return).
	defer m.lock.Unlock()
	filename = path.Join(c.LogPath, c.FileName+"-"+m.startAt.Format(c.TimeTagFormat)) + ".log"
	m.startAt = time.Now()
	return
}

269
backend/log/writer.go Normal file
View File

@ -0,0 +1,269 @@
package log
import (
"fmt"
"github.com/1Panel-dev/1Panel/backend/utils/files"
"io/ioutil"
"log"
"os"
"path"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"unsafe"
)
// Writer owns the active log file plus the bookkeeping needed to rotate it.
type Writer struct {
	m             Manager // schedules rotation
	file          *os.File // currently open active log file
	absPath       string // path of the active file (LogPath/FileName.log)
	fire          chan string // receives rotated-file names from the Manager
	cf            *Config
	rollingfilech chan string // FIFO of rotated files kept on disk; oldest evicted when full
}
// AsynchronousWriter queues writes and flushes them from a background goroutine.
type AsynchronousWriter struct {
	Writer
	ctx     chan int // closed to stop the background writer goroutine
	queue   chan []byte // pending write buffers (recycled via _asyncBufferPool)
	errChan chan error // asynchronous write/rotation errors; NOTE(review): nothing in this file drains it
	closed  int32 // 0 = open, 1 = closed (CAS-guarded in Close)
	wg      sync.WaitGroup // start handshake with the writer goroutine
}
// Close shuts the writer down exactly once: it stops the background
// goroutine, drains whatever is still queued, closes the Manager and then
// the active file. Subsequent calls return ErrClosed.
func (w *AsynchronousWriter) Close() error {
	// CAS ensures only the first caller performs the shutdown.
	if atomic.CompareAndSwapInt32(&w.closed, 0, 1) {
		close(w.ctx)
		// NOTE(review): the writer goroutine is not waited on here (w.wg is
		// used only as a start handshake), so it may still touch w.file
		// while/after it is closed below — verify intended shutdown order.
		w.onClose()
		func() {
			// Manager.Close panics on double close of its context channel;
			// that panic is deliberately swallowed here.
			defer func() {
				if r := recover(); r != nil {
				}
			}()
			w.m.Close()
		}()
		return w.file.Close()
	}
	return ErrClosed
}
// onClose drains the write queue, flushing remaining buffers to the file.
// It stops early if a write fails while the error channel is already full.
func (w *AsynchronousWriter) onClose() {
	var err error
	for {
		select {
		case b := <-w.queue:
			if _, err = w.file.Write(b); err != nil {
				select {
				case w.errChan <- err:
				default:
					// error channel full: recycle the buffer and stop draining
					_asyncBufferPool.Put(b)
					return
				}
			}
			_asyncBufferPool.Put(b)
		default:
			// queue empty — done
			return
		}
	}
}
// _asyncBufferPool recycles the BufferSize-byte slices that travel through
// the asynchronous write queue.
var _asyncBufferPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, BufferSize)
	},
}
// NewWriterFromConfig builds the rolling writer: it opens (or creates) the
// active log file, starts the rotation Manager, preloads the retention
// queue with rotated files already on disk (when MaxRemain > 0), and spawns
// the asynchronous writer goroutine.
func NewWriterFromConfig(c *Config) (RollingWriter, error) {
	if c.LogPath == "" || c.FileName == "" {
		return nil, ErrInvalidArgument
	}
	if err := os.MkdirAll(c.LogPath, 0700); err != nil {
		return nil, err
	}
	filepath := FilePath(c)
	// Use the package-level flag/mode defaults so creation here matches the
	// reopen path in Writer.Reopen (the values are identical to the previous
	// literals os.O_RDWR|os.O_CREATE|os.O_APPEND and 0644).
	file, err := os.OpenFile(filepath, DefaultFileFlag, DefaultFileMode)
	if err != nil {
		return nil, err
	}
	mng, err := NewManager(c)
	if err != nil {
		return nil, err
	}
	var rollingWriter RollingWriter
	writer := Writer{
		m:       mng,
		file:    file,
		absPath: filepath,
		fire:    mng.Fire(),
		cf:      c,
	}
	if c.MaxRemain > 0 {
		writer.rollingfilech = make(chan string, c.MaxRemain)
		dir, err := ioutil.ReadDir(c.LogPath)
		if err != nil {
			mng.Close()
			return nil, err
		}
		// Collect previously rotated files whose extension parses as a time tag.
		// NOTE(review): this scan expects names like "<FileName>.log.<timetag>",
		// but manager.GenLogFileName produces "<FileName>-<timetag>.log" and
		// CompressFile renames results to "*.tar.gz" — as written this match is
		// unlikely to find anything; confirm the intended naming scheme.
		files := make([]string, 0, 10)
		for _, fi := range dir {
			if fi.IsDir() {
				continue
			}
			fileName := c.FileName + ".log."
			if strings.Contains(fi.Name(), fileName) {
				fileSuffix := path.Ext(fi.Name())
				if len(fileSuffix) > 1 {
					_, err := time.Parse(c.TimeTagFormat, fileSuffix[1:])
					if err == nil {
						files = append(files, fi.Name())
					}
				}
			}
		}
		// Oldest first, so the retention queue evicts chronologically.
		sort.Slice(files, func(i, j int) bool {
			fileSuffix1 := path.Ext(files[i])
			fileSuffix2 := path.Ext(files[j])
			t1, _ := time.Parse(c.TimeTagFormat, fileSuffix1[1:])
			t2, _ := time.Parse(c.TimeTagFormat, fileSuffix2[1:])
			return t1.Before(t2)
		})
		for _, file := range files {
		retry:
			select {
			case writer.rollingfilech <- path.Join(c.LogPath, file):
			default:
				// retention queue full: delete the oldest, then retry this one
				writer.DoRemove()
				goto retry
			}
		}
	}
	wr := &AsynchronousWriter{
		ctx:     make(chan int),
		queue:   make(chan []byte, QueueSize),
		errChan: make(chan error, QueueSize),
		wg:      sync.WaitGroup{},
		closed:  0,
		Writer:  writer,
	}
	wr.wg.Add(1)
	go wr.writer()
	// Wait for the goroutine's start handshake before handing the writer out.
	wr.wg.Wait()
	rollingWriter = wr
	return rollingWriter, nil
}
// writer is the background loop: it signals startup via wg.Done, then
// services rotation requests (fire) and queued writes until ctx is closed.
// Errors are pushed onto errChan only while it has spare capacity.
func (w *AsynchronousWriter) writer() {
	var err error
	// Start handshake: NewWriterFromConfig blocks in wg.Wait until this runs.
	w.wg.Done()
	for {
		select {
		case filename := <-w.fire:
			// Rotation due: rename the active file and reopen a fresh one.
			if err = w.Reopen(filename); err != nil && len(w.errChan) < cap(w.errChan) {
				w.errChan <- err
			}
		case b := <-w.queue:
			if _, err = w.file.Write(b); err != nil && len(w.errChan) < cap(w.errChan) {
				w.errChan <- err
			}
			_asyncBufferPool.Put(b)
		case <-w.ctx:
			return
		}
	}
}
// DoRemove evicts the oldest entry from the retention queue and deletes the
// corresponding file from disk, logging (but not returning) any failure.
// It blocks if the queue is empty.
func (w *Writer) DoRemove() {
	// A single-case select with no default is just a blocking receive.
	file := <-w.rollingfilech
	if err := os.Remove(file); err != nil {
		log.Println("error in remove log file", file, err)
	}
}
// Write first drains any pending rotation requests non-blockingly (so a due
// rename happens before new bytes land), then appends b to the active file.
// NOTE(review): w.file is read here via atomic.LoadPointer, but Reopen
// assigns it with a plain store — this is not a safe publication and races
// if Write and rotation run on different goroutines; confirm intended use
// (the asynchronous path serializes both in one goroutine).
func (w *Writer) Write(b []byte) (int, error) {
	var ok = false
	for !ok {
		select {
		case filename := <-w.fire:
			if err := w.Reopen(filename); err != nil {
				return 0, err
			}
		default:
			ok = true
		}
	}
	fp := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&w.file)))
	file := (*os.File)(fp)
	return file.Write(b)
}
// Reopen rotates the log: it renames the active file to the given rotated
// name, opens a fresh active file in its place, then asynchronously
// compresses the rotated file and enforces the MaxRemain retention limit.
// An empty active file is left untouched (nothing worth rotating).
func (w *Writer) Reopen(file string) error {
	fileInfo, err := w.file.Stat()
	if err != nil {
		return err
	}
	if fileInfo.Size() == 0 {
		// nothing written this segment; skip rotation
		return nil
	}
	w.file.Close()
	if err := os.Rename(w.absPath, file); err != nil {
		return err
	}
	newFile, err := os.OpenFile(w.absPath, DefaultFileFlag, DefaultFileMode)
	if err != nil {
		return err
	}
	w.file = newFile
	// Compression and retention run off the hot path.
	go func() {
		if err := w.CompressFile(file); err != nil {
			log.Println("error in compress log file", err)
			return
		}
		if w.cf.MaxRemain > 0 {
		retry:
			select {
			// NOTE(review): the queue stores the pre-compression name, but
			// CompressFile replaces that file with "<name>.tar.gz", so a later
			// DoRemove targets a path that no longer exists — verify.
			case w.rollingfilech <- file:
			default:
				// retention queue full: evict the oldest, then retry
				w.DoRemove()
				goto retry
			}
		}
	}()
	return nil
}
// CompressFile tar-gzips the rotated log file in place (producing
// "<logFile>.tar.gz" in the same directory) and removes the uncompressed
// original. Errors are returned wrapped with the offending path.
func (w *Writer) CompressFile(logFile string) error {
	op := files.NewFileOp()
	comFileName := path.Base(logFile) + ".tar.gz"
	filePath := path.Dir(logFile)
	// (leftover debug fmt.Println of the directory removed)
	if err := op.Compress([]string{logFile}, filePath, comFileName, files.TarGz); err != nil {
		return fmt.Errorf("compress log file %s: %w", logFile, err)
	}
	if errR := os.Remove(logFile); errR != nil {
		return fmt.Errorf("remove log file %s after compress: %w", logFile, errR)
	}
	return nil
}