diff --git a/internal/archive/archives/archives.go b/internal/archive/archives/archives.go
index b70ba95b..6d48624f 100644
--- a/internal/archive/archives/archives.go
+++ b/internal/archive/archives/archives.go
@@ -1,42 +1,53 @@
 package archives
 
 import (
-	"github.com/alist-org/alist/v3/internal/archive/tool"
-	"github.com/alist-org/alist/v3/internal/model"
-	"github.com/alist-org/alist/v3/internal/stream"
-	"github.com/alist-org/alist/v3/pkg/utils"
 	"io"
 	"io/fs"
 	"os"
 	stdpath "path"
 	"strings"
+
+	"github.com/alist-org/alist/v3/internal/archive/tool"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/stream"
+	"github.com/alist-org/alist/v3/pkg/utils"
 )
 
 type Archives struct {
 }
 
-func (_ *Archives) AcceptedExtensions() []string {
+func (*Archives) AcceptedExtensions() []string {
 	return []string{
 		".br", ".bz2", ".gz", ".lz4", ".lz", ".sz", ".s2", ".xz", ".zz", ".zst", ".tar", ".rar", ".7z",
 	}
 }
 
-func (_ *Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+func (*Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
 	fsys, err := getFs(ss, args)
 	if err != nil {
 		return nil, err
 	}
-	_, err = fsys.ReadDir(".")
+	files, err := fsys.ReadDir(".")
 	if err != nil {
 		return nil, filterPassword(err)
 	}
+
+	tree := make([]model.ObjTree, 0, len(files))
+	for _, file := range files {
+		info, err := file.Info()
+		if err != nil {
+			continue
+		}
+		tree = append(tree, &model.ObjectTree{Object: *toModelObj(info)})
+	}
 	return &model.ArchiveMetaInfo{
 		Comment:   "",
 		Encrypted: false,
+		Tree:      tree,
 	}, nil
 }
 
-func (_ *Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+func (*Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
 	fsys, err := getFs(ss, args.ArchiveArgs)
 	if err != nil {
 		return nil, err
@@ -58,7 +69,7 @@ func (_ *Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs)
 	})
 }
 
-func (_ *Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+func (*Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
 	fsys, err := getFs(ss, args.ArchiveArgs)
 	if err != nil {
 		return nil, 0, err
@@ -74,7 +85,7 @@ func (_ *Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArg
 	return file, stat.Size(), nil
 }
 
-func (_ *Archives) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+func (*Archives) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
 	fsys, err := getFs(ss, args.ArchiveArgs)
 	if err != nil {
 		return err
diff --git a/internal/archive/archives/utils.go b/internal/archive/archives/utils.go
index b72e6bc6..fdae1009 100644
--- a/internal/archive/archives/utils.go
+++ b/internal/archive/archives/utils.go
@@ -1,15 +1,16 @@
 package archives
 
 import (
-	"github.com/alist-org/alist/v3/internal/errs"
-	"github.com/alist-org/alist/v3/internal/model"
-	"github.com/alist-org/alist/v3/internal/stream"
-	"github.com/mholt/archives"
 	"io"
 	fs2 "io/fs"
 	"os"
 	stdpath "path"
 	"strings"
+
+	"github.com/alist-org/alist/v3/internal/errs"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/stream"
+	"github.com/mholt/archives"
 )
 
 func getFs(ss *stream.SeekableStream, args model.ArchiveArgs) (*archives.ArchiveFS, error) {
@@ -17,6 +18,9 @@ func getFs(ss *stream.SeekableStream, args model.ArchiveArgs) (*archives.Archive
 	if err != nil {
 		return nil, err
 	}
+	if r, ok := reader.(*stream.RangeReadReadAtSeeker); ok {
+		r.InitHeadCache()
+	}
 	format, _, err := archives.Identify(ss.Ctx, ss.GetName(), reader)
 	if err != nil {
 		return nil, errs.UnknownArchiveFormat
 	}
diff --git a/internal/archive/zip/zip.go b/internal/archive/zip/zip.go
index ccb70e65..e5285518 100644
--- a/internal/archive/zip/zip.go
+++ b/internal/archive/zip/zip.go
@@ -1,25 +1,26 @@
 package zip
 
 import (
+	"io"
+	"os"
+	stdpath "path"
+	"strings"
+
 	"github.com/alist-org/alist/v3/internal/archive/tool"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/yeka/zip"
-	"io"
-	"os"
-	stdpath "path"
-	"strings"
 )
 
 type Zip struct {
 }
 
-func (_ *Zip) AcceptedExtensions() []string {
+func (*Zip) AcceptedExtensions() []string {
 	return []string{".zip"}
 }
 
-func (_ *Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+func (*Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
 	reader, err := stream.NewReadAtSeeker(ss, 0)
 	if err != nil {
 		return nil, err
@@ -29,19 +30,81 @@ func (_ *Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.
 		return nil, err
 	}
 	encrypted := false
+	dirMap := make(map[string]*model.ObjectTree)
+	dirMap["."] = &model.ObjectTree{}
 	for _, file := range zipReader.File {
 		if file.IsEncrypted() {
 			encrypted = true
 			break
 		}
+
+		name := strings.TrimPrefix(decodeName(file.Name), "/")
+		var dir string
+		var dirObj *model.ObjectTree
+		isNewFolder := false
+		if !file.FileInfo().IsDir() {
+			// add the file to its containing folder first
+			dir = stdpath.Dir(name)
+			dirObj = dirMap[dir]
+			if dirObj == nil {
+				isNewFolder = true
+				dirObj = &model.ObjectTree{}
+				dirObj.IsFolder = true
+				dirObj.Name = stdpath.Base(dir)
+				dirObj.Modified = file.ModTime()
+				dirMap[dir] = dirObj
+			}
+			dirObj.Children = append(
+				dirObj.Children, &model.ObjectTree{
+					Object: *toModelObj(file.FileInfo()),
+				},
+			)
+		} else {
+			dir = strings.TrimSuffix(name, "/")
+			dirObj = dirMap[dir]
+			if dirObj == nil {
+				isNewFolder = true
+				dirObj = &model.ObjectTree{}
+				dirMap[dir] = dirObj
+			}
+			dirObj.IsFolder = true
+			dirObj.Name = stdpath.Base(dir)
+			dirObj.Modified = file.ModTime()
+		}
+		if isNewFolder {
+			// then add the folder to its parent folder
+			dir = stdpath.Dir(dir)
+			pDirObj := dirMap[dir]
+			if pDirObj != nil {
+				pDirObj.Children = append(pDirObj.Children, dirObj)
+				continue
+			}
+
+			for {
+				// handle archives that record only file paths, not the folders themselves
+				pDirObj = &model.ObjectTree{}
+				pDirObj.IsFolder = true
+				pDirObj.Name = stdpath.Base(dir)
+				pDirObj.Modified = file.ModTime()
+				dirMap[dir] = pDirObj
+				pDirObj.Children = append(pDirObj.Children, dirObj)
+				dir = stdpath.Dir(dir)
+				if dirMap[dir] != nil {
+					break
+				}
+				dirObj = pDirObj
+			}
+		}
 	}
+
 	return &model.ArchiveMetaInfo{
 		Comment:   zipReader.Comment,
 		Encrypted: encrypted,
+		Tree:      dirMap["."].GetChildren(),
 	}, nil
 }
 
-func (_ *Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
 	reader, err := stream.NewReadAtSeeker(ss, 0)
 	if err != nil {
 		return nil, err
@@ -53,6 +116,7 @@ func (_ *Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mo
 	if args.InnerPath == "/" {
 		ret := make([]model.Obj, 0)
 		passVerified := false
+		var dir *model.Object
 		for _, file := range zipReader.File {
 			if !passVerified && file.IsEncrypted() {
 				file.SetPassword(args.Password)
@@ -63,12 +127,24 @@ func (_ *Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mo
 				_ = rc.Close()
 				passVerified = true
 			}
-			name := decodeName(file.Name)
-			if strings.Contains(strings.TrimSuffix(name, "/"), "/") {
+			name := strings.TrimSuffix(decodeName(file.Name), "/")
+			if strings.Contains(name, "/") {
+				// some archives store no entry for the top-level folder
+				strs := strings.Split(name, "/")
+				if dir == nil && len(strs) == 2 {
+					dir = &model.Object{
+						Name:     strs[0],
+						Modified: ss.ModTime(),
+						IsFolder: true,
+					}
+				}
 				continue
 			}
 			ret = append(ret, toModelObj(file.FileInfo()))
 		}
+		if len(ret) == 0 && dir != nil {
+			ret = append(ret, dir)
+		}
 		return ret, nil
 	} else {
 		innerPath := strings.TrimPrefix(args.InnerPath, "/") + "/"
@@ -76,13 +152,11 @@
 		exist := false
 		for _, file := range zipReader.File {
 			name := decodeName(file.Name)
-			if name == innerPath {
-				exist = true
-			}
 			dir := stdpath.Dir(strings.TrimSuffix(name, "/")) + "/"
 			if dir != innerPath {
 				continue
 			}
+			exist = true
 			ret = append(ret, toModelObj(file.FileInfo()))
 		}
 		if !exist {
@@ -92,7 +166,7 @@
 	}
 }
 
-func (_ *Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+func (*Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
 	reader, err := stream.NewReadAtSeeker(ss, 0)
 	if err != nil {
 		return nil, 0, err
@@ -117,7 +191,7 @@ func (_ *Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (i
 	return nil, 0, errs.ObjectNotFound
 }
 
-func (_ *Zip) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+func (*Zip) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
 	reader, err := stream.NewReadAtSeeker(ss, 0)
 	if err != nil {
 		return err
diff --git a/internal/model/archive.go b/internal/model/archive.go
index 03ac7c36..01b83691 100644
--- a/internal/model/archive.go
+++ b/internal/model/archive.go
@@ -1,5 +1,7 @@
 package model
 
+import "time"
+
 type ObjTree interface {
 	Obj
 	GetChildren() []ObjTree
@@ -45,5 +47,7 @@ func (m *ArchiveMetaInfo) GetTree() []ObjTree {
 
 type ArchiveMetaProvider struct {
 	ArchiveMeta
+	*Sort
 	DriverProviding bool
+	Expiration      *time.Duration
 }
diff --git a/internal/op/archive.go b/internal/op/archive.go
index 6a9fa084..a241838c 100644
--- a/internal/op/archive.go
+++ b/internal/op/archive.go
@@ -3,13 +3,14 @@ package op
 import (
 	"context"
 	stderrors "errors"
-	"github.com/alist-org/alist/v3/internal/archive/tool"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	stdpath "path"
 	"strings"
 	"time"
 
+	"github.com/alist-org/alist/v3/internal/archive/tool"
+	"github.com/alist-org/alist/v3/internal/stream"
+
 	"github.com/Xhofe/go-cache"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
@@ -40,8 +41,8 @@ func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to get %s archive met: %+v", path, err)
 	}
-	if !storage.Config().NoCache {
-		archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
+	if m.Expiration != nil {
+		archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](*m.Expiration))
 	}
 	return m, nil
 }
@@ -82,7 +83,15 @@
 		}
 		meta, err := storageAr.GetArchiveMeta(ctx, obj, args.ArchiveArgs)
 		if !errors.Is(err, errs.NotImplement) {
-			return obj, &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: true}, err
+			archiveMetaProvider := &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: true}
+			if meta.GetTree() != nil {
+				archiveMetaProvider.Sort = &storage.GetStorage().Sort
+			}
+			if !storage.Config().NoCache {
+				Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
+				archiveMetaProvider.Expiration = &Expiration
+			}
+			return obj, archiveMetaProvider, err
 		}
 	}
 	obj, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
@@ -95,7 +104,21 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
 		}
 	}()
 	meta, err := t.GetMeta(ss, args.ArchiveArgs)
-	return obj, &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: false}, err
+	if err != nil {
+		return nil, nil, err
+	}
+	archiveMetaProvider := &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: false}
+	if meta.GetTree() != nil {
+		archiveMetaProvider.Sort = &storage.GetStorage().Sort
+	}
+	if !storage.Config().NoCache {
+		Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
+		archiveMetaProvider.Expiration = &Expiration
+	} else if ss.Link.MFile == nil {
+		// e.g. the alias and crypt drivers
+		archiveMetaProvider.Expiration = ss.Link.Expiration
+	}
+	return obj, archiveMetaProvider, err
 }
 
 var archiveListCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))
@@ -113,10 +136,10 @@ func ListArchive(ctx context.Context, storage driver.Driver, path string, args m
 			log.Debugf("use cache when list archive [%s]%s", path, args.InnerPath)
 			return files, nil
 		}
-		if meta, ok := archiveMetaCache.Get(metaKey); ok {
-			log.Debugf("use meta cache when list archive [%s]%s", path, args.InnerPath)
-			return getChildrenFromArchiveMeta(meta, args.InnerPath)
-		}
+		// if meta, ok := archiveMetaCache.Get(metaKey); ok {
+		// 	log.Debugf("use meta cache when list archive [%s]%s", path, args.InnerPath)
+		// 	return getChildrenFromArchiveMeta(meta, args.InnerPath)
+		// }
 	}
 	objs, err, _ := archiveListG.Do(key, func() ([]model.Obj, error) {
 		obj, files, err := listArchive(ctx, storage, path, args)
diff --git a/internal/stream/stream.go b/internal/stream/stream.go
index 0915ee6b..1962fb46 100644
--- a/internal/stream/stream.go
+++ b/internal/stream/stream.go
@@ -13,6 +13,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/sirupsen/logrus"
 )
 
 type FileStream struct {
@@ -189,6 +190,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
 
 	if ss.Link.RangeReadCloser != nil {
 		ss.rangeReadCloser = ss.Link.RangeReadCloser
+		ss.Add(ss.rangeReadCloser)
 		return &ss, nil
 	}
 	if len(ss.Link.URL) > 0 {
@@ -197,6 +199,7 @@
 			return nil, err
 		}
 		ss.rangeReadCloser = rrc
+		ss.Add(rrc)
 		return &ss, nil
 	}
 }
@@ -248,8 +251,6 @@ func (ss *SeekableStream) Read(p []byte) (n int, err error) {
 			return 0, nil
 		}
 		ss.Reader = io.NopCloser(rc)
-		ss.Closers.Add(rc)
-
 	}
 	return ss.Reader.Read(p)
 }
@@ -337,10 +338,62 @@ type RangeReadReadAtSeeker struct {
 	ss        *SeekableStream
 	masterOff int64
 	readers   []*readerCur
+	*headCache
 }
 
-type FileReadAtSeeker struct {
-	ss *SeekableStream
+type headCache struct {
+	*readerCur
+	bufs [][]byte
+}
+
+func (c *headCache) read(p []byte) (n int, err error) {
+	pL := len(p)
+	logrus.Debugf("headCache read_%d", pL)
+	if c.cur < int64(pL) {
+		bufL := int64(pL) - c.cur
+		buf := make([]byte, bufL)
+		lr := io.LimitReader(c.reader, bufL)
+		off := 0
+		for c.cur < int64(pL) {
+			n, err = lr.Read(buf[off:])
+			off += n
+			c.cur += int64(n)
+			if err == io.EOF && n == int(bufL) {
+				err = nil
+			}
+			if err != nil {
+				break
+			}
+		}
+		c.bufs = append(c.bufs, buf)
+	}
+	n = 0
+	if c.cur >= int64(pL) {
+		for i := 0; n < pL; i++ {
+			buf := c.bufs[i]
+			r := len(buf)
+			if n+r > pL {
+				r = pL - n
+			}
+			n += copy(p[n:], buf[:r])
+		}
+	}
+	return
+}
+func (r *headCache) close() error {
+	for i := range r.bufs {
+		r.bufs[i] = nil
+	}
+	r.bufs = nil
+	return nil
+}
+
+func (r *RangeReadReadAtSeeker) InitHeadCache() {
+	if r.ss.Link.MFile == nil && r.masterOff == 0 {
+		reader := r.readers[0]
+		r.readers = r.readers[1:]
+		r.headCache = &headCache{readerCur: reader}
+	}
 }
 
 func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (SStreamReadAtSeeker, error) {
@@ -351,27 +404,23 @@
 		}
 		return &FileReadAtSeeker{ss: ss}, nil
 	}
-	var r io.Reader
-	var err error
+	r := &RangeReadReadAtSeeker{
+		ss:        ss,
+		masterOff: offset,
+	}
 	if offset != 0 || utils.IsBool(forceRange...) {
 		if offset < 0 || offset > ss.GetSize() {
 			return nil, errors.New("offset out of range")
 		}
-		r, err = ss.RangeRead(http_range.Range{Start: offset, Length: -1})
+		_, err := r.getReaderAtOffset(offset)
 		if err != nil {
 			return nil, err
 		}
-		if rc, ok := r.(io.Closer); ok {
-			ss.Closers.Add(rc)
-		}
 	} else {
-		r = ss
+		rc := &readerCur{reader: ss, cur: offset}
+		r.readers = append(r.readers, rc)
 	}
-	return &RangeReadReadAtSeeker{
-		ss:        ss,
-		masterOff: offset,
-		readers:   []*readerCur{{reader: r, cur: offset}},
-	}, nil
+	return r, nil
 }
 
 func (r *RangeReadReadAtSeeker) GetRawStream() *SeekableStream {
@@ -379,38 +428,71 @@
 }
 
 func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (*readerCur, error) {
+	var rc *readerCur
 	for _, reader := range r.readers {
+		if reader.cur == -1 {
+			continue
+		}
 		if reader.cur == off {
+			// logrus.Debugf("getReaderAtOffset match_%d", off)
 			return reader, nil
 		}
+		if reader.cur > 0 && off >= reader.cur && (rc == nil || reader.cur < rc.cur) {
+			rc = reader
+		}
 	}
-	reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: -1})
+	if rc != nil && off-rc.cur <= utils.MB {
+		n, err := utils.CopyWithBufferN(utils.NullWriter{}, rc.reader, off-rc.cur)
+		rc.cur += n
+		if err == io.EOF && rc.cur == off {
+			err = nil
+		}
+		if err == nil {
+			logrus.Debugf("getReaderAtOffset old_%d", off)
+			return rc, nil
+		}
+		rc.cur = -1
+	}
+	logrus.Debugf("getReaderAtOffset new_%d", off)
+
+	// a Range request must not exceed the file size; some cloud drives cannot handle that and return the whole file instead
+	reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: r.ss.GetSize() - off})
 	if err != nil {
 		return nil, err
 	}
-	if c, ok := reader.(io.Closer); ok {
-		r.ss.Closers.Add(c)
-	}
-	rc := &readerCur{reader: reader, cur: off}
+	rc = &readerCur{reader: reader, cur: off}
 	r.readers = append(r.readers, rc)
 	return rc, nil
 }
 
 func (r *RangeReadReadAtSeeker) ReadAt(p []byte, off int64) (int, error) {
+	if off == 0 && r.headCache != nil {
+		return r.headCache.read(p)
+	}
 	rc, err := r.getReaderAtOffset(off)
 	if err != nil {
 		return 0, err
 	}
-	num := 0
+	n, num := 0, 0
 	for num < len(p) {
-		n, err := rc.reader.Read(p[num:])
+		n, err = rc.reader.Read(p[num:])
 		rc.cur += int64(n)
 		num += n
-		if err != nil {
-			return num, err
+		if err == nil {
+			continue
 		}
+		if err == io.EOF {
+			// io.EOF means this reader has been fully consumed
+			rc.cur = -1
+			// the yeka/zip package does not handle EOF itself, so compensate for it here
+			// https://github.com/yeka/zip/blob/03d6312748a9d6e0bc0c9a7275385c09f06d9c14/reader.go#L433
+			if num == len(p) {
+				err = nil
+			}
+		}
+		break
 	}
-	return num, nil
+	return num, err
 }
 
 func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
@@ -437,6 +519,9 @@ func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
 }
 
 func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {
+	if r.masterOff == 0 && r.headCache != nil {
+		return r.headCache.read(p)
+	}
 	rc, err := r.getReaderAtOffset(r.masterOff)
 	if err != nil {
 		return 0, err
@@ -448,9 +533,16 @@
 }
 
 func (r *RangeReadReadAtSeeker) Close() error {
+	if r.headCache != nil {
+		r.headCache.close()
+	}
 	return r.ss.Close()
 }
 
+type FileReadAtSeeker struct {
+	ss *SeekableStream
+}
+
 func (f *FileReadAtSeeker) GetRawStream() *SeekableStream {
 	return f.ss
 }
diff --git a/pkg/utils/io.go b/pkg/utils/io.go
index e06fb235..c314307d 100644
--- a/pkg/utils/io.go
+++ b/pkg/utils/io.go
@@ -233,3 +233,9 @@ func CopyWithBufferN(dst io.Writer, src io.Reader, n int64) (written int64, err
 	}
 	return
 }
+
+type NullWriter struct{}
+
+func (NullWriter) Write(p []byte) (n int, err error) {
+	return len(p), nil
+}
diff --git a/server/handles/archive.go b/server/handles/archive.go
index bad99bac..6ff13641 100644
--- a/server/handles/archive.go
+++ b/server/handles/archive.go
@@ -2,6 +2,10 @@ package handles
 
 import (
 	"fmt"
+	"net/url"
+	stdpath "path"
+	"strings"
+
 	"github.com/alist-org/alist/v3/internal/archive/tool"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/errs"
@@ -15,9 +19,6 @@ import (
 	"github.com/gin-gonic/gin"
 	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
-	"mime"
-	stdpath "path"
-	"strings"
 )
 
 type ArchiveMetaReq struct {
@@ -31,6 +32,7 @@ type ArchiveMetaResp struct {
 	Comment     string               `json:"comment"`
 	IsEncrypted bool                 `json:"encrypted"`
 	Content     []ArchiveContentResp `json:"content"`
+	Sort        *model.Sort          `json:"sort,omitempty"`
 	RawURL      string               `json:"raw_url"`
 	Sign        string               `json:"sign"`
 }
@@ -128,6 +130,7 @@ func FsArchiveMeta(c *gin.Context) {
 		Comment:     ret.GetComment(),
 		IsEncrypted: ret.IsEncrypted(),
 		Content:     toContentResp(ret.GetTree()),
+		Sort:        ret.Sort,
 		RawURL:      fmt.Sprintf("%s%s%s", common.GetApiUrl(c.Request), api, utils.EncodePath(reqPath, true)),
 		Sign:        s,
 	})
@@ -361,14 +364,11 @@ func ArchiveInternalExtract(c *gin.Context) {
 		"Referrer-Policy": "no-referrer",
 		"Cache-Control":   "max-age=0, no-cache, no-store, must-revalidate",
 	}
-	if c.Query("attachment") == "true" {
-		filename := stdpath.Base(innerPath)
-		headers["Content-Disposition"] = fmt.Sprintf("attachment; filename=\"%s\"", filename)
-	}
+	filename := stdpath.Base(innerPath)
+	headers["Content-Disposition"] = fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, filename, url.PathEscape(filename))
 	contentType := c.Request.Header.Get("Content-Type")
 	if contentType == "" {
-		fileExt := stdpath.Ext(innerPath)
-		contentType = mime.TypeByExtension(fileExt)
+		contentType = utils.GetMimeType(filename)
 	}
 	c.DataFromReader(200, size, contentType, rc, headers)
 }
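
Reviewer note (not part of the patch): the `getReaderAtOffset` change above avoids opening a new range request when an already-open reader sits at or before the requested offset and the gap is at most `utils.MB`; it drains the gap into a null writer instead, which is cheaper on cloud drives where every range request costs a full HTTP round trip. Below is a minimal, self-contained sketch of that skip-ahead idea. The names `cursorReader`, `readerAt`, and `maxSkip` are hypothetical, and the sketch simplifies the patch: the real code also invalidates exhausted readers by setting `cur = -1`, uses its own tie-break when several readers qualify, and serves offset 0 from the `headCache`.

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// cursorReader pairs a reader with the absolute file offset it has reached.
type cursorReader struct {
	r   io.Reader
	cur int64
}

// maxSkip mirrors the utils.MB cap in the patch: only fast-forward a reader
// when the gap to the requested offset is at most 1 MiB.
const maxSkip = 1 << 20

// readerAt returns a reader positioned at off, preferring to fast-forward an
// existing reader (cheap sequential reads) over opening a new range request.
func readerAt(readers *[]*cursorReader, off int64, open func(int64) io.Reader) *cursorReader {
	var behind *cursorReader
	for _, rc := range *readers {
		if rc.cur == off {
			return rc // exact match: nothing to skip
		}
		if off > rc.cur && (behind == nil || rc.cur > behind.cur) {
			behind = rc // remember the closest reader still behind off
		}
	}
	if behind != nil && off-behind.cur <= maxSkip {
		// discard the gap instead of issuing another range request
		n, err := io.CopyN(io.Discard, behind.r, off-behind.cur)
		behind.cur += n
		if err == nil && behind.cur == off {
			return behind
		}
	}
	// no reusable reader: open a fresh one at the requested offset
	rc := &cursorReader{r: open(off), cur: off}
	*readers = append(*readers, rc)
	return rc
}

func main() {
	data := "0123456789abcdef"
	open := func(off int64) io.Reader { return strings.NewReader(data[off:]) }
	readers := []*cursorReader{{r: open(0), cur: 0}}
	rc := readerAt(&readers, 4, open) // fast-forwards the offset-0 reader by 4 bytes
	buf := make([]byte, 4)
	if _, err := io.ReadFull(rc.r, buf); err == nil {
		fmt.Printf("read %q at offset 4\n", buf) // read "4567" at offset 4
	}
}
```

The `utils.NullWriter` added in `pkg/utils/io.go` plays the same role as the standard library's `io.Discard`, which the sketch uses directly.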