fix(archive): unable to preview (#7843)

* fix(archive): unrecognized zip

* feat(archive): add tree for zip meta

* fix bug

* refactor(archive): meta cache time uses Link Expiration first

* feat(archive): return sort policy in meta (#2)

* refactor

* perf(archive): reduce new network requests

---------

Co-authored-by: KirCute_ECT <951206789@qq.com>
j2rong4cn 2025-01-27 20:08:56 +08:00 committed by GitHub
parent 2be0c3d1a0
commit 5c5d8378e5
8 changed files with 287 additions and 73 deletions

View File

@@ -1,42 +1,53 @@
 package archives

 import (
-	"github.com/alist-org/alist/v3/internal/archive/tool"
-	"github.com/alist-org/alist/v3/internal/model"
-	"github.com/alist-org/alist/v3/internal/stream"
-	"github.com/alist-org/alist/v3/pkg/utils"
 	"io"
 	"io/fs"
 	"os"
 	stdpath "path"
 	"strings"
+
+	"github.com/alist-org/alist/v3/internal/archive/tool"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/stream"
+	"github.com/alist-org/alist/v3/pkg/utils"
 )

 type Archives struct {
 }

-func (_ *Archives) AcceptedExtensions() []string {
+func (*Archives) AcceptedExtensions() []string {
 	return []string{
 		".br", ".bz2", ".gz", ".lz4", ".lz", ".sz", ".s2", ".xz", ".zz", ".zst", ".tar", ".rar", ".7z",
 	}
 }

-func (_ *Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+func (*Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
 	fsys, err := getFs(ss, args)
 	if err != nil {
 		return nil, err
 	}
-	_, err = fsys.ReadDir(".")
+	files, err := fsys.ReadDir(".")
 	if err != nil {
 		return nil, filterPassword(err)
 	}
+	tree := make([]model.ObjTree, 0, len(files))
+	for _, file := range files {
+		info, err := file.Info()
+		if err != nil {
+			continue
+		}
+		tree = append(tree, &model.ObjectTree{Object: *toModelObj(info)})
+	}
 	return &model.ArchiveMetaInfo{
 		Comment:   "",
 		Encrypted: false,
+		Tree:      tree,
 	}, nil
 }

-func (_ *Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+func (*Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
 	fsys, err := getFs(ss, args.ArchiveArgs)
 	if err != nil {
 		return nil, err
@@ -58,7 +69,7 @@ func (_ *Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs)
 	})
 }

-func (_ *Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+func (*Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
 	fsys, err := getFs(ss, args.ArchiveArgs)
 	if err != nil {
 		return nil, 0, err
@@ -74,7 +85,7 @@ func (_ *Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArg
 	return file, stat.Size(), nil
 }

-func (_ *Archives) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+func (*Archives) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
 	fsys, err := getFs(ss, args.ArchiveArgs)
 	if err != nil {
 		return err
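
GetMeta for the mholt/archives-based tool now returns a one-level tree built from the root listing instead of discarding it. A stdlib-only sketch of the same pattern, with testing/fstest.MapFS standing in for the archives.ArchiveFS that getFs returns (the alist model types are omitted):

package main

import (
	"fmt"
	"testing/fstest"
)

func main() {
	// Stand-in for the archive filesystem returned by getFs.
	fsys := fstest.MapFS{
		"readme.txt": {Data: []byte("hi")},
		"docs/a.txt": {Data: []byte("a")},
	}
	entries, err := fsys.ReadDir(".")
	if err != nil {
		panic(err)
	}
	// Like the new GetMeta: one tree node per top-level entry,
	// skipping entries whose Info() call fails.
	for _, e := range entries {
		info, err := e.Info()
		if err != nil {
			continue
		}
		fmt.Println(info.Name(), info.IsDir()) // docs true / readme.txt false
	}
}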

View File

@@ -1,15 +1,16 @@
 package archives

 import (
-	"github.com/alist-org/alist/v3/internal/errs"
-	"github.com/alist-org/alist/v3/internal/model"
-	"github.com/alist-org/alist/v3/internal/stream"
-	"github.com/mholt/archives"
 	"io"
 	fs2 "io/fs"
 	"os"
 	stdpath "path"
 	"strings"
+
+	"github.com/alist-org/alist/v3/internal/errs"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/stream"
+	"github.com/mholt/archives"
 )

 func getFs(ss *stream.SeekableStream, args model.ArchiveArgs) (*archives.ArchiveFS, error) {
@@ -17,6 +18,9 @@ func getFs(ss *stream.SeekableStream, args model.ArchiveArgs) (*archives.Archive
 	if err != nil {
 		return nil, err
 	}
+	if r, ok := reader.(*stream.RangeReadReadAtSeeker); ok {
+		r.InitHeadCache()
+	}
 	format, _, err := archives.Identify(ss.Ctx, ss.GetName(), reader)
 	if err != nil {
 		return nil, errs.UnknownArchiveFormat
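
archives.Identify sniffs the format from the head of the stream, after which the chosen reader seeks back to the start; without a cache that costs a second range request. InitHeadCache keeps the leading bytes in memory so reads at offset 0 can be replayed. A stdlib-only sketch of the idea (headReader is illustrative, not the alist implementation):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// headReader remembers everything read from the front of a sequential
// source, so a later read at offset 0 can be served from memory
// instead of opening a new range request.
type headReader struct {
	src  io.Reader
	head bytes.Buffer
}

func (h *headReader) Read(p []byte) (int, error) {
	n, err := h.src.Read(p)
	h.head.Write(p[:n])
	return n, err
}

func main() {
	h := &headReader{src: strings.NewReader("PK\x03\x04... zip data ...")}
	sig := make([]byte, 4)
	io.ReadFull(h, sig) // format identification consumes the head
	fmt.Printf("sniffed %q, cached %d bytes\n", sig, h.head.Len())
}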

View File

@@ -1,25 +1,26 @@
 package zip

 import (
+	"io"
+	"os"
+	stdpath "path"
+	"strings"
+
 	"github.com/alist-org/alist/v3/internal/archive/tool"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/yeka/zip"
-	"io"
-	"os"
-	stdpath "path"
-	"strings"
 )

 type Zip struct {
 }

-func (_ *Zip) AcceptedExtensions() []string {
+func (*Zip) AcceptedExtensions() []string {
 	return []string{".zip"}
 }

-func (_ *Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+func (*Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
 	reader, err := stream.NewReadAtSeeker(ss, 0)
 	if err != nil {
 		return nil, err
@@ -29,19 +30,81 @@ func (_ *Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.
 		return nil, err
 	}
 	encrypted := false
+	dirMap := make(map[string]*model.ObjectTree)
+	dirMap["."] = &model.ObjectTree{}
 	for _, file := range zipReader.File {
 		if file.IsEncrypted() {
 			encrypted = true
 			break
 		}
+		name := strings.TrimPrefix(decodeName(file.Name), "/")
+		var dir string
+		var dirObj *model.ObjectTree
+		isNewFolder := false
+		if !file.FileInfo().IsDir() {
+			// first, attach the file to the folder that contains it
+			dir = stdpath.Dir(name)
+			dirObj = dirMap[dir]
+			if dirObj == nil {
+				isNewFolder = true
+				dirObj = &model.ObjectTree{}
+				dirObj.IsFolder = true
+				dirObj.Name = stdpath.Base(dir)
+				dirObj.Modified = file.ModTime()
+				dirMap[dir] = dirObj
+			}
+			dirObj.Children = append(
+				dirObj.Children, &model.ObjectTree{
+					Object: *toModelObj(file.FileInfo()),
+				},
+			)
+		} else {
+			dir = strings.TrimSuffix(name, "/")
+			dirObj = dirMap[dir]
+			if dirObj == nil {
+				isNewFolder = true
+				dirObj = &model.ObjectTree{}
+				dirMap[dir] = dirObj
+			}
+			dirObj.IsFolder = true
+			dirObj.Name = stdpath.Base(dir)
+			dirObj.Modified = file.ModTime()
+		}
+		if isNewFolder {
+			// attach the new folder to its parent folder
+			dir = stdpath.Dir(dir)
+			pDirObj := dirMap[dir]
+			if pDirObj != nil {
+				pDirObj.Children = append(pDirObj.Children, dirObj)
+				continue
+			}
+			for {
+				// handle archives that record only file paths, without folder entries
+				pDirObj = &model.ObjectTree{}
+				pDirObj.IsFolder = true
+				pDirObj.Name = stdpath.Base(dir)
+				pDirObj.Modified = file.ModTime()
+				dirMap[dir] = pDirObj
+				pDirObj.Children = append(pDirObj.Children, dirObj)
+				dir = stdpath.Dir(dir)
+				if dirMap[dir] != nil {
+					break
+				}
+				dirObj = pDirObj
+			}
+		}
 	}
 	return &model.ArchiveMetaInfo{
 		Comment:   zipReader.Comment,
 		Encrypted: encrypted,
+		Tree:      dirMap["."].GetChildren(),
 	}, nil
 }

-func (_ *Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
 	reader, err := stream.NewReadAtSeeker(ss, 0)
 	if err != nil {
 		return nil, err
@@ -53,6 +116,7 @@ func (_ *Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mo
 	if args.InnerPath == "/" {
 		ret := make([]model.Obj, 0)
 		passVerified := false
+		var dir *model.Object
 		for _, file := range zipReader.File {
 			if !passVerified && file.IsEncrypted() {
 				file.SetPassword(args.Password)
@@ -63,12 +127,24 @@ func (_ *Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mo
 				_ = rc.Close()
 				passVerified = true
 			}
-			name := decodeName(file.Name)
-			if strings.Contains(strings.TrimSuffix(name, "/"), "/") {
+			name := strings.TrimSuffix(decodeName(file.Name), "/")
+			if strings.Contains(name, "/") {
+				// some archives do not store an entry for the top-level folder
+				strs := strings.Split(name, "/")
+				if dir == nil && len(strs) == 2 {
+					dir = &model.Object{
+						Name:     strs[0],
+						Modified: ss.ModTime(),
+						IsFolder: true,
+					}
+				}
 				continue
 			}
 			ret = append(ret, toModelObj(file.FileInfo()))
 		}
+		if len(ret) == 0 && dir != nil {
+			ret = append(ret, dir)
+		}
 		return ret, nil
 	} else {
 		innerPath := strings.TrimPrefix(args.InnerPath, "/") + "/"
@@ -76,13 +152,11 @@ func (_ *Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mo
 		exist := false
 		for _, file := range zipReader.File {
 			name := decodeName(file.Name)
-			if name == innerPath {
-				exist = true
-			}
 			dir := stdpath.Dir(strings.TrimSuffix(name, "/")) + "/"
 			if dir != innerPath {
 				continue
 			}
+			exist = true
 			ret = append(ret, toModelObj(file.FileInfo()))
 		}
 		if !exist {
@@ -92,7 +166,7 @@ func (_ *Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mo
 	}
 }

-func (_ *Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+func (*Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
 	reader, err := stream.NewReadAtSeeker(ss, 0)
 	if err != nil {
 		return nil, 0, err
@@ -117,7 +191,7 @@ func (_ *Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (i
 	return nil, 0, errs.ObjectNotFound
 }

-func (_ *Zip) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+func (*Zip) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
 	reader, err := stream.NewReadAtSeeker(ss, 0)
 	if err != nil {
 		return err
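
The new top-level branch in List covers zips that record only file paths and no entry for the leading folder: when nothing sits at the root, the first path segment is synthesized as a folder. A runnable sketch of that case, using the stdlib archive/zip in place of yeka/zip:

package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"strings"
)

func main() {
	// Build an archive with file entries only - no "docs/" folder entry.
	var b bytes.Buffer
	w := zip.NewWriter(&b)
	for _, name := range []string{"docs/a.txt", "docs/b.txt"} {
		f, _ := w.Create(name)
		f.Write([]byte("x"))
	}
	w.Close()

	r, _ := zip.NewReader(bytes.NewReader(b.Bytes()), int64(b.Len()))
	var dir string
	for _, f := range r.File {
		name := strings.TrimSuffix(f.Name, "/")
		if strings.Contains(name, "/") {
			// nothing lives at the root: remember the first segment
			if strs := strings.Split(name, "/"); dir == "" && len(strs) == 2 {
				dir = strs[0]
			}
			continue
		}
	}
	fmt.Println("synthesized top-level folder:", dir) // docs
}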

View File

@@ -1,5 +1,7 @@
 package model

+import "time"
+
 type ObjTree interface {
 	Obj
 	GetChildren() []ObjTree
@@ -45,5 +47,7 @@ func (m *ArchiveMetaInfo) GetTree() []ObjTree {

 type ArchiveMetaProvider struct {
 	ArchiveMeta
+	*Sort
 	DriverProviding bool
+	Expiration      *time.Duration
 }
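
The Expiration pointer doubles as a cache switch: GetArchiveMeta stores the meta only when it is non-nil. A minimal sketch of that contract (metaProvider here is a stand-in for model.ArchiveMetaProvider):

package main

import (
	"fmt"
	"time"
)

type metaProvider struct {
	Expiration *time.Duration // nil means: do not cache this meta
}

// cacheTTL mirrors the check in GetArchiveMeta before archiveMetaCache.Set.
func cacheTTL(m metaProvider) (time.Duration, bool) {
	if m.Expiration == nil {
		return 0, false
	}
	return *m.Expiration, true
}

func main() {
	ttl := 30 * time.Minute
	fmt.Println(cacheTTL(metaProvider{}))                 // 0s false
	fmt.Println(cacheTTL(metaProvider{Expiration: &ttl})) // 30m0s true
}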

View File

@@ -3,13 +3,14 @@ package op

 import (
 	"context"
 	stderrors "errors"
-	"github.com/alist-org/alist/v3/internal/archive/tool"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	stdpath "path"
 	"strings"
 	"time"

+	"github.com/alist-org/alist/v3/internal/archive/tool"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/Xhofe/go-cache"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
@@ -40,8 +41,8 @@ func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to get %s archive met: %+v", path, err)
 	}
-	if !storage.Config().NoCache {
-		archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
+	if m.Expiration != nil {
+		archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](*m.Expiration))
 	}
 	return m, nil
 }
@@ -82,7 +83,15 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
 		}
 		meta, err := storageAr.GetArchiveMeta(ctx, obj, args.ArchiveArgs)
 		if !errors.Is(err, errs.NotImplement) {
-			return obj, &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: true}, err
+			archiveMetaProvider := &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: true}
+			if meta.GetTree() != nil {
+				archiveMetaProvider.Sort = &storage.GetStorage().Sort
+			}
+			if !storage.Config().NoCache {
+				Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
+				archiveMetaProvider.Expiration = &Expiration
+			}
+			return obj, archiveMetaProvider, err
 		}
 	}
 	obj, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
@@ -95,7 +104,21 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
 	}()
 	meta, err := t.GetMeta(ss, args.ArchiveArgs)
-	return obj, &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: false}, err
+	if err != nil {
+		return nil, nil, err
+	}
+	archiveMetaProvider := &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: false}
+	if meta.GetTree() != nil {
+		archiveMetaProvider.Sort = &storage.GetStorage().Sort
+	}
+	if !storage.Config().NoCache {
+		Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
+		archiveMetaProvider.Expiration = &Expiration
+	} else if ss.Link.MFile == nil {
+		// e.g. the alias and crypt drivers
+		archiveMetaProvider.Expiration = ss.Link.Expiration
+	}
+	return obj, archiveMetaProvider, err
 }

 var archiveListCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))
@@ -113,10 +136,10 @@ func ListArchive(ctx context.Context, storage driver.Driver, path string, args m
 		log.Debugf("use cache when list archive [%s]%s", path, args.InnerPath)
 		return files, nil
 	}
-	if meta, ok := archiveMetaCache.Get(metaKey); ok {
-		log.Debugf("use meta cache when list archive [%s]%s", path, args.InnerPath)
-		return getChildrenFromArchiveMeta(meta, args.InnerPath)
-	}
+	// if meta, ok := archiveMetaCache.Get(metaKey); ok {
+	// 	log.Debugf("use meta cache when list archive [%s]%s", path, args.InnerPath)
+	// 	return getChildrenFromArchiveMeta(meta, args.InnerPath)
+	// }
 }
 objs, err, _ := archiveListG.Do(key, func() ([]model.Obj, error) {
 	obj, files, err := listArchive(ctx, storage, path, args)

View File

@@ -13,6 +13,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/sirupsen/logrus"
 )

 type FileStream struct {
@@ -189,6 +190,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
 	if ss.Link.RangeReadCloser != nil {
 		ss.rangeReadCloser = ss.Link.RangeReadCloser
+		ss.Add(ss.rangeReadCloser)
 		return &ss, nil
 	}
 	if len(ss.Link.URL) > 0 {
@@ -197,6 +199,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
 			return nil, err
 		}
 		ss.rangeReadCloser = rrc
+		ss.Add(rrc)
 		return &ss, nil
 	}
 }
@@ -248,8 +251,6 @@ func (ss *SeekableStream) Read(p []byte) (n int, err error) {
 			return 0, nil
 		}
 		ss.Reader = io.NopCloser(rc)
-		ss.Closers.Add(rc)
 	}
 	return ss.Reader.Read(p)
 }
@@ -337,10 +338,62 @@ type RangeReadReadAtSeeker struct {
 	ss        *SeekableStream
 	masterOff int64
 	readers   []*readerCur
+	*headCache
 }

-type FileReadAtSeeker struct {
-	ss *SeekableStream
+type headCache struct {
+	*readerCur
+	bufs [][]byte
+}
+
+func (c *headCache) read(p []byte) (n int, err error) {
+	pL := len(p)
+	logrus.Debugf("headCache read_%d", pL)
+	if c.cur < int64(pL) {
+		bufL := int64(pL) - c.cur
+		buf := make([]byte, bufL)
+		lr := io.LimitReader(c.reader, bufL)
+		off := 0
+		for c.cur < int64(pL) {
+			n, err = lr.Read(buf[off:])
+			off += n
+			c.cur += int64(n)
+			if err == io.EOF && n == int(bufL) {
+				err = nil
+			}
+			if err != nil {
+				break
+			}
+		}
+		c.bufs = append(c.bufs, buf)
+	}
+	n = 0
+	if c.cur >= int64(pL) {
+		for i := 0; n < pL; i++ {
+			buf := c.bufs[i]
+			r := len(buf)
+			if n+r > pL {
+				r = pL - n
+			}
+			n += copy(p[n:], buf[:r])
+		}
+	}
+	return
+}
+
+func (r *headCache) close() error {
+	for i := range r.bufs {
+		r.bufs[i] = nil
+	}
+	r.bufs = nil
+	return nil
+}
+
+func (r *RangeReadReadAtSeeker) InitHeadCache() {
+	if r.ss.Link.MFile == nil && r.masterOff == 0 {
+		reader := r.readers[0]
+		r.readers = r.readers[1:]
+		r.headCache = &headCache{readerCur: reader}
+	}
 }

 func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (SStreamReadAtSeeker, error) {
@@ -351,27 +404,23 @@ func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (SStr
 		}
 		return &FileReadAtSeeker{ss: ss}, nil
 	}
-	var r io.Reader
-	var err error
+	r := &RangeReadReadAtSeeker{
+		ss:        ss,
+		masterOff: offset,
+	}
 	if offset != 0 || utils.IsBool(forceRange...) {
 		if offset < 0 || offset > ss.GetSize() {
 			return nil, errors.New("offset out of range")
 		}
-		r, err = ss.RangeRead(http_range.Range{Start: offset, Length: -1})
+		_, err := r.getReaderAtOffset(offset)
 		if err != nil {
 			return nil, err
 		}
-		if rc, ok := r.(io.Closer); ok {
-			ss.Closers.Add(rc)
-		}
 	} else {
-		r = ss
+		rc := &readerCur{reader: ss, cur: offset}
+		r.readers = append(r.readers, rc)
 	}
-	return &RangeReadReadAtSeeker{
-		ss:        ss,
-		masterOff: offset,
-		readers:   []*readerCur{{reader: r, cur: offset}},
-	}, nil
+	return r, nil
 }

@@ -379,39 +428,72 @@ func (r *RangeReadReadAtSeeker) GetRawStream() *SeekableStream {
 	return r.ss
 }

 func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (*readerCur, error) {
+	var rc *readerCur
 	for _, reader := range r.readers {
+		if reader.cur == -1 {
+			continue
+		}
 		if reader.cur == off {
+			// logrus.Debugf("getReaderAtOffset match_%d", off)
 			return reader, nil
 		}
+		if reader.cur > 0 && off >= reader.cur && (rc == nil || reader.cur < rc.cur) {
+			rc = reader
+		}
 	}
+	if rc != nil && off-rc.cur <= utils.MB {
+		n, err := utils.CopyWithBufferN(utils.NullWriter{}, rc.reader, off-rc.cur)
+		rc.cur += n
+		if err == io.EOF && rc.cur == off {
+			err = nil
+		}
+		if err == nil {
+			logrus.Debugf("getReaderAtOffset old_%d", off)
+			return rc, nil
+		}
+		rc.cur = -1
+	}
+	logrus.Debugf("getReaderAtOffset new_%d", off)
-	reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: -1})
+	// a Range request must not exceed the file size; some cloud drives cannot handle that and return the whole file instead
+	reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: r.ss.GetSize() - off})
 	if err != nil {
 		return nil, err
 	}
-	if c, ok := reader.(io.Closer); ok {
-		r.ss.Closers.Add(c)
-	}
-	rc := &readerCur{reader: reader, cur: off}
+	rc = &readerCur{reader: reader, cur: off}
 	r.readers = append(r.readers, rc)
 	return rc, nil
 }

 func (r *RangeReadReadAtSeeker) ReadAt(p []byte, off int64) (int, error) {
+	if off == 0 && r.headCache != nil {
+		return r.headCache.read(p)
+	}
 	rc, err := r.getReaderAtOffset(off)
 	if err != nil {
 		return 0, err
 	}
-	num := 0
+	n, num := 0, 0
 	for num < len(p) {
-		n, err := rc.reader.Read(p[num:])
+		n, err = rc.reader.Read(p[num:])
 		rc.cur += int64(n)
 		num += n
-		if err != nil {
-			return num, err
-		}
+		if err == nil {
+			continue
+		}
+		if err == io.EOF {
+			// io.EOF means the reader has been fully drained
+			rc.cur = -1
+			// the yeka/zip package does not handle EOF itself, so compensate for it here
+			// https://github.com/yeka/zip/blob/03d6312748a9d6e0bc0c9a7275385c09f06d9c14/reader.go#L433
+			if num == len(p) {
+				err = nil
+			}
+		}
+		break
 	}
-	return num, nil
+	return num, err
 }

 func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
 	switch whence {
@@ -437,6 +519,9 @@ func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
 }

 func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {
+	if r.masterOff == 0 && r.headCache != nil {
+		return r.headCache.read(p)
+	}
 	rc, err := r.getReaderAtOffset(r.masterOff)
 	if err != nil {
 		return 0, err
@@ -448,9 +533,16 @@ func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {
 }

 func (r *RangeReadReadAtSeeker) Close() error {
+	if r.headCache != nil {
+		r.headCache.close()
+	}
 	return r.ss.Close()
 }

+type FileReadAtSeeker struct {
+	ss *SeekableStream
+}
+
 func (f *FileReadAtSeeker) GetRawStream() *SeekableStream {
 	return f.ss
 }
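
The rewritten ReadAt loop tolerates readers that hand back data together with io.EOF, which yeka/zip would otherwise treat as a hard failure. A stdlib-only sketch of the compensation, using testing/iotest to produce such a reader:

package main

import (
	"fmt"
	"io"
	"strings"
	"testing/iotest"
)

// fill mimics the new ReadAt loop: keep reading until the buffer is
// full, and clear io.EOF when it arrives together with the last bytes.
func fill(r io.Reader, p []byte) (int, error) {
	num := 0
	var n int
	var err error
	for num < len(p) {
		n, err = r.Read(p[num:])
		num += n
		if err == nil {
			continue
		}
		if err == io.EOF && num == len(p) {
			err = nil // buffer filled: report success despite EOF
		}
		break
	}
	return num, err
}

func main() {
	// DataErrReader returns the final bytes together with io.EOF.
	src := iotest.DataErrReader(strings.NewReader("abcd"))
	buf := make([]byte, 4)
	n, err := fill(src, buf)
	fmt.Println(n, err, string(buf)) // 4 <nil> abcd
}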

View File

@@ -233,3 +233,9 @@ func CopyWithBufferN(dst io.Writer, src io.Reader, n int64) (written int64, err
 	}
 	return
 }
+
+type NullWriter struct{}
+
+func (NullWriter) Write(p []byte) (n int, err error) {
+	return len(p), nil
+}

View File

@@ -2,6 +2,10 @@ package handles

 import (
 	"fmt"
+	"net/url"
+	stdpath "path"
+	"strings"
+
 	"github.com/alist-org/alist/v3/internal/archive/tool"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/errs"
@@ -15,9 +19,6 @@ import (
 	"github.com/gin-gonic/gin"
 	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
-	"mime"
-	stdpath "path"
-	"strings"
 )

 type ArchiveMetaReq struct {
@@ -31,6 +32,7 @@ type ArchiveMetaResp struct {
 	Comment     string               `json:"comment"`
 	IsEncrypted bool                 `json:"encrypted"`
 	Content     []ArchiveContentResp `json:"content"`
+	Sort        *model.Sort          `json:"sort,omitempty"`
 	RawURL      string               `json:"raw_url"`
 	Sign        string               `json:"sign"`
 }
@@ -128,6 +130,7 @@ func FsArchiveMeta(c *gin.Context) {
 		Comment:     ret.GetComment(),
 		IsEncrypted: ret.IsEncrypted(),
 		Content:     toContentResp(ret.GetTree()),
+		Sort:        ret.Sort,
 		RawURL:      fmt.Sprintf("%s%s%s", common.GetApiUrl(c.Request), api, utils.EncodePath(reqPath, true)),
 		Sign:        s,
 	})
@@ -361,14 +364,11 @@ func ArchiveInternalExtract(c *gin.Context) {
 		"Referrer-Policy": "no-referrer",
 		"Cache-Control":   "max-age=0, no-cache, no-store, must-revalidate",
 	}
-	if c.Query("attachment") == "true" {
-		filename := stdpath.Base(innerPath)
-		headers["Content-Disposition"] = fmt.Sprintf("attachment; filename=\"%s\"", filename)
-	}
+	filename := stdpath.Base(innerPath)
+	headers["Content-Disposition"] = fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, filename, url.PathEscape(filename))
 	contentType := c.Request.Header.Get("Content-Type")
 	if contentType == "" {
-		fileExt := stdpath.Ext(innerPath)
-		contentType = mime.TypeByExtension(fileExt)
+		contentType = utils.GetMimeType(filename)
 	}
 	c.DataFromReader(200, size, contentType, rc, headers)
 }