fix: form upload when ticked `As A Task` (#5145)

pull/5148/head
Sean 2023-09-03 15:40:40 +08:00 committed by GitHub
parent 8102142007
commit e7c0d94b44
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 51 additions and 73 deletions

View File

@ -148,7 +148,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
// Part Size Unit: Bytes, Default: 20MB,
// Maximum number of slices 10,000, ≈195.3125GB
var partSize = calPartSize(stream.GetSize())
const dateFormat = "2006-01-02T15:04:05.88Z"
const dateFormat = "2006-01-02T15:04:05.999Z"
mtime := stream.ModTime()
mtimeStr := mtime.UTC().Format(dateFormat)
ctimeStr := stream.CreateTime().UTC().Format(dateFormat)

View File

@ -55,7 +55,6 @@ func (f *FileStream) SetExist(obj model.Obj) {
// CacheFullInTempFile save all data into tmpFile. Not recommended since it wears disk,
// and can't start upload until the file is written. It's not thread-safe!
// won't check if some
func (f *FileStream) CacheFullInTempFile() (model.File, error) {
if f.tmpFile != nil {
return f.tmpFile, nil
@ -82,28 +81,28 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
if httpRange.Length == -1 {
httpRange.Length = f.GetSize()
}
if f.peekBuff != nil && httpRange.Start < int64(f.peekBuff.Len()) && httpRange.Start+httpRange.Length-1 < int64(f.peekBuff.Len()) {
return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
}
if httpRange.Start == 0 && httpRange.Length <= InMemoryBufMaxSizeBytes && f.peekBuff == nil {
bufSize := utils.Min(httpRange.Length, f.GetSize())
newBuf := bytes.NewBuffer(make([]byte, 0, bufSize))
n, err := io.CopyN(newBuf, f.Reader, bufSize)
if err != nil {
return nil, err
}
if n != bufSize {
return nil, fmt.Errorf("stream RangeRead did not get all data in peek, expect =%d ,actual =%d", bufSize, n)
}
f.peekBuff = bytes.NewReader(newBuf.Bytes())
f.Reader = io.MultiReader(f.peekBuff, f.Reader)
return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
}
if f.tmpFile == nil {
_, err := f.CacheFullInTempFile()
if err != nil {
return nil, err
if httpRange.Start == 0 && httpRange.Length <= InMemoryBufMaxSizeBytes && f.peekBuff == nil {
bufSize := utils.Min(httpRange.Length, f.GetSize())
newBuf := bytes.NewBuffer(make([]byte, 0, bufSize))
n, err := io.CopyN(newBuf, f.Reader, bufSize)
if err != nil {
return nil, err
}
if n != bufSize {
return nil, fmt.Errorf("stream RangeRead did not get all data in peek, expect =%d ,actual =%d", bufSize, n)
}
f.peekBuff = bytes.NewReader(newBuf.Bytes())
f.Reader = io.MultiReader(f.peekBuff, f.Reader)
return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
} else {
_, err := f.CacheFullInTempFile()
if err != nil {
return nil, err
}
}
}
return io.NewSectionReader(f.tmpFile, httpRange.Start, httpRange.Length), nil
@ -228,54 +227,7 @@ func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) {
return ss.tmpFile, nil
}
//func (f *FileStream) SetReader(r io.Reader) {
// f.Reader = r
//}
/*
// RangePeek allow once peek at start of the data, since most drives check first XX bytes for rapid-upload
func (f *FileStream) RangePeek(length int64) (*bytes.Buffer, error) {
if length > InMemoryBufMaxSize*1024*1024 {
return nil, errs.NewErr(errs.StreamPeekFail, "can't peek size > %d MB", InMemoryBufMaxSize)
}
httpRange := &http_range.Range{Length: length}
bufSize := utils.Min(httpRange.Length, f.GetSize())
buf := bytes.NewBuffer(make([]byte, 0, bufSize))
if f.link == nil && f.tmpFile == nil {
if !f.peekedOnce {
f.mu.Lock()
f.peekedOnce = true
_, err := io.CopyN(buf, f.Reader, bufSize)
if err != nil {
f.mu.Unlock()
return nil, errs.NewErr(errs.StreamPeekFail, "failed to copyN %d bytes data", bufSize)
}
f.Reader = io.MultiReader(buf, f.Reader)
f.mu.Unlock()
return buf, nil
}
return nil, errs.NewErr(errs.StreamPeekFail, "link and tmpFile both are null")
}
f.mu.Lock()
defer f.mu.Unlock()
rc, _, err := GetReadCloserFromLink(f.Obj, f.link, httpRange)
if err != nil {
return nil, err
}
_, err = io.CopyN(buf, rc, bufSize)
if err != nil {
return nil, err
}
return buf, nil
}*/
//func (f *FileStream) SetTmpFile(r *os.File) {
// f.mu.Lock()
// //f.readDisabled = true
// f.tmpFile = r
// f.Reader = r
// f.mu.Unlock()
//}
// SetTmpFile attaches an already-open temporary file to the stream:
// r is recorded as the stream's cached temp file, and subsequent reads
// are served from r.
func (f *FileStream) SetTmpFile(r *os.File) {
	f.tmpFile = r
	f.Reader = r
}

View File

@ -1,8 +1,13 @@
package handles
import (
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/pkg/utils/random"
"io"
"net/url"
"os"
stdpath "path"
"reflect"
"strconv"
"time"
@ -97,7 +102,24 @@ func FsForm(c *gin.Context) {
common.ErrorResp(c, err, 500)
return
}
f, err := file.Open()
tmpFile, tmpInSys := "", ""
fv := reflect.ValueOf(*file)
tmpInSys = fv.FieldByName("tmpfile").String()
var f io.Reader
var osFile *os.File
if len(tmpInSys) > 0 {
tmpFile = conf.Conf.TempDir + "file-" + random.String(8)
err = os.Rename(tmpInSys, tmpFile)
if err != nil {
common.ErrorResp(c, err, 500)
return
}
osFile, err = os.Open(tmpFile)
f = osFile
} else {
f, err = file.Open()
}
if err != nil {
common.ErrorResp(c, err, 500)
return
@ -118,12 +140,16 @@ func FsForm(c *gin.Context) {
common.ErrorResp(c, err, 500)
return
}
if osFile != nil {
ss.SetTmpFile(osFile)
}
if asTask {
err = fs.PutAsTask(dir, ss)
} else {
err = fs.PutDirectly(c, dir, ss, true)
}
defer f.Close()
if err != nil {
common.ErrorResp(c, err, 500)
return