mirror of https://github.com/Xhofe/alist

fix: form upload when ticked `As A Task` (#5145)

parent 8102142007
commit e7c0d94b44
@@ -148,7 +148,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 	// Part Size Unit: Bytes, Default: 20MB,
 	// Maximum number of slices 10,000, ≈195.3125GB
 	var partSize = calPartSize(stream.GetSize())
-	const dateFormat = "2006-01-02T15:04:05.88Z"
+	const dateFormat = "2006-01-02T15:04:05.999Z"
 	mtime := stream.ModTime()
 	mtimeStr := mtime.UTC().Format(dateFormat)
 	ctimeStr := stream.CreateTime().UTC().Format(dateFormat)
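The layout fix matters because Go's time package only treats runs of `0`s or `9`s after the reference seconds as fractional-second directives; `.88` is copied into the output as literal text, so every formatted timestamp carried a bogus `.88` fraction, while `.999` emits the real milliseconds (trimming trailing zeros). A minimal standalone sketch of the difference, with hypothetical values:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2023, 9, 6, 10, 11, 12, 345_000_000, time.UTC)

	// ".88" is not a fractional-second directive, so it appears verbatim
	// in the output regardless of the actual time:
	fmt.Println(t.Format("2006-01-02T15:04:05.88Z")) // 2023-09-06T10:11:12.88Z

	// ".999" prints up to three fractional digits of the actual time:
	fmt.Println(t.Format("2006-01-02T15:04:05.999Z")) // 2023-09-06T10:11:12.345Z
}
```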
@@ -55,7 +55,6 @@ func (f *FileStream) SetExist(obj model.Obj) {
 
 // CacheFullInTempFile save all data into tmpFile. Not recommended since it wears disk,
 // and can't start upload until the file is written. It's not thread-safe!
-// won't check if some
 func (f *FileStream) CacheFullInTempFile() (model.File, error) {
 	if f.tmpFile != nil {
 		return f.tmpFile, nil
@@ -82,10 +81,10 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
 	if httpRange.Length == -1 {
 		httpRange.Length = f.GetSize()
 	}
-
 	if f.peekBuff != nil && httpRange.Start < int64(f.peekBuff.Len()) && httpRange.Start+httpRange.Length-1 < int64(f.peekBuff.Len()) {
 		return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
 	}
+	if f.tmpFile == nil {
 	if httpRange.Start == 0 && httpRange.Length <= InMemoryBufMaxSizeBytes && f.peekBuff == nil {
 		bufSize := utils.Min(httpRange.Length, f.GetSize())
 		newBuf := bytes.NewBuffer(make([]byte, 0, bufSize))
@@ -99,13 +98,13 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
 		f.peekBuff = bytes.NewReader(newBuf.Bytes())
 		f.Reader = io.MultiReader(f.peekBuff, f.Reader)
 		return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
-	}
-	if f.tmpFile == nil {
+	} else {
 		_, err := f.CacheFullInTempFile()
 		if err != nil {
 			return nil, err
 		}
 	}
+	}
 	return io.NewSectionReader(f.tmpFile, httpRange.Start, httpRange.Length), nil
 }
 
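Taken together, the two RangeRead hunks hoist the `f.tmpFile == nil` check: the in-memory peek buffer is only built while no temp file has been materialized yet, and once one exists (for example after SetTmpFile below) every range is served straight from it with `io.NewSectionReader`. A condensed, hand-written sketch of the resulting control flow, using simplified stand-in types rather than the real alist structs:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"strings"
)

// fileStream is a simplified stand-in for stream.FileStream.
type fileStream struct {
	reader   io.Reader
	size     int64
	peekBuff *bytes.Reader
	tmpFile  *os.File
}

// assumed limit, mirrors InMemoryBufMaxSizeBytes in alist
const inMemoryBufMaxSizeBytes = 10 * 1024 * 1024

func (f *fileStream) rangeRead(start, length int64) (io.Reader, error) {
	if length == -1 {
		length = f.size
	}
	// data that was already peeked can always be replayed
	if f.peekBuff != nil && start+length <= int64(f.peekBuff.Len()) {
		return io.NewSectionReader(f.peekBuff, start, length), nil
	}
	if f.tmpFile == nil {
		if start == 0 && length <= inMemoryBufMaxSizeBytes && f.peekBuff == nil {
			// small prefix: buffer it in memory, keep the rest of the reader intact
			buf := bytes.NewBuffer(make([]byte, 0, length))
			if _, err := io.CopyN(buf, f.reader, length); err != nil {
				return nil, err
			}
			f.peekBuff = bytes.NewReader(buf.Bytes())
			f.reader = io.MultiReader(f.peekBuff, f.reader)
			return io.NewSectionReader(f.peekBuff, start, length), nil
		}
		// otherwise spill the whole stream to disk first (CacheFullInTempFile in alist)
		tmp, err := os.CreateTemp("", "stream-*")
		if err != nil {
			return nil, err
		}
		if _, err := io.Copy(tmp, f.reader); err != nil {
			return nil, err
		}
		f.tmpFile = tmp
	}
	return io.NewSectionReader(f.tmpFile, start, length), nil
}

func main() {
	fs := &fileStream{reader: strings.NewReader("hello, range read"), size: 17}
	r, err := fs.rangeRead(0, 5)
	if err != nil {
		panic(err)
	}
	b, _ := io.ReadAll(r)
	fmt.Println(string(b)) // hello
}
```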
@@ -228,54 +227,7 @@ func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) {
 	return ss.tmpFile, nil
 }
 
-//func (f *FileStream) SetReader(r io.Reader) {
-//	f.Reader = r
-//}
-
-/*
-// RangePeek allow once peek at start of the data, since most drives check first XX bytes for rapid-upload
-func (f *FileStream) RangePeek(length int64) (*bytes.Buffer, error) {
-	if length > InMemoryBufMaxSize*1024*1024 {
-		return nil, errs.NewErr(errs.StreamPeekFail, "can't peek size > %d MB", InMemoryBufMaxSize)
-	}
-	httpRange := &http_range.Range{Length: length}
-	bufSize := utils.Min(httpRange.Length, f.GetSize())
-	buf := bytes.NewBuffer(make([]byte, 0, bufSize))
-	if f.link == nil && f.tmpFile == nil {
-		if !f.peekedOnce {
-			f.mu.Lock()
-			f.peekedOnce = true
-			_, err := io.CopyN(buf, f.Reader, bufSize)
-
-			if err != nil {
-				f.mu.Unlock()
-				return nil, errs.NewErr(errs.StreamPeekFail, "failed to copyN %d bytes data", bufSize)
-			}
-			f.Reader = io.MultiReader(buf, f.Reader)
-			f.mu.Unlock()
-			return buf, nil
-
-		}
-		return nil, errs.NewErr(errs.StreamPeekFail, "link and tmpFile both are null")
-	}
-	f.mu.Lock()
-	defer f.mu.Unlock()
-	rc, _, err := GetReadCloserFromLink(f.Obj, f.link, httpRange)
-
-	if err != nil {
-		return nil, err
-	}
-	_, err = io.CopyN(buf, rc, bufSize)
-	if err != nil {
-		return nil, err
-	}
-	return buf, nil
-}*/
-
-//func (f *FileStream) SetTmpFile(r *os.File) {
-//	f.mu.Lock()
-//	//f.readDisabled = true
-//	f.tmpFile = r
-//	f.Reader = r
-//	f.mu.Unlock()
-//}
+func (f *FileStream) SetTmpFile(r *os.File) {
+	f.Reader = r
+	f.tmpFile = r
+}
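The new SetTmpFile replaces the commented-out dead code above: it hands an already-materialized `*os.File` to the stream, so both `Reader` and `tmpFile` point at the on-disk copy and CacheFullInTempFile / RangeRead short-circuit instead of copying the body a second time. A rough illustration of that short-circuit, again with simplified stand-in types rather than the actual alist structs:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// fileStream stands in for stream.FileStream, only the fields involved here.
type fileStream struct {
	Reader  io.Reader
	tmpFile *os.File
}

// SetTmpFile mirrors the new method: reads come from the file, and the cached
// file pointer is set so nothing needs to be spooled to disk again.
func (f *fileStream) SetTmpFile(r *os.File) {
	f.Reader = r
	f.tmpFile = r
}

// cacheFullInTempFile stands in for CacheFullInTempFile: with tmpFile already
// set it returns immediately instead of draining Reader into a new temp file.
func (f *fileStream) cacheFullInTempFile() (*os.File, error) {
	if f.tmpFile != nil {
		return f.tmpFile, nil
	}
	tmp, err := os.CreateTemp("", "stream-*")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(tmp, f.Reader); err != nil {
		return nil, err
	}
	f.tmpFile = tmp
	return tmp, nil
}

func main() {
	tmp, _ := os.CreateTemp("", "demo-*")
	defer os.Remove(tmp.Name())
	tmp.WriteString("already on disk")

	fs := &fileStream{}
	fs.SetTmpFile(tmp)
	got, _ := fs.cacheFullInTempFile()
	fmt.Println(got == tmp) // true: no second copy is made
}
```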
@@ -1,8 +1,13 @@
 package handles
 
 import (
+	"github.com/alist-org/alist/v3/internal/conf"
+	"github.com/alist-org/alist/v3/pkg/utils/random"
+	"io"
 	"net/url"
+	"os"
 	stdpath "path"
+	"reflect"
 	"strconv"
 	"time"
 
@@ -97,7 +102,24 @@ func FsForm(c *gin.Context) {
 		common.ErrorResp(c, err, 500)
 		return
 	}
-	f, err := file.Open()
+	tmpFile, tmpInSys := "", ""
+	fv := reflect.ValueOf(*file)
+	tmpInSys = fv.FieldByName("tmpfile").String()
+
+	var f io.Reader
+	var osFile *os.File
+	if len(tmpInSys) > 0 {
+		tmpFile = conf.Conf.TempDir + "file-" + random.String(8)
+		err = os.Rename(tmpInSys, tmpFile)
+		if err != nil {
+			common.ErrorResp(c, err, 500)
+			return
+		}
+		osFile, err = os.Open(tmpFile)
+		f = osFile
+	} else {
+		f, err = file.Open()
+	}
 	if err != nil {
 		common.ErrorResp(c, err, 500)
 		return
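This is the core of the fix. The standard library spools large multipart parts to an on-disk temp file referenced by an unexported `tmpfile` field of `multipart.FileHeader`, and the net/http server removes those spooled files once the handler returns, so a queued task would find the data gone. The handler now reads that field via reflection, renames the spooled file into alist's own `conf.Conf.TempDir`, and reopens it as an `*os.File` that the stream will own; small uploads, which stay in memory, keep the old `file.Open()` path. Reading an unexported string field through `reflect.Value.String()` is permitted (only setting it would panic). A self-contained sketch of the same trick against a hand-rolled struct (the struct and values are illustrative, not the real `multipart.FileHeader`):

```go
package main

import (
	"fmt"
	"reflect"
)

// fileHeader is a stand-in for multipart.FileHeader: the real struct also keeps
// the spooled upload path in an unexported field named "tmpfile".
type fileHeader struct {
	Filename string
	tmpfile  string // unexported: normal code cannot reach it
}

// tmpFilePath shows the reflection read FsForm relies on: FieldByName finds the
// unexported field, and Value.String() may read it (only Set would panic).
func tmpFilePath(h *fileHeader) string {
	return reflect.ValueOf(*h).FieldByName("tmpfile").String()
}

func main() {
	h := &fileHeader{Filename: "big.iso", tmpfile: "/tmp/multipart-123456"}
	fmt.Println(tmpFilePath(h)) // /tmp/multipart-123456

	// Small uploads stay in memory, so the field is empty and the handler
	// falls back to file.Open(), mirroring the len(tmpInSys) > 0 check.
	fmt.Println(tmpFilePath(&fileHeader{Filename: "note.txt"}) == "") // true
}
```

Renaming rather than copying avoids writing the payload a second time, at the cost of depending on an unexported standard-library field.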
@@ -118,12 +140,16 @@ func FsForm(c *gin.Context) {
 		common.ErrorResp(c, err, 500)
 		return
 	}
+	if osFile != nil {
+		ss.SetTmpFile(osFile)
+	}
+
 	if asTask {
 		err = fs.PutAsTask(dir, ss)
 	} else {
 		err = fs.PutDirectly(c, dir, ss, true)
 	}
-	defer f.Close()
 	if err != nil {
 		common.ErrorResp(c, err, 500)
 		return
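Dropping `defer f.Close()` is deliberate: with `As A Task` ticked, `fs.PutAsTask` only enqueues the upload, so the handler returns long before the data is read, and closing the file at the end of the handler is exactly what broke task-based form uploads. After this change the stream takes ownership of the renamed temp file through `SetTmpFile` and releases it when the task is done. The bug pattern in isolation, as a small hypothetical sketch (not alist code):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"sync"
	"time"
)

// handleUpload mimics the handler: it hands the open file to a background
// "task" and returns. If closeEarly is true it defers Close the way the old
// FsForm did, so the task ends up reading from an already-closed file.
func handleUpload(path string, closeEarly bool, wg *sync.WaitGroup) {
	f, _ := os.Open(path)
	if closeEarly {
		defer f.Close() // closes as soon as the handler returns
	}
	wg.Add(1)
	go func() { // the queued upload task
		defer wg.Done()
		time.Sleep(10 * time.Millisecond) // task runs after the handler returned
		_, err := io.ReadAll(f)
		fmt.Println("task read error:", err) // "file already closed" when closeEarly
		f.Close()
	}()
}

func main() {
	tmp, _ := os.CreateTemp("", "upload-*")
	defer os.Remove(tmp.Name())
	tmp.WriteString("payload")
	tmp.Close()

	var wg sync.WaitGroup
	handleUpload(tmp.Name(), true, &wg)  // old behaviour: the task fails
	handleUpload(tmp.Name(), false, &wg) // new behaviour: the task owns the file
	wg.Wait()
}
```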