mirror of https://github.com/Xhofe/alist
feat(traffic): support limit task worker count & file stream rate (#7948)
* feat: set task workers num & client stream rate limit
* feat: server stream rate limit
* upgrade xhofe/tache

parent 399336b33c
commit 3b71500f23
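Nearly every hunk below applies the same change: the raw upload body handed to an HTTP client, SDK uploader, or FTP/IPFS connection is wrapped in driver.NewLimitedUploadStream(ctx, r), which presumably paces reads against a shared upload limiter and stops on context cancellation. The wrapper's implementation is not part of this excerpt; a minimal sketch of such a reader, assuming a token-bucket limiter from golang.org/x/time/rate (all names illustrative, not alist's actual identifiers):

    package sketch

    import (
        "context"
        "io"

        "golang.org/x/time/rate"
    )

    // limitedReader paces an upload stream against a shared limiter.
    type limitedReader struct {
        r   io.Reader
        lim *rate.Limiter
        ctx context.Context
    }

    func (l *limitedReader) Read(p []byte) (int, error) {
        n, err := l.r.Read(p)
        if n > 0 && l.lim != nil {
            // Block until n tokens (bytes) are available or ctx is cancelled.
            // The limiter's burst must be at least as large as one Read.
            if werr := l.lim.WaitN(l.ctx, n); werr != nil {
                return n, werr
            }
        }
        return n, err
    }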
@@ -17,6 +17,7 @@ func Init() {
 	bootstrap.Log()
 	bootstrap.InitDB()
 	data.InitData()
+	bootstrap.InitStreamLimit()
 	bootstrap.InitIndex()
 	bootstrap.InitUpgradePatch()
 }
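bootstrap.InitStreamLimit is new in this commit, and the call above is the only server-side entry point visible in this excerpt. Its body is not shown; a purely hypothetical sketch of what it plausibly sets up — package-level limiters built from configured bytes-per-second caps, with invented names throughout:

    import "golang.org/x/time/rate"

    // Hypothetical: none of these identifiers are confirmed by the diff.
    var (
        clientUploadLimit   *rate.Limiter // applied when alist pushes to a remote
        serverDownloadLimit *rate.Limiter // applied when alist serves file content
    )

    func InitStreamLimit(clientUpBps, serverDownBps int) {
        if clientUpBps > 0 {
            // burst == rate so a single large Read can always be satisfied
            clientUploadLimit = rate.NewLimiter(rate.Limit(clientUpBps), clientUpBps)
        }
        if serverDownBps > 0 {
            serverDownloadLimit = rate.NewLimiter(rate.Limit(serverDownBps), serverDownBps)
        }
    }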
@@ -8,8 +8,6 @@ import (
 	"encoding/hex"
 	"encoding/json"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/driver"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"net/url"
@@ -20,6 +18,7 @@ import (
 	"time"

 	"github.com/alist-org/alist/v3/internal/conf"
+	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
@@ -144,7 +143,7 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e
 		return nil, err
 	}

-	bytes, err := crypto.Decode(string(result.EncodedData), key)
+	b, err := crypto.Decode(string(result.EncodedData), key)
 	if err != nil {
 		return nil, err
 	}
@@ -152,7 +151,7 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e
 	downloadInfo := struct {
 		Url string `json:"url"`
 	}{}
-	if err := utils.Json.Unmarshal(bytes, &downloadInfo); err != nil {
+	if err := utils.Json.Unmarshal(b, &downloadInfo); err != nil {
 		return nil, err
 	}

@@ -290,13 +289,10 @@ func (c *Pan115) UploadByOSS(ctx context.Context, params *driver115.UploadOSSPar
 	}

 	var bodyBytes []byte
-	r := &stream.ReaderWithCtx{
-		Reader: &stream.ReaderUpdatingProgress{
-			Reader:         s,
-			UpdateProgress: up,
-		},
-		Ctx: ctx,
-	}
+	r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         s,
+		UpdateProgress: up,
+	})
 	if err = bucket.PutObject(params.Object, r, append(
 		driver115.OssOption(params, ossToken),
 		oss.CallbackResult(&bodyBytes),
@@ -405,16 +401,12 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
 			}
 		default:
 		}

 		buf := make([]byte, chunk.Size)
 		if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
 			continue
 		}
-		if part, err = bucket.UploadPart(imur, &stream.ReaderWithCtx{
-			Reader: bytes.NewBuffer(buf),
-			Ctx:    ctx,
-		}, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
+		if part, err = bucket.UploadPart(imur, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(buf)),
+			chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
 			break
 		}
 	}
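Note the wrapper order the 115 hunks establish and the rest of the commit repeats: progress accounting sits innermost (ReaderUpdatingProgress, with SimpleReaderWithSize when the source has no intrinsic size) and the rate limiter wraps the whole thing, so pacing applies to exactly the bytes that were counted. These helper types also move from internal/stream to internal/driver here, which is why the stream import disappears from so many files below. A sketch of the composition, with approximate signatures:

    // Sketch; driver.* mirrors the calls in the hunks above.
    func wrapUpload(ctx context.Context, s model.FileStreamer, up driver.UpdateProgress) io.Reader {
        return driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
            Reader:         s,  // sized reader; plain io.Readers get SimpleReaderWithSize
            UpdateProgress: up, // called as bytes are consumed
        })
    }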
@@ -6,7 +6,6 @@ import (
 	"encoding/base64"
 	"encoding/hex"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"net/url"
@@ -249,10 +248,10 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
 	input := &s3manager.UploadInput{
 		Bucket: &resp.Data.Bucket,
 		Key:    &resp.Data.Key,
-		Body: &stream.ReaderUpdatingProgress{
+		Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 			Reader:         file,
 			UpdateProgress: up,
-		},
+		}),
 	}
 	_, err = uploader.UploadWithContext(ctx, input)
 	if err != nil {
@@ -81,6 +81,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 		batchSize = 10
 		getS3UploadUrl = d.getS3PreSignedUrls
 	}
+	limited := driver.NewLimitedUploadStream(ctx, file)
 	for i := 1; i <= chunkCount; i += batchSize {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -103,7 +104,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 			if j == chunkCount {
 				curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
 			}
-			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(file, chunkSize), curSize, false, getS3UploadUrl)
+			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(limited, chunkSize), curSize, false, getS3UploadUrl)
 			if err != nil {
 				return err
 			}
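The 123 driver wraps the stream once, before the chunk loop, and each chunk then reads a window of it via io.LimitReader. Wrapping per chunk would also work; a single wrapper keeps one pacing state for the whole file. The shape of the loop as a sketch, with a hypothetical uploadChunk helper standing in for the per-chunk request:

    func uploadAll(ctx context.Context, file io.Reader, chunkCount int, chunkSize int64,
        uploadChunk func(io.Reader) error) error {
        limited := driver.NewLimitedUploadStream(ctx, file)
        for i := 0; i < chunkCount; i++ {
            // Each LimitReader window consumes the next chunkSize bytes
            // of the single rate-limited stream.
            if err := uploadChunk(io.LimitReader(limited, chunkSize)); err != nil {
                return err
            }
        }
        return nil
    }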
@@ -631,12 +631,13 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	// Progress
 	p := driver.NewProgress(stream.GetSize(), up)

+	rateLimited := driver.NewLimitedUploadStream(ctx, stream)
 	// upload all parts
 	for _, uploadPartInfo := range uploadPartInfos {
 		index := uploadPartInfo.PartNumber - 1
 		partSize := partInfos[index].PartSize
 		log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos))
-		limitReader := io.LimitReader(stream, partSize)
+		limitReader := io.LimitReader(rateLimited, partSize)

 		// Update Progress
 		r := io.TeeReader(limitReader, p)
@@ -787,6 +788,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	if part == 0 {
 		part = 1
 	}
+	rateLimited := driver.NewLimitedUploadStream(ctx, stream)
 	for i := int64(0); i < part; i++ {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -798,7 +800,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 			byteSize = partSize
 		}

-		limitReader := io.LimitReader(stream, byteSize)
+		limitReader := io.LimitReader(rateLimited, byteSize)
 		// Update Progress
 		r := io.TeeReader(limitReader, p)
 		req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r)
@@ -365,7 +365,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
 		log.Debugf("uploadData: %+v", uploadData)
 		requestURL := uploadData.RequestURL
 		uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
-		req, err := http.NewRequest(http.MethodPut, requestURL, bytes.NewReader(byteData))
+		req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
 		if err != nil {
 			return err
 		}
@@ -375,11 +375,11 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
 			req.Header.Set(v[0:i], v[i+1:])
 		}
 		r, err := base.HttpClient.Do(req)
-		log.Debugf("%+v %+v", r, r.Request.Header)
-		r.Body.Close()
 		if err != nil {
 			return err
 		}
+		log.Debugf("%+v %+v", r, r.Request.Header)
+		_ = r.Body.Close()
 		up(float64(i) * 100 / float64(count))
 	}
 	fileMd5 := hex.EncodeToString(md5Sum.Sum(nil))
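The Cloud189 hunk just above is a nil-dereference fix, not only a lint cleanup: the old code logged r.Request.Header and closed r.Body before checking the error from HttpClient.Do, and http.Client.Do returns a nil response whenever it returns an error. The safe ordering with net/http, sketched generically:

    import (
        "io"
        "net/http"
    )

    func doAndClose(client *http.Client, req *http.Request) error {
        resp, err := client.Do(req)
        if err != nil {
            return err // resp is nil on transport errors; don't touch it
        }
        defer resp.Body.Close()
        // Drain the body so the keep-alive connection can be reused.
        _, _ = io.Copy(io.Discard, resp.Body)
        return nil
    }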
@@ -19,6 +19,8 @@ import (
 	"strings"
 	"time"

+	"golang.org/x/sync/semaphore"
+
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/driver"
@@ -174,8 +176,8 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
 	}

 	var erron RespErr
-	jsoniter.Unmarshal(body, &erron)
-	xml.Unmarshal(body, &erron)
+	_ = jsoniter.Unmarshal(body, &erron)
+	_ = xml.Unmarshal(body, &erron)
 	if erron.HasError() {
 		return nil, &erron
 	}
@@ -508,6 +510,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 		retry.Attempts(3),
 		retry.Delay(time.Second),
 		retry.DelayType(retry.BackOffDelay))
+	sem := semaphore.NewWeighted(3)

 	fileMd5 := md5.New()
 	silceMd5 := md5.New()
@@ -517,7 +520,9 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 		if utils.IsCanceled(upCtx) {
 			break
 		}
+		if err = sem.Acquire(ctx, 1); err != nil {
+			break
+		}
 		byteData := make([]byte, sliceSize)
 		if i == count {
 			byteData = byteData[:lastPartSize]
@@ -526,6 +531,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 		// read the chunk
 		silceMd5.Reset()
 		if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil {
+			sem.Release(1)
 			return nil, err
 		}

@@ -535,6 +541,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 		partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))

 		threadG.Go(func(ctx context.Context) error {
+			defer sem.Release(1)
 			uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
 			if err != nil {
 				return err
@@ -542,7 +549,8 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo

 			// step.4 upload the slice
 			uploadUrl := uploadUrls[0]
-			_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData), isFamily)
+			_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
+				driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily)
 			if err != nil {
 				return err
 			}
@@ -794,6 +802,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
 	if err != nil {
 		return nil, err
 	}
+	rateLimited := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))

 	// create the upload session
 	uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()), isFamily)
@@ -820,7 +829,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
 			header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
 		}

-		_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
+		_, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimited, isFamily)
 		if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
 			return nil, err
 		}
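The 189PC hunks (and the baidu_netdisk, baidu_photo, and mopan hunks below) bound in-flight parts with a weighted semaphore: Acquire before handing the part to the worker group, Release via defer inside the worker, plus one manual Release on the path where the read fails before a worker is ever started. The pattern, sketched with errgroup standing in for alist's task group:

    import (
        "context"

        "golang.org/x/sync/errgroup"
        "golang.org/x/sync/semaphore"
    )

    func runBounded(ctx context.Context, tasks []func(context.Context) error) error {
        sem := semaphore.NewWeighted(3) // at most 3 tasks in flight
        g, ctx := errgroup.WithContext(ctx)
        for _, task := range tasks {
            if err := sem.Acquire(ctx, 1); err != nil {
                break // context cancelled: stop scheduling new work
            }
            task := task
            g.Go(func() error {
                defer sem.Release(1) // release even when the task fails
                return task(ctx)
            })
        }
        return g.Wait()
    }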
@@ -3,7 +3,6 @@ package alist_v3
 import (
 	"context"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"path"
@@ -183,10 +182,11 @@ func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
-	req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", &stream.ReaderUpdatingProgress{
+	reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 		Reader:         s,
 		UpdateProgress: up,
 	})
+	req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", reader)
 	if err != nil {
 		return err
 	}
@@ -14,13 +14,12 @@ import (
 	"os"
 	"time"

-	"github.com/alist-org/alist/v3/internal/stream"
-
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/cron"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
@@ -194,7 +193,10 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
 	}
 	if d.RapidUpload {
 		buf := bytes.NewBuffer(make([]byte, 0, 1024))
-		utils.CopyWithBufferN(buf, file, 1024)
+		_, err := utils.CopyWithBufferN(buf, file, 1024)
+		if err != nil {
+			return err
+		}
 		reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
 		if localFile != nil {
 			if _, err := localFile.Seek(0, io.SeekStart); err != nil {
@@ -286,6 +288,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
 		file.Reader = localFile
 	}

+	rateLimited := driver.NewLimitedUploadStream(ctx, file)
 	for i, partInfo := range resp.PartInfoList {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -294,7 +297,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
 		if d.InternalUpload {
 			url = partInfo.InternalUploadUrl
 		}
-		req, err := http.NewRequest("PUT", url, io.LimitReader(file, DEFAULT))
+		req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT))
 		if err != nil {
 			return err
 		}
@@ -303,7 +306,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
 		if err != nil {
 			return err
 		}
-		res.Body.Close()
+		_ = res.Body.Close()
 		if count > 0 {
 			up(float64(i) * 100 / float64(count))
 		}
@@ -77,7 +77,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
 	if err != nil {
 		return err
 	}
-	res.Body.Close()
+	_ = res.Body.Close()
 	if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
 		return fmt.Errorf("upload status: %d", res.StatusCode)
 	}
@@ -251,8 +251,9 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 			rd = utils.NewMultiReadable(srd)
 		}
 		err = retry.Do(func() error {
-			rd.Reset()
-			return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
+			_ = rd.Reset()
+			rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
+			return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
 		},
 			retry.Attempts(3),
 			retry.DelayType(retry.BackOffDelay),
@@ -12,6 +12,8 @@ import (
 	"strconv"
 	"time"

+	"golang.org/x/sync/semaphore"
+
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
@@ -263,16 +265,21 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 		retry.Attempts(3),
 		retry.Delay(time.Second),
 		retry.DelayType(retry.BackOffDelay))
+	sem := semaphore.NewWeighted(3)
 	for i, partseq := range precreateResp.BlockList {
 		if utils.IsCanceled(upCtx) {
 			break
 		}
+		if err = sem.Acquire(ctx, 1); err != nil {
+			break
+		}

 		i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize
 		if partseq+1 == count {
 			byteSize = lastBlockSize
 		}
 		threadG.Go(func(ctx context.Context) error {
+			defer sem.Release(1)
 			params := map[string]string{
 				"method":       "upload",
 				"access_token": d.AccessToken,
@@ -281,7 +288,8 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 				"uploadid": precreateResp.Uploadid,
 				"partseq":  strconv.Itoa(partseq),
 			}
-			err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
+			err := d.uploadSlice(ctx, params, stream.GetName(),
+				driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize)))
 			if err != nil {
 				return err
 			}
@@ -13,6 +13,8 @@ import (
 	"strings"
 	"time"

+	"golang.org/x/sync/semaphore"
+
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
@@ -314,10 +316,14 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		retry.Attempts(3),
 		retry.Delay(time.Second),
 		retry.DelayType(retry.BackOffDelay))
+	sem := semaphore.NewWeighted(3)
 	for i, partseq := range precreateResp.BlockList {
 		if utils.IsCanceled(upCtx) {
 			break
 		}
+		if err = sem.Acquire(ctx, 1); err != nil {
+			break
+		}

 		i, partseq, offset, byteSize := i, partseq, int64(partseq)*DEFAULT, DEFAULT
 		if partseq+1 == count {
@@ -325,6 +331,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		}

 		threadG.Go(func(ctx context.Context) error {
+			defer sem.Release(1)
 			uploadParams := map[string]string{
 				"method": "upload",
 				"path":   params["path"],
@@ -335,7 +342,8 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 			_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
 				r.SetContext(ctx)
 				r.SetQueryParams(uploadParams)
-				r.SetFileReader("file", stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
+				r.SetFileReader("file", stream.GetName(),
+					driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize)))
 			}, nil)
 			if err != nil {
 				return err
@@ -6,6 +6,7 @@ import (
 	"time"

 	"github.com/alist-org/alist/v3/internal/conf"
+	"github.com/alist-org/alist/v3/internal/net"
 	"github.com/go-resty/resty/v2"
 )

@@ -26,7 +27,7 @@ func InitClient() {
 	NoRedirectClient.SetHeader("user-agent", UserAgent)

 	RestyClient = NewRestyClient()
-	HttpClient = NewHttpClient()
+	HttpClient = net.NewHttpClient()
 }

 func NewRestyClient() *resty.Client {
@@ -38,13 +39,3 @@ func NewRestyClient() *resty.Client {
 		SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
 	return client
 }
-
-func NewHttpClient() *http.Client {
-	return &http.Client{
-		Timeout: time.Hour * 48,
-		Transport: &http.Transport{
-			Proxy:           http.ProxyFromEnvironment,
-			TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
-		},
-	}
-}
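HttpClient construction leaves drivers/base for a new internal/net package; the deleted body above shows exactly what it configured, and presumably the move lets the new stream-limit machinery own HTTP transport setup. For reference, the constructor as it existed before the move, with its imports:

    import (
        "crypto/tls"
        "net/http"
        "time"

        "github.com/alist-org/alist/v3/internal/conf"
    )

    func NewHttpClient() *http.Client {
        return &http.Client{
            Timeout: time.Hour * 48, // generous cap for very large transfers
            Transport: &http.Transport{
                Proxy:           http.ProxyFromEnvironment,
                TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
            },
        }
    }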
@@ -6,7 +6,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"mime/multipart"
 	"net/http"
@@ -249,13 +248,13 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
 	if err != nil {
 		return err
 	}
-	r := &stream.ReaderUpdatingProgress{
-		Reader: &stream.SimpleReaderWithSize{
+	r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader: &driver.SimpleReaderWithSize{
 			Reader: body,
 			Size:   int64(body.Len()),
 		},
 		UpdateProgress: up,
-	}
+	})
 	req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r)
 	if err != nil {
 		return err
@@ -1,7 +1,9 @@
 package cloudreve

 import (
+	"bytes"
 	"context"
+	"errors"
 	"io"
 	"net/http"
 	"path"
@@ -173,7 +175,7 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
 		var n int
 		buf = make([]byte, chunkSize)
 		n, err = io.ReadAtLeast(stream, buf, chunkSize)
-		if err != nil && err != io.ErrUnexpectedEOF {
+		if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
 			if err == io.EOF {
 				return nil
 			}
@@ -186,7 +188,7 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
 		err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
 			req.SetHeader("Content-Type", "application/octet-stream")
 			req.SetHeader("Content-Length", strconv.Itoa(n))
-			req.SetBody(buf)
+			req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(buf)))
 		}, nil)
 		if err != nil {
 			break
@@ -100,7 +100,7 @@ func (d *Cloudreve) login() error {
 		if err == nil {
 			break
 		}
-		if err != nil && err.Error() != "CAPTCHA not match." {
+		if err.Error() != "CAPTCHA not match." {
 			break
 		}
 	}
@@ -202,7 +202,8 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
 		if err != nil {
 			return err
 		}
-		req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), bytes.NewBuffer(byteData))
+		req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
+			driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
 		if err != nil {
 			return err
 		}
@@ -214,7 +215,7 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
 		if err != nil {
 			return err
 		}
-		res.Body.Close()
+		_ = res.Body.Close()
 		up(float64(finish) * 100 / float64(stream.GetSize()))
 		chunk++
 	}
@@ -241,7 +242,7 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
 		if err != nil {
 			return err
 		}
-		req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData))
+		req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
 		if err != nil {
 			return err
 		}
@@ -256,10 +257,10 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
 		// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
 		if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
 			data, _ := io.ReadAll(res.Body)
-			res.Body.Close()
+			_ = res.Body.Close()
 			return errors.New(string(data))
 		}
-		res.Body.Close()
+		_ = res.Body.Close()
 		up(float64(finish) * 100 / float64(stream.GetSize()))
 	}
 	// on success, send the callback request
@@ -191,7 +191,7 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 	}

 	url := d.contentBase + "/2/files/upload_session/append_v2"
-	reader := io.LimitReader(stream, PartSize)
+	reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, PartSize))
 	req, err := http.NewRequest(http.MethodPost, url, reader)
 	if err != nil {
 		log.Errorf("failed to update file when append to upload session, err: %+v", err)
@@ -219,13 +219,8 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 			return err
 		}
 		_ = res.Body.Close()
-
-		if count > 0 {
-			up(float64(i+1) * 100 / float64(count))
-		}
-
+		up(float64(i+1) * 100 / float64(count))
 		offset += byteSize
-
 	}
 	// 3.finish
 	toPath := dstDir.GetPath() + "/" + stream.GetName()
@@ -2,7 +2,6 @@ package ftp

 import (
 	"context"
-	"github.com/alist-org/alist/v3/internal/stream"
 	stdpath "path"

 	"github.com/alist-org/alist/v3/internal/driver"
@@ -120,13 +119,10 @@ func (d *FTP) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, u
 		return err
 	}
 	path := stdpath.Join(dstDir.GetPath(), s.GetName())
-	return d.conn.Stor(encode(path, d.Encoding), &stream.ReaderWithCtx{
-		Reader: &stream.ReaderUpdatingProgress{
-			Reader:         s,
-			UpdateProgress: up,
-		},
-		Ctx: ctx,
-	})
+	return d.conn.Stor(encode(path, d.Encoding), driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         s,
+		UpdateProgress: up,
+	}))
 }

 var _ driver.Driver = (*FTP)(nil)
@@ -16,7 +16,6 @@ import (
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
 	log "github.com/sirupsen/logrus"
@@ -676,13 +675,13 @@ func (d *Github) putBlob(ctx context.Context, s model.FileStreamer, up driver.Up
 	afterContentReader := strings.NewReader(afterContent)
 	req, err := http.NewRequestWithContext(ctx, http.MethodPost,
 		fmt.Sprintf("https://api.github.com/repos/%s/%s/git/blobs", d.Owner, d.Repo),
-		&stream.ReaderUpdatingProgress{
-			Reader: &stream.SimpleReaderWithSize{
+		driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+			Reader: &driver.SimpleReaderWithSize{
 				Reader: io.MultiReader(beforeContentReader, contentReader, afterContentReader),
 				Size:   length,
 			},
 			UpdateProgress: up,
-		})
+		}))
 	if err != nil {
 		return "", err
 	}
@@ -698,6 +697,7 @@ func (d *Github) putBlob(ctx context.Context, s model.FileStreamer, up driver.Up
 	if err != nil {
 		return "", err
 	}
+	defer res.Body.Close()
 	resBody, err := io.ReadAll(res.Body)
 	if err != nil {
 		return "", err
@@ -158,7 +158,8 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 	putUrl := res.Header().Get("location")
 	if stream.GetSize() < d.ChunkSize*1024*1024 {
 		_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
-			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream)
+			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).
+				SetBody(driver.NewLimitedUploadStream(ctx, stream))
 		}, nil)
 	} else {
 		err = d.chunkUpload(ctx, stream, putUrl)
@@ -11,10 +11,10 @@ import (
 	"strconv"
 	"time"

-	"github.com/alist-org/alist/v3/pkg/http_range"
-
 	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
 	"github.com/golang-jwt/jwt/v4"
@@ -126,8 +126,7 @@ func (d *GoogleDrive) refreshToken() error {
 		d.AccessToken = resp.AccessToken
 		return nil
-	}
-	if gdsaFileErr != nil && os.IsExist(gdsaFileErr) {
+	} else if os.IsExist(gdsaFileErr) {
 		return gdsaFileErr
 	}
 	url := "https://www.googleapis.com/oauth2/v4/token"
@@ -229,6 +228,7 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer
 	if err != nil {
 		return err
 	}
+	reader = driver.NewLimitedUploadStream(ctx, reader)
 	_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
 		req.SetHeaders(map[string]string{
 			"Content-Length": strconv.FormatInt(chunkSize, 10),
@@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 	}

 	resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) {
-		req.SetBody(stream).SetContext(ctx)
+		req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx)
 	}, nil, postHeaders)

 	if err != nil {
@@ -392,10 +392,11 @@ func (d *HalalCloud) put(ctx context.Context, dstDir model.Obj, fileStream model
 	if fileStream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
 		uploader.PartSize = fileStream.GetSize() / (s3manager.MaxUploadParts - 1)
 	}
+	reader := driver.NewLimitedUploadStream(ctx, fileStream)
 	_, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
 		Bucket: aws.String(result.Bucket),
 		Key:    aws.String(result.Key),
-		Body:   io.TeeReader(fileStream, driver.NewProgress(fileStream.GetSize(), up)),
+		Body:   io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up)),
 	})
 	return nil, err
@@ -309,13 +309,13 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
 	upToken := utils.Json.Get(res, "upToken").ToString()
 	now := time.Now()
 	key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli())
-	reader := &stream.ReaderUpdatingProgress{
-		Reader: &stream.SimpleReaderWithSize{
+	reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader: &driver.SimpleReaderWithSize{
 			Reader: tempFile,
 			Size:   s.GetSize(),
 		},
 		UpdateProgress: up,
-	}
+	})
 	var token string
 	if s.GetSize() <= DefaultPartSize {
 		res, err := d.upClient.R().SetContext(ctx).SetMultipartFormData(map[string]string{
@@ -3,7 +3,6 @@ package ipfs
 import (
 	"context"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"net/url"
 	stdpath "path"
 	"path/filepath"
@@ -111,13 +110,10 @@ func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error {

 func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
 	// TODO upload file, optional
-	_, err := d.sh.Add(&stream.ReaderWithCtx{
-		Reader: &stream.ReaderUpdatingProgress{
-			Reader:         s,
-			UpdateProgress: up,
-		},
-		Ctx: ctx,
-	}, ToFiles(stdpath.Join(dstDir.GetPath(), s.GetName())))
+	_, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         s,
+		UpdateProgress: up,
+	}), ToFiles(stdpath.Join(dstDir.GetPath(), s.GetName())))
 	return err
 }

@@ -3,9 +3,6 @@ package kodbox
 import (
 	"context"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
-	"github.com/alist-org/alist/v3/pkg/utils"
-	"github.com/go-resty/resty/v2"
 	"net/http"
 	"path/filepath"
 	"strings"
@@ -13,6 +10,8 @@ import (

 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/go-resty/resty/v2"
 )

 type KodBox struct {
@@ -229,10 +228,10 @@ func (d *KodBox) Remove(ctx context.Context, obj model.Obj) error {
 func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	var resp *CommonResp
 	_, err := d.request(http.MethodPost, "/?explorer/upload/fileUpload", func(req *resty.Request) {
-		r := &stream.ReaderUpdatingProgress{
+		r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 			Reader:         s,
 			UpdateProgress: up,
-		}
+		})
 		req.SetFileReader("file", s.GetName(), r).
 			SetResult(&resp).
 			SetFormData(map[string]string{
@@ -2,7 +2,6 @@ package lanzou

 import (
 	"context"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"

 	"github.com/alist-org/alist/v3/drivers/base"
@@ -213,6 +212,10 @@ func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer
 	if d.IsCookie() || d.IsAccount() {
 		var resp RespText[[]FileOrFolder]
 		_, err := d._post(d.BaseUrl+"/html5up.php", func(req *resty.Request) {
+			reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+				Reader:         s,
+				UpdateProgress: up,
+			})
 			req.SetFormData(map[string]string{
 				"task": "1",
 				"vie":  "2",
@@ -220,10 +223,7 @@ func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer
 				"id":             "WU_FILE_0",
 				"name":           s.GetName(),
 				"folder_id_bb_n": dstDir.GetID(),
-			}).SetFileReader("upload_file", s.GetName(), &stream.ReaderUpdatingProgress{
-				Reader:         s,
-				UpdateProgress: up,
-			}).SetContext(ctx)
+			}).SetFileReader("upload_file", s.GetName(), reader).SetContext(ctx)
 		}, &resp, true)
 		if err != nil {
 			return nil, err
@@ -320,7 +320,10 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 		Build()

 	// send the request
-	uploadLimit.Wait(ctx)
+	err := uploadLimit.Wait(ctx)
+	if err != nil {
+		return nil, err
+	}
 	resp, err := c.client.Drive.File.UploadPrepare(ctx, req)
 	if err != nil {
 		return nil, err
@@ -341,7 +344,7 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 			length = stream.GetSize() - int64(i*blockSize)
 		}

-		reader := io.LimitReader(stream, length)
+		reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, length))

 		req := larkdrive.NewUploadPartFileReqBuilder().
 			Body(larkdrive.NewUploadPartFileReqBodyBuilder().
@@ -353,7 +356,10 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 			Build()

 		// send the request
-		uploadLimit.Wait(ctx)
+		err = uploadLimit.Wait(ctx)
+		if err != nil {
+			return nil, err
+		}
 		resp, err := c.client.Drive.File.UploadPart(ctx, req)

 		if err != nil {
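Both Lark hunks stop discarding the error from uploadLimit.Wait. Assuming uploadLimit is a golang.org/x/time/rate limiter (its definition is not in this excerpt), Wait fails when ctx is cancelled or when its deadline cannot cover the required wait, so ignoring it meant throttled uploads kept issuing requests after cancellation. The corrected shape as a generic sketch:

    // Assumes lim is a *rate.Limiter; returns before calling the API when
    // the context is already dead or the wait would outlive its deadline.
    func throttledCall(ctx context.Context, lim *rate.Limiter, call func() error) error {
        if err := lim.Wait(ctx); err != nil {
            return err
        }
        return call()
    }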
@@ -5,7 +5,6 @@ import (
 	"crypto/md5"
 	"encoding/hex"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"strconv"
@@ -195,13 +194,13 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, file model.FileS
 	input := &s3manager.UploadInput{
 		Bucket: &resp.Data.Bucket,
 		Key:    &resp.Data.Object,
-		Body: &stream.ReaderUpdatingProgress{
-			Reader: &stream.SimpleReaderWithSize{
+		Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+			Reader: &driver.SimpleReaderWithSize{
 				Reader: tempFile,
 				Size:   file.GetSize(),
 			},
 			UpdateProgress: up,
-		},
+		}),
 	}
 	_, err = uploader.UploadWithContext(ctx, input)
 	if err != nil {
@@ -156,6 +156,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 		return err
 	}

+	reader := driver.NewLimitedUploadStream(ctx, stream)
 	for id := 0; id < u.Chunks(); id++ {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -165,7 +166,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 			return err
 		}
 		chunk := make([]byte, chkSize)
-		n, err := io.ReadFull(stream, chunk)
+		n, err := io.ReadFull(reader, chunk)
 		if err != nil && err != io.EOF {
 			return err
 		}
@@ -64,7 +64,7 @@ func (d *Misskey) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *Misskey) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-	return d.put(dstDir, stream, up)
+	return d.put(ctx, dstDir, stream, up)
 }

 //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
@@ -1,7 +1,6 @@
 package misskey

 import (
-	"bytes"
 	"context"
 	"errors"
 	"io"
@@ -190,16 +189,16 @@ func (d *Misskey) remove(obj model.Obj) error {
 	}
 }

-func (d *Misskey) put(dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+func (d *Misskey) put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	var file MFile

-	fileContent, err := io.ReadAll(stream)
-	if err != nil {
-		return nil, err
-	}
+	reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         stream,
+		UpdateProgress: up,
+	})

 	req := base.RestyClient.R().
-		SetFileReader("file", stream.GetName(), io.NopCloser(bytes.NewReader(fileContent))).
+		SetContext(ctx).
+		SetFileReader("file", stream.GetName(), reader).
 		SetFormData(map[string]string{
 			"folderId": handleFolderId(dstDir).(string),
 			"name":     stream.GetName(),
@@ -207,7 +206,8 @@ func (d *Misskey) put(dstDir model.Obj, stream model.FileStreamer, up driver.Upd
 			"isSensitive": "false",
 			"force":       "false",
 		}).
-		SetResult(&file).SetAuthToken(d.AccessToken)
+		SetResult(&file).
+		SetAuthToken(d.AccessToken)

 	resp, err := req.Post(d.Endpoint + "/api/drive/files/create")
 	if err != nil {
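The Misskey rewrite replaces io.ReadAll — whole file buffered in memory, no progress, no cancellation — with the shared wrapper chain, and threads ctx into both put and the resty request. One caveat, hedged since retry behavior isn't visible here: a streamed multipart body cannot be replayed, so any retry would need a re-readable source. The new request shape (excerpt, identifiers as in the hunk above):

    // Any io.Reader works as a multipart file field, so the progress and
    // rate-limit wrappers slot in directly instead of a buffered []byte.
    reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
        Reader:         stream, // model.FileStreamer
        UpdateProgress: up,
    })
    resp, err := base.RestyClient.R().
        SetContext(ctx). // propagates cancellation into the HTTP request
        SetFileReader("file", stream.GetName(), reader).
        SetResult(&file).
        Post(d.Endpoint + "/api/drive/files/create")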
@@ -10,6 +10,8 @@ import (
 	"strings"
 	"time"

+	"golang.org/x/sync/semaphore"
+
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
@@ -301,6 +303,7 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 		retry.Attempts(3),
 		retry.Delay(time.Second),
 		retry.DelayType(retry.BackOffDelay))
+	sem := semaphore.NewWeighted(3)

 	// step.3
 	parts, err := d.client.GetAllMultiUploadUrls(initUpdload.UploadFileID, initUpdload.PartInfos)
@@ -312,6 +315,9 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 		if utils.IsCanceled(upCtx) {
 			break
 		}
+		if err = sem.Acquire(ctx, 1); err != nil {
+			break
+		}
 		i, part, byteSize := i, part, initUpdload.PartSize
 		if part.PartNumber == uploadPartData.PartTotal {
 			byteSize = initUpdload.LastPartSize
@@ -319,7 +325,9 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre

 		// step.4
 		threadG.Go(func(ctx context.Context) error {
-			req, err := part.NewRequest(ctx, io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize))
+			defer sem.Release(1)
+			reader := io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize)
+			req, err := part.NewRequest(ctx, driver.NewLimitedUploadStream(ctx, reader))
 			if err != nil {
 				return err
 			}
@@ -328,7 +336,7 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 			if err != nil {
 				return err
 			}
-			resp.Body.Close()
+			_ = resp.Body.Close()
 			if resp.StatusCode != http.StatusOK {
 				return fmt.Errorf("upload err,code=%d", resp.StatusCode)
 			}
@@ -116,16 +116,3 @@ func (ch *Characteristic) merge(data map[string]string) map[string]interface{} {
 	}
 	return body
 }
-
-type InlineReadCloser struct {
-	io.Reader
-	io.Closer
-}
-
-func (rc *InlineReadCloser) Read(p []byte) (int, error) {
-	return rc.Reader.Read(p)
-}
-
-func (rc *InlineReadCloser) Close() error {
-	return rc.Closer.Close()
-}
@@ -2,8 +2,6 @@ package netease_music

 import (
 	"context"
-	"github.com/alist-org/alist/v3/internal/driver"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"
 	"path"
 	"regexp"
@@ -12,6 +10,7 @@ import (
 	"time"

 	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
@@ -69,13 +68,10 @@ func (d *NeteaseMusic) request(url, method string, opt ReqOption) ([]byte, error
 			opt.up = func(_ float64) {}
 		}
 		req.SetContentLength(true)
-		req.SetBody(&InlineReadCloser{
-			Reader: &stream.ReaderUpdatingProgress{
-				Reader:         opt.stream,
-				UpdateProgress: opt.up,
-			},
-			Closer: opt.stream,
-		})
+		req.SetBody(driver.NewLimitedUploadStream(opt.ctx, &driver.ReaderUpdatingProgress{
+			Reader:         opt.stream,
+			UpdateProgress: opt.up,
+		}))
 	} else {
 		req.SetFormData(data)
 	}
@@ -152,12 +152,8 @@ func (d *Onedrive) upSmall(ctx context.Context, dstDir model.Obj, stream model.F
 	// 1. upload new file
 	// ApiDoc: https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content?view=odsp-graph-online
 	url := d.GetMetaUrl(false, filepath) + "/content"
-	data, err := io.ReadAll(stream)
-	if err != nil {
-		return err
-	}
-	_, err = d.Request(url, http.MethodPut, func(req *resty.Request) {
-		req.SetBody(data).SetContext(ctx)
+	_, err := d.Request(url, http.MethodPut, func(req *resty.Request) {
+		req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx)
 	}, nil)
 	if err != nil {
 		return fmt.Errorf("onedrive: Failed to upload new file(path=%v): %w", filepath, err)
@@ -225,7 +221,7 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
 	if err != nil {
 		return err
 	}
-	req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData))
+	req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
 	if err != nil {
 		return err
 	}
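Note: both upSmall rewrites in this commit drop the io.ReadAll buffering and hand the stream directly to the request body, so memory stays flat regardless of file size. A stdlib-only sketch of the streaming side of that change, assuming only a server that counts bytes:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		n, _ := io.Copy(io.Discard, r.Body)
		fmt.Fprintf(w, "got %d bytes", n)
	}))
	defer srv.Close()

	// Streaming body: the source is read chunk by chunk while the request
	// is written, instead of being fully buffered with io.ReadAll first.
	body := strings.NewReader(strings.Repeat("x", 1<<20))
	req, err := http.NewRequest(http.MethodPut, srv.URL, body)
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	msg, _ := io.ReadAll(resp.Body)
	fmt.Println(string(msg))
}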
@@ -140,12 +140,8 @@ func (d *OnedriveAPP) GetFile(path string) (*File, error) {
 
 func (d *OnedriveAPP) upSmall(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) error {
 	url := d.GetMetaUrl(false, stdpath.Join(dstDir.GetPath(), stream.GetName())) + "/content"
-	data, err := io.ReadAll(stream)
-	if err != nil {
-		return err
-	}
-	_, err = d.Request(url, http.MethodPut, func(req *resty.Request) {
-		req.SetBody(data).SetContext(ctx)
+	_, err := d.Request(url, http.MethodPut, func(req *resty.Request) {
+		req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx)
 	}, nil)
 	return err
 }
@@ -175,7 +171,7 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
 	if err != nil {
 		return err
 	}
-	req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData))
+	req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
 	if err != nil {
 		return err
 	}
@@ -10,7 +10,6 @@ import (
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/op"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 	jsoniter "github.com/json-iterator/go"
@@ -430,13 +429,10 @@ func (d *PikPak) UploadByOSS(ctx context.Context, params *S3Params, s model.File
 		return err
 	}
 
-	err = bucket.PutObject(params.Key, &stream.ReaderWithCtx{
-		Reader: &stream.ReaderUpdatingProgress{
-			Reader:         s,
-			UpdateProgress: up,
-		},
-		Ctx: ctx,
-	}, OssOption(params)...)
+	err = bucket.PutObject(params.Key, driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         s,
+		UpdateProgress: up,
+	}), OssOption(params)...)
 	if err != nil {
 		return err
 	}
@@ -522,11 +518,8 @@ func (d *PikPak) UploadByMultipart(ctx context.Context, params *S3Params, fileSi
 			continue
 		}
 
-		b := bytes.NewBuffer(buf)
-		if part, err = bucket.UploadPart(imur, &stream.ReaderWithCtx{
-			Reader: b,
-			Ctx:    ctx,
-		}, chunk.Size, chunk.Number, OssOption(params)...); err == nil {
+		b := driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(buf))
+		if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, OssOption(params)...); err == nil {
 			break
 		}
 	}
@@ -1,6 +1,7 @@
 package quark
 
 import (
+	"bytes"
 	"context"
 	"crypto/md5"
 	"crypto/sha1"
@@ -178,7 +179,7 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
 	}
 	// part up
 	partSize := pre.Metadata.PartSize
-	var bytes []byte
+	var part []byte
 	md5s := make([]string, 0)
 	defaultBytes := make([]byte, partSize)
 	total := stream.GetSize()
@@ -189,17 +190,18 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
 			return ctx.Err()
 		}
 		if left > int64(partSize) {
-			bytes = defaultBytes
+			part = defaultBytes
 		} else {
-			bytes = make([]byte, left)
+			part = make([]byte, left)
 		}
-		_, err := io.ReadFull(tempFile, bytes)
+		_, err := io.ReadFull(tempFile, part)
 		if err != nil {
 			return err
 		}
-		left -= int64(len(bytes))
+		left -= int64(len(part))
 		log.Debugf("left: %d", left)
-		m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, bytes)
+		reader := driver.NewLimitedUploadStream(ctx, bytes.NewReader(part))
+		m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, reader)
 		//m, err := driver.UpPart(pre, file.GetMIMEType(), partNumber, bytes, account, md5Str, sha1Str)
 		if err != nil {
 			return err
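Note: renaming the local variable from bytes to part frees the identifier for the newly imported bytes package, and the loop keeps its buffer-reuse scheme: every full-sized part shares defaultBytes, only the final short part allocates, and io.ReadFull guarantees each buffer is completely filled. A stand-alone sketch of that pattern with illustrative sizes and a discard "upload":

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader(strings.Repeat("a", 10))
	const partSize = 4
	defaultBytes := make([]byte, partSize) // reused for full-sized parts
	left := int64(10)
	for partNumber := 1; left > 0; partNumber++ {
		var part []byte
		if left > int64(partSize) {
			part = defaultBytes
		} else {
			part = make([]byte, left) // only the last, short part allocates
		}
		if _, err := io.ReadFull(src, part); err != nil {
			panic(err)
		}
		left -= int64(len(part))
		// Each part is wrapped in a fresh reader before being handed to
		// the upload call, as upPart now takes an io.Reader.
		reader := bytes.NewReader(part)
		n, _ := io.Copy(io.Discard, reader)
		fmt.Printf("part %d: %d bytes\n", partNumber, n)
	}
}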
@@ -6,6 +6,7 @@ import (
 	"encoding/base64"
 	"errors"
 	"fmt"
+	"io"
 	"net/http"
 	"strconv"
 	"strings"
@@ -119,7 +120,7 @@ func (d *QuarkOrUC) upHash(md5, sha1, taskId string) (bool, error) {
 	return resp.Data.Finish, err
 }
 
-func (d *QuarkOrUC) upPart(ctx context.Context, pre UpPreResp, mineType string, partNumber int, bytes []byte) (string, error) {
+func (d *QuarkOrUC) upPart(ctx context.Context, pre UpPreResp, mineType string, partNumber int, bytes io.Reader) (string, error) {
 	//func (driver QuarkOrUC) UpPart(pre UpPreResp, mineType string, partNumber int, bytes []byte, account *model.Account, md5Str, sha1Str string) (string, error) {
 	timeStr := time.Now().UTC().Format(http.TimeFormat)
 	data := base.Json{
@@ -163,6 +164,9 @@ x-oss-user-agent:aliyun-sdk-js/6.6.1 Chrome 98.0.4758.80 on Windows 10 64-bit
 		"partNumber": strconv.Itoa(partNumber),
 		"uploadId":   pre.Data.UploadId,
 	}).SetBody(bytes).Put(u)
+	if err != nil {
+		return "", err
+	}
 	if res.StatusCode() != 200 {
 		return "", fmt.Errorf("up status: %d, error: %s", res.StatusCode(), res.String())
 	}
@@ -230,6 +234,9 @@ x-oss-user-agent:aliyun-sdk-js/6.6.1 Chrome 98.0.4758.80 on Windows 10 64-bit
 		SetQueryParams(map[string]string{
 			"uploadId": pre.Data.UploadId,
 		}).SetBody(body).Post(u)
+	if err != nil {
+		return err
+	}
 	if res.StatusCode() != 200 {
 		return fmt.Errorf("up status: %d, error: %s", res.StatusCode(), res.String())
 	}
@@ -12,7 +12,6 @@ import (
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
-	istream "github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/alist-org/alist/v3/pkg/utils/random"
 	"github.com/aws/aws-sdk-go/aws"
@@ -387,8 +386,8 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 	}
 	uploader := s3manager.NewUploader(s)
 	buf := make([]byte, 1024*1024*2)
-	fup := &istream.ReaderUpdatingProgress{
-		Reader: &istream.SimpleReaderWithSize{
+	fup := &driver.ReaderUpdatingProgress{
+		Reader: &driver.SimpleReaderWithSize{
 			Reader: f,
 			Size:   int64(len(buf)),
 		},
@@ -402,12 +401,19 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 			}
 			return nil, err
 		}
+		reader := bytes.NewReader(buf[:n])
 		_, err = uploader.S3.UploadPartWithContext(ctx, &s3.UploadPartInput{
 			UploadId:   &uploadInitResp.Data.UploadID,
 			Key:        &uploadInitResp.Data.Key,
 			Bucket:     &uploadInitResp.Data.Bucket,
 			PartNumber: aws.Int64(partNumber),
-			Body:       bytes.NewReader(buf[:n]),
+			Body: struct {
+				*driver.RateLimitReader
+				io.Seeker
+			}{
+				RateLimitReader: driver.NewLimitedUploadStream(ctx, reader),
+				Seeker:          reader,
+			},
 		})
 		if err != nil {
 			return nil, err
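Note: the anonymous struct in the Quqi hunk exists because s3.UploadPartInput.Body is an io.ReadSeeker — the AWS SDK seeks the body to sign it and to rewind on retries — while the rate-limit wrapper only implements Read. Embedding both grafts the original reader's Seek onto the limited Read. A generic, stdlib-only sketch of the trick, where slowReader stands in for the rate limiter:

package main

import (
	"fmt"
	"io"
	"strings"
)

// slowReader stands in for a rate-limited reader: it only implements Read.
type slowReader struct{ r io.Reader }

func (s *slowReader) Read(p []byte) (int, error) { return s.r.Read(p) }

func main() {
	base := strings.NewReader("hello, world")

	// Read is routed through the limiter; Seek goes straight to the
	// underlying reader, so the pair satisfies io.ReadSeeker.
	var body io.ReadSeeker = struct {
		io.Reader
		io.Seeker
	}{
		Reader: &slowReader{r: base},
		Seeker: base,
	}

	b, _ := io.ReadAll(body)
	fmt.Printf("%s\n", b)
	// A caller (like the AWS SDK) can rewind for retries or signing:
	if _, err := body.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}
	b2, _ := io.ReadAll(body)
	fmt.Printf("%s\n", b2)
}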
@@ -4,18 +4,17 @@ import (
 	"bytes"
 	"context"
 	"fmt"
-	"github.com/alist-org/alist/v3/server/common"
 	"io"
 	"net/url"
 	stdpath "path"
 	"strings"
 	"time"
 
-	"github.com/alist-org/alist/v3/internal/stream"
-	"github.com/alist-org/alist/v3/pkg/cron"
-
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/stream"
+	"github.com/alist-org/alist/v3/pkg/cron"
+	"github.com/alist-org/alist/v3/server/common"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
@@ -174,10 +173,10 @@ func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up
 	input := &s3manager.UploadInput{
 		Bucket: &d.Bucket,
 		Key:    &key,
-		Body: &stream.ReaderUpdatingProgress{
+		Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 			Reader:         s,
 			UpdateProgress: up,
-		},
+		}),
 		ContentType: &contentType,
 	}
 	_, err := uploader.UploadWithContext(ctx, input)
@@ -3,7 +3,6 @@ package seafile
 import (
 	"context"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"
 	"strings"
 	"time"
@@ -215,10 +214,10 @@ func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
 	u := string(res)
 	u = u[1 : len(u)-1] // remove quotes
 	_, err = d.request(http.MethodPost, u, func(req *resty.Request) {
-		r := &stream.ReaderUpdatingProgress{
+		r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 			Reader:         s,
 			UpdateProgress: up,
-		}
+		})
 		req.SetFileReader("file", s.GetName(), r).
 			SetFormData(map[string]string{
 				"parent_dir": path,
@@ -111,7 +111,7 @@ func (d *SFTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 	defer func() {
 		_ = dstFile.Close()
 	}()
-	err = utils.CopyWithCtx(ctx, dstFile, stream, stream.GetSize(), up)
+	err = utils.CopyWithCtx(ctx, dstFile, driver.NewLimitedUploadStream(ctx, stream), stream.GetSize(), up)
 	return err
 }
 
@@ -186,7 +186,7 @@ func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStream
 			_ = d.fs.Remove(fullPath)
 		}
 	}()
-	err = utils.CopyWithCtx(ctx, out, stream, stream.GetSize(), up)
+	err = utils.CopyWithCtx(ctx, out, driver.NewLimitedUploadStream(ctx, stream), stream.GetSize(), up)
 	if err != nil {
 		return err
 	}
@@ -148,7 +148,7 @@ func (d *Teambition) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 	var newFile *FileUpload
 	if stream.GetSize() <= 20971520 {
 		// post upload
-		newFile, err = d.upload(ctx, stream, token)
+		newFile, err = d.upload(ctx, stream, token, up)
 	} else {
 		// chunk upload
 		//err = base.ErrNotImplement
@@ -1,6 +1,7 @@
 package teambition
 
 import (
+	"bytes"
 	"context"
 	"errors"
 	"fmt"
@@ -120,11 +121,15 @@ func (d *Teambition) getFiles(parentId string) ([]model.Obj, error) {
 	return files, nil
 }
 
-func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token string) (*FileUpload, error) {
+func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token string, up driver.UpdateProgress) (*FileUpload, error) {
 	prefix := "tcs"
 	if d.isInternational() {
 		prefix = "us-tcs"
 	}
+	reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         file,
+		UpdateProgress: up,
+	})
 	var newFile FileUpload
 	res, err := base.RestyClient.R().
 		SetContext(ctx).
@@ -134,7 +139,8 @@ func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token
 			"type":             file.GetMimetype(),
 			"size":             strconv.FormatInt(file.GetSize(), 10),
 			"lastModifiedDate": time.Now().Format("Mon Jan 02 2006 15:04:05 GMT+0800 (中国标准时间)"),
-		}).SetMultipartField("file", file.GetName(), file.GetMimetype(), file).
+		}).
+		SetMultipartField("file", file.GetName(), file.GetMimetype(), reader).
 		Post(fmt.Sprintf("https://%s.teambition.net/upload", prefix))
 	if err != nil {
 		return nil, err
@@ -183,10 +189,9 @@ func (d *Teambition) chunkUpload(ctx context.Context, file model.FileStreamer, t
 			"Authorization": token,
 			"Content-Type":  "application/octet-stream",
 			"Referer":       referer,
-		}).SetBody(chunkData).Post(u)
-	if err != nil {
-		return nil, err
-	}
+		}).
+		SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(chunkData))).
+		Post(u)
 	if err != nil {
 		return nil, err
 	}
@@ -252,7 +257,10 @@ func (d *Teambition) newUpload(ctx context.Context, dstDir model.Obj, stream mod
 		Key:                &uploadToken.Upload.Key,
 		ContentDisposition: &uploadToken.Upload.ContentDisposition,
 		ContentType:        &uploadToken.Upload.ContentType,
-		Body:               stream,
+		Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+			Reader:         stream,
+			UpdateProgress: up,
+		}),
 	}
 	_, err = uploader.UploadWithContext(ctx, input)
 	if err != nil {
@@ -228,7 +228,7 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 	res, err := base.RestyClient.R().
 		SetContext(ctx).
 		SetQueryParams(params).
-		SetFileReader("file", stream.GetName(), bytes.NewReader(byteData)).
+		SetFileReader("file", stream.GetName(), driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))).
 		SetHeader("Cookie", d.Cookie).
 		Post(u)
 	if err != nil {
@@ -3,7 +3,6 @@ package thunder
 import (
 	"context"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"
 	"strconv"
 	"strings"
@@ -383,10 +382,10 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.Fi
 		Bucket:  aws.String(param.Bucket),
 		Key:     aws.String(param.Key),
 		Expires: aws.Time(param.Expiration),
-		Body: &stream.ReaderUpdatingProgress{
+		Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 			Reader:         file,
 			UpdateProgress: up,
-		},
+		}),
 	})
 	return err
 }
@@ -508,7 +508,7 @@ func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream
 		Bucket:  aws.String(param.Bucket),
 		Key:     aws.String(param.Key),
 		Expires: aws.Time(param.Expiration),
-		Body:    io.TeeReader(stream, driver.NewProgress(stream.GetSize(), up)),
+		Body:    driver.NewLimitedUploadStream(ctx, io.TeeReader(stream, driver.NewProgress(stream.GetSize(), up))),
 	})
 	return err
 }
@@ -8,7 +8,6 @@ import (
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/op"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
 	"github.com/aws/aws-sdk-go/aws"
@@ -414,10 +413,10 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.F
 		Bucket:  aws.String(param.Bucket),
 		Key:     aws.String(param.Key),
 		Expires: aws.Time(param.Expiration),
-		Body: &stream.ReaderUpdatingProgress{
+		Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 			Reader:         file,
 			UpdateProgress: up,
-		},
+		}),
 	})
 	return err
 }
@@ -4,7 +4,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"net/url"
@@ -59,7 +58,7 @@ func (d *Trainbit) List(ctx context.Context, dir model.Obj, args model.ListArgs)
 		return nil, err
 	}
 	var jsonData any
-	json.Unmarshal(data, &jsonData)
+	err = json.Unmarshal(data, &jsonData)
 	if err != nil {
 		return nil, err
 	}
@@ -122,10 +121,10 @@ func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, s model.FileStream
 	query.Add("guid", guid)
 	query.Add("name", url.QueryEscape(local2provider(s.GetName(), false)+"."))
 	endpoint.RawQuery = query.Encode()
-	progressReader := &stream.ReaderUpdatingProgress{
+	progressReader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 		Reader:         s,
 		UpdateProgress: up,
-	}
+	})
 	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint.String(), progressReader)
 	if err != nil {
 		return err
@@ -3,7 +3,6 @@ package url_tree
 import (
 	"context"
 	"errors"
-	"github.com/alist-org/alist/v3/internal/op"
 	stdpath "path"
 	"strings"
 	"sync"
@@ -11,6 +10,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/op"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	log "github.com/sirupsen/logrus"
 )
@@ -126,13 +126,10 @@ func (d *USS) Remove(ctx context.Context, obj model.Obj) error {
 func (d *USS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
 	return d.client.Put(&upyun.PutObjectConfig{
 		Path: getKey(path.Join(dstDir.GetPath(), s.GetName()), false),
-		Reader: &stream.ReaderWithCtx{
-			Reader: &stream.ReaderUpdatingProgress{
-				Reader:         s,
-				UpdateProgress: up,
-			},
-			Ctx: ctx,
-		},
+		Reader: driver.NewLimitedUploadStream(ctx, &stream.ReaderUpdatingProgress{
+			Reader:         s,
+			UpdateProgress: up,
+		}),
 	})
 }
 
@@ -278,7 +278,8 @@ func (d *Vtencent) FileUpload(ctx context.Context, dstDir model.Obj, stream mode
 	input := &s3manager.UploadInput{
 		Bucket: aws.String(fmt.Sprintf("%s-%d", params.StorageBucket, params.StorageAppID)),
 		Key:    &params.Video.StoragePath,
-		Body:   io.TeeReader(stream, io.MultiWriter(hash, driver.NewProgress(stream.GetSize(), up))),
+		Body: driver.NewLimitedUploadStream(ctx,
+			io.TeeReader(stream, io.MultiWriter(hash, driver.NewProgress(stream.GetSize(), up)))),
 	}
 	_, err = uploader.UploadWithContext(ctx, input)
 	if err != nil {
@@ -2,7 +2,6 @@ package webdav
 
 import (
 	"context"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"
 	"os"
 	"path"
@@ -99,13 +98,11 @@ func (d *WebDav) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer
 		r.Header.Set("Content-Type", s.GetMimetype())
 		r.ContentLength = s.GetSize()
 	}
-	err := d.client.WriteStream(path.Join(dstDir.GetPath(), s.GetName()), &stream.ReaderWithCtx{
-		Reader: &stream.ReaderUpdatingProgress{
-			Reader:         s,
-			UpdateProgress: up,
-		},
-		Ctx: ctx,
-	}, 0644, callback)
+	reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         s,
+		UpdateProgress: up,
+	})
+	err := d.client.WriteStream(path.Join(dstDir.GetPath(), s.GetName()), reader, 0644, callback)
 	return err
 }
 
@@ -70,7 +70,7 @@ func (d *WeiYun) Init(ctx context.Context) error {
 	if d.client.LoginType() == 1 {
 		d.cron = cron.NewCron(time.Minute * 5)
 		d.cron.Do(func() {
-			d.client.KeepAlive()
+			_ = d.client.KeepAlive()
 		})
 	}
 
@@ -364,12 +364,13 @@ func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		threadG.Go(func(ctx context.Context) error {
 			for {
 				channel.Len = int(math.Min(float64(stream.GetSize()-channel.Offset), float64(channel.Len)))
+				len64 := int64(channel.Len)
 				upData, err := d.client.UploadFile(upCtx, channel, preData.UploadAuthData,
-					io.NewSectionReader(file, channel.Offset, int64(channel.Len)))
+					driver.NewLimitedUploadStream(ctx, io.NewSectionReader(file, channel.Offset, len64)))
 				if err != nil {
 					return err
 				}
-				cur := total.Add(int64(channel.Len))
+				cur := total.Add(len64)
 				up(float64(cur) * 100.0 / float64(stream.GetSize()))
 				// upload finished
 				if upData.UploadState != 1 {
@@ -155,7 +155,7 @@ func (d *Wopan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 	_, err := d.client.Upload2C(d.getSpaceType(), wopan.Upload2CFile{
 		Name: stream.GetName(),
 		Size: stream.GetSize(),
-		Content:     stream,
+		Content:     driver.NewLimitedUploadStream(ctx, stream),
 		ContentType: stream.GetMimetype(),
 	}, dstDir.GetID(), d.FamilyID, wopan.Upload2COption{
 		OnProgress: func(current, total int64) {
@@ -2,7 +2,6 @@ package yandex_disk
 
 import (
 	"context"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"
 	"path"
 	"strconv"
@@ -118,10 +117,11 @@ func (d *YandexDisk) Put(ctx context.Context, dstDir model.Obj, s model.FileStre
 	if err != nil {
 		return err
 	}
-	req, err := http.NewRequestWithContext(ctx, resp.Method, resp.Href, &stream.ReaderUpdatingProgress{
+	reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 		Reader:         s,
 		UpdateProgress: up,
 	})
+	req, err := http.NewRequestWithContext(ctx, resp.Method, resp.Href, reader)
 	if err != nil {
 		return err
 	}
go.mod
@@ -62,7 +62,7 @@ require (
 	github.com/u2takey/ffmpeg-go v0.5.0
 	github.com/upyun/go-sdk/v3 v3.0.4
 	github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5
-	github.com/xhofe/tache v0.1.3
+	github.com/xhofe/tache v0.1.5
 	github.com/xhofe/wopan-sdk-go v0.1.3
 	github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9
 	github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22
@@ -102,6 +102,7 @@ require (
 	github.com/jackc/puddle/v2 v2.2.1 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/kr/text v0.2.0 // indirect
+	github.com/matoous/go-nanoid/v2 v2.1.0 // indirect
 	github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect
 	github.com/sorairolake/lzip-go v0.3.5 // indirect
 	github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect
@@ -170,7 +171,6 @@ require (
 	github.com/jackc/pgpassfile v1.0.0 // indirect
 	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
 	github.com/jackc/pgx/v5 v5.5.5 // indirect
-	github.com/jaevor/go-nanoid v1.3.0 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/jinzhu/now v1.1.5 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
@@ -240,7 +240,7 @@ require (
 	github.com/yusufpapurcu/wmi v1.2.4 // indirect
 	go.etcd.io/bbolt v1.3.8 // indirect
 	golang.org/x/arch v0.8.0 // indirect
-	golang.org/x/sync v0.10.0 // indirect
+	golang.org/x/sync v0.10.0
 	golang.org/x/sys v0.28.0 // indirect
 	golang.org/x/term v0.27.0 // indirect
 	golang.org/x/text v0.21.0
go.sum
@@ -337,8 +337,6 @@ github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
 github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
 github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
 github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
-github.com/jaevor/go-nanoid v1.3.0 h1:nD+iepesZS6pr3uOVf20vR9GdGgJW1HPaR46gtrxzkg=
-github.com/jaevor/go-nanoid v1.3.0/go.mod h1:SI+jFaPuddYkqkVQoNGHs81navCtH388TcrH0RqFKgY=
 github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
 github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
 github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
@@ -403,6 +401,8 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
 github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
+github.com/matoous/go-nanoid/v2 v2.1.0 h1:P64+dmq21hhWdtvZfEAofnvJULaRR1Yib0+PnU669bE=
+github.com/matoous/go-nanoid/v2 v2.1.0/go.mod h1:KlbGNQ+FhrUNIHUxZdL63t7tl4LaPkZNpUULS8H4uVM=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -596,8 +596,8 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3KP7BT2dot2CvJGIvrB0NEoDXI=
 github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0=
-github.com/xhofe/tache v0.1.3 h1:MipxzlljYX29E1YI/SLC7hVomVF+51iP1OUzlsuq1wE=
-github.com/xhofe/tache v0.1.3/go.mod h1:iKumPFvywf30FRpAHHCt64G0JHLMzT0K+wyGedHsmTQ=
+github.com/xhofe/tache v0.1.5 h1:ezDcgim7tj7KNMXliQsmf8BJQbaZtitfyQA9Nt+B4WM=
+github.com/xhofe/tache v0.1.5/go.mod h1:PYt6I/XUKliSg1uHlgsk6ha+le/f6PAvjUtFZAVl3a8=
 github.com/xhofe/wopan-sdk-go v0.1.3 h1:J58X6v+n25ewBZjb05pKOr7AWGohb+Rdll4CThGh6+A=
 github.com/xhofe/wopan-sdk-go v0.1.3/go.mod h1:dcY9yA28fnaoZPnXZiVTFSkcd7GnIPTpTIIlfSI5z5Q=
 github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
@@ -11,6 +11,7 @@ import (
 	"github.com/alist-org/alist/v3/pkg/utils/random"
 	"github.com/pkg/errors"
 	"gorm.io/gorm"
+	"strconv"
 )
 
 var initialSettingItems []model.SettingItem
@@ -191,12 +192,12 @@ func InitialSettings() []model.SettingItem {
 		{Key: conf.LdapDefaultPermission, Value: "0", Type: conf.TypeNumber, Group: model.LDAP, Flag: model.PRIVATE},
 		{Key: conf.LdapLoginTips, Value: "login with ldap", Type: conf.TypeString, Group: model.LDAP, Flag: model.PUBLIC},
 
-		//s3 settings
+		// s3 settings
 		{Key: conf.S3AccessKeyId, Value: "", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE},
 		{Key: conf.S3SecretAccessKey, Value: "", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE},
 		{Key: conf.S3Buckets, Value: "[]", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE},
 
-		//ftp settings
+		// ftp settings
 		{Key: conf.FTPPublicHost, Value: "127.0.0.1", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
 		{Key: conf.FTPPasvPortMap, Value: "", Type: conf.TypeText, Group: model.FTP, Flag: model.PRIVATE},
 		{Key: conf.FTPProxyUserAgent, Value: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " +
@@ -205,6 +206,18 @@ func InitialSettings() []model.SettingItem {
 		{Key: conf.FTPImplicitTLS, Value: "false", Type: conf.TypeBool, Group: model.FTP, Flag: model.PRIVATE},
 		{Key: conf.FTPTLSPrivateKeyPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
 		{Key: conf.FTPTLSPublicCertPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
+
+		// traffic settings
+		{Key: conf.TaskOfflineDownloadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Download.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.TaskOfflineDownloadTransferThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Transfer.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.TaskUploadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Upload.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.TaskCopyThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Copy.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.TaskDecompressDownloadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Decompress.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.TaskDecompressUploadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.DecompressUpload.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.StreamMaxClientDownloadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.StreamMaxClientUploadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.StreamMaxServerDownloadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.StreamMaxServerUploadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
 	}
 	initialSettingItems = append(initialSettingItems, tool.Tools.Items()...)
 	if flags.Dev {
@@ -0,0 +1,53 @@
+package bootstrap
+
+import (
+	"context"
+	"github.com/alist-org/alist/v3/internal/conf"
+	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/internal/setting"
+	"github.com/alist-org/alist/v3/internal/stream"
+	"golang.org/x/time/rate"
+)
+
+type blockBurstLimiter struct {
+	*rate.Limiter
+}
+
+func (l blockBurstLimiter) WaitN(ctx context.Context, total int) error {
+	for total > 0 {
+		n := l.Burst()
+		if l.Limiter.Limit() == rate.Inf || n > total {
+			n = total
+		}
+		err := l.Limiter.WaitN(ctx, n)
+		if err != nil {
+			return err
+		}
+		total -= n
+	}
+	return nil
+}
+
+func streamFilterNegative(limit int) (rate.Limit, int) {
+	if limit < 0 {
+		return rate.Inf, 0
+	}
+	return rate.Limit(limit) * 1024.0, limit * 1024
+}
+
+func initLimiter(limiter *stream.Limiter, s string) {
+	clientDownLimit, burst := streamFilterNegative(setting.GetInt(s, -1))
+	*limiter = blockBurstLimiter{Limiter: rate.NewLimiter(clientDownLimit, burst)}
+	op.RegisterSettingChangingCallback(func() {
+		newLimit, newBurst := streamFilterNegative(setting.GetInt(s, -1))
+		(*limiter).SetLimit(newLimit)
+		(*limiter).SetBurst(newBurst)
+	})
+}
+
+func InitStreamLimit() {
+	initLimiter(&stream.ClientDownloadLimit, conf.StreamMaxClientDownloadSpeed)
+	initLimiter(&stream.ClientUploadLimit, conf.StreamMaxClientUploadSpeed)
+	initLimiter(&stream.ServerDownloadLimit, conf.StreamMaxServerDownloadSpeed)
+	initLimiter(&stream.ServerUploadLimit, conf.StreamMaxServerUploadSpeed)
+}
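Note: blockBurstLimiter.WaitN exists because rate.Limiter.WaitN returns an error whenever n exceeds the limiter's burst size; splitting a large token request into burst-sized waits turns it into a blocking wait instead. A stand-alone sketch of the same loop (the KiB-scaled settings mirror what streamFilterNegative would produce for a setting of "4"):

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// waitInBursts splits a large token request into burst-sized WaitN calls,
// since rate.Limiter.WaitN errors out when n exceeds the burst size.
func waitInBursts(ctx context.Context, l *rate.Limiter, total int) error {
	for total > 0 {
		n := l.Burst()
		if l.Limit() == rate.Inf || n > total {
			n = total
		}
		if err := l.WaitN(ctx, n); err != nil {
			return err
		}
		total -= n
	}
	return nil
}

func main() {
	// 4 KiB/s with a 4 KiB burst.
	l := rate.NewLimiter(rate.Limit(4*1024), 4*1024)
	start := time.Now()
	// Asking for 8 KiB in one WaitN would fail; in bursts it takes ~1s.
	if err := waitInBursts(context.Background(), l, 8*1024); err != nil {
		panic(err)
	}
	fmt.Printf("8 KiB of tokens granted after %v\n", time.Since(start).Round(100*time.Millisecond))
}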
@@ -5,17 +5,44 @@ import (
 	"github.com/alist-org/alist/v3/internal/db"
 	"github.com/alist-org/alist/v3/internal/fs"
 	"github.com/alist-org/alist/v3/internal/offline_download/tool"
+	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/internal/setting"
 	"github.com/xhofe/tache"
 )
 
+func taskFilterNegative(num int) int64 {
+	if num < 0 {
+		num = 0
+	}
+	return int64(num)
+}
+
 func InitTaskManager() {
-	fs.UploadTaskManager = tache.NewManager[*fs.UploadTask](tache.WithWorks(conf.Conf.Tasks.Upload.Workers), tache.WithMaxRetry(conf.Conf.Tasks.Upload.MaxRetry)) //upload will not support persist
-	fs.CopyTaskManager = tache.NewManager[*fs.CopyTask](tache.WithWorks(conf.Conf.Tasks.Copy.Workers), tache.WithPersistFunction(db.GetTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant), db.UpdateTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Copy.MaxRetry))
-	tool.DownloadTaskManager = tache.NewManager[*tool.DownloadTask](tache.WithWorks(conf.Conf.Tasks.Download.Workers), tache.WithPersistFunction(db.GetTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant), db.UpdateTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Download.MaxRetry))
-	tool.TransferTaskManager = tache.NewManager[*tool.TransferTask](tache.WithWorks(conf.Conf.Tasks.Transfer.Workers), tache.WithPersistFunction(db.GetTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant), db.UpdateTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Transfer.MaxRetry))
+	fs.UploadTaskManager = tache.NewManager[*fs.UploadTask](tache.WithWorks(setting.GetInt(conf.TaskUploadThreadsNum, conf.Conf.Tasks.Upload.Workers)), tache.WithMaxRetry(conf.Conf.Tasks.Upload.MaxRetry)) //upload will not support persist
+	op.RegisterSettingChangingCallback(func() {
+		fs.UploadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskUploadThreadsNum, conf.Conf.Tasks.Upload.Workers)))
+	})
+	fs.CopyTaskManager = tache.NewManager[*fs.CopyTask](tache.WithWorks(setting.GetInt(conf.TaskCopyThreadsNum, conf.Conf.Tasks.Copy.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant), db.UpdateTaskDataFunc("copy", conf.Conf.Tasks.Copy.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Copy.MaxRetry))
+	op.RegisterSettingChangingCallback(func() {
+		fs.CopyTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskCopyThreadsNum, conf.Conf.Tasks.Copy.Workers)))
+	})
+	tool.DownloadTaskManager = tache.NewManager[*tool.DownloadTask](tache.WithWorks(setting.GetInt(conf.TaskOfflineDownloadThreadsNum, conf.Conf.Tasks.Download.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant), db.UpdateTaskDataFunc("download", conf.Conf.Tasks.Download.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Download.MaxRetry))
+	op.RegisterSettingChangingCallback(func() {
+		tool.DownloadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskOfflineDownloadThreadsNum, conf.Conf.Tasks.Download.Workers)))
+	})
+	tool.TransferTaskManager = tache.NewManager[*tool.TransferTask](tache.WithWorks(setting.GetInt(conf.TaskOfflineDownloadTransferThreadsNum, conf.Conf.Tasks.Transfer.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant), db.UpdateTaskDataFunc("transfer", conf.Conf.Tasks.Transfer.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Transfer.MaxRetry))
+	op.RegisterSettingChangingCallback(func() {
+		tool.TransferTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskOfflineDownloadTransferThreadsNum, conf.Conf.Tasks.Transfer.Workers)))
+	})
 	if len(tool.TransferTaskManager.GetAll()) == 0 { //prevent offline downloaded files from being deleted
 		CleanTempDir()
 	}
-	fs.ArchiveDownloadTaskManager = tache.NewManager[*fs.ArchiveDownloadTask](tache.WithWorks(conf.Conf.Tasks.Decompress.Workers), tache.WithPersistFunction(db.GetTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant), db.UpdateTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Decompress.MaxRetry))
-	fs.ArchiveContentUploadTaskManager.Manager = tache.NewManager[*fs.ArchiveContentUploadTask](tache.WithWorks(conf.Conf.Tasks.DecompressUpload.Workers), tache.WithMaxRetry(conf.Conf.Tasks.DecompressUpload.MaxRetry)) //decompress upload will not support persist
+	fs.ArchiveDownloadTaskManager = tache.NewManager[*fs.ArchiveDownloadTask](tache.WithWorks(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant), db.UpdateTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Decompress.MaxRetry))
+	op.RegisterSettingChangingCallback(func() {
+		fs.ArchiveDownloadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers)))
+	})
+	fs.ArchiveContentUploadTaskManager.Manager = tache.NewManager[*fs.ArchiveContentUploadTask](tache.WithWorks(setting.GetInt(conf.TaskDecompressUploadThreadsNum, conf.Conf.Tasks.DecompressUpload.Workers)), tache.WithMaxRetry(conf.Conf.Tasks.DecompressUpload.MaxRetry)) //decompress upload will not support persist
+	op.RegisterSettingChangingCallback(func() {
+		fs.ArchiveContentUploadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskDecompressUploadThreadsNum, conf.Conf.Tasks.DecompressUpload.Workers)))
+	})
 }
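Note: each manager above is created once with a worker count read from settings, and a registered callback re-applies the count via SetWorkersNumActive whenever settings change, so new thread limits take effect without a restart (this is why the commit upgrades xhofe/tache to v0.1.5). A toy model of that callback wiring — the registry and manager here are self-contained stand-ins for op.RegisterSettingChangingCallback and the tache managers:

package main

import (
	"fmt"
	"sync"
)

var (
	mu        sync.Mutex
	callbacks []func()
	settings  = map[string]int{"upload_task_threads_num": 5}
)

// registerSettingChangingCallback mimics op.RegisterSettingChangingCallback:
// callbacks run every time a setting is saved.
func registerSettingChangingCallback(f func()) {
	mu.Lock()
	defer mu.Unlock()
	callbacks = append(callbacks, f)
}

func saveSetting(key string, value int) {
	mu.Lock()
	settings[key] = value
	cbs := append([]func(){}, callbacks...)
	mu.Unlock()
	for _, f := range cbs {
		f()
	}
}

type manager struct{ workers int64 }

func (m *manager) SetWorkersNumActive(n int64) { m.workers = n }

func main() {
	m := &manager{workers: 5}
	registerSettingChangingCallback(func() {
		m.SetWorkersNumActive(int64(settings["upload_task_threads_num"]))
	})
	saveSetting("upload_task_threads_num", 8)
	fmt.Println("active workers:", m.workers) // active workers: 8
}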
@@ -115,6 +115,18 @@ const (
 	FTPImplicitTLS       = "ftp_implicit_tls"
 	FTPTLSPrivateKeyPath = "ftp_tls_private_key_path"
 	FTPTLSPublicCertPath = "ftp_tls_public_cert_path"
+
+	// traffic
+	TaskOfflineDownloadThreadsNum         = "offline_download_task_threads_num"
+	TaskOfflineDownloadTransferThreadsNum = "offline_download_transfer_task_threads_num"
+	TaskUploadThreadsNum                  = "upload_task_threads_num"
+	TaskCopyThreadsNum                    = "copy_task_threads_num"
+	TaskDecompressDownloadThreadsNum      = "decompress_download_task_threads_num"
+	TaskDecompressUploadThreadsNum        = "decompress_upload_task_threads_num"
+	StreamMaxClientDownloadSpeed          = "max_client_download_speed"
+	StreamMaxClientUploadSpeed            = "max_client_upload_speed"
+	StreamMaxServerDownloadSpeed          = "max_server_download_speed"
+	StreamMaxServerUploadSpeed            = "max_server_upload_speed"
 )
 
 const (
@@ -77,6 +77,29 @@ type Remove interface {
 }

 type Put interface {
+	// Put a file (provided as a FileStreamer) into the driver
+	// Besides the most basic upload functionality, the following features also need to be implemented:
+	// 1. Canceling (when `<-ctx.Done()` returns), by the following methods:
+	//    (1) Use request methods that carry context, such as the following:
+	//        a. http.NewRequestWithContext
+	//        b. resty.Request.SetContext
+	//        c. s3manager.Uploader.UploadWithContext
+	//        d. utils.CopyWithCtx
+	//    (2) Use a `driver.ReaderWithCtx` or a `driver.NewLimitedUploadStream`
+	//    (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process;
+	//        this is typically applicable to chunked uploads.
+	// 2. Submit upload progress (via `up`) in real time. There are three recommended ways as follows:
+	//    (1) Use `utils.CopyWithCtx`
+	//    (2) Use `driver.ReaderUpdatingProgress`
+	//    (3) Use `driver.Progress` with `io.TeeReader`
+	// 3. Slow down upload speed (via `stream.ServerUploadLimit`). It requires you to wrap the read stream
+	//    in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and
+	//    before uploading the file or file chunks. Or you can directly call `driver.ServerUploadLimitWaitN`
+	//    if your file chunks are sufficiently small (less than about 50KB).
+	// NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if
+	// you use an `errgroup.Group` to upload each chunk in parallel, you should consider using a recursive
+	// mutex like `semaphore.Weighted` to limit the maximum number of upload threads, preventing excessive
+	// memory usage caused by buffering too many file chunks awaiting upload.
 	Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) error
 }
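The contract above is easiest to see in code. Below is a minimal sketch of a driver `Put` that covers all three requirements; `MyDriver`, its endpoint, and its token are hypothetical — only the `driver` helpers (`NewLimitedUploadStream`, `ReaderUpdatingProgress`) and the `model` types come from this commit.

```go
package mydriver

import (
	"context"
	"fmt"
	"net/http"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
)

// MyDriver is a hypothetical driver used only for this sketch.
type MyDriver struct {
	uploadURL string // hypothetical upload endpoint
	token     string // hypothetical auth token
}

func (d *MyDriver) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
	// Requirements 2 and 3 (and 1.(2)) in one wrapper chain: ReaderUpdatingProgress
	// reports progress as bytes are read, and NewLimitedUploadStream applies
	// stream.ServerUploadLimit and stops reading once ctx is canceled.
	r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
		Reader:         file, // FileStreamer satisfies ReaderWithSize (io.ReadCloser + GetSize)
		UpdateProgress: up,
	})
	// Requirement 1.(1)a: the request carries the context, so cancellation
	// also aborts the in-flight transfer.
	req, err := http.NewRequestWithContext(ctx, http.MethodPut,
		d.uploadURL+"/"+dstDir.GetID()+"/"+file.GetName(), r)
	if err != nil {
		return err
	}
	req.ContentLength = file.GetSize()
	req.Header.Set("Authorization", "Bearer "+d.token)
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode >= 400 {
		return fmt.Errorf("upload failed: %s", res.Status)
	}
	return nil
}
```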
@@ -113,6 +136,29 @@ type CopyResult interface {
 }

 type PutResult interface {
+	// Put a file (provided as a FileStreamer) into the driver and return the put obj
+	// Besides the most basic upload functionality, the following features also need to be implemented:
+	// 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods:
+	//    (1) Use request methods that carry context, such as the following:
+	//        a. http.NewRequestWithContext
+	//        b. resty.Request.SetContext
+	//        c. s3manager.Uploader.UploadWithContext
+	//        d. utils.CopyWithCtx
+	//    (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream`
+	//    (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process;
+	//        this is typically applicable to chunked uploads.
+	// 2. Submit upload progress (via `up`) in real time. There are three recommended ways as follows:
+	//    (1) Use `utils.CopyWithCtx`
+	//    (2) Use `driver.ReaderUpdatingProgress`
+	//    (3) Use `driver.Progress` with `io.TeeReader`
+	// 3. Slow down upload speed (via `stream.ServerUploadLimit`). It requires you to wrap the read stream
+	//    in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and
+	//    before uploading the file or file chunks. Or you can directly call `driver.ServerUploadLimitWaitN`
+	//    if your file chunks are sufficiently small (less than about 50KB).
+	// NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if
+	// you use an `errgroup.Group` to upload each chunk in parallel, you should consider using a recursive
+	// mutex like `semaphore.Weighted` to limit the maximum number of upload threads, preventing excessive
+	// memory usage caused by buffering too many file chunks awaiting upload.
 	Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) (model.Obj, error)
 }
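The closing NOTE deserves a concrete illustration. Here is a sketch of the `errgroup` + `semaphore.Weighted` pattern it recommends; `uploadChunk`, `chunkSize`, and the concurrency of 4 are assumptions made for the sketch, not part of the commit.

```go
package mydriver

import (
	"context"
	"io"

	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

const chunkSize = 10 << 20 // hypothetical 10 MiB parts

// uploadParts fans chunk uploads out over an errgroup while the semaphore
// bounds how many chunks are buffered/in flight at once, so a slow network
// (or the server upload limiter) cannot cause unbounded memory growth.
// uploadChunk is a hypothetical per-part upload callback.
func uploadParts(ctx context.Context, f io.ReaderAt, size int64,
	uploadChunk func(ctx context.Context, part io.Reader, n, off int64) error) error {
	g, ctx := errgroup.WithContext(ctx)
	sem := semaphore.NewWeighted(4) // at most 4 chunks outstanding
	for off := int64(0); off < size; off += chunkSize {
		off := off
		n := min(chunkSize, size-off)
		// Acquire blocks until a slot frees up; it also returns an error
		// if ctx is canceled while waiting, which covers requirement 1.
		if err := sem.Acquire(ctx, 1); err != nil {
			return err
		}
		g.Go(func() error {
			defer sem.Release(1)
			return uploadChunk(ctx, io.NewSectionReader(f, off, n), n, off)
		})
	}
	return g.Wait()
}
```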
@@ -159,28 +205,6 @@ type ArchiveDecompressResult interface {
 	ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error)
 }

-type UpdateProgress = model.UpdateProgress
-
-type Progress struct {
-	Total int64
-	Done  int64
-	up    UpdateProgress
-}
-
-func (p *Progress) Write(b []byte) (n int, err error) {
-	n = len(b)
-	p.Done += int64(n)
-	p.up(float64(p.Done) / float64(p.Total) * 100)
-	return
-}
-
-func NewProgress(total int64, up UpdateProgress) *Progress {
-	return &Progress{
-		Total: total,
-		up:    up,
-	}
-}
-
 type Reference interface {
 	InitReference(storage Driver) error
 }
@@ -0,0 +1,62 @@
+package driver
+
+import (
+	"context"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/stream"
+	"io"
+)
+
+type UpdateProgress = model.UpdateProgress
+
+type Progress struct {
+	Total int64
+	Done  int64
+	up    UpdateProgress
+}
+
+func (p *Progress) Write(b []byte) (n int, err error) {
+	n = len(b)
+	p.Done += int64(n)
+	p.up(float64(p.Done) / float64(p.Total) * 100)
+	return
+}
+
+func NewProgress(total int64, up UpdateProgress) *Progress {
+	return &Progress{
+		Total: total,
+		up:    up,
+	}
+}
+
+type RateLimitReader = stream.RateLimitReader
+
+type RateLimitWriter = stream.RateLimitWriter
+
+type RateLimitFile = stream.RateLimitFile
+
+func NewLimitedUploadStream(ctx context.Context, r io.Reader) *RateLimitReader {
+	return &RateLimitReader{
+		Reader:  r,
+		Limiter: stream.ServerUploadLimit,
+		Ctx:     ctx,
+	}
+}
+
+func NewLimitedUploadFile(ctx context.Context, f model.File) *RateLimitFile {
+	return &RateLimitFile{
+		File:    f,
+		Limiter: stream.ServerUploadLimit,
+		Ctx:     ctx,
+	}
+}
+
+func ServerUploadLimitWaitN(ctx context.Context, n int) error {
+	return stream.ServerUploadLimit.WaitN(ctx, n)
+}
+
+type ReaderWithCtx = stream.ReaderWithCtx
+
+type ReaderUpdatingProgress = stream.ReaderUpdatingProgress
+
+type SimpleReaderWithSize = stream.SimpleReaderWithSize
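As a usage sketch for the helpers above (the surrounding function and the `putPart` callback are hypothetical): cache the stream to a temp file, then read each part through `NewLimitedUploadFile`, so every chunk read consumes tokens from `stream.ServerUploadLimit` and stops once the context is canceled.

```go
package mydriver

import (
	"context"
	"io"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
)

// uploadInParts is a hypothetical chunked-upload loop. CacheFullInTempFile
// comes from model.FileStreamer; RateLimitFile implements ReadAt, so
// io.NewSectionReader works on the wrapped file.
func uploadInParts(ctx context.Context, file model.FileStreamer, partSize int64,
	putPart func(part io.Reader, off int64) error) error {
	tmpF, err := file.CacheFullInTempFile() // materialize the stream so we can ReadAt
	if err != nil {
		return err
	}
	// Reads through `limited` wait on stream.ServerUploadLimit and return
	// early once ctx is canceled.
	limited := driver.NewLimitedUploadFile(ctx, tmpF)
	size := file.GetSize()
	for off := int64(0); off < size; off += partSize {
		n := min(partSize, size-off)
		if err := putPart(io.NewSectionReader(limited, off, n), off); err != nil {
			return err
		}
	}
	return nil
}
```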
@@ -12,6 +12,7 @@ const (
 	LDAP
 	S3
 	FTP
+	TRAFFIC
 )

 const (
@@ -3,6 +3,7 @@ package net
 import (
 	"compress/gzip"
 	"context"
+	"crypto/tls"
 	"fmt"
 	"io"
 	"mime"
@@ -14,7 +15,6 @@ import (
 	"sync"
 	"time"

-	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/http_range"
@@ -264,7 +264,7 @@ var httpClient *http.Client

 func HttpClient() *http.Client {
 	once.Do(func() {
-		httpClient = base.NewHttpClient()
+		httpClient = NewHttpClient()
 		httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
 			if len(via) >= 10 {
 				return errors.New("stopped after 10 redirects")
@@ -275,3 +275,13 @@ func HttpClient() *http.Client {
 	})
 	return httpClient
 }
+
+func NewHttpClient() *http.Client {
+	return &http.Client{
+		Timeout: time.Hour * 48,
+		Transport: &http.Transport{
+			Proxy:           http.ProxyFromEnvironment,
+			TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
+		},
+	}
+}
@@ -26,9 +26,18 @@ var settingGroupCacheF = func(key string, item []model.SettingItem) {
 	settingGroupCache.Set(key, item, cache.WithEx[[]model.SettingItem](time.Hour))
 }

+var settingChangingCallbacks = make([]func(), 0)
+
+func RegisterSettingChangingCallback(f func()) {
+	settingChangingCallbacks = append(settingChangingCallbacks, f)
+}
+
 func SettingCacheUpdate() {
 	settingCache.Clear()
 	settingGroupCache.Clear()
+	for _, cb := range settingChangingCallbacks {
+		cb()
+	}
 }

 func GetPublicSettingsMap() map[string]string {
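A minimal sketch of how this registry is meant to be consumed (the package and variable below are illustrative; the setting key and default come from this change set): `SettingCacheUpdate` runs after settings are saved, so a registered callback can re-derive any value that depends on a setting.

```go
package example

import (
	"github.com/alist-org/alist/v3/internal/conf"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/internal/setting"
)

// decompressWorkers is an illustrative cached value kept in sync with the
// settings table via the new callback registry.
var decompressWorkers int

func watchDecompressWorkers() {
	reload := func() {
		decompressWorkers = setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers)
	}
	reload() // initial value
	// Invoked by op.SettingCacheUpdate after each settings save.
	op.RegisterSettingChangingCallback(reload)
}
```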
@@ -0,0 +1,152 @@
+package stream
+
+import (
+	"context"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/http_range"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"golang.org/x/time/rate"
+	"io"
+	"time"
+)
+
+type Limiter interface {
+	Limit() rate.Limit
+	Burst() int
+	TokensAt(time.Time) float64
+	Tokens() float64
+	Allow() bool
+	AllowN(time.Time, int) bool
+	Reserve() *rate.Reservation
+	ReserveN(time.Time, int) *rate.Reservation
+	Wait(context.Context) error
+	WaitN(context.Context, int) error
+	SetLimit(rate.Limit)
+	SetLimitAt(time.Time, rate.Limit)
+	SetBurst(int)
+	SetBurstAt(time.Time, int)
+}
+
+var (
+	ClientDownloadLimit Limiter
+	ClientUploadLimit   Limiter
+	ServerDownloadLimit Limiter
+	ServerUploadLimit   Limiter
+)
+
+type RateLimitReader struct {
+	io.Reader
+	Limiter Limiter
+	Ctx     context.Context
+}
+
+func (r *RateLimitReader) Read(p []byte) (n int, err error) {
+	if r.Ctx != nil && utils.IsCanceled(r.Ctx) {
+		return 0, r.Ctx.Err()
+	}
+	n, err = r.Reader.Read(p)
+	if err != nil {
+		return
+	}
+	if r.Limiter != nil {
+		if r.Ctx == nil {
+			r.Ctx = context.Background()
+		}
+		err = r.Limiter.WaitN(r.Ctx, n)
+	}
+	return
+}
+
+func (r *RateLimitReader) Close() error {
+	if c, ok := r.Reader.(io.Closer); ok {
+		return c.Close()
+	}
+	return nil
+}
+
+type RateLimitWriter struct {
+	io.Writer
+	Limiter Limiter
+	Ctx     context.Context
+}
+
+func (w *RateLimitWriter) Write(p []byte) (n int, err error) {
+	if w.Ctx != nil && utils.IsCanceled(w.Ctx) {
+		return 0, w.Ctx.Err()
+	}
+	n, err = w.Writer.Write(p)
+	if err != nil {
+		return
+	}
+	if w.Limiter != nil {
+		if w.Ctx == nil {
+			w.Ctx = context.Background()
+		}
+		err = w.Limiter.WaitN(w.Ctx, n)
+	}
+	return
+}
+
+func (w *RateLimitWriter) Close() error {
+	if c, ok := w.Writer.(io.Closer); ok {
+		return c.Close()
+	}
+	return nil
+}
+
+type RateLimitFile struct {
+	model.File
+	Limiter Limiter
+	Ctx     context.Context
+}
+
+func (r *RateLimitFile) Read(p []byte) (n int, err error) {
+	if r.Ctx != nil && utils.IsCanceled(r.Ctx) {
+		return 0, r.Ctx.Err()
+	}
+	n, err = r.File.Read(p)
+	if err != nil {
+		return
+	}
+	if r.Limiter != nil {
+		if r.Ctx == nil {
+			r.Ctx = context.Background()
+		}
+		err = r.Limiter.WaitN(r.Ctx, n)
+	}
+	return
+}
+
+func (r *RateLimitFile) ReadAt(p []byte, off int64) (n int, err error) {
+	if r.Ctx != nil && utils.IsCanceled(r.Ctx) {
+		return 0, r.Ctx.Err()
+	}
+	n, err = r.File.ReadAt(p, off)
+	if err != nil {
+		return
+	}
+	if r.Limiter != nil {
+		if r.Ctx == nil {
+			r.Ctx = context.Background()
+		}
+		err = r.Limiter.WaitN(r.Ctx, n)
+	}
+	return
+}
+
+type RateLimitRangeReadCloser struct {
+	model.RangeReadCloserIF
+	Limiter Limiter
+}
+
+func (rrc RateLimitRangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+	rc, err := rrc.RangeReadCloserIF.RangeRead(ctx, httpRange)
+	if err != nil {
+		return nil, err
+	}
+	return &RateLimitReader{
+		Reader:  rc,
+		Limiter: rrc.Limiter,
+		Ctx:     ctx,
+	}, nil
+}
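The four package-level limiters are nil by default (no limiting) and are expected to be populated at startup from the new `max_*_speed` settings. The exact bootstrap code is outside this excerpt; the following is a hedged sketch of installing one limiter, assuming a KiB/s setting, relying only on the fact that `*rate.Limiter` from `golang.org/x/time/rate` satisfies the `Limiter` interface above.

```go
package example

import (
	"golang.org/x/time/rate"

	"github.com/alist-org/alist/v3/internal/conf"
	"github.com/alist-org/alist/v3/internal/setting"
	"github.com/alist-org/alist/v3/internal/stream"
)

// initServerDownloadLimit is illustrative; the KiB/s unit and the -1
// "unlimited" sentinel are assumptions for the sketch.
func initServerDownloadLimit() {
	kbps := setting.GetInt(conf.StreamMaxServerDownloadSpeed, -1)
	if kbps > 0 {
		// Refill kbps KiB per second. The burst must be at least the largest
		// single read, because WaitN(ctx, n) fails whenever n exceeds the
		// burst; one second's quota is a safe choice for typical copy buffers.
		stream.ServerDownloadLimit = rate.NewLimiter(rate.Limit(kbps*1024), kbps*1024)
	}
}
```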
@@ -182,14 +182,24 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
 	}
 	if ss.Link != nil {
 		if ss.Link.MFile != nil {
-			ss.mFile = ss.Link.MFile
-			ss.Reader = ss.Link.MFile
-			ss.Closers.Add(ss.Link.MFile)
+			mFile := ss.Link.MFile
+			if _, ok := mFile.(*os.File); !ok {
+				mFile = &RateLimitFile{
+					File:    mFile,
+					Limiter: ServerDownloadLimit,
+					Ctx:     fs.Ctx,
+				}
+			}
+			ss.mFile = mFile
+			ss.Reader = mFile
+			ss.Closers.Add(mFile)
 			return &ss, nil
 		}

 		if ss.Link.RangeReadCloser != nil {
-			ss.rangeReadCloser = ss.Link.RangeReadCloser
+			ss.rangeReadCloser = RateLimitRangeReadCloser{
+				RangeReadCloserIF: ss.Link.RangeReadCloser,
+				Limiter:           ServerDownloadLimit,
+			}
 			ss.Add(ss.rangeReadCloser)
 			return &ss, nil
 		}
@@ -198,6 +208,10 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
 		if err != nil {
 			return nil, err
 		}
+		rrc = RateLimitRangeReadCloser{
+			RangeReadCloserIF: rrc,
+			Limiter:           ServerDownloadLimit,
+		}
 		ss.rangeReadCloser = rrc
 		ss.Add(rrc)
 		return &ss, nil
@@ -259,7 +273,7 @@ func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) {
 	if ss.tmpFile != nil {
 		return ss.tmpFile, nil
 	}
-	if ss.mFile != nil {
+	if _, ok := ss.mFile.(*os.File); ok {
 		return ss.mFile, nil
 	}
 	tmpF, err := utils.CreateTempFile(ss, ss.GetSize())
@@ -276,7 +290,7 @@ func (ss *SeekableStream) CacheFullInTempFileAndUpdateProgress(up model.UpdateProgress) (model.File, error) {
 	if ss.tmpFile != nil {
 		return ss.tmpFile, nil
 	}
-	if ss.mFile != nil {
+	if _, ok := ss.mFile.(*os.File); ok {
 		return ss.mFile, nil
 	}
 	tmpF, err := utils.CreateTempFile(&ReaderUpdatingProgress{
@@ -293,12 +307,13 @@ func (ss *SeekableStream) CacheFullInTempFileAndUpdateProgress(up model.UpdateProgress) (model.File, error) {
 }

 func (f *FileStream) SetTmpFile(r *os.File) {
-	f.Reader = r
+	f.Add(r)
 	f.tmpFile = r
+	f.Reader = r
 }

 type ReaderWithSize interface {
-	io.Reader
+	io.ReadCloser
 	GetSize() int64
 }
@@ -311,6 +326,13 @@ func (r *SimpleReaderWithSize) GetSize() int64 {
 	return r.Size
 }

+func (r *SimpleReaderWithSize) Close() error {
+	if c, ok := r.Reader.(io.Closer); ok {
+		return c.Close()
+	}
+	return nil
+}
+
 type ReaderUpdatingProgress struct {
 	Reader ReaderWithSize
 	model.UpdateProgress
@@ -324,6 +346,10 @@ func (r *ReaderUpdatingProgress) Read(p []byte) (n int, err error) {
 	return n, err
 }

+func (r *ReaderUpdatingProgress) Close() error {
+	return r.Reader.Close()
+}
+
 type SStreamReadAtSeeker interface {
 	model.File
 	GetRawStream() *SeekableStream
@@ -534,7 +560,7 @@ func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {

 func (r *RangeReadReadAtSeeker) Close() error {
 	if r.headCache != nil {
-		r.headCache.close()
+		_ = r.headCache.close()
 	}
 	return r.ss.Close()
 }
@@ -562,17 +588,3 @@ func (f *FileReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
 func (f *FileReadAtSeeker) Close() error {
 	return f.ss.Close()
 }
-
-type ReaderWithCtx struct {
-	io.Reader
-	Ctx context.Context
-}
-
-func (r *ReaderWithCtx) Read(p []byte) (n int, err error) {
-	select {
-	case <-r.Ctx.Done():
-		return 0, r.Ctx.Err()
-	default:
-		return r.Reader.Read(p)
-	}
-}
@@ -3,6 +3,7 @@ package stream
 import (
 	"context"
 	"fmt"
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"io"
 	"net/http"
@@ -76,3 +77,22 @@ func checkContentRange(header *http.Header, offset int64) bool {
 	}
 	return false
 }
+
+type ReaderWithCtx struct {
+	io.Reader
+	Ctx context.Context
+}
+
+func (r *ReaderWithCtx) Read(p []byte) (n int, err error) {
+	if utils.IsCanceled(r.Ctx) {
+		return 0, r.Ctx.Err()
+	}
+	return r.Reader.Read(p)
+}
+
+func (r *ReaderWithCtx) Close() error {
+	if c, ok := r.Reader.(io.Closer); ok {
+		return c.Close()
+	}
+	return nil
+}
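`ReaderWithCtx` is the cancellation-only counterpart to `RateLimitReader`: it checks the context before every `Read`, which makes uploads through context-unaware SDKs abortable. A tiny sketch with a hypothetical SDK call:

```go
package mydriver

import (
	"context"
	"io"

	"github.com/alist-org/alist/v3/internal/stream"
)

// uploadWithSDK wraps the body so a context-unaware upload function
// (sdkUpload is hypothetical) stops between reads once ctx is done.
func uploadWithSDK(ctx context.Context, body io.Reader, sdkUpload func(io.Reader) error) error {
	return sdkUpload(&stream.ReaderWithCtx{Reader: body, Ctx: ctx})
}
```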
@@ -6,6 +6,7 @@ import (
 	"io"
 	"net/http"
 	"net/url"
+	"os"

 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/net"
@@ -23,11 +24,22 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
 		if contentType != "" {
 			w.Header().Set("Content-Type", contentType)
 		}
-		http.ServeContent(w, r, file.GetName(), file.ModTime(), link.MFile)
+		mFile := link.MFile
+		if _, ok := mFile.(*os.File); !ok {
+			mFile = &stream.RateLimitFile{
+				File:    mFile,
+				Limiter: stream.ServerDownloadLimit,
+				Ctx:     r.Context(),
+			}
+		}
+		http.ServeContent(w, r, file.GetName(), file.ModTime(), mFile)
 		return nil
 	} else if link.RangeReadCloser != nil {
 		attachFileName(w, file)
-		net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), link.RangeReadCloser)
+		net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &stream.RateLimitRangeReadCloser{
+			RangeReadCloserIF: link.RangeReadCloser,
+			Limiter:           stream.ServerDownloadLimit,
+		})
 		return nil
 	} else if link.Concurrency != 0 || link.PartSize != 0 {
 		attachFileName(w, file)
@@ -47,7 +59,10 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
 			rc, err := down.Download(ctx, req)
 			return rc, err
 		}
-		net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &model.RangeReadCloser{RangeReader: rangeReader})
+		net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &stream.RateLimitRangeReadCloser{
+			RangeReadCloserIF: &model.RangeReadCloser{RangeReader: rangeReader},
+			Limiter:           stream.ServerDownloadLimit,
+		})
 		return nil
 	} else {
 		//transparent proxy
@@ -65,7 +80,11 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
 		if r.Method == http.MethodHead {
 			return nil
 		}
-		_, err = utils.CopyWithBuffer(w, res.Body)
+		_, err = utils.CopyWithBuffer(w, &stream.RateLimitReader{
+			Reader:  res.Body,
+			Limiter: stream.ServerDownloadLimit,
+			Ctx:     r.Context(),
+		})
 		if err != nil {
 			return err
 		}
@@ -60,7 +60,12 @@ func OpenDownload(ctx context.Context, reqPath string, offset int64) (*FileDownloadProxy, error) {
 }

 func (f *FileDownloadProxy) Read(p []byte) (n int, err error) {
-	return f.reader.Read(p)
+	n, err = f.reader.Read(p)
+	if err != nil {
+		return
+	}
+	err = stream.ClientDownloadLimit.WaitN(f.reader.GetRawStream().Ctx, n)
+	return
 }

 func (f *FileDownloadProxy) Write(p []byte) (n int, err error) {
@@ -59,7 +59,12 @@ func (f *FileUploadProxy) Read(p []byte) (n int, err error) {
 }

 func (f *FileUploadProxy) Write(p []byte) (n int, err error) {
-	return f.buffer.Write(p)
+	n, err = f.buffer.Write(p)
+	if err != nil {
+		return
+	}
+	err = stream.ClientUploadLimit.WaitN(f.ctx, n)
+	return
 }

 func (f *FileUploadProxy) Seek(offset int64, whence int) (int64, error) {
@@ -96,7 +101,6 @@ func (f *FileUploadProxy) Close() error {
 		WebPutAsTask: true,
 	}
 	s.SetTmpFile(f.buffer)
-	s.Closers.Add(f.buffer)
 	_, err = fs.PutAsTask(f.ctx, dir, s)
 	return err
 }
@@ -127,7 +131,7 @@ func (f *FileUploadWithLengthProxy) Read(p []byte) (n int, err error) {
 	return 0, errs.NotSupport
 }

-func (f *FileUploadWithLengthProxy) Write(p []byte) (n int, err error) {
+func (f *FileUploadWithLengthProxy) write(p []byte) (n int, err error) {
 	if f.pipeWriter != nil {
 		select {
 		case e := <-f.errChan:
@@ -174,6 +178,15 @@
 	}
 }

+func (f *FileUploadWithLengthProxy) Write(p []byte) (n int, err error) {
+	n, err = f.write(p)
+	if err != nil {
+		return
+	}
+	err = stream.ClientUploadLimit.WaitN(f.ctx, n)
+	return
+}
+
 func (f *FileUploadWithLengthProxy) Seek(offset int64, whence int) (int64, error) {
 	return 0, errs.NotSupport
 }
@@ -1,7 +1,9 @@
 package middlewares

 import (
+	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/gin-gonic/gin"
+	"io"
 )

 func MaxAllowed(n int) gin.HandlerFunc {
@@ -14,3 +16,37 @@ func MaxAllowed(n int) gin.HandlerFunc {
 		c.Next()
 	}
 }
+
+func UploadRateLimiter(limiter stream.Limiter) gin.HandlerFunc {
+	return func(c *gin.Context) {
+		c.Request.Body = &stream.RateLimitReader{
+			Reader:  c.Request.Body,
+			Limiter: limiter,
+			Ctx:     c,
+		}
+		c.Next()
+	}
+}
+
+type ResponseWriterWrapper struct {
+	gin.ResponseWriter
+	WrapWriter io.Writer
+}
+
+func (w *ResponseWriterWrapper) Write(p []byte) (n int, err error) {
+	return w.WrapWriter.Write(p)
+}
+
+func DownloadRateLimiter(limiter stream.Limiter) gin.HandlerFunc {
+	return func(c *gin.Context) {
+		c.Writer = &ResponseWriterWrapper{
+			ResponseWriter: c.Writer,
+			WrapWriter: &stream.RateLimitWriter{
+				Writer:  c.Writer,
+				Limiter: limiter,
+				Ctx:     c,
+			},
+		}
+		c.Next()
+	}
+}
@@ -4,6 +4,7 @@ import (
 	"github.com/alist-org/alist/v3/cmd/flags"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/message"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/alist-org/alist/v3/server/common"
 	"github.com/alist-org/alist/v3/server/handles"
@@ -38,13 +39,14 @@ func Init(e *gin.Engine) {
 	WebDav(g.Group("/dav"))
 	S3(g.Group("/s3"))

-	g.GET("/d/*path", middlewares.Down, handles.Down)
-	g.GET("/p/*path", middlewares.Down, handles.Proxy)
+	downloadLimiter := middlewares.DownloadRateLimiter(stream.ClientDownloadLimit)
+	g.GET("/d/*path", middlewares.Down, downloadLimiter, handles.Down)
+	g.GET("/p/*path", middlewares.Down, downloadLimiter, handles.Proxy)
 	g.HEAD("/d/*path", middlewares.Down, handles.Down)
 	g.HEAD("/p/*path", middlewares.Down, handles.Proxy)
-	g.GET("/ad/*path", middlewares.Down, handles.ArchiveDown)
-	g.GET("/ap/*path", middlewares.Down, handles.ArchiveProxy)
-	g.GET("/ae/*path", middlewares.Down, handles.ArchiveInternalExtract)
+	g.GET("/ad/*path", middlewares.Down, downloadLimiter, handles.ArchiveDown)
+	g.GET("/ap/*path", middlewares.Down, downloadLimiter, handles.ArchiveProxy)
+	g.GET("/ae/*path", middlewares.Down, downloadLimiter, handles.ArchiveInternalExtract)
 	g.HEAD("/ad/*path", middlewares.Down, handles.ArchiveDown)
 	g.HEAD("/ap/*path", middlewares.Down, handles.ArchiveProxy)
 	g.HEAD("/ae/*path", middlewares.Down, handles.ArchiveInternalExtract)
@@ -173,8 +175,9 @@ func _fs(g *gin.RouterGroup) {
 	g.POST("/copy", handles.FsCopy)
 	g.POST("/remove", handles.FsRemove)
 	g.POST("/remove_empty_directory", handles.FsRemoveEmptyDirectory)
-	g.PUT("/put", middlewares.FsUp, handles.FsStream)
-	g.PUT("/form", middlewares.FsUp, handles.FsForm)
+	uploadLimiter := middlewares.UploadRateLimiter(stream.ClientUploadLimit)
+	g.PUT("/put", middlewares.FsUp, uploadLimiter, handles.FsStream)
+	g.PUT("/form", middlewares.FsUp, uploadLimiter, handles.FsForm)
 	g.POST("/link", middlewares.AuthAdmin, handles.Link)
 	// g.POST("/add_aria2", handles.AddOfflineDownload)
 	// g.POST("/add_qbit", handles.AddQbittorrent)
@@ -3,6 +3,8 @@ package server
 import (
 	"context"
 	"crypto/subtle"
+	"github.com/alist-org/alist/v3/internal/stream"
+	"github.com/alist-org/alist/v3/server/middlewares"
 	"net/http"
 	"path"
 	"strings"
@@ -27,8 +29,10 @@ func WebDav(dav *gin.RouterGroup) {
 		},
 	}
 	dav.Use(WebDAVAuth)
-	dav.Any("/*path", ServeWebDAV)
-	dav.Any("", ServeWebDAV)
+	uploadLimiter := middlewares.UploadRateLimiter(stream.ClientUploadLimit)
+	downloadLimiter := middlewares.DownloadRateLimiter(stream.ClientDownloadLimit)
+	dav.Any("/*path", uploadLimiter, downloadLimiter, ServeWebDAV)
+	dav.Any("", uploadLimiter, downloadLimiter, ServeWebDAV)
 	dav.Handle("PROPFIND", "/*path", ServeWebDAV)
 	dav.Handle("PROPFIND", "", ServeWebDAV)
 	dav.Handle("MKCOL", "/*path", ServeWebDAV)