alist/drivers/123/upload.go

package _123

import (
	"context"
	"fmt"
	"io"
	"math"
	"net/http"
	"strconv"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
)
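
// getS3PreSignedUrls asks the 123pan API (S3PreSignedUrls endpoint) for
// pre-signed S3 upload URLs covering a range of part numbers of a
// multipart upload.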
func (d *Pan123) getS3PreSignedUrls(ctx context.Context, upReq *UploadResp, start, end int) (*S3PreSignedURLs, error) {
	data := base.Json{
		"bucket":          upReq.Data.Bucket,
		"key":             upReq.Data.Key,
		"partNumberEnd":   end,
		"partNumberStart": start,
		"uploadId":        upReq.Data.UploadId,
		"StorageNode":     upReq.Data.StorageNode,
	}
	var s3PreSignedUrls S3PreSignedURLs
	_, err := d.Request(S3PreSignedUrls, http.MethodPost, func(req *resty.Request) {
		req.SetBody(data).SetContext(ctx)
	}, &s3PreSignedUrls)
	if err != nil {
		return nil, err
	}
	return &s3PreSignedUrls, nil
}
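
// getS3Auth requests upload authorization for a single-part (non-multipart)
// upload via the S3Auth endpoint; the response uses the same S3PreSignedURLs
// shape as the multipart case.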
func (d *Pan123) getS3Auth(ctx context.Context, upReq *UploadResp, start, end int) (*S3PreSignedURLs, error) {
	data := base.Json{
		"StorageNode":     upReq.Data.StorageNode,
		"bucket":          upReq.Data.Bucket,
		"key":             upReq.Data.Key,
		"partNumberEnd":   end,
		"partNumberStart": start,
		"uploadId":        upReq.Data.UploadId,
	}
	var s3PreSignedUrls S3PreSignedURLs
	_, err := d.Request(S3Auth, http.MethodPost, func(req *resty.Request) {
		req.SetBody(data).SetContext(ctx)
	}, &s3PreSignedUrls)
	if err != nil {
		return nil, err
	}
	return &s3PreSignedUrls, nil
}
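
// completeS3 notifies the 123pan API (UploadCompleteV2) that all parts have
// been uploaded so the server can finalize the file.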
func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.FileStreamer, isMultipart bool) error {
	data := base.Json{
		"StorageNode": upReq.Data.StorageNode,
		"bucket":      upReq.Data.Bucket,
		"fileId":      upReq.Data.FileId,
		"fileSize":    file.GetSize(),
		"isMultipart": isMultipart,
		"key":         upReq.Data.Key,
		"uploadId":    upReq.Data.UploadId,
	}
	_, err := d.Request(UploadCompleteV2, http.MethodPost, func(req *resty.Request) {
		req.SetBody(data).SetContext(ctx)
	}, nil)
	return err
}
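
// newUpload streams the file to S3 in 16 MiB chunks. A single-chunk file is
// authorized through getS3Auth; larger files fetch pre-signed URLs in batches
// of up to 10 parts via getS3PreSignedUrls. Progress is reported through up,
// and completeS3 finalizes the upload.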
func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, reader io.Reader, up driver.UpdateProgress) error {
	chunkSize := int64(1024 * 1024 * 16)
	// fetch s3 pre signed urls
	chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize)))
	// only 1 batch is allowed
	isMultipart := chunkCount > 1
	batchSize := 1
	getS3UploadUrl := d.getS3Auth
	if isMultipart {
		batchSize = 10
		getS3UploadUrl = d.getS3PreSignedUrls
	}
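	// request upload URLs one batch at a time, then upload each part in the batch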
	for i := 1; i <= chunkCount; i += batchSize {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
		}
		start := i
		end := i + batchSize
		if end > chunkCount+1 {
			end = chunkCount + 1
		}
		s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
		if err != nil {
			return err
		}
		// upload each chunk
		for j := start; j < end; j++ {
			if utils.IsCanceled(ctx) {
				return ctx.Err()
			}
			curSize := chunkSize
			if j == chunkCount {
				curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
			}
			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(reader, chunkSize), curSize, false, getS3UploadUrl)
			if err != nil {
				return err
			}
			up(float64(j) * 100 / float64(chunkCount))
		}
	}
	// complete s3 upload
	return d.completeS3(ctx, upReq, file, chunkCount > 1)
}
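
// uploadS3Chunk PUTs a single chunk to its pre-signed URL. On a 403 response
// it refreshes the pre-signed URLs for the current batch and retries the chunk
// once; any other non-200 status is returned as an error with the response body.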
func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.Reader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
	uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
	if uploadUrl == "" {
		return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
	}
	req, err := http.NewRequest("PUT", uploadUrl, reader)
	if err != nil {
		return err
	}
	req = req.WithContext(ctx)
	req.ContentLength = curSize
	//req.Header.Set("Content-Length", strconv.FormatInt(curSize, 10))
	res, err := base.HttpClient.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
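	// a 403 likely means the pre-signed URL is no longer valid (e.g. expired);
	// refresh the URLs and retry this chunk once (the retry flag prevents
	// endless recursion)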
	if res.StatusCode == http.StatusForbidden {
		if retry {
			return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
		}
		// refresh s3 pre signed urls
		newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
		if err != nil {
			return err
		}
		s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
		// retry
		return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
	}
	if res.StatusCode != http.StatusOK {
		body, err := io.ReadAll(res.Body)
		if err != nil {
			return err
		}
		return fmt.Errorf("upload s3 chunk %d failed, status code: %d, body: %s", cur, res.StatusCode, body)
	}
	return nil
}