alist/drivers/baidu_netdisk/driver.go

package baidu_netdisk

import (
	"context"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"io"
	"math"
	"net/url"
	stdpath "path"
	"strconv"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/errgroup"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/avast/retry-go"
	log "github.com/sirupsen/logrus"
)

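// BaiduNetdisk is the Baidu Netdisk (百度网盘) storage driver. Besides the
// embedded model.Storage and the user-supplied Addition options, it caches the
// parsed upload concurrency and the account VIP type resolved during Init.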
type BaiduNetdisk struct {
	model.Storage
	Addition

	uploadThread int
	vipType      int // membership type: 0 regular user (4G/4M), 1 VIP (10G/16M), 2 SVIP (20G/32M)
}

func (d *BaiduNetdisk) Config() driver.Config {
	return config
}

func (d *BaiduNetdisk) GetAddition() driver.Additional {
	return &d.Addition
}

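// Init parses the configured UploadThread (falling back to 3 when it is outside
// 1-32), defaults UploadAPI to https://d.pcs.baidu.com when empty or invalid,
// and queries the uinfo API for the account's vip_type, which the driver uses
// elsewhere (getSliceSize) to pick the upload slice size.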
func (d *BaiduNetdisk) Init(ctx context.Context) error {
	d.uploadThread, _ = strconv.Atoi(d.UploadThread)
	if d.uploadThread < 1 || d.uploadThread > 32 {
		d.uploadThread, d.UploadThread = 3, "3"
	}

	if _, err := url.Parse(d.UploadAPI); d.UploadAPI == "" || err != nil {
		d.UploadAPI = "https://d.pcs.baidu.com"
	}

	res, err := d.get("/xpan/nas", map[string]string{
		"method": "uinfo",
	}, nil)
	log.Debugf("[baidu] get uinfo: %s", string(res))
	if err != nil {
		return err
	}
	d.vipType = utils.Json.Get(res, "vip_type").ToInt()
	return nil
}

func (d *BaiduNetdisk) Drop(ctx context.Context) error {
	return nil
}

func (d *BaiduNetdisk) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	files, err := d.getFiles(dir.GetPath())
	if err != nil {
		return nil, err
	}
	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
		return fileToObj(src), nil
	})
}

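// Link returns a download URL for the file, using the "crack" endpoint when the
// DownloadAPI option selects it and the official API otherwise.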
func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	if d.DownloadAPI == "crack" {
		return d.linkCrack(file, args)
	}
	return d.linkOfficial(file, args)
}

func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	var newDir File
	_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "", &newDir, 0, 0)
	if err != nil {
		return nil, err
	}
	return fileToObj(newDir), nil
}

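// Move, Rename, Copy and Remove all delegate to the shared manage helper;
// Move and Rename additionally patch the object in memory so the new path and
// name are visible without a re-list.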
func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	data := []base.Json{
		{
			"path":    srcObj.GetPath(),
			"dest":    dstDir.GetPath(),
			"newname": srcObj.GetName(),
		},
	}
	_, err := d.manage("move", data)
	if err != nil {
		return nil, err
	}
	if srcObj, ok := srcObj.(*model.ObjThumb); ok {
		srcObj.SetPath(stdpath.Join(dstDir.GetPath(), srcObj.GetName()))
		srcObj.Modified = time.Now()
		return srcObj, nil
	}
	return nil, nil
}

func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	data := []base.Json{
		{
			"path":    srcObj.GetPath(),
			"newname": newName,
		},
	}
	_, err := d.manage("rename", data)
	if err != nil {
		return nil, err
	}
	if srcObj, ok := srcObj.(*model.ObjThumb); ok {
		srcObj.SetPath(stdpath.Join(stdpath.Dir(srcObj.GetPath()), newName))
		srcObj.Name = newName
		srcObj.Modified = time.Now()
		return srcObj, nil
	}
	return nil, nil
}

func (d *BaiduNetdisk) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
	data := []base.Json{
		{
			"path":    srcObj.GetPath(),
			"dest":    dstDir.GetPath(),
			"newname": srcObj.GetName(),
		},
	}
	_, err := d.manage("copy", data)
	return err
}

func (d *BaiduNetdisk) Remove(ctx context.Context, obj model.Obj) error {
	data := []string{obj.GetPath()}
	_, err := d.manage("delete", data)
	return err
}

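// PutRapid attempts a rapid upload: when the incoming stream already carries a
// full-file MD5, the file is created server-side from that hash alone, without
// transferring any data. It errors out when no usable MD5 is available.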
func (d *BaiduNetdisk) PutRapid(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) (model.Obj, error) {
	contentMd5 := stream.GetHash().GetHash(utils.MD5)
	if len(contentMd5) < utils.MD5.Width {
		return nil, errors.New("invalid hash")
	}

	streamSize := stream.GetSize()
	path := stdpath.Join(dstDir.GetPath(), stream.GetName())
	mtime := stream.ModTime().Unix()
	ctime := stream.CreateTime().Unix()
	blockList, _ := utils.Json.MarshalToString([]string{contentMd5})

	var newFile File
	_, err := d.create(path, streamSize, 0, "", blockList, &newFile, mtime, ctime)
	if err != nil {
		return nil, err
	}
	return fileToObj(newFile), nil
}

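// Put uploads a file in three steps, after first attempting a rapid upload:
//  1. precreate: register path, size and the per-slice MD5 block list, resuming
//     a previously saved upload when one exists for the same content MD5;
//  2. upload every outstanding slice concurrently via uploadSlice;
//  3. create: assemble the uploaded slices into the final file.
// If the user cancels, the remaining block list is persisted so a later Put of
// the same content can resume where it stopped.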
func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	// try rapid upload first
	if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
		return newObj, nil
	}

	tempFile, err := stream.CacheFullInTempFile()
	if err != nil {
		return nil, err
	}

	streamSize := stream.GetSize()
	sliceSize := d.getSliceSize()
	count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1))
	lastBlockSize := streamSize % sliceSize
	if streamSize > 0 && lastBlockSize == 0 {
		lastBlockSize = sliceSize
	}

	// cal md5 for the first 256KB of data
	const SliceSize int64 = 256 * 1024
	// cal md5 of the whole file, of every slice, and of the first 256KB
	blockList := make([]string, 0, count)
	byteSize := sliceSize
	fileMd5H := md5.New()
	sliceMd5H := md5.New()
	sliceMd5H2 := md5.New()
	slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
	for i := 1; i <= count; i++ {
		if utils.IsCanceled(ctx) {
			return nil, ctx.Err()
		}
		if i == count {
			byteSize = lastBlockSize
		}
		_, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
		if err != nil && err != io.EOF {
			return nil, err
		}
		blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil)))
		sliceMd5H.Reset()
	}
	contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
	sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
	blockListStr, _ := utils.Json.MarshalToString(blockList)
	path := stdpath.Join(dstDir.GetPath(), stream.GetName())
	mtime := stream.ModTime().Unix()
	ctime := stream.CreateTime().Unix()

	// step.1 precreate (pre-upload)
	// try to resume from previously saved upload progress
	precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
	if !ok {
		params := map[string]string{
			"method": "precreate",
		}
		form := map[string]string{
			"path":        path,
			"size":        strconv.FormatInt(streamSize, 10),
			"isdir":       "0",
			"autoinit":    "1",
			"rtype":       "3",
			"block_list":  blockListStr,
			"content-md5": contentMd5,
			"slice-md5":   sliceMd5,
		}
		joinTime(form, ctime, mtime)
		log.Debugf("[baidu_netdisk] precreate data: %s", form)
		_, err = d.postForm("/xpan/file", params, form, &precreateResp)
		if err != nil {
			return nil, err
		}
		log.Debugf("%+v", precreateResp)
		if precreateResp.ReturnType == 2 {
			// rapid upload, since the md5 matched on the baidu server
			return fileToObj(precreateResp.File), nil
		}
	}

	// step.2 upload slices
	threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
		retry.Attempts(3),
		retry.Delay(time.Second),
		retry.DelayType(retry.BackOffDelay))
	for i, partseq := range precreateResp.BlockList {
		if utils.IsCanceled(upCtx) {
			break
		}
		i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize
		if partseq+1 == count {
			byteSize = lastBlockSize
		}
		threadG.Go(func(ctx context.Context) error {
			params := map[string]string{
				"method":       "upload",
				"access_token": d.AccessToken,
				"type":         "tmpfile",
				"path":         path,
				"uploadid":     precreateResp.Uploadid,
				"partseq":      strconv.Itoa(partseq),
			}
			err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
			if err != nil {
				return err
			}
			up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
			precreateResp.BlockList[i] = -1
			return nil
		})
	}
	if err = threadG.Wait(); err != nil {
		// if the user cancelled, save the upload progress so it can be resumed later
		if errors.Is(err, context.Canceled) {
			precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
			base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
		}
		return nil, err
	}

	// step.3 create the file from the uploaded slices
	var newFile File
	_, err = d.create(path, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile, mtime, ctime)
	if err != nil {
		return nil, err
	}
	return fileToObj(newFile), nil
}

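// uploadSlice posts one slice to the superfile2 PCS endpoint and inspects the
// JSON body: a non-zero error_code or errno becomes a StreamIncomplete error,
// which the retrying errgroup in Put treats as a signal to retry the slice.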
func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string, fileName string, file io.Reader) error {
	res, err := base.RestyClient.R().
		SetContext(ctx).
		SetQueryParams(params).
		SetFileReader("file", fileName, file).
		Post(d.UploadAPI + "/rest/2.0/pcs/superfile2")
	if err != nil {
		return err
	}
	log.Debugln(res.RawResponse.Status + res.String())
	errCode := utils.Json.Get(res.Body(), "error_code").ToInt()
	errNo := utils.Json.Get(res.Body(), "errno").ToInt()
	if errCode != 0 || errNo != 0 {
		return errs.NewErr(errs.StreamIncomplete, "error in uploading to baidu, will retry. response=%s", res.String())
	}
	return nil
}

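// Compile-time assertion that BaiduNetdisk implements driver.Driver.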
var _ driver.Driver = (*BaiduNetdisk)(nil)