mirror of https://github.com/Xhofe/alist
feat: misc improvements about upload/copy/hash (#5045)
general: add createTime/updateTime support in webdav and some drivers
general: add hash support in some drivers
general: cross-storage rapid-upload support
general: enhance upload to avoid local temp file if possible
general: replace readseekcloser with File interface to speed up upstream operations
feat(aliyun_open): same as above
feat(crypt): add hack for 139cloud

Close #4934
Close #4819

baidu_netdisk needs to improve the upload code to support rapid-upload
parent
9b765ef696
commit
a3748af772
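Editor's note: the thread running through the diffs below is a new stream abstraction. Instead of always draining the request body into a local temp file, a driver now asks the stream for a hash it already carries, a ranged reader, or a cached temp file only when one is actually needed. A minimal sketch of that pattern, assuming the model.FileStreamer methods and utils helpers visible in the diffs (the fullSHA1 helper itself is hypothetical):

// Hypothetical helper illustrating the upload pattern this commit adopts:
// reuse a hash the stream already carries; only cache to a temp file when
// the hash is missing, so rapid-upload candidates skip local buffering.
func fullSHA1(s model.FileStreamer) (string, error) {
	if h := s.GetHash().GetHash(utils.SHA1); len(h) > 0 {
		return h, nil // hash known upstream: no local I/O at all
	}
	tmpF, err := s.CacheFullInTempFile() // cached once; later reads reuse it
	if err != nil {
		return "", err
	}
	return utils.HashFile(utils.SHA1, tmpF)
}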
@@ -2,13 +2,13 @@ package _115

 import (
 	"context"
-	"os"

 	driver115 "github.com/SheltonZhu/115driver/pkg/driver"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/pkg/errors"
+	"strings"
 )

 type Pan115 struct {

@@ -38,15 +38,15 @@ func (d *Pan115) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
 	if err != nil && !errors.Is(err, driver115.ErrNotExist) {
 		return nil, err
 	}
-	return utils.SliceConvert(files, func(src driver115.File) (model.Obj, error) {
-		return src, nil
+	return utils.SliceConvert(files, func(src FileObj) (model.Obj, error) {
+		return &src, nil
 	})
 }

 func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
 	downloadInfo, err := d.client.
 		SetUserAgent(driver115.UA115Browser).
-		Download(file.(driver115.File).PickCode)
+		Download(file.(*FileObj).PickCode)
 	// recover for upload
 	d.client.SetUserAgent(driver115.UA115Desktop)
 	if err != nil {

@@ -83,15 +83,67 @@ func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+	var (
+		fastInfo *driver115.UploadInitResp
+		dirID    = dstDir.GetID()
+	)
+
+	if ok, err := d.client.UploadAvailable(); err != nil || !ok {
+		return err
+	}
+	if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
+		return driver115.ErrUploadTooLarge
+	}
+	//if digest, err = d.client.GetDigestResult(stream); err != nil {
+	//	return err
+	//}
+
+	const PreHashSize int64 = 128 * utils.KB
+	hashSize := PreHashSize
+	if stream.GetSize() < PreHashSize {
+		hashSize = stream.GetSize()
+	}
+	reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
 	if err != nil {
 		return err
 	}
-	defer func() {
-		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
-	}()
-	return d.client.UploadFastOrByMultipart(dstDir.GetID(), stream.GetName(), stream.GetSize(), tempFile)
+	preHash, err := utils.HashReader(utils.SHA1, reader)
+	if err != nil {
+		return err
+	}
+	preHash = strings.ToUpper(preHash)
+	fullHash := stream.GetHash().GetHash(utils.SHA1)
+	if len(fullHash) <= 0 {
+		tmpF, err := stream.CacheFullInTempFile()
+		if err != nil {
+			return err
+		}
+		fullHash, err = utils.HashFile(utils.SHA1, tmpF)
+		if err != nil {
+			return err
+		}
+	}
+	fullHash = strings.ToUpper(fullHash)
+
+	// rapid-upload
+	// note that 115 adds a timeout for rapid-upload,
+	// and a "sig invalid" err is thrown even when the hash is correct after the timeout.
+	if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
+		return err
+	}
+	if matched, err := fastInfo.Ok(); err != nil {
+		return err
+	} else if matched {
+		return nil
+	}
+
+	// rapid upload failed; fall back to a normal upload
+	if stream.GetSize() <= utils.KB { // files under 1KB use the plain upload mode
+		return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID)
+	}
+	// multipart upload
+	return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID)

 }

 var _ driver.Driver = (*Pan115)(nil)
@@ -3,6 +3,20 @@ package _115

 import (
 	"github.com/SheltonZhu/115driver/pkg/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"time"
 )

-var _ model.Obj = (*driver.File)(nil)
+var _ model.Obj = (*FileObj)(nil)
+
+type FileObj struct {
+	driver.File
+}
+
+func (f *FileObj) CreateTime() time.Time {
+	return f.File.CreateTime
+}
+
+func (f *FileObj) GetHash() utils.HashInfo {
+	return utils.NewHashInfo(utils.SHA1, f.Sha1)
+}
@@ -1,10 +1,25 @@
 package _115

 import (
+	"bytes"
 	"crypto/tls"
+	"encoding/json"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/http_range"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/aliyun/aliyun-oss-go-sdk/oss"
+	"github.com/orzogc/fake115uploader/cipher"
+	"io"
+	"net/url"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"

 	"github.com/SheltonZhu/115driver/pkg/driver"
+	driver115 "github.com/SheltonZhu/115driver/pkg/driver"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/pkg/errors"
 )

@@ -41,8 +56,8 @@ func (d *Pan115) login() error {
 	return d.client.LoginCheck()
 }

-func (d *Pan115) getFiles(fileId string) ([]driver.File, error) {
-	res := make([]driver.File, 0)
+func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
+	res := make([]FileObj, 0)
 	if d.PageSize <= 0 {
 		d.PageSize = driver.FileListLimit
 	}

@@ -51,7 +66,357 @@ func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
 		return nil, err
 	}
 	for _, file := range *files {
-		res = append(res, file)
+		res = append(res, FileObj{file})
 	}
 	return res, nil
 }
+
+const (
+	appVer = "2.0.3.6"
+)
+
+func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) {
+	var (
+		ecdhCipher   *cipher.EcdhCipher
+		encrypted    []byte
+		decrypted    []byte
+		encodedToken string
+		err          error
+		target       = "U_1_" + dirID
+		bodyBytes    []byte
+		result       = driver115.UploadInitResp{}
+		fileSizeStr  = strconv.FormatInt(fileSize, 10)
+	)
+	if ecdhCipher, err = cipher.NewEcdhCipher(); err != nil {
+		return nil, err
+	}
+
+	userID := strconv.FormatInt(d.client.UserID, 10)
+	form := url.Values{}
+	form.Set("appid", "0")
+	form.Set("appversion", appVer)
+	form.Set("userid", userID)
+	form.Set("filename", fileName)
+	form.Set("filesize", fileSizeStr)
+	form.Set("fileid", fileID)
+	form.Set("target", target)
+	form.Set("sig", d.client.GenerateSignature(fileID, target))
+
+	signKey, signVal := "", ""
+	for retry := true; retry; {
+		t := driver115.Now()
+
+		if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil {
+			return nil, err
+		}
+
+		params := map[string]string{
+			"k_ec": encodedToken,
+		}
+
+		form.Set("t", t.String())
+		form.Set("token", d.client.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
+		if signKey != "" && signVal != "" {
+			form.Set("sign_key", signKey)
+			form.Set("sign_val", signVal)
+		}
+		if encrypted, err = ecdhCipher.Encrypt([]byte(form.Encode())); err != nil {
+			return nil, err
+		}
+
+		req := d.client.NewRequest().
+			SetQueryParams(params).
+			SetBody(encrypted).
+			SetHeaderVerbatim("Content-Type", "application/x-www-form-urlencoded").
+			SetDoNotParseResponse(true)
+		resp, err := req.Post(driver115.ApiUploadInit)
+		if err != nil {
+			return nil, err
+		}
+		data := resp.RawBody()
+		defer data.Close()
+		if bodyBytes, err = io.ReadAll(data); err != nil {
+			return nil, err
+		}
+		if decrypted, err = ecdhCipher.Decrypt(bodyBytes); err != nil {
+			return nil, err
+		}
+		if err = driver115.CheckErr(json.Unmarshal(decrypted, &result), &result, resp); err != nil {
+			return nil, err
+		}
+		if result.Status == 7 {
+			// Update signKey & signVal
+			signKey = result.SignKey
+			signVal, err = UploadDigestRange(stream, result.SignCheck)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			retry = false
+		}
+		result.SHA1 = fileID
+	}
+
+	return &result, nil
+}
+
+func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result string, err error) {
+	var start, end int64
+	if _, err = fmt.Sscanf(rangeSpec, "%d-%d", &start, &end); err != nil {
+		return
+	}
+
+	length := end - start + 1
+	reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length})
+	hashStr, err := utils.HashReader(utils.SHA1, reader)
+	if err != nil {
+		return "", err
+	}
+	result = strings.ToUpper(hashStr)
+	return
+}
+
+// UploadByMultipart uploads by multipart blocks
+func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error {
+	var (
+		chunks    []oss.FileChunk
+		parts     []oss.UploadPart
+		imur      oss.InitiateMultipartUploadResult
+		ossClient *oss.Client
+		bucket    *oss.Bucket
+		ossToken  *driver115.UploadOSSTokenResp
+		err       error
+	)
+
+	tmpF, err := stream.CacheFullInTempFile()
+	if err != nil {
+		return err
+	}
+
+	options := driver115.DefalutUploadMultipartOptions()
+	if len(opts) > 0 {
+		for _, f := range opts {
+			f(options)
+		}
+	}
+
+	if ossToken, err = d.client.GetOSSToken(); err != nil {
+		return err
+	}
+
+	if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil {
+		return err
+	}
+
+	if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
+		return err
+	}
+
+	// the ossToken expires after an hour, so fetch a fresh one every 50 minutes
+	ticker := time.NewTicker(options.TokenRefreshTime)
+	defer ticker.Stop()
+	// set the timeout
+	timeout := time.NewTimer(options.Timeout)
+
+	if chunks, err = SplitFile(fileSize); err != nil {
+		return err
+	}
+
+	if imur, err = bucket.InitiateMultipartUpload(params.Object,
+		oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
+		oss.UserAgentHeader(driver115.OSSUserAgent),
+	); err != nil {
+		return err
+	}
+
+	wg := sync.WaitGroup{}
+	wg.Add(len(chunks))
+
+	chunksCh := make(chan oss.FileChunk)
+	errCh := make(chan error)
+	UploadedPartsCh := make(chan oss.UploadPart)
+	quit := make(chan struct{})
+
+	// producer
+	go chunksProducer(chunksCh, chunks)
+	go func() {
+		wg.Wait()
+		quit <- struct{}{}
+	}()
+
+	// consumers
+	for i := 0; i < options.ThreadsNum; i++ {
+		go func(threadId int) {
+			defer func() {
+				if r := recover(); r != nil {
+					errCh <- fmt.Errorf("Recovered in %v", r)
+				}
+			}()
+			for chunk := range chunksCh {
+				var part oss.UploadPart // retry on error, up to 3 attempts
+				for retry := 0; retry < 3; retry++ {
+					select {
+					case <-ticker.C:
+						if ossToken, err = d.client.GetOSSToken(); err != nil { // time to refresh the ossToken
+							errCh <- errors.Wrap(err, "刷新token时出现错误")
+						}
+					default:
+					}
+
+					buf := make([]byte, chunk.Size)
+					if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
+						continue
+					}
+
+					b := bytes.NewBuffer(buf)
+					if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
+						break
+					}
+				}
+				if err != nil {
+					errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err))
+				}
+				UploadedPartsCh <- part
+			}
+		}(i)
+	}
+
+	go func() {
+		for part := range UploadedPartsCh {
+			parts = append(parts, part)
+			wg.Done()
+		}
+	}()
+LOOP:
+	for {
+		select {
+		case <-ticker.C:
+			// time to refresh the ossToken
+			if ossToken, err = d.client.GetOSSToken(); err != nil {
+				return err
+			}
+		case <-quit:
+			break LOOP
+		case <-errCh:
+			return err
+		case <-timeout.C:
+			return fmt.Errorf("time out")
+		}
+	}
+
+	// the EOF error comes from xml Unmarshal; the response is actually JSON, so the upload in fact succeeded
+	if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) {
+		// when the filename contains & or <, parsing the response xml fails, but the upload actually succeeded
+		if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
+			return err
+		}
+	}
+	return d.checkUploadStatus(dirID, params.SHA1)
+}
+func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
+	for _, chunk := range chunks {
+		ch <- chunk
+	}
+}
+func (d *Pan115) checkUploadStatus(dirID, sha1 string) error {
+	// verify that the upload succeeded
+	req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8")
+	opts := []driver115.GetFileOptions{
+		driver115.WithOrder(driver115.FileOrderByTime),
+		driver115.WithShowDirEnable(false),
+		driver115.WithAsc(false),
+		driver115.WithLimit(500),
+	}
+	fResp, err := driver115.GetFiles(req, dirID, opts...)
+	if err != nil {
+		return err
+	}
+	for _, fileInfo := range fResp.Files {
+		if fileInfo.Sha1 == sha1 {
+			return nil
+		}
+	}
+	return driver115.ErrUploadFailed
+}
+
+func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
+	for i := int64(1); i < 10; i++ {
+		if fileSize < i*utils.GB { // files smaller than i GB are split into i*1000 parts
+			if chunks, err = SplitFileByPartNum(fileSize, int(i*1000)); err != nil {
+				return
+			}
+			break
+		}
+	}
+	if fileSize > 9*utils.GB { // files larger than 9 GB are split into 10000 parts
+		if chunks, err = SplitFileByPartNum(fileSize, 10000); err != nil {
+			return
+		}
+	}
+	// a single part must not be smaller than 100KB
+	if chunks[0].Size < 100*utils.KB {
+		if chunks, err = SplitFileByPartSize(fileSize, 100*utils.KB); err != nil {
+			return
+		}
+	}
+	return
+}
+
+// SplitFileByPartNum splits a big file into parts by the count of parts.
+// Split the file with the specified parts count; returns the split result when error is nil.
+func SplitFileByPartNum(fileSize int64, chunkNum int) ([]oss.FileChunk, error) {
+	if chunkNum <= 0 || chunkNum > 10000 {
+		return nil, errors.New("chunkNum invalid")
+	}
+
+	if int64(chunkNum) > fileSize {
+		return nil, errors.New("oss: chunkNum invalid")
+	}
+
+	var chunks []oss.FileChunk
+	var chunk = oss.FileChunk{}
+	var chunkN = (int64)(chunkNum)
+	for i := int64(0); i < chunkN; i++ {
+		chunk.Number = int(i + 1)
+		chunk.Offset = i * (fileSize / chunkN)
+		if i == chunkN-1 {
+			chunk.Size = fileSize/chunkN + fileSize%chunkN
+		} else {
+			chunk.Size = fileSize / chunkN
+		}
+		chunks = append(chunks, chunk)
+	}
+
+	return chunks, nil
+}
+
+// SplitFileByPartSize splits a big file into parts by the size of parts.
+// Splits the file by the part size; returns the FileChunk slice when error is nil.
+func SplitFileByPartSize(fileSize int64, chunkSize int64) ([]oss.FileChunk, error) {
+	if chunkSize <= 0 {
+		return nil, errors.New("chunkSize invalid")
+	}
+
+	var chunkN = fileSize / chunkSize
+	if chunkN >= 10000 {
+		return nil, errors.New("Too many parts, please increase part size")
+	}
+
+	var chunks []oss.FileChunk
+	var chunk = oss.FileChunk{}
+	for i := int64(0); i < chunkN; i++ {
+		chunk.Number = int(i + 1)
+		chunk.Offset = i * chunkSize
+		chunk.Size = chunkSize
+		chunks = append(chunks, chunk)
+	}
+
+	if fileSize%chunkSize > 0 {
+		chunk.Number = len(chunks) + 1
+		chunk.Offset = int64(len(chunks)) * chunkSize
+		chunk.Size = fileSize % chunkSize
+		chunks = append(chunks, chunk)
+	}

+	return chunks, nil
+}
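Editor's note: to make the SplitFile policy above concrete, here is a worked example (my arithmetic, not from the commit). A 50 MB upload is first split by count into 1*1000 parts of roughly 51 KB, which trips the 100 KB floor, so SplitFile re-splits it by size into exactly 512 parts of 100 KB each:

// Hypothetical usage of the splitting policy above.
func exampleSplit() error {
	chunks, err := SplitFile(50 * utils.MB) // 52428800 bytes
	if err != nil {
		return err
	}
	// 52428800 / 102400 == 512 with no remainder, so no trailing short part
	fmt.Printf("parts=%d first=%d bytes\n", len(chunks), chunks[0].Size) // parts=512 first=102400 bytes
	return nil
}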
@@ -6,11 +6,6 @@ import (
 	"encoding/base64"
 	"encoding/hex"
 	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"os"

 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"

@@ -22,6 +17,9 @@ import (
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
 	"github.com/go-resty/resty/v2"
 	log "github.com/sirupsen/logrus"
+	"io"
+	"net/http"
+	"net/url"
 )

 type Pan123 struct {

@@ -184,13 +182,12 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	// const DEFAULT int64 = 10485760
 	h := md5.New()
 	// need to calculate md5 of the full content
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+	tempFile, err := stream.CacheFullInTempFile()
 	if err != nil {
 		return err
 	}
 	defer func() {
 		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
 	}()
 	if _, err = io.Copy(h, tempFile); err != nil {
 		return err
@@ -1,6 +1,7 @@
 package _123

 import (
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"net/url"
 	"path"
 	"strconv"

@@ -21,6 +22,14 @@ type File struct {
 	DownloadUrl string `json:"DownloadUrl"`
 }

+func (f File) CreateTime() time.Time {
+	return f.UpdateAt
+}
+
+func (f File) GetHash() utils.HashInfo {
+	return utils.HashInfo{}
+}
+
 func (f File) GetPath() string {
 	return ""
 }
@@ -1,6 +1,7 @@
 package _123Share

 import (
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"net/url"
 	"path"
 	"strconv"

@@ -21,6 +22,10 @@ type File struct {
 	DownloadUrl string `json:"DownloadUrl"`
 }

+func (f File) GetHash() utils.HashInfo {
+	return utils.HashInfo{}
+}
+
 func (f File) GetPath() string {
 	return ""
 }

@@ -36,6 +41,9 @@ func (f File) GetName() string {
 func (f File) ModTime() time.Time {
 	return f.UpdateAt
 }
+func (f File) CreateTime() time.Time {
+	return f.UpdateAt
+}

 func (f File) IsDir() bool {
 	return f.Type == 1
@@ -3,6 +3,7 @@ package _189pc
 import (
 	"encoding/xml"
 	"fmt"
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"sort"
 	"strings"
 	"time"

@@ -175,6 +176,14 @@ type Cloud189File struct {
 	// StarLabel int64 `json:"starLabel"`
 }

+func (c *Cloud189File) CreateTime() time.Time {
+	return time.Time(c.CreateDate)
+}
+
+func (c *Cloud189File) GetHash() utils.HashInfo {
+	return utils.NewHashInfo(utils.MD5, c.Md5)
+}
+
 func (c *Cloud189File) GetSize() int64     { return c.Size }
 func (c *Cloud189File) GetName() string    { return c.Name }
 func (c *Cloud189File) ModTime() time.Time { return time.Time(c.LastOpTime) }

@@ -199,6 +208,14 @@ type Cloud189Folder struct {
 	// StarLabel int64 `json:"starLabel"`
 }

+func (c *Cloud189Folder) CreateTime() time.Time {
+	return time.Time(c.CreateDate)
+}
+
+func (c *Cloud189Folder) GetHash() utils.HashInfo {
+	return utils.HashInfo{}
+}
+
 func (c *Cloud189Folder) GetSize() int64     { return 0 }
 func (c *Cloud189Folder) GetName() string    { return c.Name }
 func (c *Cloud189Folder) ModTime() time.Time { return time.Time(c.LastOpTime) }
@@ -13,7 +13,6 @@ import (
 	"net/http"
 	"net/http/cookiejar"
 	"net/url"
-	"os"
 	"regexp"
 	"sort"
 	"strconv"

@@ -550,13 +549,12 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 // rapid upload
 func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	// the full-file md5 is required, so io.Seek must be supported
-	tempFile, err := utils.CreateTempFile(file.GetReadCloser(), file.GetSize())
+	tempFile, err := file.CacheFullInTempFile()
 	if err != nil {
 		return nil, err
 	}
 	defer func() {
 		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
 	}()

 	var sliceSize = partSize(file.GetSize())

@@ -742,13 +740,12 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, uploadFileId string
 // legacy upload; the family cloud does not support overwrite
 func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	// the full-file md5 is required, so io.Seek must be supported
-	tempFile, err := utils.CreateTempFile(file.GetReadCloser(), file.GetSize())
+	tempFile, err := file.CacheFullInTempFile()
 	if err != nil {
 		return nil, err
 	}
 	defer func() {
 		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
 	}()

 	// calculate md5
@@ -3,6 +3,7 @@ package alist_v3
 import (
 	"context"
 	"fmt"
+	"io"
 	"net/http"
 	"path"
 	"strconv"

@@ -176,7 +177,7 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 			SetHeader("Password", d.MetaPassword).
 			SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).
 			SetContentLength(true).
-			SetBody(stream.GetReadCloser())
+			SetBody(io.ReadCloser(stream))
 	})
 	return err
 }
@@ -7,6 +7,7 @@ import (
 	"encoding/base64"
 	"encoding/hex"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"math"
 	"math/big"

@@ -67,7 +68,7 @@ func (d *AliDrive) Init(ctx context.Context) error {
 		return nil
 	}
 	// init deviceID
-	deviceID := utils.GetSHA256Encode([]byte(d.UserID))
+	deviceID := utils.HashData(utils.SHA256, []byte(d.UserID))
 	// init privateKey
 	privateKey, _ := NewPrivateKeyFromHex(deviceID)
 	state := State{

@@ -163,14 +164,14 @@ func (d *AliDrive) Remove(ctx context.Context, obj model.Obj) error {
 	return err
 }

-func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	file := model.FileStream{
-		Obj:        stream,
-		ReadCloser: stream,
-		Mimetype:   stream.GetMimetype(),
+func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
+	file := stream.FileStream{
+		Obj:      streamer,
+		Reader:   streamer,
+		Mimetype: streamer.GetMimetype(),
 	}
 	const DEFAULT int64 = 10485760
-	var count = int(math.Ceil(float64(stream.GetSize()) / float64(DEFAULT)))
+	var count = int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))

 	partInfoList := make([]base.Json, 0, count)
 	for i := 1; i <= count; i++ {

@@ -187,25 +188,25 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.FileS
 	}

 	var localFile *os.File
-	if fileStream, ok := file.ReadCloser.(*model.FileStream); ok {
-		localFile, _ = fileStream.ReadCloser.(*os.File)
+	if fileStream, ok := file.Reader.(*stream.FileStream); ok {
+		localFile, _ = fileStream.Reader.(*os.File)
 	}
 	if d.RapidUpload {
 		buf := bytes.NewBuffer(make([]byte, 0, 1024))
 		io.CopyN(buf, file, 1024)
-		reqBody["pre_hash"] = utils.GetSHA1Encode(buf.Bytes())
+		reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
 		if localFile != nil {
 			if _, err := localFile.Seek(0, io.SeekStart); err != nil {
 				return err
 			}
 		} else {
 			// splice the head back onto the stream
-			file.ReadCloser = struct {
+			file.Reader = struct {
 				io.Reader
 				io.Closer
 			}{
 				Reader: io.MultiReader(buf, file),
-				Closer: file,
+				Closer: &file,
 			}
 		}
 	} else {

@@ -281,7 +282,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.FileS
 		if _, err = localFile.Seek(0, io.SeekStart); err != nil {
 			return err
 		}
-		file.ReadCloser = localFile
+		file.Reader = localFile
 	}

 	for i, partInfo := range resp.PartInfoList {
@@ -1,6 +1,7 @@
 package aliyundrive_open

 import (
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"time"

 	"github.com/alist-org/alist/v3/internal/model"

@@ -46,6 +47,8 @@ func fileToObj(f File) *model.ObjThumb {
 			Size:     f.Size,
 			Modified: f.UpdatedAt,
 			IsFolder: f.Type == "folder",
+			Ctime:    f.CreatedAt,
+			HashInfo: utils.NewHashInfo(utils.SHA1, f.ContentHash),
 		},
 		Thumbnail: model.Thumbnail{Thumbnail: f.Thumbnail},
 	}
@@ -3,14 +3,12 @@ package aliyundrive_open
 import (
 	"bytes"
 	"context"
-	"crypto/sha1"
 	"encoding/base64"
-	"encoding/hex"
 	"fmt"
+	"github.com/alist-org/alist/v3/pkg/http_range"
 	"io"
 	"math"
 	"net/http"
-	"os"
 	"strconv"
 	"strings"
 	"time"

@@ -33,19 +31,19 @@ func makePartInfos(size int) []base.Json {
 }

 func calPartSize(fileSize int64) int64 {
-	var partSize int64 = 20 * 1024 * 1024
+	var partSize int64 = 20 * utils.MB
 	if fileSize > partSize {
-		if fileSize > 1*1024*1024*1024*1024 { // file Size over 1TB
-			partSize = 5 * 1024 * 1024 * 1024 // file part size 5GB
-		} else if fileSize > 768*1024*1024*1024 { // over 768GB
+		if fileSize > 1*utils.TB { // file Size over 1TB
+			partSize = 5 * utils.GB // file part size 5GB
+		} else if fileSize > 768*utils.GB { // over 768GB
 			partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 part
-		} else if fileSize > 512*1024*1024*1024 { // over 512GB
+		} else if fileSize > 512*utils.GB { // over 512GB
 			partSize = 82463373 // ≈ 78.6432MB
-		} else if fileSize > 384*1024*1024*1024 { // over 384GB
+		} else if fileSize > 384*utils.GB { // over 384GB
 			partSize = 54975582 // ≈ 52.4288MB
-		} else if fileSize > 256*1024*1024*1024 { // over 256GB
+		} else if fileSize > 256*utils.GB { // over 256GB
 			partSize = 41231687 // ≈ 39.3216MB
-		} else if fileSize > 128*1024*1024*1024 { // over 128GB
+		} else if fileSize > 128*utils.GB { // over 128GB
 			partSize = 27487791 // ≈ 26.2144MB
 		}
 	}

@@ -127,17 +125,22 @@ func getProofRange(input string, size int64) (*ProofRange, error) {
 	return pr, nil
 }

-func (d *AliyundriveOpen) calProofCode(file *os.File, fileSize int64) (string, error) {
-	proofRange, err := getProofRange(d.AccessToken, fileSize)
+func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) {
+	proofRange, err := getProofRange(d.AccessToken, stream.GetSize())
 	if err != nil {
 		return "", err
 	}
-	buf := make([]byte, proofRange.End-proofRange.Start)
-	_, err = file.ReadAt(buf, proofRange.Start)
+	length := proofRange.End - proofRange.Start
+	buf := bytes.NewBuffer(make([]byte, 0, length))
+	reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length})
 	if err != nil {
 		return "", err
 	}
-	return base64.StdEncoding.EncodeToString(buf), nil
+	_, err = io.CopyN(buf, reader, length)
+	if err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
 }

 func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {

@@ -145,70 +148,68 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 	// Part Size Unit: Bytes, Default: 20MB,
 	// Maximum number of slices 10,000, ≈195.3125GB
 	var partSize = calPartSize(stream.GetSize())
+	const dateFormat = "2006-01-02T15:04:05.88Z"
+	mtime := stream.ModTime()
+	mtimeStr := mtime.UTC().Format(dateFormat)
+	ctimeStr := stream.CreateTime().UTC().Format(dateFormat)
+
 	createData := base.Json{
 		"drive_id":        d.DriveId,
 		"parent_file_id":  dstDir.GetID(),
 		"name":            stream.GetName(),
 		"type":            "file",
 		"check_name_mode": "ignore",
+		"local_modified_at": mtimeStr,
+		"local_created_at":  ctimeStr,
 	}
 	count := int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
 	createData["part_info_list"] = makePartInfos(count)
 	// rapid upload
-	rapidUpload := stream.GetSize() > 100*1024 && d.RapidUpload
+	rapidUpload := stream.GetSize() > 100*utils.KB && d.RapidUpload
 	if rapidUpload {
 		log.Debugf("[aliyundrive_open] start cal pre_hash")
 		// read 1024 bytes to calculate pre hash
-		buf := bytes.NewBuffer(make([]byte, 0, 1024))
-		_, err := io.CopyN(buf, stream, 1024)
+		reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: 1024})
 		if err != nil {
 			return nil, err
 		}
+		hash, err := utils.HashReader(utils.SHA1, reader)
+		if err != nil {
+			return nil, err
+		}
 		createData["size"] = stream.GetSize()
-		createData["pre_hash"] = utils.GetSHA1Encode(buf.Bytes())
-		// if support seek, seek to start
-		if localFile, ok := stream.(io.Seeker); ok {
-			if _, err := localFile.Seek(0, io.SeekStart); err != nil {
-				return nil, err
-			}
-		} else {
-			// Put spliced head back to stream
-			stream.SetReadCloser(struct {
-				io.Reader
-				io.Closer
-			}{
-				Reader: io.MultiReader(buf, stream.GetReadCloser()),
-				Closer: stream.GetReadCloser(),
-			})
-		}
+		createData["pre_hash"] = hash
 	}
 	var createResp CreateResp
 	_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(createData).SetResult(&createResp)
 	})
+	var tmpF model.File
 	if err != nil {
 		if e.Code != "PreHashMatched" || !rapidUpload {
 			return nil, err
 		}
 		log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload")
-		// convert to local file
-		file, err := utils.CreateTempFile(stream, stream.GetSize())
-		if err != nil {
-			return nil, err
-		}
-		_ = stream.GetReadCloser().Close()
-		stream.SetReadCloser(file)
-		// calculate full hash
-		h := sha1.New()
-		_, err = io.Copy(h, file)
-		if err != nil {
-			return nil, err
-		}
+
+		hi := stream.GetHash()
+		hash := hi.GetHash(utils.SHA1)
+		if len(hash) <= 0 {
+			tmpF, err = stream.CacheFullInTempFile()
+			if err != nil {
+				return nil, err
+			}
+			hash, err = utils.HashFile(utils.SHA1, tmpF)
+			if err != nil {
+				return nil, err
+			}
+		}

 		delete(createData, "pre_hash")
 		createData["proof_version"] = "v1"
 		createData["content_hash_name"] = "sha1"
-		createData["content_hash"] = hex.EncodeToString(h.Sum(nil))
-		createData["proof_code"], err = d.calProofCode(file, stream.GetSize())
+		createData["content_hash"] = hash
+		createData["proof_code"], err = d.calProofCode(stream)
 		if err != nil {
 			return nil, fmt.Errorf("cal proof code error: %s", err.Error())
 		}

@@ -218,17 +219,15 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 		if err != nil {
 			return nil, err
 		}
-		// seek to start
-		if _, err = file.Seek(0, io.SeekStart); err != nil {
-			return nil, err
-		}
 	}

 	if !createResp.RapidUpload {
-		// 2. upload
+		// 2. normal upload
 		log.Debugf("[aliyundive_open] normal upload")

 		preTime := time.Now()
+		var offset, length int64 = 0, partSize
+		//var length
 		for i := 0; i < len(createResp.PartInfoList); i++ {
 			if utils.IsCanceled(ctx) {
 				return nil, ctx.Err()

@@ -241,9 +240,16 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 			}
 			preTime = time.Now()
 		}
-		rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
+		if remain := stream.GetSize() - offset; length > remain {
+			length = remain
+		}
+		//rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
+		rd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
+		if err != nil {
+			return nil, err
+		}
 		err = retry.Do(func() error {
-			rd.Reset()
+			//rd.Reset()
 			return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
 		},
 			retry.Attempts(3),

@@ -252,6 +258,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 		if err != nil {
 			return nil, err
 		}
+		offset += partSize
 	}
 	} else {
 		log.Debugf("[aliyundrive_open] rapid upload success, file id: %s", createResp.FileId)
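Editor's note: a quick sanity check of the calPartSize tiering above (my arithmetic, not from the commit). A 200 GB file falls into the >128 GB tier, so each part is 27487791 bytes (~26.2 MB), keeping the part count well under the 10,000-slice cap:

// Hypothetical check of the tiering: a 200 GB file picks the >128 GB tier.
func examplePartCount() {
	size := int64(200 * utils.GB)
	part := calPartSize(size)
	count := int(math.Ceil(float64(size) / float64(part)))
	fmt.Println(part, count) // 27487791 7813
}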
@@ -9,7 +9,6 @@ import (
 	"io"
 	"math"
 	"net/url"
-	"os"
 	stdpath "path"
 	"strconv"
 	"time"

@@ -31,7 +30,7 @@ type BaiduNetdisk struct {
 	uploadThread int
 }

-const DefaultSliceSize int64 = 4 * 1024 * 1024
+const DefaultSliceSize int64 = 4 * utils.MB

 func (d *BaiduNetdisk) Config() driver.Config {
 	return config

@@ -81,7 +80,7 @@ func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.Link

 func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
 	var newDir File
-	_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "", &newDir)
+	_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "", &newDir, 0, 0)
 	if err != nil {
 		return nil, err
 	}

@@ -148,14 +147,10 @@ func (d *BaiduNetdisk) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+	tempFile, err := stream.CacheFullInTempFile()
 	if err != nil {
 		return nil, err
 	}
-	defer func() {
-		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
-	}()

 	streamSize := stream.GetSize()
 	count := int(math.Max(math.Ceil(float64(streamSize)/float64(DefaultSliceSize)), 1))

@@ -194,15 +189,15 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 	rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName())
 	path := encodeURIComponent(rawPath)
+	mtime := stream.ModTime().Unix()
+	ctime := stream.CreateTime().Unix()

 	// step 1: precreate
 	// try to resume previous progress
 	precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
 	if !ok {
-		data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&rtype=3&block_list=%s&content-md5=%s&slice-md5=%s",
-			path, streamSize,
-			blockListStr,
-			contentMd5, sliceMd5)
+		data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&rtype=3&block_list=%s&content-md5=%s&slice-md5=%s&local_mtime=%d&local_ctime=%d",
+			path, streamSize, blockListStr, contentMd5, sliceMd5, mtime, ctime)
 		params := map[string]string{
 			"method": "precreate",
 		}

@@ -263,7 +258,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F

 	// step 3: create the file
 	var newFile File
-	_, err = d.create(rawPath, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile)
+	_, err = d.create(rawPath, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile, mtime, ctime)
 	if err != nil {
 		return nil, err
 	}
@@ -1,6 +1,7 @@
 package baidu_netdisk

 import (
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"path"
 	"strconv"
 	"time"

@@ -40,11 +41,11 @@ type File struct {
 	Isdir int `json:"isdir"`

 	// list resp
-	//ServerCtime int64 `json:"server_ctime"`
+	ServerCtime int64 `json:"server_ctime"`
 	ServerMtime int64 `json:"server_mtime"`
-	//ServerAtime int64 `json:"server_atime"`
-	//LocalCtime  int64 `json:"local_ctime"`
-	//LocalMtime  int64 `json:"local_mtime"`
+	LocalMtime  int64 `json:"local_mtime"`
+	LocalCtime  int64 `json:"local_ctime"`
+	//ServerAtime int64 `json:"server_atime"` `

 	// only create and precreate resp
 	Ctime int64 `json:"ctime"`

@@ -55,8 +56,11 @@ func fileToObj(f File) *model.ObjThumb {
 	if f.ServerFilename == "" {
 		f.ServerFilename = path.Base(f.Path)
 	}
-	if f.ServerMtime == 0 {
-		f.ServerMtime = int64(f.Mtime)
+	if f.LocalCtime == 0 {
+		f.LocalCtime = f.Ctime
+	}
+	if f.LocalMtime == 0 {
+		f.LocalMtime = f.Mtime
 	}
 	return &model.ObjThumb{
 		Object: model.Object{

@@ -64,8 +68,10 @@ func fileToObj(f File) *model.ObjThumb {
 			Path:     f.Path,
 			Name:     f.ServerFilename,
 			Size:     f.Size,
-			Modified: time.Unix(f.ServerMtime, 0),
+			Modified: time.Unix(f.LocalMtime, 0),
+			Ctime:    time.Unix(f.LocalCtime, 0),
 			IsFolder: f.Isdir == 1,
+			HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
 		},
 		Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3},
 	}
|
@ -198,11 +198,17 @@ func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
|
|||
return d.post("/xpan/file", params, data, nil)
|
||||
}
|
||||
|
||||
func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string, resp any) ([]byte, error) {
|
||||
func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string, resp any, mtime, ctime int64) ([]byte, error) {
|
||||
params := map[string]string{
|
||||
"method": "create",
|
||||
}
|
||||
data := fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3", encodeURIComponent(path), size, isdir)
|
||||
data := ""
|
||||
if mtime == 0 || ctime == 0 {
|
||||
data = fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3", encodeURIComponent(path), size, isdir)
|
||||
} else {
|
||||
data = fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3&local_mtime=%d&local_ctime=%d", encodeURIComponent(path), size, isdir, mtime, ctime)
|
||||
}
|
||||
|
||||
if uploadid != "" {
|
||||
data += fmt.Sprintf("&uploadid=%s&block_list=%s", uploadid, block_list)
|
||||
}
|
||||
|
|
|
@@ -8,7 +8,6 @@ import (
 	"fmt"
 	"io"
 	"math"
-	"os"
 	"regexp"
 	"strconv"
 	"strings"

@@ -229,13 +228,12 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 	}

 	// the full-file md5 is required, so io.Seek must be supported
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+	tempFile, err := stream.CacheFullInTempFile()
 	if err != nil {
 		return nil, err
 	}
 	defer func() {
 		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
 	}()

 	const DEFAULT int64 = 1 << 22
@@ -64,7 +64,7 @@ func renameAlbum(album *Album, newName string) *Album {
 		AlbumID:      album.AlbumID,
 		Tid:          album.Tid,
 		JoinTime:     album.JoinTime,
-		CreateTime:   album.CreateTime,
+		CreationTime: album.CreationTime,
 		Title:        newName,
 		Mtime:        time.Now().Unix(),
 	}
@@ -2,6 +2,7 @@ package baiduphoto

 import (
 	"fmt"
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"time"

 	"github.com/alist-org/alist/v3/internal/model"

@@ -73,6 +74,13 @@ func (c *File) Thumb() string {
 	}
 	return ""
 }
+func (c *File) CreateTime() time.Time {
+	return time.Unix(c.Ctime, 0)
+}
+
+func (c *File) GetHash() utils.HashInfo {
+	return utils.HashInfo{}
+}

 /* album section */
 type (

@@ -88,7 +96,7 @@ type (
 		Tid          int64  `json:"tid"`
 		Title        string `json:"title"`
 		JoinTime     int64  `json:"join_time"`
-		CreateTime   int64  `json:"create_time"`
+		CreationTime int64  `json:"create_time"`
 		Mtime        int64  `json:"mtime"`

 		parseTime *time.Time

@@ -109,6 +117,14 @@ type (
 	}
 )

+func (a *Album) CreateTime() time.Time {
+	return time.Unix(a.CreationTime, 0)
+}
+
+func (a *Album) GetHash() utils.HashInfo {
+	return utils.HashInfo{}
+}
+
 func (a *Album) GetSize() int64  { return 0 }
 func (a *Album) GetName() string { return a.Title }
 func (a *Album) ModTime() time.Time {
@@ -115,7 +115,7 @@ func (d *Cloudreve) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	if stream.GetReadCloser() == http.NoBody {
+	if io.ReadCloser(stream) == http.NoBody {
 		return d.create(ctx, dstDir, stream)
 	}
 	var r DirectoryResp
@@ -3,8 +3,8 @@ package crypt
 import (
 	"context"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
-	"net/http"
 	stdpath "path"
 	"regexp"
 	"strings"

@@ -13,7 +13,6 @@ import (
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/fs"
 	"github.com/alist-org/alist/v3/internal/model"
-	"github.com/alist-org/alist/v3/internal/net"
 	"github.com/alist-org/alist/v3/internal/op"
 	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"

@@ -82,7 +81,6 @@ func (d *Crypt) Init(ctx context.Context) error {
 	}
 	d.cipher = c

-	//c, err := rcCrypt.newCipher(rcCrypt.NameEncryptionStandard, "", "", true, nil)
 	return nil
 }

@@ -128,6 +126,8 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
 				Size:     0,
 				Modified: obj.ModTime(),
 				IsFolder: obj.IsDir(),
+				Ctime:    obj.CreateTime(),
+				// discarding hash as it's encrypted
 			}
 			result = append(result, &objRes)
 		} else {

@@ -147,6 +147,8 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
 				Size:     size,
 				Modified: obj.ModTime(),
 				IsFolder: obj.IsDir(),
+				Ctime:    obj.CreateTime(),
+				// discarding hash as it's encrypted
 			}
 			if !ok {
 				result = append(result, &objRes)

@@ -232,70 +234,53 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 		return nil, err
 	}

-	if remoteLink.RangeReadCloser.RangeReader == nil && remoteLink.ReadSeekCloser == nil && len(remoteLink.URL) == 0 {
+	if remoteLink.RangeReadCloser == nil && remoteLink.MFile == nil && len(remoteLink.URL) == 0 {
 		return nil, fmt.Errorf("the remote storage driver need to be enhanced to support encrytion")
 	}
 	remoteFileSize := remoteFile.GetSize()
-	remoteClosers := utils.NewClosers()
+	remoteClosers := utils.EmptyClosers()
 	rangeReaderFunc := func(ctx context.Context, underlyingOffset, underlyingLength int64) (io.ReadCloser, error) {
 		length := underlyingLength
 		if underlyingLength >= 0 && underlyingOffset+underlyingLength >= remoteFileSize {
 			length = -1
 		}
-		if remoteLink.RangeReadCloser.RangeReader != nil {
-			//remoteRangeReader, err :=
-			remoteReader, err := remoteLink.RangeReadCloser.RangeReader(http_range.Range{Start: underlyingOffset, Length: length})
-			remoteClosers.Add(remoteLink.RangeReadCloser.Closers)
+		rrc := remoteLink.RangeReadCloser
+		if len(remoteLink.URL) > 0 {
+			rangedRemoteLink := &model.Link{
+				URL:    remoteLink.URL,
+				Header: remoteLink.Header,
+			}
+			var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
+			if err != nil {
+				return nil, err
+			}
+			rrc = converted
+		}
+		if rrc != nil {
+			//remoteRangeReader, err :=
+			remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length})
+			remoteClosers.AddClosers(rrc.GetClosers())
 			if err != nil {
 				return nil, err
 			}
 			return remoteReader, nil
 		}
-		if remoteLink.ReadSeekCloser != nil {
-			_, err := remoteLink.ReadSeekCloser.Seek(underlyingOffset, io.SeekStart)
+		if remoteLink.MFile != nil {
+			_, err := remoteLink.MFile.Seek(underlyingOffset, io.SeekStart)
 			if err != nil {
 				return nil, err
 			}
-			//remoteClosers.Add(remoteLink.ReadSeekCloser)
-			//keep reuse same ReadSeekCloser and close at last.
-			return io.NopCloser(remoteLink.ReadSeekCloser), nil
-		}
-		if len(remoteLink.URL) > 0 {
-			rangedRemoteLink := &model.Link{
-				URL:    remoteLink.URL,
-				Header: remoteLink.Header,
-			}
-			response, err := RequestRangedHttp(args.HttpReq, rangedRemoteLink, underlyingOffset, length)
-			//remoteClosers.Add(response.Body)
-			if err != nil {
-				return nil, fmt.Errorf("remote storage http request failure,status: %d err:%s", response.StatusCode, err)
-			}
-			if underlyingOffset == 0 && length == -1 || response.StatusCode == http.StatusPartialContent {
-				return response.Body, nil
-			} else if response.StatusCode == http.StatusOK {
-				log.Warnf("remote http server not supporting range request, expect low perfromace!")
-				readCloser, err := net.GetRangedHttpReader(response.Body, underlyingOffset, length)
-				if err != nil {
-					return nil, err
-				}
-				return readCloser, nil
-			}
-
-			return response.Body, nil
-		}
-		//if remoteLink.Data != nil {
-		//	log.Warnf("remote storage not supporting range request, expect low perfromace!")
-		//	readCloser, err := net.GetRangedHttpReader(remoteLink.Data, underlyingOffset, length)
-		//	remoteCloser = remoteLink.Data
-		//	if err != nil {
-		//		return nil, err
-		//	}
-		//	return readCloser, nil
-		//}
+			//remoteClosers.Add(remoteLink.MFile)
+			//keep reuse same MFile and close at last.
+			remoteClosers.Add(remoteLink.MFile)
+			return io.NopCloser(remoteLink.MFile), nil
+		}

 		return nil, errs.NotSupport
 	}
-	resultRangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
+	resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
 		readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length)
 		if err != nil {
 			return nil, err

@@ -306,7 +291,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 	resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
 	resultLink := &model.Link{
 		Header:          remoteLink.Header,
-		RangeReadCloser: *resultRangeReadCloser,
+		RangeReadCloser: resultRangeReadCloser,
 		Expiration:      remoteLink.Expiration,
 	}

@@ -370,32 +355,32 @@ func (d *Crypt) Remove(ctx context.Context, obj model.Obj) error {
 	return op.Remove(ctx, d.remoteStorage, remoteActualPath)
 }

-func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
 	dstDirActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), true)
 	if err != nil {
 		return fmt.Errorf("failed to convert path to remote path: %w", err)
 	}

-	in := stream.GetReadCloser()
 	// Encrypt the data into wrappedIn
-	wrappedIn, err := d.cipher.EncryptData(in)
+	wrappedIn, err := d.cipher.EncryptData(streamer)
 	if err != nil {
 		return fmt.Errorf("failed to EncryptData: %w", err)
 	}

-	streamOut := &model.FileStream{
+	// doesn't support seekableStream, since rapid-upload is not working for encrypted data
+	streamOut := &stream.FileStream{
 		Obj: &model.Object{
-			ID:       stream.GetID(),
-			Path:     stream.GetPath(),
-			Name:     d.cipher.EncryptFileName(stream.GetName()),
-			Size:     d.cipher.EncryptedSize(stream.GetSize()),
-			Modified: stream.ModTime(),
-			IsFolder: stream.IsDir(),
+			ID:       streamer.GetID(),
+			Path:     streamer.GetPath(),
+			Name:     d.cipher.EncryptFileName(streamer.GetName()),
+			Size:     d.cipher.EncryptedSize(streamer.GetSize()),
+			Modified: streamer.ModTime(),
+			IsFolder: streamer.IsDir(),
 		},
-		ReadCloser:   io.NopCloser(wrappedIn),
+		Reader:       wrappedIn,
 		Mimetype:     "application/octet-stream",
-		WebPutAsTask: stream.NeedStore(),
-		Old:          stream.GetOld(),
+		WebPutAsTask: streamer.NeedStore(),
+		Exist:        streamer.GetExist(),
 	}
 	err = op.Put(ctx, d.remoteStorage, dstDirActualPath, streamOut, up, false)
 	if err != nil {
@@ -1,24 +1,13 @@
 package crypt

 import (
-	"net/http"
 	stdpath "path"
 	"path/filepath"
 	"strings"

 	"github.com/alist-org/alist/v3/internal/model"
-	"github.com/alist-org/alist/v3/internal/net"
 	"github.com/alist-org/alist/v3/internal/op"
-	"github.com/alist-org/alist/v3/pkg/http_range"
 )

-func RequestRangedHttp(r *http.Request, link *model.Link, offset, length int64) (*http.Response, error) {
-	header := net.ProcessHeader(http.Header{}, link.Header)
-	header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header)
-
-	return net.RequestHttp("GET", header, link.URL)
-}
-
 // will give the best guessing based on the path
 func guessPath(path string) (isFolder, secondTry bool) {
 	if strings.HasSuffix(path, "/") {
@@ -66,7 +66,7 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m

 	r := NewFTPFileReader(d.conn, file.GetPath())
 	link := &model.Link{
-		ReadSeekCloser: r,
+		MFile: r,
 	}
 	return link, nil
 }
@@ -30,11 +30,12 @@ func (d *FTP) login() error {
 	return nil
 }

-// An FTP file reader that implements io.ReadSeekCloser for seeking.
+// An FTP file reader that implements io.MFile for seeking.
 type FTPFileReader struct {
 	conn         *ftp.ServerConn
 	resp         *ftp.Response
 	offset       int64
+	readAtOffset int64
 	mu           sync.Mutex
 	path         string
 }

@@ -50,15 +51,33 @@ func (r *FTPFileReader) Read(buf []byte) (n int, err error) {
 	r.mu.Lock()
 	defer r.mu.Unlock()

+	n, err = r.ReadAt(buf, r.offset)
+	r.offset += int64(n)
+	return
+}
+func (r *FTPFileReader) ReadAt(buf []byte, off int64) (n int, err error) {
+	if off < 0 {
+		return -1, os.ErrInvalid
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	if off != r.readAtOffset {
+		//have to restart the connection, to correct offset
+		_ = r.resp.Close()
+		r.resp = nil
+	}
+
 	if r.resp == nil {
-		r.resp, err = r.conn.RetrFrom(r.path, uint64(r.offset))
+		r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
+		r.readAtOffset = off
 		if err != nil {
 			return 0, err
 		}
 	}

 	n, err = r.resp.Read(buf)
-	r.offset += int64(n)
+	r.readAtOffset += int64(n)
 	return
 }

@@ -92,12 +111,6 @@ func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
 		return oldOffset, nil
 	}
 	r.offset = newOffset
-
-	if r.resp != nil {
-		// close the existing ftp data connection, otherwise the next read will be blocked
-		_ = r.resp.Close() // we do not care about whether it returns an error
-		r.resp = nil
-	}
 	return newOffset, nil
 }
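Editor's note: the ReadAt change above works because an FTP RETR stream is strictly sequential, so random access has to be emulated by reopening the transfer at the requested offset; contiguous reads stay on one data connection, while any non-contiguous offset costs a reconnect. A hypothetical access pattern illustrating that cost:

// Hypothetical usage of FTPFileReader: the first two reads are contiguous
// and share one data connection; the third jumps backwards, which closes
// the response and forces a fresh RetrFrom at offset 0.
func exampleAccess(r *FTPFileReader) error {
	buf := make([]byte, 1024)
	if _, err := r.ReadAt(buf, 0); err != nil { // opens RETR at offset 0
		return err
	}
	if _, err := r.ReadAt(buf, 1024); err != nil { // contiguous: reuses the connection
		return err
	}
	_, err := r.ReadAt(buf, 0) // non-contiguous: restarts the transfer
	return err
}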
@@ -112,7 +112,7 @@ func (d *GoogleDrive) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	obj := stream.GetOld()
+	obj := stream.GetExist()
 	var (
 		e   Error
 		url string

@@ -158,7 +158,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 	putUrl := res.Header().Get("location")
 	if stream.GetSize() < d.ChunkSize*1024*1024 {
 		_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
-			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream.GetReadCloser())
+			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream)
 		}, nil)
 	} else {
 		err = d.chunkUpload(ctx, stream, putUrl)
@@ -5,7 +5,7 @@ import (
 	"crypto/x509"
 	"encoding/pem"
 	"fmt"
-	"io"
+	"github.com/alist-org/alist/v3/pkg/http_range"
 	"io/ioutil"
 	"net/http"
 	"os"

@@ -216,25 +216,29 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {

 func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
 	var defaultChunkSize = d.ChunkSize * 1024 * 1024
-	var finish int64 = 0
-	for finish < stream.GetSize() {
+	var offset int64 = 0
+	for offset < stream.GetSize() {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
 		}
-		chunkSize := stream.GetSize() - finish
+		chunkSize := stream.GetSize() - offset
 		if chunkSize > defaultChunkSize {
 			chunkSize = defaultChunkSize
 		}
-		_, err := d.request(url, http.MethodPut, func(req *resty.Request) {
+		reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
+		if err != nil {
+			return err
+		}
+		_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
 			req.SetHeaders(map[string]string{
 				"Content-Length": strconv.FormatInt(chunkSize, 10),
-				"Content-Range":  fmt.Sprintf("bytes %d-%d/%d", finish, finish+chunkSize-1, stream.GetSize()),
-			}).SetBody(io.LimitReader(stream.GetReadCloser(), chunkSize)).SetContext(ctx)
+				"Content-Range":  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
+			}).SetBody(reader).SetContext(ctx)
 		}, nil)
 		if err != nil {
 			return err
 		}
-		finish += chunkSize
+		offset += chunkSize
 	}
 	return nil
 }
@@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 	}

 	resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) {
-		req.SetBody(stream.GetReadCloser()).SetContext(ctx)
+		req.SetBody(stream).SetContext(ctx)
 	}, nil, postHeaders)

 	if err != nil {
@ -3,6 +3,8 @@ package lanzou
import (
"errors"
"fmt"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"time"
)

@ -18,6 +20,9 @@ type RespInfo[T any] struct {
Info T `json:"info"`
}

var _ model.Obj = (*FileOrFolder)(nil)
var _ model.Obj = (*FileOrFolderByShareUrl)(nil)

type FileOrFolder struct {
Name string `json:"name"`
//Onof string `json:"onof"` // whether an extraction code is set

@ -49,6 +54,14 @@ type FileOrFolder struct {
shareInfo *FileShare `json:"-"`
}

func (f *FileOrFolder) CreateTime() time.Time {
return f.ModTime()
}

func (f *FileOrFolder) GetHash() utils.HashInfo {
return utils.HashInfo{}
}

func (f *FileOrFolder) GetID() string {
if f.IsDir() {
return f.FolID

@ -130,6 +143,14 @@ type FileOrFolderByShareUrl struct {
repairFlag bool `json:"-"`
}

func (f *FileOrFolderByShareUrl) CreateTime() time.Time {
return f.ModTime()
}

func (f *FileOrFolderByShareUrl) GetHash() utils.HashInfo {
return utils.HashInfo{}
}

func (f *FileOrFolderByShareUrl) GetID() string { return f.ID }
func (f *FileOrFolderByShareUrl) GetName() string { return f.NameAll }
func (f *FileOrFolderByShareUrl) GetPath() string { return "" }
@ -5,15 +5,6 @@ import (
"context"
"errors"
"fmt"
"io"
"io/fs"
"net/http"
"os"
stdpath "path"
"path/filepath"
"strconv"
"strings"

"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"

@ -21,7 +12,15 @@ import (
"github.com/alist-org/alist/v3/internal/sign"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common"
"github.com/djherbis/times"
_ "golang.org/x/image/webp"
"io/fs"
"net/http"
"os"
stdpath "path"
"path/filepath"
"strconv"
"strings"
)

type Local struct {

@ -102,6 +101,14 @@ func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) mo
if !isFolder {
size = f.Size()
}
ctime := f.ModTime()
t, err := times.Stat(stdpath.Join(fullPath, f.Name()))
if err == nil {
if t.HasBirthTime() {
ctime = t.BirthTime()
}
}

file := model.ObjThumb{
Object: model.Object{
Path: filepath.Join(fullPath, f.Name()),

@ -109,6 +116,7 @@ func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) mo
Modified: f.ModTime(),
Size: size,
IsFolder: isFolder,
Ctime: ctime,
},
Thumbnail: model.Thumbnail{
Thumbnail: thumb,

@ -171,9 +179,9 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if err != nil {
return nil, err
}
link.ReadSeekCloser = open
link.MFile = open
} else {
link.ReadSeekCloser = utils.ReadSeekerNopCloser(bytes.NewReader(buf.Bytes()))
link.MFile = model.NewNopMFile(bytes.NewReader(buf.Bytes()))
//link.Header.Set("Content-Length", strconv.Itoa(buf.Len()))
}
} else {

@ -181,15 +189,7 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if err != nil {
return nil, err
}
link.ReadSeekCloser = struct {
io.Reader
io.Seeker
io.Closer
}{
Reader: open,
Seeker: open,
Closer: open,
}
link.MFile = open
}
return &link, nil
}
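The new ctime lookup leans on github.com/djherbis/times, which reports a birth time only on filesystems that record one; falling back to ModTime keeps CreateTime well-defined everywhere else. A self-contained sketch of the same logic:

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/djherbis/times"
)

// createTime falls back to mtime when the platform exposes no birth time.
func createTime(path string, info os.FileInfo) time.Time {
	if t, err := times.Stat(path); err == nil && t.HasBirthTime() {
		return t.BirthTime()
	}
	return info.ModTime()
}

func main() {
	info, _ := os.Stat("go.mod")
	fmt.Println(createTime("go.mod", info))
}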
@ -7,7 +7,6 @@ import (
"fmt"
"io"
"net/http"
"os"
"strconv"
"time"

@ -181,13 +180,12 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
if err != nil {
return err
}
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
uploader := s3manager.NewUploader(s)
input := &s3manager.UploadInput{
@ -42,7 +42,7 @@ func (d *Mega) Drop(ctx context.Context) error {

func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if node, ok := dir.(*MegaNode); ok {
nodes, err := d.c.FS.GetChildren(node.Node)
nodes, err := d.c.FS.GetChildren(node.n)
if err != nil {
return nil, err
}

@ -56,7 +56,7 @@ func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]
return res, nil
}
log.Errorf("can't convert: %+v", dir)
return nil, fmt.Errorf("unable to convert dir to mega node")
return nil, fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {

@ -68,21 +68,21 @@ func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if node, ok := file.(*MegaNode); ok {

//down, err := d.c.NewDownload(node.Node)
//down, err := d.c.NewDownload(n.Node)
//if err != nil {
// return nil, fmt.Errorf("open download file failed: %w", err)
//}

size := file.GetSize()
var finalClosers utils.Closers
resultRangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
length := httpRange.Length
if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size {
length = -1
}
var down *mega.Download
err := utils.Retry(3, time.Second, func() (err error) {
down, err = d.c.NewDownload(node.Node)
down, err = d.c.NewDownload(node.n)
return err
})
if err != nil {

@ -97,37 +97,37 @@ func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*

return readers.NewLimitedReadCloser(oo, length), nil
}
resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: &finalClosers}
resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: finalClosers}
resultLink := &model.Link{
RangeReadCloser: *resultRangeReadCloser,
RangeReadCloser: resultRangeReadCloser,
}
return resultLink, nil
}
return nil, fmt.Errorf("unable to convert dir to mega node")
return nil, fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if parentNode, ok := parentDir.(*MegaNode); ok {
_, err := d.c.CreateDir(dirName, parentNode.Node)
_, err := d.c.CreateDir(dirName, parentNode.n)
return err
}
return fmt.Errorf("unable to convert dir to mega node")
return fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
if srcNode, ok := srcObj.(*MegaNode); ok {
if dstNode, ok := dstDir.(*MegaNode); ok {
return d.c.Move(srcNode.Node, dstNode.Node)
return d.c.Move(srcNode.n, dstNode.n)
}
}
return fmt.Errorf("unable to convert dir to mega node")
return fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if srcNode, ok := srcObj.(*MegaNode); ok {
return d.c.Rename(srcNode.Node, newName)
return d.c.Rename(srcNode.n, newName)
}
return fmt.Errorf("unable to convert dir to mega node")
return fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {

@ -136,14 +136,14 @@ func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {

func (d *Mega) Remove(ctx context.Context, obj model.Obj) error {
if node, ok := obj.(*MegaNode); ok {
return d.c.Delete(node.Node, false)
return d.c.Delete(node.n, false)
}
return fmt.Errorf("unable to convert dir to mega node")
return fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
if dstNode, ok := dstDir.(*MegaNode); ok {
u, err := d.c.NewUpload(dstNode.Node, stream.GetName(), stream.GetSize())
u, err := d.c.NewUpload(dstNode.n, stream.GetName(), stream.GetSize())
if err != nil {
return err
}

@ -175,7 +175,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
_, err = u.Finish()
return err
}
return fmt.Errorf("unable to convert dir to mega node")
return fmt.Errorf("unable to convert dir to mega n")
}

//func (d *Mega) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
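The Link rewrite adapts Mega's offset-based download API to the new context-aware RangeReaderFunc. Reduced to its essentials, the closure shape looks roughly like this (openFrom is hypothetical; the real code retries d.c.NewDownload and reads the file chunk by chunk):

// Sketch: turning an offset-based download API into a RangeReaderFunc.
rangeReader := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) {
	length := r.Length
	if length >= 0 && r.Start+length >= size {
		length = -1 // open-ended: read to EOF, as in the driver code above
	}
	rc, err := openFrom(r.Start) // hypothetical helper returning a positioned io.ReadCloser
	if err != nil {
		return nil, err
	}
	return readers.NewLimitedReadCloser(rc, length), nil
}
link := &model.Link{RangeReadCloser: &model.RangeReadCloser{RangeReader: rangeReader}}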
@ -1,6 +1,7 @@
package mega

import (
"github.com/alist-org/alist/v3/pkg/utils"
"time"

"github.com/alist-org/alist/v3/internal/model"

@ -8,29 +9,36 @@ import (
)

type MegaNode struct {
*mega.Node
n *mega.Node
}

//func (m *MegaNode) GetSize() int64 {
// //TODO implement me
// panic("implement me")
//}
//
//func (m *MegaNode) GetName() string {
// //TODO implement me
// panic("implement me")
//}
func (m *MegaNode) GetSize() int64 {
return m.n.GetSize()
}

func (m *MegaNode) GetName() string {
return m.n.GetName()
}

func (m *MegaNode) CreateTime() time.Time {
return m.n.GetTimeStamp()
}

func (m *MegaNode) GetHash() utils.HashInfo {
// Mega uses MD5, but the original file's hash can't be recovered since the file is stored encrypted in the cloud
return utils.HashInfo{}
}

func (m *MegaNode) ModTime() time.Time {
return m.GetTimeStamp()
return m.n.GetTimeStamp()
}

func (m *MegaNode) IsDir() bool {
return m.GetType() == mega.FOLDER || m.GetType() == mega.ROOT
return m.n.GetType() == mega.FOLDER || m.n.GetType() == mega.ROOT
}

func (m *MegaNode) GetID() string {
return m.GetHash()
return m.n.GetHash()
}

func (m *MegaNode) GetPath() string {
@ -6,7 +6,6 @@ import (
"fmt"
"io"
"net/http"
"os"
"strconv"
"time"

@ -231,13 +230,12 @@ func (d *MoPan) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
file, err := utils.CreateTempFile(stream, stream.GetSize())
file, err := stream.CacheFullInTempFile()
if err != nil {
return nil, err
}
defer func() {
_ = file.Close()
_ = os.Remove(file.Name())
}()

// step.1
@ -5,7 +5,6 @@ import (
"fmt"
"io"
"net/http"
"os"
"strings"

"github.com/alist-org/alist/v3/drivers/base"

@ -124,13 +123,12 @@ func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
// calc gcid
sha1Str, err := getGcid(tempFile, stream.GetSize())
@ -7,7 +7,6 @@ import (
"encoding/hex"
"io"
"net/http"
"os"
"time"

"github.com/alist-org/alist/v3/drivers/base"

@ -75,7 +74,7 @@ func (d *QuarkOrUC) Link(ctx context.Context, file model.Obj, args model.LinkArg
"User-Agent": []string{ua},
},
Concurrency: 2,
PartSize: 10 * 1024 * 1024,
PartSize: 10 * utils.MB,
}, nil
}

@ -136,13 +135,12 @@ func (d *QuarkOrUC) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
m := md5.New()
_, err = io.Copy(m, tempFile)
@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"github.com/alist-org/alist/v3/internal/stream"
"io"
"net/url"
stdpath "path"

@ -96,12 +97,12 @@ func (d *S3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*mo
func (d *S3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
return d.Put(ctx, &model.Object{
Path: stdpath.Join(parentDir.GetPath(), dirName),
}, &model.FileStream{
}, &stream.FileStream{
Obj: &model.Object{
Name: getPlaceholderName(d.Placeholder),
Modified: time.Now(),
},
ReadCloser: io.NopCloser(bytes.NewReader([]byte{})),
Reader: io.NopCloser(bytes.NewReader([]byte{})),
Mimetype: "application/octet-stream",
}, func(int) {})
}
@ -56,7 +56,7 @@ func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
return nil, err
}
link := &model.Link{
ReadSeekCloser: remoteFile,
MFile: remoteFile,
}
return link, nil
}
@ -61,6 +61,7 @@ func (d *SMB) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]m
Modified: f.ModTime(),
Size: f.Size(),
IsFolder: f.IsDir(),
Ctime: f.(*smb2.FileStat).CreationTime,
},
}
files = append(files, &file)

@ -79,7 +80,7 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
return nil, err
}
link := &model.Link{
ReadSeekCloser: remoteFile,
MFile: remoteFile,
}
d.updateLastConnTime()
return link, nil
@ -11,7 +11,6 @@ import (
log "github.com/sirupsen/logrus"
"io"
"math"
"os"
stdpath "path"
"strconv"
"strings"

@ -116,13 +115,12 @@ func (d *Terabox) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
var Default int64 = 4 * 1024 * 1024
defaultByteData := make([]byte, Default)
@ -5,7 +5,6 @@ import (
"fmt"
"io"
"net/http"
"os"
"strings"

"github.com/alist-org/alist/v3/drivers/base"

@ -333,13 +332,12 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error {
}

func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()

gcid, err := getGcid(tempFile, stream.GetSize())
@ -2,6 +2,8 @@ package thunder

import (
"fmt"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"strconv"
"time"
)

@ -84,6 +86,8 @@ type Link struct {
Type string `json:"type"`
}

var _ model.Obj = (*Files)(nil)

type Files struct {
Kind string `json:"kind"`
ID string `json:"id"`

@ -146,6 +150,14 @@ type Files struct {
//Collection interface{} `json:"collection"`
}

func (c *Files) CreateTime() time.Time {
return c.CreatedTime
}

func (c *Files) GetHash() utils.HashInfo {
return utils.HashInfo{}
}

func (c *Files) GetSize() int64 { size, _ := strconv.ParseInt(c.Size, 10, 64); return size }
func (c *Files) GetName() string { return c.Name }
func (c *Files) ModTime() time.Time { return c.ModifiedTime }
@ -52,18 +52,29 @@ func (d *Virtual) List(ctx context.Context, dir model.Obj, args model.ListArgs)
return res, nil
}

type nopReadSeekCloser struct {
type DummyMFile struct {
io.Reader
}

func (nopReadSeekCloser) Seek(offset int64, whence int) (int64, error) {
func (f DummyMFile) Read(p []byte) (n int, err error) {
return f.Reader.Read(p)
}

func (f DummyMFile) ReadAt(p []byte, off int64) (n int, err error) {
return f.Reader.Read(p)
}

func (f DummyMFile) Close() error {
return nil
}

func (DummyMFile) Seek(offset int64, whence int) (int64, error) {
return offset, nil
}
func (nopReadSeekCloser) Close() error { return nil }

func (d *Virtual) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
return &model.Link{
ReadSeekCloser: nopReadSeekCloser{io.LimitReader(random.Rand, file.GetSize())},
MFile: DummyMFile{Reader: random.Rand},
}, nil
}
@ -6,7 +6,6 @@ import (
"io"
"math"
"net/http"
"os"
"strconv"
"time"

@ -310,13 +309,12 @@ func (d *WeiYun) Remove(ctx context.Context, obj model.Obj) error {

func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
if folder, ok := dstDir.(*Folder); ok {
file, err := utils.CreateTempFile(stream, stream.GetSize())
file, err := stream.CacheFullInTempFile()
if err != nil {
return nil, err
}
defer func() {
_ = file.Close()
_ = os.Remove(file.Name())
}()

// step 1.
@ -1,6 +1,7 @@
package weiyun

import (
"github.com/alist-org/alist/v3/pkg/utils"
"time"

weiyunsdkgo "github.com/foxxorcat/weiyun-sdk-go"

@ -21,12 +22,27 @@ func (f *File) GetPath() string { return "" }
func (f *File) GetPKey() string {
return f.PFolder.DirKey
}
func (f *File) CreateTime() time.Time {
return time.Time(f.FileCtime)
}

func (f *File) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.SHA1, f.FileSha)
}

type Folder struct {
PFolder *Folder
weiyunsdkgo.Folder
}

func (f *Folder) CreateTime() time.Time {
return time.Time(f.DirCtime)
}

func (f *Folder) GetHash() utils.HashInfo {
return utils.HashInfo{}
}

func (f *Folder) GetID() string { return f.DirKey }
func (f *Folder) GetSize() int64 { return 0 }
func (f *Folder) GetName() string { return f.DirName }
go.mod

@ -17,6 +17,7 @@ require (
github.com/coreos/go-oidc v2.2.1+incompatible
github.com/deckarep/golang-set/v2 v2.3.1
github.com/disintegration/imaging v1.6.2
github.com/djherbis/times v1.5.0
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564
github.com/foxxorcat/mopan-sdk-go v0.1.4
github.com/foxxorcat/weiyun-sdk-go v0.1.2

@ -39,6 +40,7 @@ require (
github.com/rclone/rclone v1.63.1
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.7.0
github.com/stretchr/testify v1.8.4
github.com/t3rm1n4l/go-mega v0.0.0-20230228171823-a01a2cda13ca
github.com/u2takey/ffmpeg-go v0.5.0
github.com/upyun/go-sdk/v3 v3.0.4

@ -90,6 +92,7 @@ require (
github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
github.com/fxamacker/cbor/v2 v2.4.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect

@ -158,6 +161,7 @@ require (
github.com/orzogc/fake115uploader v0.3.3-0.20221009101310-08b764073b77 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/pierrec/lz4/v4 v4.1.17 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
github.com/pquerna/cachecontrol v0.1.0 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
go.sum

@ -8,8 +8,6 @@ github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVOVmhWBY=
github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE=
github.com/SheltonZhu/115driver v1.0.14 h1:uW3dl8J9KDMw+3gPxQdhTysoGhw0/uI1484GT9xhfU4=
github.com/SheltonZhu/115driver v1.0.14/go.mod h1:00ixivHH5HqDj4S7kAWbkuUrjtsJTxc7cGv5RMw3RVs=
github.com/SheltonZhu/115driver v1.0.15 h1:RRvgXvXEzvrPwkRno0CUIg7ucEphbsfwct2mQxfNOdQ=
github.com/SheltonZhu/115driver v1.0.15/go.mod h1:e3fPOBANbH/FsTya8FquJwOR3ErhCQgEab3q6CVY2k4=
github.com/Unknwon/goconfig v1.0.0 h1:9IAu/BYbSLQi8puFjUQApZTxIHqSwrj5d8vpP8vTq4A=

@ -30,8 +28,6 @@ github.com/andreburgaud/crypt2go v1.1.0/go.mod h1:4qhZPzarj1dCIRmCkpdgCklwp+hBq9
github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/aws/aws-sdk-go v1.38.20/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.316 h1:UC3alCEyzj2XU13ZFGIOHW3yjCNLGTIGVauyetl9fwE=
github.com/aws/aws-sdk-go v1.44.316/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.327 h1:ZS8oO4+7MOBLhkdwIhgtVeDzCeWOlTfKJS7EgggbIEY=
github.com/aws/aws-sdk-go v1.44.327/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=

@ -83,7 +79,6 @@ github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s
github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
github.com/caarlos0/env/v9 v9.0.0 h1:SI6JNsOA+y5gj9njpgybykATIylrRMklbs5ch6wO6pc=
github.com/caarlos0/env/v9 v9.0.0/go.mod h1:ye5mlCVMYh6tZ+vCgrs/B95sj88cg5Tlnc0XIzgZ020=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charmbracelet/bubbles v0.16.1 h1:6uzpAAaT9ZqKssntbvZMlksWHruQLNxg49H5WdeuYSY=

@ -109,8 +104,6 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set/v2 v2.3.0 h1:qs18EKUfHm2X9fA50Mr/M5hccg2tNnVqsiBImnyDs0g=
github.com/deckarep/golang-set/v2 v2.3.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/deckarep/golang-set/v2 v2.3.1 h1:vjmkvJt/IV27WXPyYQpAh4bRyWJc5Y435D17XQ9QU5A=
github.com/deckarep/golang-set/v2 v2.3.1/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=

@ -118,6 +111,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
github.com/djherbis/times v1.5.0 h1:79myA211VwPhFTqUk8xehWrsEO+zcIZj0zT8mXPVARU=
github.com/djherbis/times v1.5.0/go.mod h1:5q7FDLvbNg1L/KaBmPcWlVR9NmoKo3+ucqUA3ijQhA0=
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 h1:I6KUy4CI6hHjqnyJLNCEi7YHVMkwwtfSr2k9splgdSM=
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564/go.mod h1:yekO+3ZShy19S+bsmnERmznGy9Rfg6dWWWpiGJjNAz8=
github.com/foxxorcat/mopan-sdk-go v0.1.3 h1:6ww0ulyLDh6neXZBqUM2PDbxQ6lfdkQbr0FCh9BTY0Y=

@ -144,7 +139,6 @@ github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-chi/chi/v5 v5.0.10 h1:rLz5avzKpjqxrYwXNfmjkrYYXOyLJd37pz53UFHC6vk=
github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=

@ -290,7 +284,6 @@ github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOj
github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=

@ -439,8 +432,6 @@ github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZ
github.com/upyun/go-sdk/v3 v3.0.4 h1:2DCJa/Yi7/3ZybT9UCPATSzvU3wpPPxhXinNlb1Hi8Q=
github.com/upyun/go-sdk/v3 v3.0.4/go.mod h1:P/SnuuwhrIgAVRd/ZpzDWqCsBAf/oHg7UggbAxyZa0E=
github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4xc=
github.com/winfsp/cgofuse v1.5.1-0.20221118130120-84c0898ad2e0 h1:j3un8DqYvvAOqKI5OPz+/RRVhDFipbPKI4t2Uk5RBJw=
github.com/winfsp/cgofuse v1.5.1-0.20221118130120-84c0898ad2e0/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 h1:jxZvjx8Ve5sOXorZG0KzTxbp0Cr1n3FEegfmyd9br1k=
github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=

@ -524,7 +515,6 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -2,6 +2,7 @@ package aria2

import (
"fmt"
"github.com/alist-org/alist/v3/internal/stream"
"os"
"path"
"path/filepath"

@ -162,22 +163,27 @@ func (m *Monitor) Complete() error {
if err != nil {
return errors.Wrapf(err, "failed to open file %s", file.Path)
}
stream := &model.FileStream{
s := stream.FileStream{
Obj: &model.Object{
Name: path.Base(file.Path),
Size: size,
Modified: time.Now(),
IsFolder: false,
},
ReadCloser: f,
Reader: f,
Closers: utils.NewClosers(f),
Mimetype: mimetype,
}
ss, err := stream.NewSeekableStream(s, nil)
if err != nil {
return err
}
relDir, err := filepath.Rel(m.tempDir, filepath.Dir(file.Path))
if err != nil {
log.Errorf("find relation directory error: %v", err)
}
newDistDir := filepath.Join(dstDirActualPath, relDir)
return op.Put(tsk.Ctx, storage, newDistDir, stream, tsk.SetProgress)
return op.Put(tsk.Ctx, storage, newDistDir, ss, tsk.SetProgress)
},
}))
}
@ -17,6 +17,7 @@ var (
MetaNotFound = errors.New("meta not found")
StorageNotFound = errors.New("storage not found")
StreamIncomplete = errors.New("upload/download stream incomplete, possible network issue")
StreamPeekFail = errors.New("StreamPeekFail")
)

// NewErr wraps a constant error with an extra message
@ -10,6 +10,7 @@ import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/task"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/pkg/errors"

@ -94,9 +95,14 @@ func copyFileBetween2Storages(tsk *task.Task[uint64], srcStorage, dstStorage dri
if err != nil {
return errors.WithMessagef(err, "failed get [%s] link", srcFilePath)
}
stream, err := getFileStreamFromLink(tsk.Ctx, srcFile, link)
fs := stream.FileStream{
Obj: srcFile,
Ctx: tsk.Ctx,
}
// any link provided is seekable
ss, err := stream.NewSeekableStream(fs, link)
if err != nil {
return errors.WithMessagef(err, "failed get [%s] stream", srcFilePath)
}
return op.Put(tsk.Ctx, dstStorage, dstDirPath, stream, tsk.SetProgress, true)
return op.Put(tsk.Ctx, dstStorage, dstDirPath, ss, tsk.SetProgress, true)
}
@ -2,7 +2,6 @@ package fs

import (
"context"

"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"

@ -93,7 +92,7 @@ func Remove(ctx context.Context, path string) error {
return err
}

func PutDirectly(ctx context.Context, dstDirPath string, file *model.FileStream, lazyCache ...bool) error {
func PutDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer, lazyCache ...bool) error {
err := putDirectly(ctx, dstDirPath, file, lazyCache...)
if err != nil {
log.Errorf("failed put %s: %+v", dstDirPath, err)

@ -101,7 +100,7 @@ func PutDirectly(ctx context.Context, dstDirPath string, file *model.FileStream,
return err
}

func PutAsTask(dstDirPath string, file *model.FileStream) error {
func PutAsTask(dstDirPath string, file model.FileStreamer) error {
err := putAsTask(dstDirPath, file)
if err != nil {
log.Errorf("failed put %s: %+v", dstDirPath, err)
@ -3,13 +3,12 @@ package fs
import (
"context"
"fmt"
"github.com/alist-org/alist/v3/internal/model"
"sync/atomic"

"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/task"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/pkg/errors"
)

@ -18,7 +17,7 @@ var UploadTaskManager = task.NewTaskManager(3, func(tid *uint64) {
})

// putAsTask add as a put task and return immediately
func putAsTask(dstDirPath string, file *model.FileStream) error {
func putAsTask(dstDirPath string, file model.FileStreamer) error {
storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
if err != nil {
return errors.WithMessage(err, "failed get storage")

@ -27,11 +26,12 @@ func putAsTask(dstDirPath string, file *model.FileStream) error {
return errors.WithStack(errs.UploadNotSupported)
}
if file.NeedStore() {
tempFile, err := utils.CreateTempFile(file, file.GetSize())
_, err := file.CacheFullInTempFile()
if err != nil {
return errors.Wrapf(err, "failed to create temp file")
}
file.SetReadCloser(tempFile)
//file.SetReader(tempFile)
//file.SetTmpFile(tempFile)
}
UploadTaskManager.Submit(task.WithCancelCtx(&task.Task[uint64]{
Name: fmt.Sprintf("upload %s to [%s](%s)", file.GetName(), storage.GetStorage().MountPath, dstDirActualPath),

@ -43,7 +43,7 @@ func putAsTask(dstDirPath string, file *model.FileStream) error {
}

// putDirect put the file and return after finish
func putDirectly(ctx context.Context, dstDirPath string, file *model.FileStream, lazyCache ...bool) error {
func putDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer, lazyCache ...bool) error {
storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
if err != nil {
return errors.WithMessage(err, "failed get storage")
@ -1,73 +0,0 @@
package fs

import (
"context"
"io"
"net/http"
"strings"

"github.com/alist-org/alist/v3/internal/net"
"github.com/alist-org/alist/v3/pkg/http_range"

"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common"
"github.com/pkg/errors"
)

func getFileStreamFromLink(ctx context.Context, file model.Obj, link *model.Link) (*model.FileStream, error) {
var rc io.ReadCloser
var err error
mimetype := utils.GetMimeType(file.GetName())
if link.RangeReadCloser.RangeReader != nil {
rc, err = link.RangeReadCloser.RangeReader(http_range.Range{Length: -1})
if err != nil {
return nil, err
}
} else if link.ReadSeekCloser != nil {
rc = link.ReadSeekCloser
} else if link.Concurrency != 0 || link.PartSize != 0 {
down := net.NewDownloader(func(d *net.Downloader) {
d.Concurrency = link.Concurrency
d.PartSize = link.PartSize
})
req := &net.HttpRequestParams{
URL: link.URL,
Range: http_range.Range{Length: -1},
Size: file.GetSize(),
HeaderRef: link.Header,
}
rc, err = down.Download(ctx, req)
if err != nil {
return nil, err
}
} else {
//TODO: add accelerator
req, err := http.NewRequest(http.MethodGet, link.URL, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to create request for %s", link.URL)
}
for h, val := range link.Header {
req.Header[h] = val
}
res, err := common.HttpClient().Do(req)
if err != nil {
return nil, errors.Wrapf(err, "failed to get response for %s", link.URL)
}
mt := res.Header.Get("Content-Type")
if mt != "" && strings.ToLower(mt) != "application/octet-stream" {
mimetype = mt
}
rc = res.Body
}
// if can't get mimetype, use default application/octet-stream
if mimetype == "" {
mimetype = "application/octet-stream"
}
stream := &model.FileStream{
Obj: file,
ReadCloser: rc,
Mimetype: mimetype,
}
return stream, nil
}
@ -1,6 +1,7 @@
package model

import (
"context"
"io"
"net/http"
"time"

@ -22,13 +23,14 @@ type LinkArgs struct {
}

type Link struct {
URL string `json:"url"`
Header http.Header `json:"header"` // needed header (for url) or response header(for data or writer)
RangeReadCloser RangeReadCloser `json:"-"` // recommended way
ReadSeekCloser io.ReadSeekCloser `json:"-"` // best for local,smb... file system, which exposes ReadSeekCloser
URL string `json:"url"` // most common way
Header http.Header `json:"header"` // needed header (for url)
RangeReadCloser RangeReadCloserIF `json:"-"` // recommended way if can't use URL
MFile File `json:"-"` // best for local,smb... file system, which exposes MFile

Expiration *time.Duration // local cache expire Duration
IPCacheKey bool `json:"-"` // add ip to cache key

//for accelerating request, use multi-thread downloading
Concurrency int `json:"concurrency"`
PartSize int `json:"part_size"`

@ -45,10 +47,23 @@ type FsOtherArgs struct {
Method string `json:"method" form:"method"`
Data interface{} `json:"data" form:"data"`
}
type RangeReadCloser struct {
RangeReader RangeReaderFunc
Closers *utils.Closers
type RangeReadCloserIF interface {
RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)
utils.ClosersIF
}

type WriterFunc func(w io.Writer) error
type RangeReaderFunc func(httpRange http_range.Range) (io.ReadCloser, error)
var _ RangeReadCloserIF = (*RangeReadCloser)(nil)

type RangeReadCloser struct {
RangeReader RangeReaderFunc
utils.Closers
}

func (r RangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
rc, err := r.RangeReader(ctx, httpRange)
r.Closers.Add(rc)
return rc, err
}

// type WriterFunc func(w io.Writer) error
type RangeReaderFunc func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)
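To make the new contract concrete: any type that can serve a byte range behind a context can now back a Link. A hedged sketch of a RangeReaderFunc over plain HTTP (fileURL is hypothetical):

// Sketch: a context-aware range reader backed by an HTTP Range request.
rr := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fileURL, nil)
	if err != nil {
		return nil, err
	}
	if r.Length >= 0 {
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", r.Start, r.Start+r.Length-1))
	} else {
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", r.Start))
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	return resp.Body, nil
}
link := &model.Link{RangeReadCloser: &model.RangeReadCloser{RangeReader: rr}}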
@ -0,0 +1,25 @@
package model

import "io"

// File is basic file level accessing interface
type File interface {
io.Reader
io.ReaderAt
io.Seeker
io.Closer
}

type NopMFileIF interface {
io.Reader
io.ReaderAt
io.Seeker
}
type NopMFile struct {
NopMFileIF
}

func (NopMFile) Close() error { return nil }
func NewNopMFile(r NopMFileIF) File {
return NopMFile{r}
}
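Anything that already reads, seeks and reads-at, such as bytes.Reader or an os.File, satisfies the new File interface once a Close is supplied; that is exactly what NewNopMFile is for. A quick usage sketch:

// bytes.Reader implements Read, ReadAt and Seek; NewNopMFile adds a no-op Close.
var f model.File = model.NewNopMFile(bytes.NewReader([]byte("hello")))
buf := make([]byte, 5)
if _, err := f.ReadAt(buf, 0); err != nil && err != io.EOF {
	log.Fatal(err)
}
_ = f.Close() // always nil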
@ -1,6 +1,8 @@
package model

import (
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"io"
"regexp"
"sort"

@ -20,8 +22,9 @@ type Obj interface {
GetSize() int64
GetName() string
ModTime() time.Time
CreateTime() time.Time
IsDir() bool
//GetHash() (string, string)
GetHash() utils.HashInfo

// The internal information of the driver.
// If you want to use it, please understand what it means

@ -29,14 +32,20 @@ type Obj interface {
GetPath() string
}

// FileStreamer ->check FileStream for more comments
type FileStreamer interface {
io.ReadCloser
io.Reader
io.Closer
Obj
GetMimetype() string
SetReadCloser(io.ReadCloser)
//SetReader(io.Reader)
NeedStore() bool
GetReadCloser() io.ReadCloser
GetOld() Obj
GetExist() Obj
SetExist(Obj)
//for a non-seekable Stream, RangeRead supports peeking some data, and CacheFullInTempFile still works
RangeRead(http_range.Range) (io.Reader, error)
//for a non-seekable Stream, if Read is called, this function won't work
CacheFullInTempFile() (File, error)
}

type URL interface {

@ -50,9 +59,6 @@ type Thumb interface {
type SetPath interface {
SetPath(path string)
}
type SetHash interface {
SetHash(hash string, hashType string)
}

func SortFiles(objs []Obj, orderBy, orderDirection string) {
if orderBy == "" {
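From a driver's perspective the interface now offers two access paths: peek a prefix via RangeRead (cheap, works on non-seekable streams) or spill everything to disk with CacheFullInTempFile when random access is unavoidable. A hedged sketch of a Put that attempts rapid upload first (tryRapidUpload and uploadFrom are hypothetical; utils.HashReader is the helper used elsewhere in this commit):

// Sketch: hash a 128KB prefix via RangeRead, fall back to a temp file.
func put(s model.FileStreamer) error {
	head, err := s.RangeRead(http_range.Range{Start: 0, Length: 128 * 1024})
	if err != nil {
		return err
	}
	preHash, err := utils.HashReader(utils.SHA1, head)
	if err != nil {
		return err
	}
	if tryRapidUpload(preHash) { // hypothetical server-side check
		return nil
	}
	f, err := s.CacheFullInTempFile() // full content needed: buffer once on disk
	if err != nil {
		return err
	}
	return uploadFrom(f) // hypothetical
}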
@ -28,9 +28,9 @@ type Object struct {
Name string
Size int64
Modified time.Time
Ctime time.Time // file create time
IsFolder bool
Hash string
HashType string
HashInfo utils.HashInfo
}

func (o *Object) GetName() string {

@ -44,6 +44,12 @@ func (o *Object) GetSize() int64 {
func (o *Object) ModTime() time.Time {
return o.Modified
}
func (o *Object) CreateTime() time.Time {
if o.Ctime.IsZero() {
return o.ModTime()
}
return o.Ctime
}

func (o *Object) IsDir() bool {
return o.IsFolder

@ -61,13 +67,8 @@ func (o *Object) SetPath(path string) {
o.Path = path
}

func (o *Object) SetHash(hash string, hashType string) {
o.Hash = hash
o.HashType = hashType
}

func (o *Object) GetHash() (string, string) {
return o.Hash, o.HashType
func (o *Object) GetHash() utils.HashInfo {
return o.HashInfo
}

type Thumbnail struct {
@ -1,33 +0,0 @@
package model

import (
"io"
)

type FileStream struct {
Obj
io.ReadCloser
Mimetype string
WebPutAsTask bool
Old Obj
}

func (f *FileStream) GetMimetype() string {
return f.Mimetype
}

func (f *FileStream) NeedStore() bool {
return f.WebPutAsTask
}

func (f *FileStream) GetReadCloser() io.ReadCloser {
return f.ReadCloser
}

func (f *FileStream) SetReadCloser(rc io.ReadCloser) {
f.ReadCloser = rc
}

func (f *FileStream) GetOld() Obj {
return f.Old
}
@ -124,11 +124,11 @@ func (u *User) JoinPath(reqPath string) (string, error) {
}

func StaticHash(password string) string {
return utils.GetSHA256Encode([]byte(fmt.Sprintf("%s-%s", password, StaticHashSalt)))
return utils.HashData(utils.SHA256, []byte(fmt.Sprintf("%s-%s", password, StaticHashSalt)))
}

func HashPwd(static string, salt string) string {
return utils.GetSHA256Encode([]byte(fmt.Sprintf("%s-%s", static, salt)))
return utils.HashData(utils.SHA256, []byte(fmt.Sprintf("%s-%s", static, salt)))
}

func TwoHashPwd(password string, salt string) string {
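The per-algorithm helpers collapse into a single generic utils.HashData(hashType, data) call. Assuming it returns the usual hex digest, the salted hash above is equivalent to this standard-library computation:

// Standard-library equivalent of the salted SHA-256 above (hex output assumed).
func staticHash(password, salt string) string {
	sum := sha256.Sum256([]byte(fmt.Sprintf("%s-%s", password, salt)))
	return hex.EncodeToString(sum[:])
}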
@ -43,7 +43,7 @@ type Downloader struct {
//RequestParam HttpRequestParams
HttpClient HttpRequestFunc
}
type HttpRequestFunc func(params *HttpRequestParams) (*http.Response, error)
type HttpRequestFunc func(ctx context.Context, params *HttpRequestParams) (*http.Response, error)

func NewDownloader(options ...func(*Downloader)) *Downloader {
d := &Downloader{

@ -131,7 +131,7 @@ func (d *downloader) download() (io.ReadCloser, error) {
}

if d.cfg.Concurrency == 1 {
resp, err := d.cfg.HttpClient(d.params)
resp, err := d.cfg.HttpClient(d.ctx, d.params)
if err != nil {
return nil, err
}

@ -258,7 +258,7 @@ func (d *downloader) downloadChunk(ch *chunk) error {

func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {

resp, err := d.cfg.HttpClient(params)
resp, err := d.cfg.HttpClient(d.ctx, params)
if err != nil {
return 0, err
}

@ -371,10 +371,10 @@ type chunk struct {
//boundary http_range.Range
}

func DefaultHttpRequestFunc(params *HttpRequestParams) (*http.Response, error) {
func DefaultHttpRequestFunc(ctx context.Context, params *HttpRequestParams) (*http.Response, error) {
header := http_range.ApplyRangeToHttpHeader(params.Range, params.HeaderRef)

res, err := RequestHttp("GET", header, params.URL)
res, err := RequestHttp(ctx, "GET", header, params.URL)
if err != nil {
return nil, err
}

@ -456,7 +456,7 @@ type Buf struct {
// NewBuf is a buffer that can have 1 read & 1 write at the same time.
// when read is faster than write, written data is immediately fed to the reader
func NewBuf(ctx context.Context, maxSize int, id int) *Buf {
d := make([]byte, maxSize)
d := make([]byte, 0, maxSize)
return &Buf{ctx: ctx, buffer: bytes.NewBuffer(d), size: maxSize, notify: make(chan struct{})}

}
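The NewBuf change is subtle but real: bytes.NewBuffer treats the slice's length as existing content, so make([]byte, maxSize) seeds the buffer with maxSize zero bytes, whereas make([]byte, 0, maxSize) only pre-allocates capacity. A demonstration:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	wrong := bytes.NewBuffer(make([]byte, 8))    // len 8: starts with 8 zero bytes of "content"
	right := bytes.NewBuffer(make([]byte, 0, 8)) // len 0, cap 8: empty but pre-allocated
	wrong.WriteString("hi")
	right.WriteString("hi")
	fmt.Println(wrong.Len(), right.Len()) // 10 2
}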
@ -143,7 +143,7 @@ type downloadCaptureClient struct {
lock sync.Mutex
}

func (c *downloadCaptureClient) HttpRequest(params *HttpRequestParams) (*http.Response, error) {
func (c *downloadCaptureClient) HttpRequest(ctx context.Context, params *HttpRequestParams) (*http.Response, error) {
c.lock.Lock()
defer c.lock.Unlock()

@ -1,6 +1,7 @@
package net

import (
"context"
"fmt"
"io"
"mime"

@ -110,7 +111,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
}
switch {
case len(ranges) == 0:
reader, err := RangeReaderFunc(http_range.Range{Length: -1})
reader, err := RangeReaderFunc(context.Background(), http_range.Range{Length: -1})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return

@ -129,7 +130,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
// does not request multiple parts might not support
// multipart responses."
ra := ranges[0]
sendContent, err = RangeReaderFunc(ra)
sendContent, err = RangeReaderFunc(context.Background(), ra)
if err != nil {
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
return

@ -156,7 +157,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
pw.CloseWithError(err)
return
}
reader, err := RangeReaderFunc(ra)
reader, err := RangeReaderFunc(context.Background(), ra)
if err != nil {
pw.CloseWithError(err)
return

@ -209,8 +210,8 @@ func ProcessHeader(origin, override http.Header) http.Header {
}

// RequestHttp deals with the header properly, then sends the request
func RequestHttp(httpMethod string, headerOverride http.Header, URL string) (*http.Response, error) {
req, err := http.NewRequest(httpMethod, URL, nil)
func RequestHttp(ctx context.Context, httpMethod string, headerOverride http.Header, URL string) (*http.Response, error) {
req, err := http.NewRequestWithContext(ctx, httpMethod, URL, nil)
if err != nil {
return nil, err
}
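Threading ctx through RequestHttp means an abandoned transfer can cancel its in-flight HTTP request instead of leaking it. The mechanism in isolation:

ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com/file", nil)
if err != nil {
	log.Fatal(err)
}
resp, err := http.DefaultClient.Do(req) // aborts promptly once ctx is canceled or times out
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()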
@ -2,7 +2,6 @@ package op

import (
"context"
"os"
stdpath "path"
"time"

@ -481,18 +480,10 @@ func Remove(ctx context.Context, storage driver.Driver, path string) error {
return errors.WithStack(err)
}

func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file *model.FileStream, up driver.UpdateProgress, lazyCache ...bool) error {
func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file model.FileStreamer, up driver.UpdateProgress, lazyCache ...bool) error {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
}
defer func() {
if f, ok := file.GetReadCloser().(*os.File); ok {
err := os.RemoveAll(f.Name())
if err != nil {
log.Errorf("failed to remove file [%s]", f.Name())
}
}
}()
defer func() {
if err := file.Close(); err != nil {
log.Errorf("failed to close file streamer, %v", err)

@ -508,7 +499,7 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
if fi.GetSize() == 0 {
err = Remove(ctx, storage, dstPath)
if err != nil {
return errors.WithMessagef(err, "failed remove file that exist and have size 0")
return errors.WithMessagef(err, "while uploading, failed remove existing file which size = 0")
}
} else if storage.Config().NoOverwriteUpload {
// try to rename old obj

@ -517,7 +508,7 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
return err
}
} else {
file.Old = fi
file.SetExist(fi)
}
}
err = MakeDir(ctx, storage, dstDirPath)
@ -2,7 +2,7 @@ package qbittorrent

import (
"fmt"
"io"
"github.com/alist-org/alist/v3/internal/stream"
"os"
"path/filepath"
"sync"

@ -157,17 +157,22 @@ func (m *Monitor) complete() error {
if err != nil {
return errors.Wrapf(err, "failed to open file %s", tempPath)
}
stream := &model.FileStream{
s := stream.FileStream{
Obj: &model.Object{
Name: fileName,
Size: size,
Modified: time.Now(),
IsFolder: false,
},
ReadCloser: struct{ io.ReadSeekCloser }{f},
Reader: f,
Closers: utils.NewClosers(f),
Mimetype: mimetype,
}
return op.Put(tsk.Ctx, storage, dstDir, stream, tsk.SetProgress)
ss, err := stream.NewSeekableStream(s, nil)
if err != nil {
return err
}
return op.Put(tsk.Ctx, storage, dstDir, ss, tsk.SetProgress)
},
}))
}
@ -0,0 +1,278 @@
|
|||
package stream
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/http_range"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
type FileStream struct {
|
||||
Ctx context.Context
|
||||
model.Obj
|
||||
io.Reader
|
||||
Mimetype string
|
||||
WebPutAsTask bool
|
||||
Exist model.Obj //the file existed in the destination, we can reuse some info since we wil overwrite it
|
||||
utils.Closers
|
||||
tmpFile *os.File //if present, tmpFile has full content, it will be deleted at last
|
||||
peekBuff *bytes.Reader
|
||||
}
|
||||
|
||||
func (f *FileStream) GetMimetype() string {
|
||||
return f.Mimetype
|
||||
}
|
||||
|
||||
func (f *FileStream) NeedStore() bool {
|
||||
return f.WebPutAsTask
|
||||
}
|
||||
func (f *FileStream) Close() error {
|
||||
var err1, err2 error
|
||||
err1 = f.Closers.Close()
|
||||
if f.tmpFile != nil {
|
||||
err2 = os.RemoveAll(f.tmpFile.Name())
|
||||
if err2 != nil {
|
||||
err2 = errs.NewErr(err2, "failed to remove tmpFile [%s]", f.tmpFile.Name())
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Join(err1, err2)
|
||||
}
|
||||
|
||||
func (f *FileStream) GetExist() model.Obj {
|
||||
return f.Exist
|
||||
}
|
||||
func (f *FileStream) SetExist(obj model.Obj) {
|
||||
f.Exist = obj
|
||||
}
|
||||
|
||||
// CacheFullInTempFile save all data into tmpFile. Not recommended since it wears disk,
|
||||
// and can't start upload until the file is written. It's not thread-safe!
|
||||
// won't check if some
|
||||
func (f *FileStream) CacheFullInTempFile() (model.File, error) {
|
||||
if f.tmpFile != nil {
|
||||
return f.tmpFile, nil
|
||||
}
|
||||
if file, ok := f.Reader.(model.File); ok {
|
||||
return file, nil
|
||||
}
|
||||
tmpF, err := utils.CreateTempFile(f.Reader, f.GetSize())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.tmpFile = tmpF
|
||||
f.Reader = tmpF
|
||||
return f.tmpFile, nil
|
||||
}
|
||||
|
||||
const InMemoryBufMaxSize = 10 // Megabytes
const InMemoryBufMaxSizeBytes = InMemoryBufMaxSize * 1024 * 1024

// RangeRead has to cache all data first since only a Reader is provided.
// It also supports a peeking RangeRead at the very start, but won't buffer more than 10 MB of data in memory.
func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
	if httpRange.Length == -1 {
		httpRange.Length = f.GetSize()
	}

	if f.peekBuff != nil && httpRange.Start < int64(f.peekBuff.Len()) && httpRange.Start+httpRange.Length-1 < int64(f.peekBuff.Len()) {
		return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
	}
	if httpRange.Start == 0 && httpRange.Length <= InMemoryBufMaxSizeBytes && f.peekBuff == nil {
		bufSize := utils.Min(httpRange.Length, f.GetSize())
		newBuf := bytes.NewBuffer(make([]byte, 0, bufSize))
		n, err := io.CopyN(newBuf, f.Reader, bufSize)
		if err != nil {
			return nil, err
		}
		if n != bufSize {
			return nil, fmt.Errorf("stream RangeRead did not get all data in peek, expect=%d, actual=%d", bufSize, n)
		}
		f.peekBuff = bytes.NewReader(newBuf.Bytes())
		f.Reader = io.MultiReader(f.peekBuff, f.Reader)
		return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
	}
	if f.tmpFile == nil {
		_, err := f.CacheFullInTempFile()
		if err != nil {
			return nil, err
		}
	}
	return io.NewSectionReader(f.tmpFile, httpRange.Start, httpRange.Length), nil
}

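// Illustrative sketch, not part of the commit: peeking the first 128 KB for a
// driver pre-hash. Because the range starts at 0 and is below
// InMemoryBufMaxSizeBytes, RangeRead buffers it in memory and the stream can
// still be consumed from the beginning afterwards. The function name is hypothetical.
func examplePreHash(f *FileStream) (string, error) {
	length := utils.Min(int64(128*utils.KB), f.GetSize())
	r, err := f.RangeRead(http_range.Range{Start: 0, Length: length})
	if err != nil {
		return "", err
	}
	return utils.HashReader(utils.SHA1, r) // the stream remains fully readable after this
}
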
var _ model.FileStreamer = (*SeekableStream)(nil)
var _ model.FileStreamer = (*FileStream)(nil)

//var _ seekableStream = (*FileStream)(nil)

// SeekableStream is for most internal streams, which are either a RangeReadCloser or an MFile
type SeekableStream struct {
	FileStream
	Link *model.Link
	// should have one of the below to support RangeRead
	rangeReadCloser model.RangeReadCloserIF
	mFile           model.File
}

func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) {
	if len(fs.Mimetype) == 0 {
		fs.Mimetype = utils.GetMimeType(fs.Obj.GetName())
	}
	ss := SeekableStream{FileStream: fs, Link: link}
	if ss.Reader != nil {
		result, ok := ss.Reader.(model.File)
		if ok {
			ss.mFile = result
			ss.Closers.Add(result)
			return &ss, nil
		}
	}
	if ss.Link != nil {
		if ss.Link.MFile != nil {
			ss.mFile = ss.Link.MFile
			ss.Reader = ss.Link.MFile
			ss.Closers.Add(ss.Link.MFile)
			return &ss, nil
		}

		if ss.Link.RangeReadCloser != nil {
			ss.rangeReadCloser = ss.Link.RangeReadCloser
			return &ss, nil
		}
		if len(ss.Link.URL) > 0 {
			rrc, err := GetRangeReadCloserFromLink(ss.GetSize(), link)
			if err != nil {
				return nil, err
			}
			ss.rangeReadCloser = rrc
			return &ss, nil
		}
	}

	return nil, fmt.Errorf("illegal seekableStream")
}

//func (ss *SeekableStream) Peek(length int) {
//
//}

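// Illustrative sketch, not part of the commit: wrapping a driver link so that
// copy tasks get range support. NewSeekableStream prefers an already-seekable
// Reader, then Link.MFile, then Link.RangeReadCloser, and finally builds a
// RangeReadCloser from Link.URL. Names are hypothetical.
func exampleWrapLink(fs FileStream, link *model.Link) (io.Reader, error) {
	ss, err := NewSeekableStream(fs, link)
	if err != nil {
		return nil, err
	}
	// read just the second 4 MB part, e.g. for a multipart upload
	return ss.RangeRead(http_range.Range{Start: 4 * 1024 * 1024, Length: 4 * 1024 * 1024})
}
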
// RangeRead is not thread-safe, please use it in a single thread only.
func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
	if httpRange.Length == -1 {
		httpRange.Length = ss.GetSize()
	}
	if ss.mFile != nil {
		return io.NewSectionReader(ss.mFile, httpRange.Start, httpRange.Length), nil
	}
	if ss.tmpFile != nil {
		return io.NewSectionReader(ss.tmpFile, httpRange.Start, httpRange.Length), nil
	}
	if ss.rangeReadCloser != nil {
		rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, httpRange)
		if err != nil {
			return nil, err
		}
		return rc, nil
	}
	return nil, fmt.Errorf("can't find mFile or rangeReadCloser")
}

//func (f *FileStream) GetReader() io.Reader {
//	return f.Reader
//}

// only provide Reader as the full stream when it's demanded; in rapid-upload, we can skip this to save memory
func (ss *SeekableStream) Read(p []byte) (n int, err error) {
	//f.mu.Lock()
	//f.peekedOnce = true
	//defer f.mu.Unlock()
	if ss.Reader == nil {
		if ss.rangeReadCloser == nil {
			return 0, fmt.Errorf("illegal seekableStream")
		}
		rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, http_range.Range{Length: -1})
		if err != nil {
			return 0, err
		}
		ss.Reader = io.NopCloser(rc)
		ss.Closers.Add(rc)
	}
	return ss.Reader.Read(p)
}

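// Illustrative sketch, not part of the commit: because Read opens the underlying
// range reader lazily, a rapid-upload path that only ever calls RangeRead never
// downloads the whole file; a plain io.Copy triggers the one-time
// RangeRead(Length: -1) above. The function name is hypothetical.
func exampleConsume(ss *SeekableStream, dst io.Writer) (int64, error) {
	return io.Copy(dst, ss) // the first Read call opens the full-range reader
}
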
func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) {
	if ss.tmpFile != nil {
		return ss.tmpFile, nil
	}
	if ss.mFile != nil {
		return ss.mFile, nil
	}
	tmpF, err := utils.CreateTempFile(ss, ss.GetSize())
	if err != nil {
		return nil, err
	}
	ss.tmpFile = tmpF
	ss.Reader = tmpF
	return ss.tmpFile, nil
}

//func (f *FileStream) SetReader(r io.Reader) {
//	f.Reader = r
//}

/*
// RangePeek allows a single peek at the start of the data, since most drives check the first XX bytes for rapid-upload
func (f *FileStream) RangePeek(length int64) (*bytes.Buffer, error) {
	if length > InMemoryBufMaxSize*1024*1024 {
		return nil, errs.NewErr(errs.StreamPeekFail, "can't peek size > %d MB", InMemoryBufMaxSize)
	}
	httpRange := &http_range.Range{Length: length}
	bufSize := utils.Min(httpRange.Length, f.GetSize())
	buf := bytes.NewBuffer(make([]byte, 0, bufSize))
	if f.link == nil && f.tmpFile == nil {
		if !f.peekedOnce {
			f.mu.Lock()
			f.peekedOnce = true
			_, err := io.CopyN(buf, f.Reader, bufSize)

			if err != nil {
				f.mu.Unlock()
				return nil, errs.NewErr(errs.StreamPeekFail, "failed to copyN %d bytes data", bufSize)
			}
			f.Reader = io.MultiReader(buf, f.Reader)
			f.mu.Unlock()
			return buf, nil
		}
		return nil, errs.NewErr(errs.StreamPeekFail, "link and tmpFile both are null")
	}
	f.mu.Lock()
	defer f.mu.Unlock()
	rc, _, err := GetReadCloserFromLink(f.Obj, f.link, httpRange)

	if err != nil {
		return nil, err
	}
	_, err = io.CopyN(buf, rc, bufSize)
	if err != nil {
		return nil, err
	}
	return buf, nil
}*/

//func (f *FileStream) SetTmpFile(r *os.File) {
//	f.mu.Lock()
//	//f.readDisabled = true
//	f.tmpFile = r
//	f.Reader = r
//	f.mu.Unlock()
//}

@@ -0,0 +1,84 @@
package stream

import (
	"context"
	"fmt"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/net"
	"github.com/alist-org/alist/v3/pkg/http_range"
	log "github.com/sirupsen/logrus"
	"io"
	"net/http"
)

func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCloserIF, error) {
	if len(link.URL) == 0 {
		return nil, fmt.Errorf("can't create RangeReadCloser since URL is empty in link")
	}
	//remoteClosers := utils.EmptyClosers()
	rangeReaderFunc := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) {
		if link.Concurrency != 0 || link.PartSize != 0 {
			header := net.ProcessHeader(http.Header{}, link.Header)
			down := net.NewDownloader(func(d *net.Downloader) {
				d.Concurrency = link.Concurrency
				d.PartSize = link.PartSize
			})
			req := &net.HttpRequestParams{
				URL:       link.URL,
				Range:     r,
				Size:      size,
				HeaderRef: header,
			}
			rc, err := down.Download(ctx, req)
			if err != nil {
				return nil, errs.NewErr(err, "GetReadCloserFromLink failed")
			}
			return rc, nil
		}
		if len(link.URL) > 0 {
			response, err := RequestRangedHttp(ctx, link, r.Start, r.Length)
			if err != nil {
				if response == nil {
					return nil, fmt.Errorf("http request failure, err: %s", err)
				}
				return nil, fmt.Errorf("http request failure, status: %d err: %s", response.StatusCode, err)
			}
			if r.Start == 0 && (r.Length == -1 || r.Length == size) || response.StatusCode == http.StatusPartialContent ||
				checkContentRange(&response.Header, size, r.Start) {
				return response.Body, nil
			} else if response.StatusCode == http.StatusOK {
				log.Warnf("remote http server does not support range requests, expect low performance!")
				readCloser, err := net.GetRangedHttpReader(response.Body, r.Start, r.Length)
				if err != nil {
					return nil, err
				}
				return readCloser, nil
			}

			return response.Body, nil
		}

		return nil, errs.NotSupport
	}
	resultRangeReadCloser := model.RangeReadCloser{RangeReader: rangeReaderFunc}
	return &resultRangeReadCloser, nil
}

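// Illustrative sketch, not part of the commit: turning a plain URL link into a
// RangeReadCloser and fetching a single ranged chunk. The URL is hypothetical.
func exampleRangedFetch(ctx context.Context, size int64) error {
	link := &model.Link{URL: "https://example.com/file.bin"}
	rrc, err := GetRangeReadCloserFromLink(size, link)
	if err != nil {
		return err
	}
	rc, err := rrc.RangeRead(ctx, http_range.Range{Start: 0, Length: 1024})
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(io.Discard, rc)
	return err
}
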
func RequestRangedHttp(ctx context.Context, link *model.Link, offset, length int64) (*http.Response, error) {
	header := net.ProcessHeader(http.Header{}, link.Header)
	header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header)

	return net.RequestHttp(ctx, "GET", header, link.URL)
}

// 139 cloud does not properly return the 206 http status code, so add a hack here
func checkContentRange(header *http.Header, size, offset int64) bool {
	r, err2 := http_range.ParseRange(header.Get("Content-Range"), size)
	if err2 != nil {
		log.Warnf("exception trying to parse Content-Range, will ignore, err=%s", err2)
	}
	if len(r) == 1 && r[0].Start == offset {
		return true
	}
	return false
}

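// Illustrative sketch, not part of the commit: what the StatusOK fallback above
// does when a server ignores the Range header and replies 200 with the full body.
// GetRangedHttpReader discards Start bytes and caps the reader at Length,
// trading bandwidth for correctness. The function name is hypothetical.
func exampleFullBodyFallback(body io.ReadCloser) (io.ReadCloser, error) {
	// we asked for bytes 1024..2047 but got the whole file back
	return net.GetRangedHttpReader(body, 1024, 1024)
}
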
@@ -113,7 +113,7 @@ func CreateNestedFile(path string) (*os.File, error) {
}

// CreateTempFile creates a temp file from an io.Reader, and seeks back to 0
func CreateTempFile(r io.ReadCloser, size int64) (*os.File, error) {
func CreateTempFile(r io.Reader, size int64) (*os.File, error) {
	if f, ok := r.(*os.File); ok {
		return f, nil
	}

@@ -171,3 +171,10 @@ func GetMimeType(name string) string {
	}
	return "application/octet-stream"
}

// binary size units: KB = 1 << 10, MB = 1 << 20, and so on
const (
	KB = 1 << (10 * (iota + 1))
	MB
	GB
	TB
)

@@ -4,46 +4,178 @@ import (
	"crypto/md5"
	"crypto/sha1"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"errors"
	"github.com/alist-org/alist/v3/internal/errs"
	"hash"
	"io"
	"strings"
)

func GetSHA1Encode(data []byte) string {
	h := sha1.New()
	h.Write(data)
	return hex.EncodeToString(h.Sum(nil))
}

func GetSHA256Encode(data []byte) string {
	h := sha256.New()
	h.Write(data)
	return hex.EncodeToString(h.Sum(nil))
}

func GetMD5Encode(data []byte) string {
	h := md5.New()
	h.Write(data)
	return hex.EncodeToString(h.Sum(nil))
}

func GetMD5EncodeStr(data string) string {
	return GetMD5Encode([]byte(data))
	return HashData(MD5, []byte(data))
}

var DEC = map[string]string{
	"-": "+",
	"_": "/",
	".": "=",
// inspired by "github.com/rclone/rclone/fs/hash"

// ErrUnsupported should be returned by the filesystem
// if it is requested to deliver an unsupported hash type.
var ErrUnsupported = errors.New("hash type not supported")

// HashType indicates a standard hashing algorithm
type HashType struct {
	Width   int
	Name    string
	Alias   string
	NewFunc func() hash.Hash
}

func SafeAtob(data string) (string, error) {
	for k, v := range DEC {
		data = strings.ReplaceAll(data, k, v)
var (
	name2hash  = map[string]*HashType{}
	alias2hash = map[string]*HashType{}
	Supported  []*HashType
)

// RegisterHash adds a new Hash to the list and returns its Type
func RegisterHash(name, alias string, width int, newFunc func() hash.Hash) *HashType {

	newType := &HashType{
		Name:    name,
		Alias:   alias,
		Width:   width,
		NewFunc: newFunc,
	}
	bytes, err := base64.StdEncoding.DecodeString(data)

	name2hash[name] = newType
	alias2hash[alias] = newType
	Supported = append(Supported, newType)
	return newType
}

var (
	// MD5 indicates MD5 support
	MD5 = RegisterHash("md5", "MD5", 32, md5.New)

	// SHA1 indicates SHA-1 support
	SHA1 = RegisterHash("sha1", "SHA-1", 40, sha1.New)

	// SHA256 indicates SHA-256 support
	SHA256 = RegisterHash("sha256", "SHA-256", 64, sha256.New)
)

// HashData gets the hash of one hashType
func HashData(hashType *HashType, data []byte) string {
	h := hashType.NewFunc()
	h.Write(data)
	return hex.EncodeToString(h.Sum(nil))
}

// HashReader gets the hash of one hashType from a reader
func HashReader(hashType *HashType, reader io.Reader) (string, error) {
	h := hashType.NewFunc()
	_, err := io.Copy(h, reader)
	if err != nil {
		return "", errs.NewErr(err, "HashReader error")
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

// HashFile gets the hash of one hashType from a model.File
func HashFile(hashType *HashType, file io.ReadSeeker) (string, error) {
	str, err := HashReader(hashType, file)
	if err != nil {
		return "", err
	}
	return string(bytes), err
	if _, err = file.Seek(0, io.SeekStart); err != nil {
		return str, err
	}
	return str, nil
}

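// Illustrative sketch, not part of the commit: hashing a small in-memory payload
// with the registry types defined above. The function name is hypothetical.
func exampleHashReader() (string, error) {
	// the SHA-1 of "hello" is aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d
	return HashReader(SHA1, strings.NewReader("hello"))
}
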
// fromTypes will return hashers for all the requested types.
func fromTypes(types []*HashType) map[*HashType]hash.Hash {
	hashers := map[*HashType]hash.Hash{}
	for _, t := range types {
		hashers[t] = t.NewFunc()
	}
	return hashers
}

// toMultiWriter combines a set of hashers into a
// single multiwriter, where one write will update all
// the hashers.
func toMultiWriter(h map[*HashType]hash.Hash) io.Writer {
	// Convert to a slice
	var w = make([]io.Writer, 0, len(h))
	for _, v := range h {
		w = append(w, v)
	}
	return io.MultiWriter(w...)
}

// A MultiHasher will construct various hashes on all incoming writes.
type MultiHasher struct {
	w    io.Writer
	size int64
	h    map[*HashType]hash.Hash // Hashes
}

// NewMultiHasher will return a hash writer that will write
// the requested hash types.
func NewMultiHasher(types []*HashType) *MultiHasher {
	hashers := fromTypes(types)
	m := MultiHasher{h: hashers, w: toMultiWriter(hashers)}
	return &m
}

func (m *MultiHasher) Write(p []byte) (n int, err error) {
	n, err = m.w.Write(p)
	m.size += int64(n)
	return n, err
}

func (m *MultiHasher) GetHashInfo() *HashInfo {
	dst := make(map[*HashType]string)
	for k, v := range m.h {
		dst[k] = hex.EncodeToString(v.Sum(nil))
	}
	return &HashInfo{h: dst}
}

// Sum returns the specified hash from the multihasher
func (m *MultiHasher) Sum(hashType *HashType) ([]byte, error) {
	h, ok := m.h[hashType]
	if !ok {
		return nil, ErrUnsupported
	}
	return h.Sum(nil), nil
}

// Size returns the number of bytes written
func (m *MultiHasher) Size() int64 {
	return m.size
}

// A HashInfo contains the hash string for one or more hashTypes
type HashInfo struct {
	h map[*HashType]string
}

func NewHashInfo(ht *HashType, str string) HashInfo {
	m := make(map[*HashType]string)
	m[ht] = str
	return HashInfo{h: m}
}

func (hi HashInfo) String() string {
	var tmp []string
	for ht, str := range hi.h {
		if len(str) > 0 {
			tmp = append(tmp, ht.Name+":"+str)
		}
	}
	return strings.Join(tmp, "\n")
}

func (hi HashInfo) GetHash(ht *HashType) string {
	return hi.h[ht]
}

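// Illustrative sketch, not part of the commit: computing MD5 and SHA-1 in a single
// pass while the data is being uploaded, then packing the result into a HashInfo
// for the destination object. Names are hypothetical.
func exampleMultiHash(src io.Reader, dst io.Writer) (*HashInfo, error) {
	mh := NewMultiHasher([]*HashType{MD5, SHA1})
	// TeeReader feeds every byte written to dst into the hasher as well
	if _, err := io.Copy(dst, io.TeeReader(src, mh)); err != nil {
		return nil, err
	}
	return mh.GetHashInfo(), nil
}
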
@@ -0,0 +1,64 @@
package utils

import (
	"bytes"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"io"
	"testing"
)

type hashTest struct {
	input  []byte
	output map[*HashType]string
}

var hashTestSet = []hashTest{
	{
		input: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
		output: map[*HashType]string{
			MD5:    "bf13fc19e5151ac57d4252e0e0f87abe",
			SHA1:   "3ab6543c08a75f292a5ecedac87ec41642d12166",
			SHA256: "c839e57675862af5c21bd0a15413c3ec579e0d5522dab600bc6c3489b05b8f54",
		},
	},
	// Empty data set
	{
		input: []byte{},
		output: map[*HashType]string{
			MD5:    "d41d8cd98f00b204e9800998ecf8427e",
			SHA1:   "da39a3ee5e6b4b0d3255bfef95601890afd80709",
			SHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
		},
	},
}

func TestMultiHasher(t *testing.T) {
	for _, test := range hashTestSet {
		mh := NewMultiHasher([]*HashType{MD5, SHA1, SHA256})
		n, err := io.Copy(mh, bytes.NewBuffer(test.input))
		require.NoError(t, err)
		assert.Len(t, test.input, int(n))
		hashInfo := mh.GetHashInfo()
		for k, v := range hashInfo.h {
			expect, ok := test.output[k]
			require.True(t, ok, "test output for hash not found")
			assert.Equal(t, expect, v)
		}
		// Test that all are present
		for k, v := range test.output {
			expect, ok := hashInfo.h[k]
			require.True(t, ok, "test output for hash not found")
			assert.Equal(t, expect, v)
		}
		for k, v := range test.output {
			expect := hashInfo.GetHash(k)
			require.True(t, len(expect) > 0, "test output for hash not found")
			assert.Equal(t, expect, v)
		}
		expect := hashInfo.GetHash(nil)
		require.True(t, len(expect) == 0, "unknown type should return empty string")
		Log.Info(hashInfo.String())
	}
}

@@ -3,7 +3,9 @@ package utils

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"golang.org/x/exp/constraints"
	"io"
	"time"

@@ -17,7 +19,7 @@ type readerFunc func(p []byte) (n int, err error)

func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) }

// CopyWithCtx slightly modified function signature:
// - context has been added in order to propagate cancelation
// - context has been added in order to propagate cancellation
// - I do not return the number of bytes written, as it is not useful in my use case
func CopyWithCtx(ctx context.Context, out io.Writer, in io.Reader, size int64, progress func(percentage int)) error {
	// Copy will call the Reader and Writer interface multiple times, in order

@@ -132,16 +134,6 @@ func (mr *MultiReadable) Close() error {
	return nil
}

type nopCloser struct {
	io.ReadSeeker
}

func (nopCloser) Close() error { return nil }

func ReadSeekerNopCloser(r io.ReadSeeker) io.ReadSeekCloser {
	return nopCloser{r}
}

func Retry(attempts int, sleep time.Duration, f func() error) (err error) {
	for i := 0; i < attempts; i++ {
		fmt.Println("This is attempt number", i)

@@ -158,23 +150,56 @@ func Retry(attempts int, sleep time.Duration, f func() error) (err error) {
	return fmt.Errorf("after %d attempts, last error: %s", attempts, err)
}

type Closers struct {
	closers []*io.Closer
type ClosersIF interface {
	io.Closer
	Add(closer io.Closer)
	AddClosers(closers Closers)
	GetClosers() Closers
}

func (c *Closers) Close() (err error) {
type Closers struct {
	closers []io.Closer
}

func (c *Closers) GetClosers() Closers {
	return *c
}

var _ ClosersIF = (*Closers)(nil)

func (c *Closers) Close() error {
	var errs []error
	for _, closer := range c.closers {
		if closer != nil {
			_ = (*closer).Close()
			errs = append(errs, closer.Close())
		}
	}
	return nil
	return errors.Join(errs...)
}

func (c *Closers) Add(closer io.Closer) {
	if closer != nil {
		c.closers = append(c.closers, &closer)
		c.closers = append(c.closers, closer)
	}
}

func (c *Closers) AddClosers(closers Closers) {
	c.closers = append(c.closers, closers.closers...)
}

func NewClosers() *Closers {
	return &Closers{[]*io.Closer{}}
func EmptyClosers() Closers {
	return Closers{[]io.Closer{}}
}

func NewClosers(c ...io.Closer) Closers {
	return Closers{c}
}

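// Illustrative sketch, not part of the commit: the reworked Closers collects every
// close error instead of swallowing them, surfacing all failures at once via
// errors.Join. The function name is hypothetical.
func exampleClosers(a, b io.Closer) error {
	c := NewClosers(a, b)
	c.Add(nil) // nil closers are ignored by Add
	return c.Close()
}
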
func Min[T constraints.Ordered](a, b T) T {
	if a < b {
		return a
	}
	return b
}

func Max[T constraints.Ordered](a, b T) T {
	if a < b {
		return b
	}
	return a
}

@@ -1,6 +1,7 @@
package utils

import (
	"encoding/base64"
	"strings"

	"github.com/alist-org/alist/v3/internal/conf"

@@ -12,3 +13,20 @@ func MappingName(name string) string {
	}
	return name
}

var DEC = map[string]string{
	"-": "+",
	"_": "/",
	".": "=",
}

// SafeAtob decodes a URL-safe base64 string after mapping it back to standard base64
func SafeAtob(data string) (string, error) {
	for k, v := range DEC {
		data = strings.ReplaceAll(data, k, v)
	}
	bytes, err := base64.StdEncoding.DecodeString(data)
	if err != nil {
		return "", err
	}
	return string(bytes), err
}

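// Illustrative sketch, not part of the commit: decoding a URL-safe base64 token.
// "aGVsbG8." is standard base64 "aGVsbG8=" ("hello") with '=' replaced by '.'.
func exampleSafeAtob() (string, error) {
	return SafeAtob("aGVsbG8.") // -> "hello", nil
}
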
@@ -3,58 +3,35 @@ package common

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"sync"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/net"
	"github.com/alist-org/alist/v3/pkg/http_range"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/pkg/errors"
	"io"
	"net/http"
	"net/url"
)

func HttpClient() *http.Client {
	once.Do(func() {
		httpClient = base.NewHttpClient()
		httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
			if len(via) >= 10 {
				return errors.New("stopped after 10 redirects")
			}
			req.Header.Del("Referer")
			return nil
		}
	})
	return httpClient
}

var once sync.Once
var httpClient *http.Client

func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
	if link.ReadSeekCloser != nil {
	if link.MFile != nil {
		attachFileName(w, file)
		http.ServeContent(w, r, file.GetName(), file.ModTime(), link.ReadSeekCloser)
		defer link.ReadSeekCloser.Close()
		http.ServeContent(w, r, file.GetName(), file.ModTime(), link.MFile)
		defer link.MFile.Close()
		return nil
	} else if link.RangeReadCloser.RangeReader != nil {
	} else if link.RangeReadCloser != nil {
		attachFileName(w, file)
		net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), link.RangeReadCloser.RangeReader)
		net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), link.RangeReadCloser.RangeRead)
		defer func() {
			if link.RangeReadCloser.Closers != nil {
				link.RangeReadCloser.Closers.Close()
			}
			_ = link.RangeReadCloser.Close()
		}()
		return nil
	} else if link.Concurrency != 0 || link.PartSize != 0 {
		attachFileName(w, file)
		size := file.GetSize()
		//var finalClosers model.Closers
		finalClosers := utils.NewClosers()
		finalClosers := utils.EmptyClosers()
		header := net.ProcessHeader(r.Header, link.Header)
		rangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
		rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
			down := net.NewDownloader(func(d *net.Downloader) {
				d.Concurrency = link.Concurrency
				d.PartSize = link.PartSize

@@ -65,7 +42,7 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
				Size:      size,
				HeaderRef: header,
			}
			rc, err := down.Download(context.Background(), req)
			rc, err := down.Download(ctx, req)
			finalClosers.Add(rc)
			return rc, err
		}

@@ -75,7 +52,7 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
	} else {
		//transparent proxy
		header := net.ProcessHeader(r.Header, link.Header)
		res, err := net.RequestHttp(r.Method, header, link.URL)
		res, err := net.RequestHttp(context.Background(), r.Method, header, link.URL)
		if err != nil {
			return err
		}

@@ -40,13 +40,13 @@ func Down(c *gin.Context) {
		common.ErrorResp(c, err, 500)
		return
	}
	if link.ReadSeekCloser != nil {
	if link.MFile != nil {
		defer func(ReadSeekCloser io.ReadCloser) {
			err := ReadSeekCloser.Close()
			if err != nil {
				log.Errorf("close data error: %s", err)
			}
		}(link.ReadSeekCloser)
		}(link.MFile)
	}
	c.Header("Referrer-Policy", "no-referrer")
	c.Header("Cache-Control", "max-age=0, no-cache, no-store, must-revalidate")

@@ -331,13 +331,13 @@ func Link(c *gin.Context) {
		common.ErrorResp(c, err, 500)
		return
	}
	if link.ReadSeekCloser != nil {
	if link.MFile != nil {
		defer func(ReadSeekCloser io.ReadCloser) {
			err := ReadSeekCloser.Close()
			if err != nil {
				log.Errorf("close link data error: %v", err)
			}
		}(link.ReadSeekCloser)
		}(link.MFile)
	}
	common.SuccessResp(c, link)
	return

@@ -37,9 +37,11 @@ type ObjResp struct {
	Size     int64     `json:"size"`
	IsDir    bool      `json:"is_dir"`
	Modified time.Time `json:"modified"`
	Created  time.Time `json:"created"`
	Sign     string    `json:"sign"`
	Thumb    string    `json:"thumb"`
	Type     int       `json:"type"`
	HashInfo string    `json:"hashinfo"`
}

type FsListResp struct {

@@ -313,6 +315,8 @@ func FsGet(c *gin.Context) {
		Size:     obj.GetSize(),
		IsDir:    obj.IsDir(),
		Modified: obj.ModTime(),
		Created:  obj.CreateTime(),
		HashInfo: obj.GetHash().String(),
		Sign:     common.Sign(obj, parentPath, isEncrypt(meta, reqPath)),
		Type:     utils.GetFileType(obj.GetName()),
		Thumb:    thumb,

@@ -1,6 +1,7 @@
package handles

import (
	"github.com/alist-org/alist/v3/internal/stream"
	"net/url"
	stdpath "path"
	"strconv"

@@ -33,21 +34,22 @@ func FsStream(c *gin.Context) {
		common.ErrorResp(c, err, 400)
		return
	}
	stream := &model.FileStream{
	s := &stream.FileStream{
		Obj: &model.Object{
			Name:     name,
			Size:     size,
			Modified: time.Now(),
		},
		ReadCloser:   c.Request.Body,
		Reader:       c.Request.Body,
		Mimetype:     c.GetHeader("Content-Type"),
		WebPutAsTask: asTask,
	}
	if asTask {
		err = fs.PutAsTask(dir, stream)
		err = fs.PutAsTask(dir, s)
	} else {
		err = fs.PutDirectly(c, dir, stream, true)
		err = fs.PutDirectly(c, dir, s, true)
	}
	defer c.Request.Body.Close()
	if err != nil {
		common.ErrorResp(c, err, 500)
		return

@@ -89,21 +91,27 @@ func FsForm(c *gin.Context) {
		return
	}
	dir, name := stdpath.Split(path)
	stream := &model.FileStream{
	s := stream.FileStream{
		Obj: &model.Object{
			Name:     name,
			Size:     file.Size,
			Modified: time.Now(),
		},
		ReadCloser:   f,
		Reader:       f,
		Mimetype:     file.Header.Get("Content-Type"),
		WebPutAsTask: false,
	}
	if asTask {
		err = fs.PutAsTask(dir, stream)
	} else {
		err = fs.PutDirectly(c, dir, stream, true)
	ss, err := stream.NewSeekableStream(s, nil)
	if err != nil {
		common.ErrorResp(c, err, 500)
		return
	}
	if asTask {
		err = fs.PutAsTask(dir, ss)
	} else {
		err = fs.PutDirectly(c, dir, ss, true)
	}
	defer f.Close()
	if err != nil {
		common.ErrorResp(c, err, 500)
		return

@@ -131,8 +131,8 @@ var liveProps = map[xml.Name]struct {
		dir: true,
	},
	{Space: "DAV:", Local: "creationdate"}: {
		findFn: nil,
		dir:    false,
		findFn: findCreationDate,
		dir:    true,
	},
	{Space: "DAV:", Local: "getcontentlanguage"}: {
		findFn: nil,

@@ -383,6 +383,9 @@ func findContentLength(ctx context.Context, ls LockSystem, name string, fi model
func findLastModified(ctx context.Context, ls LockSystem, name string, fi model.Obj) (string, error) {
	return fi.ModTime().UTC().Format(http.TimeFormat), nil
}
func findCreationDate(ctx context.Context, ls LockSystem, name string, fi model.Obj) (string, error) {
	return fi.CreateTime().UTC().Format(http.TimeFormat), nil
}

// ErrNotImplemented should be returned by optional interfaces if they
// want the original implementation to be used.

@@ -0,0 +1,29 @@
package webdav

import (
	log "github.com/sirupsen/logrus"
	"net/http"
	"strconv"
	"time"
)

func (h *Handler) getModTime(r *http.Request) time.Time {
	return h.getHeaderTime(r, "X-OC-Mtime")
}

// OwnCloud/Nextcloud haven't implemented this, but we can add the support since rclone may support it soon
func (h *Handler) getCreateTime(r *http.Request) time.Time {
	return h.getHeaderTime(r, "X-OC-Ctime")
}

func (h *Handler) getHeaderTime(r *http.Request, header string) time.Time {
	hVal := r.Header.Get(header)
	if hVal != "" {
		modTimeUnix, err := strconv.ParseInt(hVal, 10, 64)
		if err == nil {
			return time.Unix(modTimeUnix, 0)
		}
		log.Warnf("getHeaderTime in WebDAV, failed to parse %s: %s", header, err)
	}
	return time.Now()
}

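// Illustrative sketch, not part of the commit: a client preserving file times on
// PUT, the way rclone does against OwnCloud/Nextcloud. Values are unix seconds;
// absent or unparseable headers fall back to time.Now() on the server side.
func exampleSetTimes(req *http.Request, mtime, ctime time.Time) {
	req.Header.Set("X-OC-Mtime", strconv.FormatInt(mtime.Unix(), 10))
	req.Header.Set("X-OC-Ctime", strconv.FormatInt(ctime.Unix(), 10))
}
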
@@ -8,6 +8,7 @@ package webdav // import "golang.org/x/net/webdav"

import (
	"errors"
	"fmt"
	"github.com/alist-org/alist/v3/internal/stream"
	"net/http"
	"net/url"
	"os"

@@ -321,11 +322,12 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int,
	obj := model.Object{
		Name:     path.Base(reqPath),
		Size:     r.ContentLength,
		Modified: time.Now(),
		Modified: h.getModTime(r),
		Ctime:    h.getCreateTime(r),
	}
	stream := &model.FileStream{
	stream := &stream.FileStream{
		Obj:        &obj,
		ReadCloser: r.Body,
		Reader:     r.Body,
		Mimetype:   r.Header.Get("Content-Type"),
	}
	if stream.Mimetype == "" {

@@ -336,6 +338,8 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int,
		return http.StatusNotFound, err
	}

	_ = r.Body.Close()
	_ = stream.Close()
	// TODO(rost): Returning 405 Method Not Allowed might not be appropriate.
	if err != nil {
		return http.StatusMethodNotAllowed, err