package _115

import (
	"bytes"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io"
	"net/url"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/SheltonZhu/115driver/pkg/driver"
	driver115 "github.com/SheltonZhu/115driver/pkg/driver"
	"github.com/alist-org/alist/v3/internal/conf"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/http_range"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/aliyun/aliyun-oss-go-sdk/oss"
	"github.com/orzogc/fake115uploader/cipher"
	"github.com/pkg/errors"
)

var UserAgent = driver.UA115Desktop

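// login authenticates the 115 client: a QR-code session token takes
// precedence and is exchanged for cookies; otherwise existing cookies are
// imported directly.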
func (d *Pan115) login() error {
	var err error
	opts := []driver.Option{
		driver.UA(UserAgent),
		func(c *driver.Pan115Client) {
			c.Client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
		},
	}
	d.client = driver.New(opts...)
	cr := &driver.Credential{}
	if d.Addition.QRCodeToken != "" {
		s := &driver.QRCodeSession{
			UID: d.Addition.QRCodeToken,
		}
		if cr, err = d.client.QRCodeLogin(s); err != nil {
			return errors.Wrap(err, "failed to login by qrcode")
		}
		d.Addition.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s", cr.UID, cr.CID, cr.SEID)
		d.Addition.QRCodeToken = ""
	} else if d.Addition.Cookie != "" {
		if err = cr.FromCookie(d.Addition.Cookie); err != nil {
			return errors.Wrap(err, "failed to login by cookies")
		}
		d.client.ImportCredential(cr)
	} else {
		return errors.New("missing cookie or qrcode account")
	}
	return d.client.LoginCheck()
}

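// getFiles lists every entry under fileId, paging by d.PageSize and falling
// back to the driver's default page size when unset.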
func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
	res := make([]FileObj, 0)
	if d.PageSize <= 0 {
		d.PageSize = driver.FileListLimit
	}
	files, err := d.client.ListWithLimit(fileId, d.PageSize)
	if err != nil {
		return nil, err
	}
	for _, file := range *files {
		res = append(res, FileObj{file})
	}
	return res, nil
}

const (
	// appVer is the client version reported in upload requests.
	appVer = "2.0.3.6"
)

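// rapidUpload attempts 115's hash-based instant upload: the file metadata is
// sent over an ECDH-encrypted initupload request, and when the server answers
// with status 7 the request is re-signed with a digest of the byte range
// named in sign_check before retrying.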
func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) {
	var (
		ecdhCipher   *cipher.EcdhCipher
		encrypted    []byte
		decrypted    []byte
		encodedToken string
		err          error
		target       = "U_1_" + dirID
		bodyBytes    []byte
		result       = driver115.UploadInitResp{}
		fileSizeStr  = strconv.FormatInt(fileSize, 10)
	)
	if ecdhCipher, err = cipher.NewEcdhCipher(); err != nil {
		return nil, err
	}

	userID := strconv.FormatInt(d.client.UserID, 10)
	form := url.Values{}
	form.Set("appid", "0")
	form.Set("appversion", appVer)
	form.Set("userid", userID)
	form.Set("filename", fileName)
	form.Set("filesize", fileSizeStr)
	form.Set("fileid", fileID)
	form.Set("target", target)
	form.Set("sig", d.client.GenerateSignature(fileID, target))

	signKey, signVal := "", ""
	for retry := true; retry; {
		t := driver115.Now()

		if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil {
			return nil, err
		}

		params := map[string]string{
			"k_ec": encodedToken,
		}

		form.Set("t", t.String())
		form.Set("token", d.client.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
		if signKey != "" && signVal != "" {
			form.Set("sign_key", signKey)
			form.Set("sign_val", signVal)
		}
		if encrypted, err = ecdhCipher.Encrypt([]byte(form.Encode())); err != nil {
			return nil, err
		}

		req := d.client.NewRequest().
			SetQueryParams(params).
			SetBody(encrypted).
			SetHeaderVerbatim("Content-Type", "application/x-www-form-urlencoded").
			SetDoNotParseResponse(true)
		resp, err := req.Post(driver115.ApiUploadInit)
		if err != nil {
			return nil, err
		}
		data := resp.RawBody()
		defer data.Close()
		if bodyBytes, err = io.ReadAll(data); err != nil {
			return nil, err
		}
		if decrypted, err = ecdhCipher.Decrypt(bodyBytes); err != nil {
			return nil, err
		}
		if err = driver115.CheckErr(json.Unmarshal(decrypted, &result), &result, resp); err != nil {
			return nil, err
		}
		if result.Status == 7 {
			// Status 7 is a sign-check challenge: update signKey and signVal
			// with the digest of the requested byte range, then retry.
			signKey = result.SignKey
			signVal, err = UploadDigestRange(stream, result.SignCheck)
			if err != nil {
				return nil, err
			}
		} else {
			retry = false
		}
		result.SHA1 = fileID
	}

	return &result, nil
}

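// UploadDigestRange hashes the byte range requested by the server's
// sign_check challenge (formatted "start-end", inclusive) and returns the
// uppercase SHA-1 digest.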
func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result string, err error) {
	var start, end int64
	if _, err = fmt.Sscanf(rangeSpec, "%d-%d", &start, &end); err != nil {
		return
	}

	length := end - start + 1
	reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length})
	if err != nil {
		return "", err
	}
	hashStr, err := utils.HashReader(utils.SHA1, reader)
	if err != nil {
		return "", err
	}
	result = strings.ToUpper(hashStr)
	return
}

// UploadByMultipart uploads the file to OSS in multipart chunks.
func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error {
	var (
		chunks    []oss.FileChunk
		parts     []oss.UploadPart
		imur      oss.InitiateMultipartUploadResult
		ossClient *oss.Client
		bucket    *oss.Bucket
		ossToken  *driver115.UploadOSSTokenResp
		err       error
	)

	tmpF, err := stream.CacheFullInTempFile()
	if err != nil {
		return err
	}

	options := driver115.DefalutUploadMultipartOptions()
	if len(opts) > 0 {
		for _, f := range opts {
			f(options)
		}
	}

	if ossToken, err = d.client.GetOSSToken(); err != nil {
		return err
	}

	if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil {
		return err
	}

	if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
		return err
	}

	// The ossToken expires after one hour, so refresh it periodically
	// (every 50 minutes by default).
	ticker := time.NewTicker(options.TokenRefreshTime)
	defer ticker.Stop()
	// Overall upload timeout.
	timeout := time.NewTimer(options.Timeout)

	if chunks, err = SplitFile(fileSize); err != nil {
		return err
	}

	if imur, err = bucket.InitiateMultipartUpload(params.Object,
		oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
		oss.UserAgentHeader(driver115.OSSUserAgent),
	); err != nil {
		return err
	}

	wg := sync.WaitGroup{}
	wg.Add(len(chunks))

	chunksCh := make(chan oss.FileChunk)
	errCh := make(chan error)
	UploadedPartsCh := make(chan oss.UploadPart)
	quit := make(chan struct{})

	// producer
	go chunksProducer(chunksCh, chunks)
	go func() {
		wg.Wait()
		quit <- struct{}{}
	}()

	// consumers
	for i := 0; i < options.ThreadsNum; i++ {
		go func(threadId int) {
			defer func() {
				if r := recover(); r != nil {
					errCh <- fmt.Errorf("recovered in %v", r)
				}
			}()
			for chunk := range chunksCh {
				var part oss.UploadPart // retry on error, up to 3 attempts per part
				for retry := 0; retry < 3; retry++ {
					select {
					case <-ticker.C:
						// Refresh the ossToken when the ticker fires.
						if ossToken, err = d.client.GetOSSToken(); err != nil {
							errCh <- errors.Wrap(err, "failed to refresh ossToken")
						}
					default:
					}

					buf := make([]byte, chunk.Size)
					if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
						continue
					}

					b := bytes.NewBuffer(buf)
					if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
						break
					}
				}
				if err != nil {
					errCh <- errors.Wrap(err, fmt.Sprintf("failed to upload part %d of %s", chunk.Number, stream.GetName()))
				}
				UploadedPartsCh <- part
			}
		}(i)
	}

	go func() {
		for part := range UploadedPartsCh {
			parts = append(parts, part)
			wg.Done()
		}
	}()
LOOP:
	for {
		select {
		case <-ticker.C:
			// Refresh the ossToken when the ticker fires.
			if ossToken, err = d.client.GetOSSToken(); err != nil {
				return err
			}
		case <-quit:
			break LOOP
		case uploadErr := <-errCh:
			return uploadErr
		case <-timeout.C:
			return fmt.Errorf("upload timed out")
		}
	}

	// The io.EOF here comes from xml.Unmarshal: the response body is actually
	// JSON, so despite the parse error the upload has succeeded.
	if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) {
		// When the file name contains '&' or '<', parsing the XML response
		// fails even though the upload itself succeeded.
		if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
			return err
		}
	}
	return d.checkUploadStatus(dirID, params.SHA1)
}

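// chunksProducer feeds the precomputed chunks to the upload workers.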
func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
	for _, chunk := range chunks {
		ch <- chunk
	}
	// Close the channel so consumer goroutines can exit their range loops.
	close(ch)
}

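// checkUploadStatus verifies the upload by listing dirID and looking for a
// file with the expected SHA-1.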
func (d *Pan115) checkUploadStatus(dirID, sha1 string) error {
	// Check the newest entries in dirID for the uploaded file's SHA-1.
	req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8")
	opts := []driver115.GetFileOptions{
		driver115.WithOrder(driver115.FileOrderByTime),
		driver115.WithShowDirEnable(false),
		driver115.WithAsc(false),
		driver115.WithLimit(500),
	}
	fResp, err := driver115.GetFiles(req, dirID, opts...)
	if err != nil {
		return err
	}
	for _, fileInfo := range fResp.Files {
		if fileInfo.Sha1 == sha1 {
			return nil
		}
	}
	return driver115.ErrUploadFailed
}

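// SplitFile picks a chunking plan by file size: files under i GB (i = 1..9)
// get i*1000 parts, files of 9 GB or more get 10000 parts, and the part size
// is clamped to at least 100 KB. For example (hypothetical size), a 2.5 GB
// file falls in the 3 GB bucket and is split into 3000 parts of roughly
// 0.83 MB each.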
func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
	for i := int64(1); i < 10; i++ {
		if fileSize < i*utils.GB { // split files smaller than i GB into i*1000 parts
			if chunks, err = SplitFileByPartNum(fileSize, int(i*1000)); err != nil {
				return
			}
			break
		}
	}
	// Use >= so a file of exactly 9 GB still gets a plan (with > it would
	// fall through both branches and leave chunks nil).
	if fileSize >= 9*utils.GB { // split files of 9 GB or more into 10000 parts
		if chunks, err = SplitFileByPartNum(fileSize, 10000); err != nil {
			return
		}
	}
	// A single part must be at least 100 KB.
	if chunks[0].Size < 100*utils.KB {
		if chunks, err = SplitFileByPartSize(fileSize, 100*utils.KB); err != nil {
			return
		}
	}
	return
}

// SplitFileByPartNum splits a big file into parts by the number of parts.
// Splits the file into the specified part count; returns the split result
// when error is nil.
func SplitFileByPartNum(fileSize int64, chunkNum int) ([]oss.FileChunk, error) {
	if chunkNum <= 0 || chunkNum > 10000 {
		return nil, errors.New("chunkNum invalid")
	}

	if int64(chunkNum) > fileSize {
		return nil, errors.New("oss: chunkNum exceeds file size")
	}

	var chunks []oss.FileChunk
	var chunk = oss.FileChunk{}
	var chunkN = (int64)(chunkNum)
	for i := int64(0); i < chunkN; i++ {
		chunk.Number = int(i + 1)
		chunk.Offset = i * (fileSize / chunkN)
		if i == chunkN-1 {
			// The last part absorbs the remainder.
			chunk.Size = fileSize/chunkN + fileSize%chunkN
		} else {
			chunk.Size = fileSize / chunkN
		}
		chunks = append(chunks, chunk)
	}

	return chunks, nil
}

// SplitFileByPartSize splits a big file into parts by the size of parts.
// Splits the file by the given part size; returns the FileChunk slice when
// error is nil.
func SplitFileByPartSize(fileSize int64, chunkSize int64) ([]oss.FileChunk, error) {
	if chunkSize <= 0 {
		return nil, errors.New("chunkSize invalid")
	}

	var chunkN = fileSize / chunkSize
	if chunkN >= 10000 {
		return nil, errors.New("too many parts, please increase part size")
	}

	var chunks []oss.FileChunk
	var chunk = oss.FileChunk{}
	for i := int64(0); i < chunkN; i++ {
		chunk.Number = int(i + 1)
		chunk.Offset = i * chunkSize
		chunk.Size = chunkSize
		chunks = append(chunks, chunk)
	}

	// The trailing remainder becomes a final, smaller part.
	if fileSize%chunkSize > 0 {
		chunk.Number = len(chunks) + 1
		chunk.Offset = int64(len(chunks)) * chunkSize
		chunk.Size = fileSize % chunkSize
		chunks = append(chunks, chunk)
	}

	return chunks, nil
}