Compare commits

..

1 Commits

Author SHA1 Message Date
renovate[bot] d428eeb737
fix(deps): update module github.com/larksuite/oapi-sdk-go/v3 to v3.3.4 2024-09-25 10:32:39 +00:00
393 changed files with 3441 additions and 32953 deletions

2
.github/FUNDING.yml vendored
View File

@ -10,4 +10,4 @@ liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: ['https://alistgo.com/guide/sponsor.html'] custom: ['https://alist.nn.ci/guide/sponsor.html']

View File

@ -16,14 +16,14 @@ body:
您必须勾选以下所有内容否则您的issue可能会被直接关闭。或者您可以去[讨论区](https://github.com/alist-org/alist/discussions) 您必须勾选以下所有内容否则您的issue可能会被直接关闭。或者您可以去[讨论区](https://github.com/alist-org/alist/discussions)
options: options:
- label: | - label: |
I have read the [documentation](https://alistgo.com). I have read the [documentation](https://alist.nn.ci).
我已经阅读了[文档](https://alistgo.com)。 我已经阅读了[文档](https://alist.nn.ci)。
- label: | - label: |
I'm sure there are no duplicate issues or discussions. I'm sure there are no duplicate issues or discussions.
我确定没有重复的issue或讨论。 我确定没有重复的issue或讨论。
- label: | - label: |
I'm sure it's due to `AList` and not something else(such as [Network](https://alistgo.com/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`). I'm sure it's due to `AList` and not something else(such as [Network](https://alist.nn.ci/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
我确定是`AList`的问题,而不是其他原因(例如[网络](https://alistgo.com/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host)`依赖`或`操作`)。 我确定是`AList`的问题,而不是其他原因(例如[网络](https://alist.nn.ci/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host)`依赖`或`操作`)。
- label: | - label: |
I'm sure this issue is not fixed in the latest version. I'm sure this issue is not fixed in the latest version.
我确定这个问题在最新版本中没有被修复。 我确定这个问题在最新版本中没有被修复。

View File

@ -7,7 +7,7 @@ body:
label: Please make sure of the following things label: Please make sure of the following things
description: You may select more than one, even select all. description: You may select more than one, even select all.
options: options:
- label: I have read the [documentation](https://alistgo.com). - label: I have read the [documentation](https://alist.nn.ci).
- label: I'm sure there are no duplicate issues or discussions. - label: I'm sure there are no duplicate issues or discussions.
- label: I'm sure this feature is not implemented. - label: I'm sure this feature is not implemented.
- label: I'm sure it's a reasonable and popular requirement. - label: I'm sure it's a reasonable and popular requirement.

View File

@ -8,9 +8,6 @@ concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true cancel-in-progress: true
permissions:
contents: write
jobs: jobs:
changelog: changelog:
strategy: strategy:
@ -57,7 +54,7 @@ jobs:
strategy: strategy:
matrix: matrix:
include: include:
- target: '!(*musl*|*windows-arm64*|*android*|*freebsd*)' # xgo - target: '!(*musl*|*windows-arm64*|*android*)' # xgo
hash: "md5" hash: "md5"
- target: 'linux-!(arm*)-musl*' #musl-not-arm - target: 'linux-!(arm*)-musl*' #musl-not-arm
hash: "md5-linux-musl" hash: "md5-linux-musl"
@ -67,9 +64,6 @@ jobs:
hash: "md5-windows-arm64" hash: "md5-windows-arm64"
- target: 'android-*' #android - target: 'android-*' #android
hash: "md5-android" hash: "md5-android"
- target: 'freebsd-*' #freebsd
hash: "md5-freebsd"
name: Beta Release name: Beta Release
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
@ -87,17 +81,12 @@ jobs:
run: bash build.sh dev web run: bash build.sh dev web
- name: Build - name: Build
id: test-action
uses: go-cross/cgo-actions@v1 uses: go-cross/cgo-actions@v1
with: with:
targets: ${{ matrix.target }} targets: ${{ matrix.target }}
musl-target-format: $os-$musl-$arch musl-target-format: $os-$musl-$arch
out-dir: build out-dir: build
x-flags: |
github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at
github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe
github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit
github.com/alist-org/alist/v3/internal/conf.Version=$tag
github.com/alist-org/alist/v3/internal/conf.WebVersion=dev
- name: Compress - name: Compress
run: | run: |
@ -116,23 +105,14 @@ jobs:
name: Beta Release Desktop name: Beta Release Desktop
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout repo - uses: peter-evans/create-or-update-comment@v4
uses: actions/checkout@v4
with: with:
repository: AlistGo/desktop-release issue-number: 69
ref: main body: |
persist-credentials: false /release-beta
fetch-depth: 0 - triggered by @${{ github.actor }}
- commit sha: ${{ github.sha }}
- name: Commit - view files: https://github.com/alist-org/alist/tree/${{ github.sha }}
run: | reactions: 'rocket'
git config --local user.email "bot@nn.ci" token: ${{ secrets.MY_TOKEN }}
git config --local user.name "IlaBot" repository: alist-org/desktop-release
git commit --allow-empty -m "Trigger build for ${{ github.sha }}"
- name: Push commit
uses: ad-m/github-push-action@master
with:
github_token: ${{ secrets.MY_TOKEN }}
branch: main
repository: AlistGo/desktop-release

View File

@ -15,19 +15,14 @@ jobs:
strategy: strategy:
matrix: matrix:
platform: [ubuntu-latest] platform: [ubuntu-latest]
target: go-version: [ '1.21' ]
- darwin-amd64
- darwin-arm64
- windows-amd64
- linux-arm64-musl
- linux-amd64-musl
- windows-arm64
- android-arm64
name: Build name: Build
runs-on: ${{ matrix.platform }} runs-on: ${{ matrix.platform }}
env:
GOPROXY: https://proxy.golang.org,direct
steps: steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
@ -35,29 +30,19 @@ jobs:
- uses: benjlevesque/short-sha@v3.0 - uses: benjlevesque/short-sha@v3.0
id: short-sha id: short-sha
- name: Setup Go - name: Install dependencies
uses: actions/setup-go@v5 run: |
with: sudo snap install zig --classic --beta
go-version: '1.22' docker pull crazymax/xgo:latest
go install github.com/crazy-max/xgo@latest
- name: Setup web sudo apt install upx
run: bash build.sh dev web
- name: Build - name: Build
uses: go-cross/cgo-actions@v1 run: |
with: bash build.sh dev
targets: ${{ matrix.target }}
musl-target-format: $os-$musl-$arch
out-dir: build
x-flags: |
github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at
github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe
github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit
github.com/alist-org/alist/v3/internal/conf.Version=$tag
github.com/alist-org/alist/v3/internal/conf.WebVersion=dev
- name: Upload artifact - name: Upload artifact
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
name: alist_${{ env.SHA }}_${{ matrix.target }} name: alist_${{ env.SHA }}
path: build/* path: dist

126
.github/workflows/build_docker.yml vendored Normal file
View File

@ -0,0 +1,126 @@
name: build_docker
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
build_docker:
name: Build Docker
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: xhofe/alist
tags: |
type=schedule
type=ref,event=branch
type=ref,event=tag
type=ref,event=pr
type=raw,value=beta,enable={{is_default_branch}}
- name: Docker meta with ffmpeg
id: meta-ffmpeg
uses: docker/metadata-action@v5
with:
images: xhofe/alist
flavor: |
suffix=-ffmpeg
tags: |
type=schedule
type=ref,event=branch
type=ref,event=tag
type=ref,event=pr
type=raw,value=beta,enable={{is_default_branch}}
- uses: actions/setup-go@v5
with:
go-version: 'stable'
- name: Cache Musl
id: cache-musl
uses: actions/cache@v4
with:
path: build/musl-libs
key: docker-musl-libs
- name: Download Musl Library
if: steps.cache-musl.outputs.cache-hit != 'true'
run: bash build.sh prepare docker-multiplatform
- name: Build go binary
run: bash build.sh dev docker-multiplatform
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to DockerHub
if: github.event_name == 'push'
uses: docker/login-action@v3
with:
username: xhofe
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
id: docker_build
uses: docker/build-push-action@v6
with:
context: .
file: Dockerfile.ci
push: ${{ github.event_name == 'push' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x
- name: Build and push with ffmpeg
id: docker_build_ffmpeg
uses: docker/build-push-action@v6
with:
context: .
file: Dockerfile.ci
push: ${{ github.event_name == 'push' }}
tags: ${{ steps.meta-ffmpeg.outputs.tags }}
labels: ${{ steps.meta-ffmpeg.outputs.labels }}
build-args: INSTALL_FFMPEG=true
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x
build_docker_with_aria2:
needs: build_docker
name: Build docker with aria2
runs-on: ubuntu-latest
if: github.event_name == 'push'
steps:
- name: Checkout repo
uses: actions/checkout@v4
with:
repository: alist-org/with_aria2
ref: main
persist-credentials: false
fetch-depth: 0
- name: Commit
run: |
git config --local user.email "bot@nn.ci"
git config --local user.name "IlaBot"
git commit --allow-empty -m "Trigger build for ${{ github.sha }}"
- name: Push commit
uses: ad-m/github-push-action@master
with:
github_token: ${{ secrets.MY_TOKEN }}
branch: main
repository: alist-org/with_aria2

View File

@ -13,23 +13,6 @@ jobs:
name: Release name: Release
runs-on: ${{ matrix.platform }} runs-on: ${{ matrix.platform }}
steps: steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: false
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: Prerelease - name: Prerelease
uses: irongut/EditRelease@v1.2.0 uses: irongut/EditRelease@v1.2.0
with: with:
@ -72,7 +55,7 @@ jobs:
- name: Checkout repo - name: Checkout repo
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
repository: AlistGo/desktop-release repository: alist-org/desktop-release
ref: main ref: main
persist-credentials: false persist-credentials: false
fetch-depth: 0 fetch-depth: 0
@ -89,4 +72,4 @@ jobs:
with: with:
github_token: ${{ secrets.MY_TOKEN }} github_token: ${{ secrets.MY_TOKEN }}
branch: main branch: main
repository: AlistGo/desktop-release repository: alist-org/desktop-release

View File

@ -4,35 +4,10 @@ on:
push: push:
tags: tags:
- 'v*' - 'v*'
branches:
- main
pull_request:
branches:
- main
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
env:
REGISTRY: 'xhofe/alist'
REGISTRY_USERNAME: 'xhofe'
REGISTRY_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
GITHUB_CR_REPO: ghcr.io/${{ github.repository }}
ARTIFACT_NAME: 'binaries_docker_release'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
IMAGE_PUSH: ${{ github.event_name == 'push' }}
IMAGE_IS_PROD: ${{ github.ref_type == 'tag' }}
IMAGE_TAGS_BETA: |
type=schedule
type=ref,event=branch
type=ref,event=tag
type=ref,event=pr
type=raw,value=beta,enable={{is_default_branch}}
jobs: jobs:
build_binary: release_docker:
name: Build Binaries for Docker Release name: Release Docker
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
@ -47,59 +22,20 @@ jobs:
uses: actions/cache@v4 uses: actions/cache@v4
with: with:
path: build/musl-libs path: build/musl-libs
key: docker-musl-libs-v2 key: docker-musl-libs
- name: Download Musl Library - name: Download Musl Library
if: steps.cache-musl.outputs.cache-hit != 'true' if: steps.cache-musl.outputs.cache-hit != 'true'
run: bash build.sh prepare docker-multiplatform run: bash build.sh prepare docker-multiplatform
- name: Build go binary (beta) - name: Build go binary
if: env.IMAGE_IS_PROD != 'true'
run: bash build.sh beta docker-multiplatform
- name: Build go binary (release)
if: env.IMAGE_IS_PROD == 'true'
run: bash build.sh release docker-multiplatform run: bash build.sh release docker-multiplatform
- name: Upload artifacts - name: Docker meta
uses: actions/upload-artifact@v4 id: meta
uses: docker/metadata-action@v5
with: with:
name: ${{ env.ARTIFACT_NAME }} images: xhofe/alist
overwrite: true
path: |
build/
!build/*.tgz
!build/musl-libs/**
release_docker:
needs: build_binary
name: Release Docker image
runs-on: ubuntu-latest
strategy:
matrix:
image: ["latest", "ffmpeg", "aria2", "aio"]
include:
- image: "latest"
build_arg: ""
tag_favor: ""
- image: "ffmpeg"
build_arg: INSTALL_FFMPEG=true
tag_favor: "suffix=-ffmpeg,onlatest=true"
- image: "aria2"
build_arg: INSTALL_ARIA2=true
tag_favor: "suffix=-aria2,onlatest=true"
- image: "aio"
build_arg: |
INSTALL_FFMPEG=true
INSTALL_ARIA2=true
tag_favor: "suffix=-aio,onlatest=true"
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: ${{ env.ARTIFACT_NAME }}
path: 'build/'
- name: Set up QEMU - name: Set up QEMU
uses: docker/setup-qemu-action@v3 uses: docker/setup-qemu-action@v3
@ -108,32 +44,10 @@ jobs:
uses: docker/setup-buildx-action@v3 uses: docker/setup-buildx-action@v3
- name: Login to DockerHub - name: Login to DockerHub
if: env.IMAGE_PUSH == 'true'
uses: docker/login-action@v3 uses: docker/login-action@v3
with: with:
logout: true username: xhofe
username: ${{ env.REGISTRY_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }}
password: ${{ env.REGISTRY_PASSWORD }}
- name: Login to GHCR
uses: docker/login-action@v3
with:
logout: true
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
${{ env.REGISTRY }}
${{ env.GITHUB_CR_REPO }}
tags: ${{ env.IMAGE_IS_PROD == 'true' && '' || env.IMAGE_TAGS_BETA }}
flavor: |
${{ env.IMAGE_IS_PROD == 'true' && 'latest=true' || '' }}
${{ matrix.tag_favor }}
- name: Build and push - name: Build and push
id: docker_build id: docker_build
@ -141,8 +55,54 @@ jobs:
with: with:
context: . context: .
file: Dockerfile.ci file: Dockerfile.ci
push: ${{ env.IMAGE_PUSH == 'true' }} push: true
build-args: ${{ matrix.build_arg }}
tags: ${{ steps.meta.outputs.tags }} tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }} labels: ${{ steps.meta.outputs.labels }}
platforms: ${{ env.RELEASE_PLATFORMS }} platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x
- name: Docker meta with ffmpeg
id: meta-ffmpeg
uses: docker/metadata-action@v5
with:
images: xhofe/alist
flavor: |
latest=true
suffix=-ffmpeg,onlatest=true
- name: Build and push with ffmpeg
id: docker_build_ffmpeg
uses: docker/build-push-action@v6
with:
context: .
file: Dockerfile.ci
push: true
tags: ${{ steps.meta-ffmpeg.outputs.tags }}
labels: ${{ steps.meta-ffmpeg.outputs.labels }}
build-args: INSTALL_FFMPEG=true
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x
release_docker_with_aria2:
needs: release_docker
name: Release docker with aria2
runs-on: ubuntu-latest
steps:
- name: Checkout repo
uses: actions/checkout@v4
with:
repository: alist-org/with_aria2
ref: main
persist-credentials: false
fetch-depth: 0
- name: Add tag
run: |
git config --local user.email "bot@nn.ci"
git config --local user.name "IlaBot"
git tag -a ${{ github.ref_name }} -m "release ${{ github.ref_name }}"
- name: Push tags
uses: ad-m/github-push-action@master
with:
github_token: ${{ secrets.MY_TOKEN }}
branch: main
repository: alist-org/with_aria2

View File

@ -1,34 +0,0 @@
name: release_freebsd
on:
release:
types: [ published ]
jobs:
release_freebsd:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release freebsd
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*

View File

@ -10,7 +10,6 @@ RUN bash build.sh release docker
FROM alpine:edge FROM alpine:edge
ARG INSTALL_FFMPEG=false ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false
LABEL MAINTAINER="i@nn.ci" LABEL MAINTAINER="i@nn.ci"
WORKDIR /opt/alist/ WORKDIR /opt/alist/
@ -19,24 +18,13 @@ RUN apk update && \
apk upgrade --no-cache && \ apk upgrade --no-cache && \
apk add --no-cache bash ca-certificates su-exec tzdata; \ apk add --no-cache bash ca-certificates su-exec tzdata; \
[ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \ [ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
[ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
mkdir -p /opt/aria2/.aria2 && \
wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
touch /opt/aria2/.aria2/aria2.session && \
/opt/aria2/.aria2/tracker.sh ; \
rm -rf /var/cache/apk/* rm -rf /var/cache/apk/*
COPY --chmod=755 --from=builder /app/bin/alist ./ COPY --from=builder /app/bin/alist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh COPY entrypoint.sh /entrypoint.sh
RUN /entrypoint.sh version RUN chmod +x /entrypoint.sh && /entrypoint.sh version
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2} ENV PUID=0 PGID=0 UMASK=022
VOLUME /opt/alist/data/ VOLUME /opt/alist/data/
EXPOSE 5244 5245 EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ] CMD [ "/entrypoint.sh" ]

View File

@ -1,8 +1,7 @@
FROM alpine:3.20.7 FROM alpine:edge
ARG TARGETPLATFORM ARG TARGETPLATFORM
ARG INSTALL_FFMPEG=false ARG INSTALL_FFMPEG=false
ARG INSTALL_ARIA2=false
LABEL MAINTAINER="i@nn.ci" LABEL MAINTAINER="i@nn.ci"
WORKDIR /opt/alist/ WORKDIR /opt/alist/
@ -11,24 +10,13 @@ RUN apk update && \
apk upgrade --no-cache && \ apk upgrade --no-cache && \
apk add --no-cache bash ca-certificates su-exec tzdata; \ apk add --no-cache bash ca-certificates su-exec tzdata; \
[ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \ [ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
[ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
mkdir -p /opt/aria2/.aria2 && \
wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
touch /opt/aria2/.aria2/aria2.session && \
/opt/aria2/.aria2/tracker.sh ; \
rm -rf /var/cache/apk/* rm -rf /var/cache/apk/*
COPY --chmod=755 /build/${TARGETPLATFORM}/alist ./ COPY /build/${TARGETPLATFORM}/alist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh COPY entrypoint.sh /entrypoint.sh
RUN /entrypoint.sh version RUN chmod +x /entrypoint.sh && /entrypoint.sh version
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2} ENV PUID=0 PGID=0 UMASK=022
VOLUME /opt/alist/data/ VOLUME /opt/alist/data/
EXPOSE 5244 5245 EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ] CMD [ "/entrypoint.sh" ]

View File

@ -1,5 +1,5 @@
<div align="center"> <div align="center">
<a href="https://alistgo.com"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a> <a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<p><em>🗂A file list program that supports multiple storages, powered by Gin and Solidjs.</em></p> <p><em>🗂A file list program that supports multiple storages, powered by Gin and Solidjs.</em></p>
<div> <div>
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3"> <a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
@ -31,7 +31,7 @@
<a href="https://hub.docker.com/r/xhofe/alist"> <a href="https://hub.docker.com/r/xhofe/alist">
<img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" /> <img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
</a> </a>
<a href="https://alistgo.com/guide/sponsor.html"> <a href="https://alist.nn.ci/guide/sponsor.html">
<img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" /> <img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
</a> </a>
</div> </div>
@ -39,7 +39,7 @@
--- ---
English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md) English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)
## Features ## Features
@ -57,10 +57,8 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage) - [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
- [x] WebDav(Support OneDrive/SharePoint without API) - [x] WebDav(Support OneDrive/SharePoint without API)
- [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ )) - [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack](https://www.mediatrack.cn/) - [x] [Mediatrack](https://www.mediatrack.cn/)
- [x] [ProtonDrive](https://proton.me/drive) - [x] [139yun](https://yun.139.com/) (Personal, Family)
- [x] [139yun](https://yun.139.com/) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com/) - [x] [YandexDisk](https://disk.yandex.com/)
- [x] [BaiduNetdisk](http://pan.baidu.com/) - [x] [BaiduNetdisk](http://pan.baidu.com/)
- [x] [Terabox](https://www.terabox.com/main) - [x] [Terabox](https://www.terabox.com/main)
@ -79,7 +77,6 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
- [x] [Dropbox](https://www.dropbox.com/) - [x] [Dropbox](https://www.dropbox.com/)
- [x] [FeijiPan](https://www.feijipan.com/) - [x] [FeijiPan](https://www.feijipan.com/)
- [x] [dogecloud](https://www.dogecloud.com/product/oss) - [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] Easy to deploy and out-of-the-box - [x] Easy to deploy and out-of-the-box
- [x] File preview (PDF, markdown, code, plain text, ...) - [x] File preview (PDF, markdown, code, plain text, ...)
- [x] Image preview in gallery mode - [x] Image preview in gallery mode
@ -90,7 +87,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
- [x] Dark mode - [x] Dark mode
- [x] I18n - [x] I18n
- [x] Protected routes (password protection and authentication) - [x] Protected routes (password protection and authentication)
- [x] WebDav (see https://alistgo.com/guide/webdav.html for details) - [x] WebDav (see https://alist.nn.ci/guide/webdav.html for details)
- [x] [Docker Deploy](https://hub.docker.com/r/xhofe/alist) - [x] [Docker Deploy](https://hub.docker.com/r/xhofe/alist)
- [x] Cloudflare Workers proxy - [x] Cloudflare Workers proxy
- [x] File/Folder package download - [x] File/Folder package download
@ -101,11 +98,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
## Document ## Document
<https://alistgo.com/> <https://alist.nn.ci/>
## API Documentation (via Apifox):
<https://alist-public.apifox.cn/>
## Demo ## Demo
@ -118,11 +111,13 @@ Please go to our [discussion forum](https://github.com/alist-org/alist/discussio
## Sponsor ## Sponsor
AList is an open-source software, if you happen to like this project and want me to keep going, please consider sponsoring me or providing a single donation! Thanks for all the love and support: AList is an open-source software, if you happen to like this project and want me to keep going, please consider sponsoring me or providing a single donation! Thanks for all the love and support:
https://alistgo.com/guide/sponsor.html https://alist.nn.ci/guide/sponsor.html
### Special sponsors ### Special sponsors
- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV. - [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (sponsored Chinese API server)
- [找资源](http://zhaoziyuan2.cc/) - 阿里云盘资源搜索引擎
## Contributors ## Contributors
@ -143,4 +138,4 @@ The `AList` is open-source software licensed under the AGPL-3.0 license.
--- ---
> [@GitHub](https://github.com/alist-org) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2) > [@Blog](https://nn.ci/) · [@GitHub](https://github.com/alist-org) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2)

View File

@ -1,5 +1,5 @@
<div align="center"> <div align="center">
<a href="https://alistgo.com"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a> <a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<p><em>🗂一个支持多存储的文件列表程序,使用 Gin 和 Solidjs。</em></p> <p><em>🗂一个支持多存储的文件列表程序,使用 Gin 和 Solidjs。</em></p>
<div> <div>
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3"> <a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
@ -31,7 +31,7 @@
<a href="https://hub.docker.com/r/xhofe/alist"> <a href="https://hub.docker.com/r/xhofe/alist">
<img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" /> <img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
</a> </a>
<a href="https://alistgo.com/zh/guide/sponsor.html"> <a href="https://alist.nn.ci/zh/guide/sponsor.html">
<img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" /> <img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
</a> </a>
</div> </div>
@ -57,10 +57,8 @@
- [x] [又拍云对象存储](https://www.upyun.com/products/file-storage) - [x] [又拍云对象存储](https://www.upyun.com/products/file-storage)
- [x] WebDav(支持无API的OneDrive/SharePoint) - [x] WebDav(支持无API的OneDrive/SharePoint)
- [x] Teambition[中国](https://www.teambition.com/ )[国际](https://us.teambition.com/ ) - [x] Teambition[中国](https://www.teambition.com/ )[国际](https://us.teambition.com/ )
- [x] [MediaFire](https://www.mediafire.com)
- [x] [分秒帧](https://www.mediatrack.cn/) - [x] [分秒帧](https://www.mediatrack.cn/)
- [x] [ProtonDrive](https://proton.me/drive) - [x] [和彩云](https://yun.139.com/) (个人云, 家庭云)
- [x] [和彩云](https://yun.139.com/) (个人云, 家庭云,共享群组)
- [x] [Yandex.Disk](https://disk.yandex.com/) - [x] [Yandex.Disk](https://disk.yandex.com/)
- [x] [百度网盘](http://pan.baidu.com/) - [x] [百度网盘](http://pan.baidu.com/)
- [x] [UC网盘](https://drive.uc.cn) - [x] [UC网盘](https://drive.uc.cn)
@ -88,7 +86,7 @@
- [x] 黑暗模式 - [x] 黑暗模式
- [x] 国际化 - [x] 国际化
- [x] 受保护的路由(密码保护和身份验证) - [x] 受保护的路由(密码保护和身份验证)
- [x] WebDav (具体见 https://alistgo.com/zh/guide/webdav.html) - [x] WebDav (具体见 https://alist.nn.ci/zh/guide/webdav.html)
- [x] [Docker 部署](https://hub.docker.com/r/xhofe/alist) - [x] [Docker 部署](https://hub.docker.com/r/xhofe/alist)
- [x] Cloudflare workers 中转 - [x] Cloudflare workers 中转
- [x] 文件/文件夹打包下载 - [x] 文件/文件夹打包下载
@ -99,11 +97,7 @@
## 文档 ## 文档
<https://alistgo.com/zh/> <https://alist.nn.ci/zh/>
## API 文档(通过 Apifox 提供)
<https://alist-public.apifox.cn/>
## Demo ## Demo
@ -115,11 +109,13 @@
## 赞助 ## 赞助
AList 是一个开源软件如果你碰巧喜欢这个项目并希望我继续下去请考虑赞助我或提供一个单一的捐款感谢所有的爱和支持https://alistgo.com/zh/guide/sponsor.html AList 是一个开源软件如果你碰巧喜欢这个项目并希望我继续下去请考虑赞助我或提供一个单一的捐款感谢所有的爱和支持https://alist.nn.ci/zh/guide/sponsor.html
### 特别赞助 ### 特别赞助
- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - 苹果生态下优雅的网盘视频播放器iPhoneiPadMacApple TV全平台支持。 - [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - 苹果生态下优雅的网盘视频播放器iPhoneiPadMacApple TV全平台支持。
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (国内API服务器赞助)
- [找资源](http://zhaoziyuan2.cc/) - 阿里云盘资源搜索引擎
## 贡献者 ## 贡献者

View File

@ -1,5 +1,5 @@
<div align="center"> <div align="center">
<a href="https://alistgo.com"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a> <a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<p><em>🗂Gin と Solidjs による、複数のストレージをサポートするファイルリストプログラム。</em></p> <p><em>🗂Gin と Solidjs による、複数のストレージをサポートするファイルリストプログラム。</em></p>
<div> <div>
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3"> <a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
@ -31,7 +31,7 @@
<a href="https://hub.docker.com/r/xhofe/alist"> <a href="https://hub.docker.com/r/xhofe/alist">
<img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" /> <img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
</a> </a>
<a href="https://alistgo.com/guide/sponsor.html"> <a href="https://alist.nn.ci/guide/sponsor.html">
<img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" /> <img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
</a> </a>
</div> </div>
@ -57,10 +57,8 @@
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage) - [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
- [x] WebDav(Support OneDrive/SharePoint without API) - [x] WebDav(Support OneDrive/SharePoint without API)
- [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ )) - [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack](https://www.mediatrack.cn/) - [x] [Mediatrack](https://www.mediatrack.cn/)
- [x] [ProtonDrive](https://proton.me/drive) - [x] [139yun](https://yun.139.com/) (Personal, Family)
- [x] [139yun](https://yun.139.com/) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com/) - [x] [YandexDisk](https://disk.yandex.com/)
- [x] [BaiduNetdisk](http://pan.baidu.com/) - [x] [BaiduNetdisk](http://pan.baidu.com/)
- [x] [Terabox](https://www.terabox.com/main) - [x] [Terabox](https://www.terabox.com/main)
@ -89,7 +87,7 @@
- [x] ダークモード - [x] ダークモード
- [x] 国際化 - [x] 国際化
- [x] 保護されたルート (パスワード保護と認証) - [x] 保護されたルート (パスワード保護と認証)
- [x] WebDav (詳細は https://alistgo.com/guide/webdav.html を参照) - [x] WebDav (詳細は https://alist.nn.ci/guide/webdav.html を参照)
- [x] [Docker デプロイ](https://hub.docker.com/r/xhofe/alist) - [x] [Docker デプロイ](https://hub.docker.com/r/xhofe/alist)
- [x] Cloudflare ワーカープロキシ - [x] Cloudflare ワーカープロキシ
- [x] ファイル/フォルダパッケージのダウンロード - [x] ファイル/フォルダパッケージのダウンロード
@ -100,11 +98,7 @@
## ドキュメント ## ドキュメント
<https://alistgo.com/> <https://alist.nn.ci/>
## APIドキュメントApifox 提供)
<https://alist-public.apifox.cn/>
## デモ ## デモ
@ -117,11 +111,13 @@
## スポンサー ## スポンサー
AList はオープンソースのソフトウェアです。もしあなたがこのプロジェクトを気に入ってくださり、続けて欲しいと思ってくださるなら、ぜひスポンサーになってくださるか、1口でも寄付をしてくださるようご検討くださいすべての愛とサポートに感謝します: AList はオープンソースのソフトウェアです。もしあなたがこのプロジェクトを気に入ってくださり、続けて欲しいと思ってくださるなら、ぜひスポンサーになってくださるか、1口でも寄付をしてくださるようご検討くださいすべての愛とサポートに感謝します:
https://alistgo.com/guide/sponsor.html https://alist.nn.ci/guide/sponsor.html
### スペシャルスポンサー ### スペシャルスポンサー
- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV. - [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (sponsored Chinese API server)
- [找资源](http://zhaoziyuan2.cc/) - 阿里云盘资源搜索引擎
## コントリビューター ## コントリビューター

View File

@ -1,14 +1,12 @@
appName="alist" appName="alist"
builtAt="$(date +'%F %T %z')" builtAt="$(date +'%F %T %z')"
goVersion=$(go version | sed 's/go version //')
gitAuthor="Xhofe <i@nn.ci>" gitAuthor="Xhofe <i@nn.ci>"
gitCommit=$(git log --pretty=format:"%h" -1) gitCommit=$(git log --pretty=format:"%h" -1)
if [ "$1" = "dev" ]; then if [ "$1" = "dev" ]; then
version="dev" version="dev"
webVersion="dev" webVersion="dev"
elif [ "$1" = "beta" ]; then
version="beta"
webVersion="dev"
else else
git tag -d beta git tag -d beta
version=$(git describe --abbrev=0 --tags) version=$(git describe --abbrev=0 --tags)
@ -21,6 +19,7 @@ echo "frontend version: $webVersion"
ldflags="\ ldflags="\
-w -s \ -w -s \
-X 'github.com/alist-org/alist/v3/internal/conf.BuiltAt=$builtAt' \ -X 'github.com/alist-org/alist/v3/internal/conf.BuiltAt=$builtAt' \
-X 'github.com/alist-org/alist/v3/internal/conf.GoVersion=$goVersion' \
-X 'github.com/alist-org/alist/v3/internal/conf.GitAuthor=$gitAuthor' \ -X 'github.com/alist-org/alist/v3/internal/conf.GitAuthor=$gitAuthor' \
-X 'github.com/alist-org/alist/v3/internal/conf.GitCommit=$gitCommit' \ -X 'github.com/alist-org/alist/v3/internal/conf.GitCommit=$gitCommit' \
-X 'github.com/alist-org/alist/v3/internal/conf.Version=$version' \ -X 'github.com/alist-org/alist/v3/internal/conf.Version=$version' \
@ -93,8 +92,8 @@ BuildDocker() {
PrepareBuildDockerMusl() { PrepareBuildDockerMusl() {
mkdir -p build/musl-libs mkdir -p build/musl-libs
BASE="https://github.com/go-cross/musl-toolchain-archive/releases/latest/download/" BASE="https://musl.cc/"
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross) FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross)
for i in "${FILES[@]}"; do for i in "${FILES[@]}"; do
url="${BASE}${i}.tgz" url="${BASE}${i}.tgz"
lib_tgz="build/${i}.tgz" lib_tgz="build/${i}.tgz"
@ -113,8 +112,8 @@ BuildDockerMultiplatform() {
docker_lflags="--extldflags '-static -fpic' $ldflags" docker_lflags="--extldflags '-static -fpic' $ldflags"
export CGO_ENABLED=1 export CGO_ENABLED=1
OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x linux-riscv64 linux-ppc64le) OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x)
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc) CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc)
for i in "${!OS_ARCHES[@]}"; do for i in "${!OS_ARCHES[@]}"; do
os_arch=${OS_ARCHES[$i]} os_arch=${OS_ARCHES[$i]}
cgo_cc=${CGO_ARGS[$i]} cgo_cc=${CGO_ARGS[$i]}
@ -234,29 +233,6 @@ BuildReleaseAndroid() {
done done
} }
BuildReleaseFreeBSD() {
rm -rf .git/
mkdir -p "build/freebsd"
OS_ARCHES=(amd64 arm64 i386)
GO_ARCHES=(amd64 arm64 386)
CGO_ARGS=(x86_64-unknown-freebsd14.1 aarch64-unknown-freebsd14.1 i386-unknown-freebsd14.1)
for i in "${!OS_ARCHES[@]}"; do
os_arch=${OS_ARCHES[$i]}
cgo_cc="clang --target=${CGO_ARGS[$i]} --sysroot=/opt/freebsd/${os_arch}"
echo building for freebsd-${os_arch}
sudo mkdir -p "/opt/freebsd/${os_arch}"
wget -q https://download.freebsd.org/releases/${os_arch}/14.3-RELEASE/base.txz
sudo tar -xf ./base.txz -C /opt/freebsd/${os_arch}
rm base.txz
export GOOS=freebsd
export GOARCH=${GO_ARCHES[$i]}
export CC=${cgo_cc}
export CGO_ENABLED=1
export CGO_LDFLAGS="-fuse-ld=lld"
go build -o ./build/$appName-freebsd-$os_arch -ldflags="$ldflags" -tags=jsoniter .
done
}
MakeRelease() { MakeRelease() {
cd build cd build
mkdir compress mkdir compress
@ -275,11 +251,6 @@ MakeRelease() {
tar -czvf compress/"$i".tar.gz alist tar -czvf compress/"$i".tar.gz alist
rm -f alist rm -f alist
done done
for i in $(find . -type f -name "$appName-freebsd-*"); do
cp "$i" alist
tar -czvf compress/"$i".tar.gz alist
rm -f alist
done
for i in $(find . -type f -name "$appName-windows-*"); do for i in $(find . -type f -name "$appName-windows-*"); do
cp "$i" alist.exe cp "$i" alist.exe
zip compress/$(echo $i | sed 's/\.[^.]*$//').zip alist.exe zip compress/$(echo $i | sed 's/\.[^.]*$//').zip alist.exe
@ -302,12 +273,8 @@ if [ "$1" = "dev" ]; then
else else
BuildDev BuildDev
fi fi
elif [ "$1" = "release" -o "$1" = "beta" ]; then elif [ "$1" = "release" ]; then
if [ "$1" = "beta" ]; then FetchWebRelease
FetchWebDev
else
FetchWebRelease
fi
if [ "$2" = "docker" ]; then if [ "$2" = "docker" ]; then
BuildDocker BuildDocker
elif [ "$2" = "docker-multiplatform" ]; then elif [ "$2" = "docker-multiplatform" ]; then
@ -321,9 +288,6 @@ elif [ "$1" = "release" -o "$1" = "beta" ]; then
elif [ "$2" = "android" ]; then elif [ "$2" = "android" ]; then
BuildReleaseAndroid BuildReleaseAndroid
MakeRelease "md5-android.txt" MakeRelease "md5-android.txt"
elif [ "$2" = "freebsd" ]; then
BuildReleaseFreeBSD
MakeRelease "md5-freebsd.txt"
elif [ "$2" = "web" ]; then elif [ "$2" = "web" ]; then
echo "web only" echo "web only"
else else

View File

@ -1,7 +1,6 @@
package cmd package cmd
import ( import (
"github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_46_0"
"os" "os"
"path/filepath" "path/filepath"
"strconv" "strconv"
@ -17,16 +16,8 @@ func Init() {
bootstrap.InitConfig() bootstrap.InitConfig()
bootstrap.Log() bootstrap.Log()
bootstrap.InitDB() bootstrap.InitDB()
if v3_46_0.IsLegacyRoleDetected() {
utils.Log.Warnf("Detected legacy role format, executing ConvertLegacyRoles patch early...")
v3_46_0.ConvertLegacyRoles()
}
data.InitData() data.InitData()
bootstrap.InitStreamLimit()
bootstrap.InitIndex() bootstrap.InitIndex()
bootstrap.InitUpgradePatch()
} }
func Release() { func Release() {

View File

@ -1,54 +0,0 @@
package cmd
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"os"
)
// KillCmd represents the kill command
var KillCmd = &cobra.Command{
Use: "kill",
Short: "Force kill alist server process by daemon/pid file",
Run: func(cmd *cobra.Command, args []string) {
kill()
},
}
func kill() {
initDaemon()
if pid == -1 {
log.Info("Seems not have been started. Try use `alist start` to start server.")
return
}
process, err := os.FindProcess(pid)
if err != nil {
log.Errorf("failed to find process by pid: %d, reason: %v", pid, process)
return
}
err = process.Kill()
if err != nil {
log.Errorf("failed to kill process %d: %v", pid, err)
} else {
log.Info("killed process: ", pid)
}
err = os.Remove(pidFile)
if err != nil {
log.Errorf("failed to remove pid file")
}
pid = -1
}
func init() {
RootCmd.AddCommand(KillCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// stopCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// stopCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}

View File

@ -12,7 +12,6 @@ import (
"strings" "strings"
_ "github.com/alist-org/alist/v3/drivers" _ "github.com/alist-org/alist/v3/drivers"
"github.com/alist-org/alist/v3/internal/bootstrap"
"github.com/alist-org/alist/v3/internal/bootstrap/data" "github.com/alist-org/alist/v3/internal/bootstrap/data"
"github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/op"
@ -138,7 +137,6 @@ var LangCmd = &cobra.Command{
Use: "lang", Use: "lang",
Short: "Generate language json file", Short: "Generate language json file",
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
bootstrap.InitConfig()
err := os.MkdirAll("lang", 0777) err := os.MkdirAll("lang", 0777)
if err != nil { if err != nil {
utils.Log.Fatalf("failed create folder: %s", err.Error()) utils.Log.Fatalf("failed create folder: %s", err.Error())

View File

@ -6,7 +6,6 @@ import (
"github.com/alist-org/alist/v3/cmd/flags" "github.com/alist-org/alist/v3/cmd/flags"
_ "github.com/alist-org/alist/v3/drivers" _ "github.com/alist-org/alist/v3/drivers"
_ "github.com/alist-org/alist/v3/internal/archive"
_ "github.com/alist-org/alist/v3/internal/offline_download" _ "github.com/alist-org/alist/v3/internal/offline_download"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@ -16,7 +15,7 @@ var RootCmd = &cobra.Command{
Short: "A file list program that supports multiple storage.", Short: "A file list program that supports multiple storage.",
Long: `A file list program that supports multiple storage, Long: `A file list program that supports multiple storage,
built with love by Xhofe and friends in Go/Solid.js. built with love by Xhofe and friends in Go/Solid.js.
Complete documentation is available at https://alistgo.com/`, Complete documentation is available at https://alist.nn.ci/`,
} }
func Execute() { func Execute() {

View File

@ -13,19 +13,14 @@ import (
"syscall" "syscall"
"time" "time"
ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
"github.com/KirCute/sftpd-alist"
"github.com/alist-org/alist/v3/cmd/flags" "github.com/alist-org/alist/v3/cmd/flags"
"github.com/alist-org/alist/v3/internal/bootstrap" "github.com/alist-org/alist/v3/internal/bootstrap"
"github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/fs"
"github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server" "github.com/alist-org/alist/v3/server"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
) )
// ServerCmd represents the server command // ServerCmd represents the server command
@ -49,15 +44,11 @@ the address is defined in config file`,
r := gin.New() r := gin.New()
r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out)) r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
server.Init(r) server.Init(r)
var httpHandler http.Handler = r
if conf.Conf.Scheme.EnableH2c {
httpHandler = h2c.NewHandler(r, &http2.Server{})
}
var httpSrv, httpsSrv, unixSrv *http.Server var httpSrv, httpsSrv, unixSrv *http.Server
if conf.Conf.Scheme.HttpPort != -1 { if conf.Conf.Scheme.HttpPort != -1 {
httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort) httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
utils.Log.Infof("start HTTP server @ %s", httpBase) utils.Log.Infof("start HTTP server @ %s", httpBase)
httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler} httpSrv = &http.Server{Addr: httpBase, Handler: r}
go func() { go func() {
err := httpSrv.ListenAndServe() err := httpSrv.ListenAndServe()
if err != nil && !errors.Is(err, http.ErrServerClosed) { if err != nil && !errors.Is(err, http.ErrServerClosed) {
@ -78,7 +69,7 @@ the address is defined in config file`,
} }
if conf.Conf.Scheme.UnixFile != "" { if conf.Conf.Scheme.UnixFile != "" {
utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile) utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile)
unixSrv = &http.Server{Handler: httpHandler} unixSrv = &http.Server{Handler: r}
go func() { go func() {
listener, err := net.Listen("unix", conf.Conf.Scheme.UnixFile) listener, err := net.Listen("unix", conf.Conf.Scheme.UnixFile)
if err != nil { if err != nil {
@ -121,42 +112,6 @@ the address is defined in config file`,
} }
}() }()
} }
var ftpDriver *server.FtpMainDriver
var ftpServer *ftpserver.FtpServer
if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable {
var err error
ftpDriver, err = server.NewMainDriver()
if err != nil {
utils.Log.Fatalf("failed to start ftp driver: %s", err.Error())
} else {
utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen)
go func() {
ftpServer = ftpserver.NewFtpServer(ftpDriver)
err = ftpServer.ListenAndServe()
if err != nil {
utils.Log.Fatalf("problem ftp server listening: %s", err.Error())
}
}()
}
}
var sftpDriver *server.SftpDriver
var sftpServer *sftpd.SftpServer
if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable {
var err error
sftpDriver, err = server.NewSftpDriver()
if err != nil {
utils.Log.Fatalf("failed to start sftp driver: %s", err.Error())
} else {
utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen)
go func() {
sftpServer = sftpd.NewSftpServer(sftpDriver)
err = sftpServer.RunServer()
if err != nil {
utils.Log.Fatalf("problem sftp server listening: %s", err.Error())
}
}()
}
}
// Wait for interrupt signal to gracefully shutdown the server with // Wait for interrupt signal to gracefully shutdown the server with
// a timeout of 1 second. // a timeout of 1 second.
quit := make(chan os.Signal, 1) quit := make(chan os.Signal, 1)
@ -166,7 +121,6 @@ the address is defined in config file`,
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM) signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
<-quit <-quit
utils.Log.Println("Shutdown server...") utils.Log.Println("Shutdown server...")
fs.ArchiveContentUploadTaskManager.RemoveAll()
Release() Release()
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel() defer cancel()
@ -198,25 +152,6 @@ the address is defined in config file`,
} }
}() }()
} }
if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable && ftpServer != nil && ftpDriver != nil {
wg.Add(1)
go func() {
defer wg.Done()
ftpDriver.Stop()
if err := ftpServer.Stop(); err != nil {
utils.Log.Fatal("FTP server shutdown err: ", err)
}
}()
}
if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable && sftpServer != nil && sftpDriver != nil {
wg.Add(1)
go func() {
defer wg.Done()
if err := sftpServer.Close(); err != nil {
utils.Log.Fatal("SFTP server shutdown err: ", err)
}
}()
}
wg.Wait() wg.Wait()
utils.Log.Println("Server exit") utils.Log.Println("Server exit")
}, },

View File

@ -1,10 +1,10 @@
//go:build !windows /*
Copyright © 2022 NAME HERE <EMAIL ADDRESS>
*/
package cmd package cmd
import ( import (
"os" "os"
"syscall"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -30,11 +30,11 @@ func stop() {
log.Errorf("failed to find process by pid: %d, reason: %v", pid, process) log.Errorf("failed to find process by pid: %d, reason: %v", pid, process)
return return
} }
err = process.Signal(syscall.SIGTERM) err = process.Kill()
if err != nil { if err != nil {
log.Errorf("failed to terminate process %d: %v", pid, err) log.Errorf("failed to kill process %d: %v", pid, err)
} else { } else {
log.Info("terminated process: ", pid) log.Info("killed process: ", pid)
} }
err = os.Remove(pidFile) err = os.Remove(pidFile)
if err != nil { if err != nil {

View File

@ -1,34 +0,0 @@
//go:build windows
package cmd
import (
"github.com/spf13/cobra"
)
// StopCmd represents the stop command
var StopCmd = &cobra.Command{
Use: "stop",
Short: "Same as the kill command",
Run: func(cmd *cobra.Command, args []string) {
stop()
},
}
func stop() {
kill()
}
func init() {
RootCmd.AddCommand(StopCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// stopCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// stopCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}

View File

@ -6,7 +6,6 @@ package cmd
import ( import (
"fmt" "fmt"
"os" "os"
"runtime"
"github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/conf"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -17,15 +16,14 @@ var VersionCmd = &cobra.Command{
Use: "version", Use: "version",
Short: "Show current version of AList", Short: "Show current version of AList",
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
goVersion := fmt.Sprintf("%s %s/%s", runtime.Version(), runtime.GOOS, runtime.GOARCH)
fmt.Printf(`Built At: %s fmt.Printf(`Built At: %s
Go Version: %s Go Version: %s
Author: %s Author: %s
Commit ID: %s Commit ID: %s
Version: %s Version: %s
WebVersion: %s WebVersion: %s
`, conf.BuiltAt, goVersion, conf.GitAuthor, conf.GitCommit, conf.Version, conf.WebVersion) `,
conf.BuiltAt, conf.GoVersion, conf.GitAuthor, conf.GitCommit, conf.Version, conf.WebVersion)
os.Exit(0) os.Exit(0)
}, },
} }

View File

@ -1,43 +0,0 @@
package _115
import (
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
"github.com/alist-org/alist/v3/drivers/base"
log "github.com/sirupsen/logrus"
)
var (
md5Salt = "Qclm8MGWUv59TnrR0XPg"
appVer = "27.0.5.7"
)
func (d *Pan115) getAppVersion() ([]driver115.AppVersion, error) {
result := driver115.VersionResp{}
resp, err := base.RestyClient.R().Get(driver115.ApiGetVersion)
err = driver115.CheckErr(err, &result, resp)
if err != nil {
return nil, err
}
return result.Data.GetAppVersions(), nil
}
func (d *Pan115) getAppVer() string {
// todo add some cache
vers, err := d.getAppVersion()
if err != nil {
log.Warnf("[115] get app version failed: %v", err)
return appVer
}
for _, ver := range vers {
if ver.AppName == "win" {
return ver.Version
}
}
return appVer
}
func (d *Pan115) initAppVer() {
appVer = d.getAppVer()
}

View File

@ -3,7 +3,6 @@ package _115
import ( import (
"context" "context"
"strings" "strings"
"sync"
driver115 "github.com/SheltonZhu/115driver/pkg/driver" driver115 "github.com/SheltonZhu/115driver/pkg/driver"
"github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/driver"
@ -17,9 +16,8 @@ import (
type Pan115 struct { type Pan115 struct {
model.Storage model.Storage
Addition Addition
client *driver115.Pan115Client client *driver115.Pan115Client
limiter *rate.Limiter limiter *rate.Limiter
appVerOnce sync.Once
} }
func (d *Pan115) Config() driver.Config { func (d *Pan115) Config() driver.Config {
@ -31,7 +29,6 @@ func (d *Pan115) GetAddition() driver.Additional {
} }
func (d *Pan115) Init(ctx context.Context) error { func (d *Pan115) Init(ctx context.Context) error {
d.appVerOnce.Do(d.initAppVer)
if d.LimitRate > 0 { if d.LimitRate > 0 {
d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1) d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
} }
@ -79,60 +76,28 @@ func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
return link, nil return link, nil
} }
-func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
+func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
 	if err := d.WaitLimit(ctx); err != nil {
-		return nil, err
+		return err
 	}
-	result := driver115.MkdirResp{}
-	form := map[string]string{
-		"pid":   parentDir.GetID(),
-		"cname": dirName,
-	}
-	req := d.client.NewRequest().
-		SetFormData(form).
-		SetResult(&result).
-		ForceContentType("application/json;charset=UTF-8")
-	resp, err := req.Post(driver115.ApiDirAdd)
-	err = driver115.CheckErr(err, &result, resp)
-	if err != nil {
-		return nil, err
-	}
-	f, err := d.getNewFile(result.FileID)
-	if err != nil {
-		return nil, nil
-	}
-	return f, nil
+	if _, err := d.client.Mkdir(parentDir.GetID(), dirName); err != nil {
+		return err
+	}
+	return nil
 }
 
-func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
 	if err := d.WaitLimit(ctx); err != nil {
-		return nil, err
+		return err
 	}
-	if err := d.client.Move(dstDir.GetID(), srcObj.GetID()); err != nil {
-		return nil, err
-	}
-	f, err := d.getNewFile(srcObj.GetID())
-	if err != nil {
-		return nil, nil
-	}
-	return f, nil
+	return d.client.Move(dstDir.GetID(), srcObj.GetID())
 }
 
-func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
+func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
 	if err := d.WaitLimit(ctx); err != nil {
-		return nil, err
+		return err
 	}
-	if err := d.client.Rename(srcObj.GetID(), newName); err != nil {
-		return nil, err
-	}
-	f, err := d.getNewFile(srcObj.GetID())
-	if err != nil {
-		return nil, nil
-	}
-	return f, nil
+	return d.client.Rename(srcObj.GetID(), newName)
 }
 
 func (d *Pan115) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -149,9 +114,9 @@ func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error {
 	return d.client.Delete(obj.GetID())
 }
 
-func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
 	if err := d.WaitLimit(ctx); err != nil {
-		return nil, err
+		return err
 	}
 	var (
@@ -160,10 +125,10 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	)
 	if ok, err := d.client.UploadAvailable(); err != nil || !ok {
-		return nil, err
+		return err
 	}
 	if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
-		return nil, driver115.ErrUploadTooLarge
+		return driver115.ErrUploadTooLarge
 	}
 	//if digest, err = d.client.GetDigestResult(stream); err != nil {
 	//	return err
@@ -176,22 +141,22 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	}
 	reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
 	if err != nil {
-		return nil, err
+		return err
 	}
 	preHash, err := utils.HashReader(utils.SHA1, reader)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	preHash = strings.ToUpper(preHash)
 	fullHash := stream.GetHash().GetHash(utils.SHA1)
 	if len(fullHash) <= 0 {
 		tmpF, err := stream.CacheFullInTempFile()
 		if err != nil {
-			return nil, err
+			return err
 		}
 		fullHash, err = utils.HashFile(utils.SHA1, tmpF)
 		if err != nil {
-			return nil, err
+			return err
 		}
 	}
 	fullHash = strings.ToUpper(fullHash)
@@ -200,36 +165,20 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	// note that 115 add timeout for rapid-upload,
 	// and "sig invalid" err is thrown even when the hash is correct after timeout.
 	if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
-		return nil, err
+		return err
 	}
 	if matched, err := fastInfo.Ok(); err != nil {
-		return nil, err
+		return err
 	} else if matched {
-		f, err := d.getNewFileByPickCode(fastInfo.PickCode)
-		if err != nil {
-			return nil, nil
-		}
-		return f, nil
+		return nil
 	}
 
-	var uploadResult *UploadResult
 	// rapid upload failed, fall back to a real upload
-	if stream.GetSize() <= 10*utils.MB { // use the plain upload mode for files smaller than 10 MB
-		if uploadResult, err = d.UploadByOSS(ctx, &fastInfo.UploadOSSParams, stream, dirID, up); err != nil {
-			return nil, err
-		}
-	} else {
-		// multipart upload
-		if uploadResult, err = d.UploadByMultipart(ctx, &fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID, up); err != nil {
-			return nil, err
-		}
-	}
-	file, err := d.getNewFile(uploadResult.Data.FileID)
-	if err != nil {
-		return nil, nil
-	}
-	return file, nil
+	if stream.GetSize() <= utils.KB { // use the plain upload mode for files smaller than 1 KB
+		return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID)
+	}
+	// multipart upload
+	return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID)
 }
 
 func (d *Pan115) OfflineList(ctx context.Context) ([]*driver115.OfflineTask, error) {
@@ -241,7 +190,7 @@ func (d *Pan115) OfflineList(ctx context.Context) ([]*driver115.OfflineTask, err
 }
 
 func (d *Pan115) OfflineDownload(ctx context.Context, uris []string, dstDir model.Obj) ([]string, error) {
-	return d.client.AddOfflineTaskURIs(uris, dstDir.GetID(), driver115.WithAppVer(appVer))
+	return d.client.AddOfflineTaskURIs(uris, dstDir.GetID())
}
 
 func (d *Pan115) DeleteOfflineTasks(ctx context.Context, hashes []string, deleteFiles bool) error {
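
The rapid-upload handshake in the Put diff above hashes the file twice: a "preHash" over only the first 128 KB, then a full SHA-1 only when the stream does not already carry one. A minimal standalone sketch of the pre-hash step using only the Go standard library (the 128 KB figure mirrors the driver's hashSize constant, which is defined outside this excerpt):

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// preHashSHA1 hashes at most the first 128 KB of f, mirroring the
// preHash step above, which reads the same range via stream.RangeRead.
func preHashSHA1(f *os.File) (string, error) {
	h := sha1.New()
	if _, err := io.Copy(h, io.LimitReader(f, 128*1024)); err != nil {
		return "", err
	}
	return strings.ToUpper(hex.EncodeToString(h.Sum(nil))), nil
}

func main() {
	f, err := os.Open("example.bin")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	sum, err := preHashSHA1(f)
	if err != nil {
		panic(err)
	}
	fmt.Println("preHash:", sum)
}
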

View File

@@ -9,8 +9,8 @@ type Addition struct {
 	Cookie       string  `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
 	QRCodeToken  string  `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
 	QRCodeSource string  `json:"qrcode_source" type:"select" options:"web,android,ios,tv,alipaymini,wechatmini,qandroid" default:"linux" help:"select the QR code device, default linux"`
-	PageSize     int64   `json:"page_size" type:"number" default:"1000" help:"list api per page size of 115 driver"`
-	LimitRate    float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate ([limit]r/1s)"`
+	PageSize     int64   `json:"page_size" type:"number" default:"56" help:"list api per page size of 115 driver"`
+	LimitRate    float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"`
 	driver.RootID
 }
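
Note the two help texts describe opposite semantics: "([limit]r/1s)" reads as N requests per second, while "(1r/[limit_rate]s)" reads as one request every N seconds. With golang.org/x/time/rate, which this driver uses for WaitLimit, the two readings produce different limiters; a small sketch of the difference:

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Reading A, "([limit]r/1s)": limit_rate = 2 means 2 requests per second.
	perSecond := rate.NewLimiter(rate.Limit(2), 1)
	// Reading B, "(1r/[limit_rate]s)": limit_rate = 2 means 1 request every 2 seconds.
	perInterval := rate.NewLimiter(rate.Every(2*time.Second), 1)
	fmt.Println(perSecond.Limit(), perInterval.Limit()) // 2 vs 0.5 events per second
}
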

View File

@@ -1,11 +1,10 @@
 package _115
 
 import (
-	"time"
-
 	"github.com/SheltonZhu/115driver/pkg/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"time"
 )
 
 var _ model.Obj = (*FileObj)(nil)
@@ -21,18 +20,3 @@ func (f *FileObj) CreateTime() time.Time {
 
 func (f *FileObj) GetHash() utils.HashInfo {
 	return utils.NewHashInfo(utils.SHA1, f.Sha1)
 }
-
-type UploadResult struct {
-	driver.BasicResp
-	Data struct {
-		PickCode string `json:"pick_code"`
-		FileSize int    `json:"file_size"`
-		FileID   string `json:"file_id"`
-		ThumbURL string `json:"thumb_url"`
-		Sha1     string `json:"sha1"`
-		Aid      int    `json:"aid"`
-		FileName string `json:"file_name"`
-		Cid      string `json:"cid"`
-		IsVideo  int    `json:"is_video"`
-	} `json:"data"`
-}

View File

@@ -2,39 +2,36 @@ package _115
 
 import (
 	"bytes"
-	"context"
-	"crypto/md5"
 	"crypto/tls"
-	"encoding/hex"
 	"encoding/json"
 	"fmt"
 	"io"
 	"net/http"
 	"net/url"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"sync"
-	"sync/atomic"
 	"time"
 
 	"github.com/alist-org/alist/v3/internal/conf"
-	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 
-	cipher "github.com/SheltonZhu/115driver/pkg/crypto/ec115"
-	crypto "github.com/SheltonZhu/115driver/pkg/crypto/m115"
 	driver115 "github.com/SheltonZhu/115driver/pkg/driver"
+	crypto "github.com/gaoyb7/115drive-webdav/115"
+	"github.com/orzogc/fake115uploader/cipher"
 	"github.com/pkg/errors"
 )
-// var UserAgent = driver115.UA115Browser
+var UserAgent = driver115.UA115Browser
 
 func (d *Pan115) login() error {
 	var err error
 	opts := []driver115.Option{
-		driver115.UA(d.getUA()),
+		driver115.UA(UserAgent),
 		func(c *driver115.Pan115Client) {
 			c.Client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
 		},
@@ -48,7 +45,7 @@ func (d *Pan115) login() error {
 		if cr, err = d.client.QRCodeLoginWithApp(s, driver115.LoginApp(d.QRCodeSource)); err != nil {
 			return errors.Wrap(err, "failed to login by qrcode")
 		}
-		d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s;KID=%s", cr.UID, cr.CID, cr.SEID, cr.KID)
+		d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s", cr.UID, cr.CID, cr.SEID)
 		d.QRCodeToken = ""
 	} else if d.Cookie != "" {
 		if err = cr.FromCookie(d.Cookie); err != nil {
@@ -66,7 +63,7 @@ func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
 	if d.PageSize <= 0 {
 		d.PageSize = driver115.FileListLimit
 	}
-	files, err := d.client.ListWithLimit(fileId, d.PageSize, driver115.WithMultiUrls())
+	files, err := d.client.ListWithLimit(fileId, d.PageSize)
 	if err != nil {
 		return nil, err
 	}
@@ -76,42 +73,28 @@ func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
 	return res, nil
 }
 
-func (d *Pan115) getNewFile(fileId string) (*FileObj, error) {
-	file, err := d.client.GetFile(fileId)
-	if err != nil {
-		return nil, err
-	}
-	return &FileObj{*file}, nil
-}
-
-func (d *Pan115) getNewFileByPickCode(pickCode string) (*FileObj, error) {
-	result := driver115.GetFileInfoResponse{}
-	req := d.client.NewRequest().
-		SetQueryParam("pick_code", pickCode).
-		ForceContentType("application/json;charset=UTF-8").
-		SetResult(&result)
-	resp, err := req.Get(driver115.ApiFileInfo)
-	if err := driver115.CheckErr(err, &result, resp); err != nil {
-		return nil, err
-	}
-	if len(result.Files) == 0 {
-		return nil, errors.New("not get file info")
-	}
-	fileInfo := result.Files[0]
-	f := &FileObj{}
-	f.From(fileInfo)
-	return f, nil
-}
-
-func (d *Pan115) getUA() string {
-	return fmt.Sprintf("Mozilla/5.0 115Browser/%s", appVer)
-}
-
-func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, error) {
+const (
+	appVer = "27.0.3.7"
+)
+
+func (c *Pan115) getAppVer() string {
+	// todo add some cache
+	vers, err := c.client.GetAppVersion()
+	if err != nil {
+		return appVer
+	}
+	for _, ver := range vers {
+		if ver.AppName == "win" {
+			return ver.Version
+		}
+	}
+	return appVer
+}
+
+func (c *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, error) {
 	key := crypto.GenerateKey()
 	result := driver115.DownloadResp{}
-	params, err := utils.Json.Marshal(map[string]string{"pick_code": pickCode})
+	params, err := utils.Json.Marshal(map[string]string{"pickcode": pickCode})
 	if err != nil {
 		return nil, err
 	}
@@ -119,13 +102,13 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e
 	data := crypto.Encode(params, key)
 	bodyReader := strings.NewReader(url.Values{"data": []string{data}}.Encode())
-	reqUrl := fmt.Sprintf("%s?t=%s", driver115.AndroidApiDownloadGetUrl, driver115.Now().String())
+	reqUrl := fmt.Sprintf("%s?t=%s", driver115.ApiDownloadGetUrl, driver115.Now().String())
 	req, _ := http.NewRequest(http.MethodPost, reqUrl, bodyReader)
 	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
-	req.Header.Set("Cookie", d.Cookie)
+	req.Header.Set("Cookie", c.Cookie)
 	req.Header.Set("User-Agent", ua)
-	resp, err := d.client.Client.GetClient().Do(req)
+	resp, err := c.client.Client.GetClient().Do(req)
 	if err != nil {
 		return nil, err
 	}
@@ -143,30 +126,24 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e
 		return nil, err
 	}
 
-	b, err := crypto.Decode(string(result.EncodedData), key)
+	bytes, err := crypto.Decode(string(result.EncodedData), key)
 	if err != nil {
 		return nil, err
 	}
 
-	downloadInfo := struct {
-		Url string `json:"url"`
-	}{}
-	if err := utils.Json.Unmarshal(b, &downloadInfo); err != nil {
-		return nil, err
-	}
-
-	info := &driver115.DownloadInfo{}
-	info.PickCode = pickCode
-	info.Header = resp.Request.Header
-	info.Url.Url = downloadInfo.Url
-	return info, nil
-}
-
-func (c *Pan115) GenerateToken(fileID, preID, timeStamp, fileSize, signKey, signVal string) string {
-	userID := strconv.FormatInt(c.client.UserID, 10)
-	userIDMd5 := md5.Sum([]byte(userID))
-	tokenMd5 := md5.Sum([]byte(md5Salt + fileID + fileSize + signKey + signVal + userID + timeStamp + hex.EncodeToString(userIDMd5[:]) + appVer))
-	return hex.EncodeToString(tokenMd5[:])
+	downloadInfo := driver115.DownloadData{}
+	if err := utils.Json.Unmarshal(bytes, &downloadInfo); err != nil {
+		return nil, err
+	}
+
+	for _, info := range downloadInfo {
+		if info.FileSize < 0 {
+			return nil, driver115.ErrDownloadEmpty
+		}
+		info.Header = resp.Request.Header
+		return info, nil
+	}
+	return nil, driver115.ErrUnexpected
 }
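
The removed GenerateToken above is a plain salted MD5 over the upload parameters plus the MD5 of the user id. A standalone sketch of the same derivation; md5Salt here is a placeholder, since the real salt is a package constant that does not appear in this excerpt:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// md5Salt is a stand-in: the driver keeps the real salt in a package
// constant that is not part of this excerpt.
const md5Salt = "placeholder-salt"

func generateToken(userID, fileID, timeStamp, fileSize, signKey, signVal, appVer string) string {
	// MD5 of the user id is mixed into the salted concatenation.
	userIDMd5 := md5.Sum([]byte(userID))
	tokenMd5 := md5.Sum([]byte(md5Salt + fileID + fileSize + signKey + signVal +
		userID + timeStamp + hex.EncodeToString(userIDMd5[:]) + appVer))
	return hex.EncodeToString(tokenMd5[:])
}

func main() {
	fmt.Println(generateToken("1234", "ABCDEF0123456789ABCDEF0123456789ABCDEF01",
		"1700000000", "1024", "", "", "27.0.3.7"))
}
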
 func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) {
@@ -188,7 +165,7 @@ func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID stri
 	userID := strconv.FormatInt(d.client.UserID, 10)
 	form := url.Values{}
 	form.Set("appid", "0")
-	form.Set("appversion", appVer)
+	form.Set("appversion", d.getAppVer())
 	form.Set("userid", userID)
 	form.Set("filename", fileName)
 	form.Set("filesize", fileSizeStr)
@@ -209,7 +186,7 @@ func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID stri
 	}
 	form.Set("t", t.String())
-	form.Set("token", d.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
+	form.Set("token", d.client.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
 	if signKey != "" && signVal != "" {
 		form.Set("sign_key", signKey)
 		form.Set("sign_val", signVal)
@@ -273,43 +250,8 @@ func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result stri
 	return
 }
-// UploadByOSS use aliyun sdk to upload
-func (c *Pan115) UploadByOSS(ctx context.Context, params *driver115.UploadOSSParams, s model.FileStreamer, dirID string, up driver.UpdateProgress) (*UploadResult, error) {
-	ossToken, err := c.client.GetOSSToken()
-	if err != nil {
-		return nil, err
-	}
-	ossClient, err := oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret)
-	if err != nil {
-		return nil, err
-	}
-	bucket, err := ossClient.Bucket(params.Bucket)
-	if err != nil {
-		return nil, err
-	}
-
-	var bodyBytes []byte
-	r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
-		Reader:         s,
-		UpdateProgress: up,
-	})
-	if err = bucket.PutObject(params.Object, r, append(
-		driver115.OssOption(params, ossToken),
-		oss.CallbackResult(&bodyBytes),
-	)...); err != nil {
-		return nil, err
-	}
-
-	var uploadResult UploadResult
-	if err = json.Unmarshal(bodyBytes, &uploadResult); err != nil {
-		return nil, err
-	}
-	return &uploadResult, uploadResult.Err(string(bodyBytes))
-}
 // UploadByMultipart upload by multipart blocks
-func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.UploadOSSParams, fileSize int64, s model.FileStreamer,
-	dirID string, up driver.UpdateProgress, opts ...driver115.UploadMultipartOption) (*UploadResult, error) {
+func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error {
 	var (
 		chunks    []oss.FileChunk
 		parts     []oss.UploadPart
 		imur      oss.InitiateMultipartUploadResult
 		ossClient *oss.Client
 		bucket    *oss.Bucket
 		ossToken  *driver115.UploadOSSTokenResp
-		bodyBytes []byte
 		err       error
 	)
 
-	tmpF, err := s.CacheFullInTempFile()
+	tmpF, err := stream.CacheFullInTempFile()
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	options := driver115.DefalutUploadMultipartOptions()
@@ -332,19 +273,17 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
 			f(options)
 		}
 	}
-	// with OSS Sequential enabled, parts must be uploaded in order
-	options.ThreadsNum = 1
 
 	if ossToken, err = d.client.GetOSSToken(); err != nil {
-		return nil, err
+		return err
 	}
 
-	if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret, oss.EnableMD5(true), oss.EnableCRC(true)); err != nil {
-		return nil, err
-	}
+	if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil {
+		return err
+	}
 
 	if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
-		return nil, err
+		return err
 	}
 
 	// the ossToken expires after an hour, so re-fetch it every 50 minutes
@@ -354,15 +293,14 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
 	timeout := time.NewTimer(options.Timeout)
 
 	if chunks, err = SplitFile(fileSize); err != nil {
-		return nil, err
+		return err
 	}
 
 	if imur, err = bucket.InitiateMultipartUpload(params.Object,
 		oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
 		oss.UserAgentHeader(driver115.OSSUserAgent),
-		oss.EnableSha1(), oss.Sequential(),
 	); err != nil {
-		return nil, err
+		return err
 	}
 
 	wg := sync.WaitGroup{}
@@ -380,7 +318,6 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
 		quit <- struct{}{}
 	}()
 
-	completedNum := atomic.Int32{}
 	// consumers
 	for i := 0; i < options.ThreadsNum; i++ {
 		go func(threadId int) {
@@ -393,28 +330,25 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
 				var part oss.UploadPart // keep retrying on error, 3 attempts in total
 				for retry := 0; retry < 3; retry++ {
 					select {
-					case <-ctx.Done():
-						break
 					case <-ticker.C:
 						if ossToken, err = d.client.GetOSSToken(); err != nil { // re-fetch the ossToken when the ticker fires
 							errCh <- errors.Wrap(err, "刷新token时出现错误")
 						}
 					default:
 					}
 
 					buf := make([]byte, chunk.Size)
 					if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
 						continue
 					}
 
-					if part, err = bucket.UploadPart(imur, driver.NewLimitedUploadStream(ctx, bytes.NewReader(buf)),
-						chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
+					b := bytes.NewBuffer(buf)
+					if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
 						break
 					}
 				}
 				if err != nil {
-					errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", s.GetName(), chunk.Number, err))
-				} else {
-					num := completedNum.Add(1)
-					up(float64(num) * 100.0 / float64(len(chunks)))
+					errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err))
 				}
 				UploadedPartsCh <- part
 			}
@@ -433,31 +367,25 @@ LOOP:
 		case <-ticker.C:
 			// re-fetch the ossToken when the ticker fires
 			if ossToken, err = d.client.GetOSSToken(); err != nil {
-				return nil, err
+				return err
 			}
 		case <-quit:
 			break LOOP
 		case <-errCh:
-			return nil, err
+			return err
 		case <-timeout.C:
-			return nil, fmt.Errorf("time out")
+			return fmt.Errorf("time out")
 		}
 	}
 
-	// for unclear reasons the OSS side does not compute the sha1 for multipart uploads, which makes the 115 server-side check fail
-	// params.Callback.Callback = strings.ReplaceAll(params.Callback.Callback, "${sha1}", params.SHA1)
-	if _, err := bucket.CompleteMultipartUpload(imur, parts, append(
-		driver115.OssOption(params, ossToken),
-		oss.CallbackResult(&bodyBytes),
-	)...); err != nil {
-		return nil, err
-	}
-
-	var uploadResult UploadResult
-	if err = json.Unmarshal(bodyBytes, &uploadResult); err != nil {
-		return nil, err
-	}
-	return &uploadResult, uploadResult.Err(string(bodyBytes))
+	// the EOF comes from the xml Unmarshal: the response is actually JSON, so the upload did succeed
+	if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) {
+		// when the file name contains & or <, parsing the response as xml fails even though the upload succeeded
+		if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
+			return err
+		}
+	}
+	return d.checkUploadStatus(dirID, params.SHA1)
 }
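
Both sides of the UploadByMultipart diff share one pattern worth noting: the OSS STS token expires after an hour, so a 50-minute time.Ticker re-fetches it while parts are still uploading. A minimal sketch of that refresh loop (fetchToken stands in for d.client.GetOSSToken):

package main

import (
	"fmt"
	"time"
)

// fetchToken stands in for d.client.GetOSSToken in the diff above.
func fetchToken() (string, error) { return "sts-token", nil }

func main() {
	token, _ := fetchToken()
	// The STS token expires after an hour, so refresh it every 50 minutes
	// for as long as parts are still being uploaded.
	ticker := time.NewTicker(50 * time.Minute)
	defer ticker.Stop()
	done := make(chan struct{})
	go func() { // stands in for the upload finishing
		time.Sleep(100 * time.Millisecond)
		close(done)
	}()
	for {
		select {
		case <-ticker.C:
			if t, err := fetchToken(); err == nil {
				token = t
			}
		case <-done:
			fmt.Println("upload finished; last token:", token)
			return
		}
	}
}
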
 func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
@@ -466,6 +394,27 @@ func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
 	}
 }
 
+func (d *Pan115) checkUploadStatus(dirID, sha1 string) error {
+	// check whether the upload actually succeeded
+	req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8")
+	opts := []driver115.GetFileOptions{
+		driver115.WithOrder(driver115.FileOrderByTime),
+		driver115.WithShowDirEnable(false),
+		driver115.WithAsc(false),
+		driver115.WithLimit(500),
+	}
+	fResp, err := driver115.GetFiles(req, dirID, opts...)
+	if err != nil {
+		return err
+	}
+	for _, fileInfo := range fResp.Files {
+		if fileInfo.Sha1 == sha1 {
+			return nil
+		}
+	}
+	return driver115.ErrUploadFailed
+}
+
 func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
 	for i := int64(1); i < 10; i++ {
 		if fileSize < i*utils.GB { // files smaller than i GB are split into i*1000 chunks
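
SplitFile is cut off here, but its visible comment states the rule: a file smaller than i GB is split into i*1000 chunks. A sketch of just that chunk-count rule; the branch for files of 10 GB and larger is an assumption, since that part of the function is not shown:

package main

import "fmt"

const gb = int64(1) << 30

// chunkCount mirrors the visible part of SplitFile: files under i GB
// are split into i*1000 chunks. The >=10 GB fallback is assumed here.
func chunkCount(fileSize int64) int64 {
	for i := int64(1); i < 10; i++ {
		if fileSize < i*gb {
			return i * 1000
		}
	}
	return 10000 // assumption: cap for larger files
}

func main() {
	fmt.Println(chunkCount(3 * gb))  // 4000: 3 GB falls below the 4 GB tier
	fmt.Println(chunkCount(512 << 20)) // 1000: below 1 GB
}
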

View File

@@ -1,335 +0,0 @@
package _115_open
import (
"context"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/cmd/flags"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
sdk "github.com/xhofe/115-sdk-go"
"golang.org/x/time/rate"
)
type Open115 struct {
model.Storage
Addition
client *sdk.Client
limiter *rate.Limiter
}
func (d *Open115) Config() driver.Config {
return config
}
func (d *Open115) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Open115) Init(ctx context.Context) error {
d.client = sdk.New(sdk.WithRefreshToken(d.Addition.RefreshToken),
sdk.WithAccessToken(d.Addition.AccessToken),
sdk.WithOnRefreshToken(func(s1, s2 string) {
d.Addition.AccessToken = s1
d.Addition.RefreshToken = s2
op.MustSaveDriverStorage(d)
}))
if flags.Debug || flags.Dev {
d.client.SetDebug(true)
}
_, err := d.client.UserInfo(ctx)
if err != nil {
return err
}
if d.Addition.LimitRate > 0 {
d.limiter = rate.NewLimiter(rate.Limit(d.Addition.LimitRate), 1)
}
return nil
}
func (d *Open115) WaitLimit(ctx context.Context) error {
if d.limiter != nil {
return d.limiter.Wait(ctx)
}
return nil
}
func (d *Open115) Drop(ctx context.Context) error {
return nil
}
func (d *Open115) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
var res []model.Obj
pageSize := int64(200)
offset := int64(0)
for {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
resp, err := d.client.GetFiles(ctx, &sdk.GetFilesReq{
CID: dir.GetID(),
Limit: pageSize,
Offset: offset,
ASC: d.Addition.OrderDirection == "asc",
O: d.Addition.OrderBy,
// Cur: 1,
ShowDir: true,
})
if err != nil {
return nil, err
}
res = append(res, utils.MustSliceConvert(resp.Data, func(src sdk.GetFilesResp_File) model.Obj {
obj := Obj(src)
return &obj
})...)
if len(res) >= int(resp.Count) {
break
}
offset += pageSize
}
return res, nil
}
func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
var ua string
if args.Header != nil {
ua = args.Header.Get("User-Agent")
}
if ua == "" {
ua = base.UserAgent
}
obj, ok := file.(*Obj)
if !ok {
return nil, fmt.Errorf("can't convert obj")
}
pc := obj.Pc
resp, err := d.client.DownURL(ctx, pc, ua)
if err != nil {
return nil, err
}
u, ok := resp[obj.GetID()]
if !ok {
return nil, fmt.Errorf("can't get link")
}
return &model.Link{
URL: u.URL.URL,
Header: http.Header{
"User-Agent": []string{ua},
},
}, nil
}
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
resp, err := d.client.Mkdir(ctx, parentDir.GetID(), dirName)
if err != nil {
return nil, err
}
return &Obj{
Fid: resp.FileID,
Pid: parentDir.GetID(),
Fn: dirName,
Fc: "0",
Upt: time.Now().Unix(),
Uet: time.Now().Unix(),
UpPt: time.Now().Unix(),
}, nil
}
func (d *Open115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
_, err := d.client.Move(ctx, &sdk.MoveReq{
FileIDs: srcObj.GetID(),
ToCid: dstDir.GetID(),
})
if err != nil {
return nil, err
}
return srcObj, nil
}
func (d *Open115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
_, err := d.client.UpdateFile(ctx, &sdk.UpdateFileReq{
FileID: srcObj.GetID(),
FileNma: newName,
})
if err != nil {
return nil, err
}
obj, ok := srcObj.(*Obj)
if ok {
obj.Fn = newName
}
return srcObj, nil
}
func (d *Open115) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
_, err := d.client.Copy(ctx, &sdk.CopyReq{
PID: dstDir.GetID(),
FileID: srcObj.GetID(),
NoDupli: "1",
})
if err != nil {
return nil, err
}
return srcObj, nil
}
func (d *Open115) Remove(ctx context.Context, obj model.Obj) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
_obj, ok := obj.(*Obj)
if !ok {
return fmt.Errorf("can't convert obj")
}
_, err := d.client.DelFile(ctx, &sdk.DelFileReq{
FileIDs: _obj.GetID(),
ParentID: _obj.Pid,
})
if err != nil {
return err
}
return nil
}
func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
tempF, err := file.CacheFullInTempFile()
if err != nil {
return err
}
// cal full sha1
sha1, err := utils.HashReader(utils.SHA1, tempF)
if err != nil {
return err
}
_, err = tempF.Seek(0, io.SeekStart)
if err != nil {
return err
}
// pre 128k sha1
sha1128k, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, 128*1024))
if err != nil {
return err
}
_, err = tempF.Seek(0, io.SeekStart)
if err != nil {
return err
}
// 1. Init
resp, err := d.client.UploadInit(ctx, &sdk.UploadInitReq{
FileName: file.GetName(),
FileSize: file.GetSize(),
Target: dstDir.GetID(),
FileID: strings.ToUpper(sha1),
PreID: strings.ToUpper(sha1128k),
})
if err != nil {
return err
}
if resp.Status == 2 {
return nil
}
// 2. two way verify
if utils.SliceContains([]int{6, 7, 8}, resp.Status) {
		signCheck := strings.Split(resp.SignCheck, "-") // "sign_check": "2392148-2392298" means the sha1 of the bytes from offset 2392148 through 2392298, both endpoints included
start, err := strconv.ParseInt(signCheck[0], 10, 64)
if err != nil {
return err
}
end, err := strconv.ParseInt(signCheck[1], 10, 64)
if err != nil {
return err
}
_, err = tempF.Seek(start, io.SeekStart)
if err != nil {
return err
}
signVal, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, end-start+1))
if err != nil {
return err
}
_, err = tempF.Seek(0, io.SeekStart)
if err != nil {
return err
}
resp, err = d.client.UploadInit(ctx, &sdk.UploadInitReq{
FileName: file.GetName(),
FileSize: file.GetSize(),
Target: dstDir.GetID(),
FileID: strings.ToUpper(sha1),
PreID: strings.ToUpper(sha1128k),
SignKey: resp.SignKey,
SignVal: strings.ToUpper(signVal),
})
if err != nil {
return err
}
if resp.Status == 2 {
return nil
}
}
// 3. get upload token
tokenResp, err := d.client.UploadGetToken(ctx)
if err != nil {
return err
}
// 4. upload
err = d.multpartUpload(ctx, tempF, file, up, tokenResp, resp)
if err != nil {
return err
}
return nil
}
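
In the two-way verification step above (status 6/7/8), the server replies with a byte range such as "2392148-2392298" in sign_check and expects the SHA-1 of exactly those bytes, inclusive on both ends, which is why the code reads end-start+1 bytes. A standalone sketch of that range hash:

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"
)

// rangeSHA1 hashes the inclusive byte range encoded as "start-end";
// for "2392148-2392298" that is end-start+1 = 151 bytes.
func rangeSHA1(f *os.File, signCheck string) (string, error) {
	parts := strings.Split(signCheck, "-")
	if len(parts) != 2 {
		return "", fmt.Errorf("bad sign_check: %q", signCheck)
	}
	start, err := strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return "", err
	}
	end, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return "", err
	}
	if _, err := f.Seek(start, io.SeekStart); err != nil {
		return "", err
	}
	h := sha1.New()
	if _, err := io.Copy(h, io.LimitReader(f, end-start+1)); err != nil {
		return "", err
	}
	return strings.ToUpper(hex.EncodeToString(h.Sum(nil))), nil
}

func main() {
	f, err := os.Open("example.bin")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	sum, err := rangeSHA1(f, "2392148-2392298")
	fmt.Println(sum, err)
}
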
// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
// return nil, errs.NotImplement
// }
// func (d *Open115) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// // TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
// return nil, errs.NotImplement
// }
// func (d *Open115) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
// return nil, errs.NotImplement
// }
// func (d *Open115) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// // TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// // return errs.NotImplement to use an internal archive tool
// return nil, errs.NotImplement
// }
//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*Open115)(nil)

View File

@@ -1,37 +0,0 @@
package _115_open
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
// Usually one of two
driver.RootID
// define other
RefreshToken string `json:"refresh_token" required:"true"`
OrderBy string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
LimitRate float64 `json:"limit_rate" type:"float" default:"1" help:"limit all api request rate ([limit]r/1s)"`
AccessToken string
}
var config = driver.Config{
Name: "115 Open",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Open115{}
})
}

View File

@@ -1,59 +0,0 @@
package _115_open
import (
"time"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
sdk "github.com/xhofe/115-sdk-go"
)
type Obj sdk.GetFilesResp_File
// Thumb implements model.Thumb.
func (o *Obj) Thumb() string {
return o.Thumbnail
}
// CreateTime implements model.Obj.
func (o *Obj) CreateTime() time.Time {
return time.Unix(o.UpPt, 0)
}
// GetHash implements model.Obj.
func (o *Obj) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.SHA1, o.Sha1)
}
// GetID implements model.Obj.
func (o *Obj) GetID() string {
return o.Fid
}
// GetName implements model.Obj.
func (o *Obj) GetName() string {
return o.Fn
}
// GetPath implements model.Obj.
func (o *Obj) GetPath() string {
return ""
}
// GetSize implements model.Obj.
func (o *Obj) GetSize() int64 {
return o.FS
}
// IsDir implements model.Obj.
func (o *Obj) IsDir() bool {
return o.Fc == "0"
}
// ModTime implements model.Obj.
func (o *Obj) ModTime() time.Time {
return time.Unix(o.Upt, 0)
}
var _ model.Obj = (*Obj)(nil)
var _ model.Thumb = (*Obj)(nil)

View File

@@ -1,140 +0,0 @@
package _115_open
import (
"context"
"encoding/base64"
"io"
"time"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/avast/retry-go"
sdk "github.com/xhofe/115-sdk-go"
)
func calPartSize(fileSize int64) int64 {
var partSize int64 = 20 * utils.MB
if fileSize > partSize {
if fileSize > 1*utils.TB { // file Size over 1TB
partSize = 5 * utils.GB // file part size 5GB
} else if fileSize > 768*utils.GB { // over 768GB
			partSize = 109951163 // ≈ 104.8576 MB, splits 1 TB into 10,000 parts
} else if fileSize > 512*utils.GB { // over 512GB
partSize = 82463373 // ≈ 78.6432MB
} else if fileSize > 384*utils.GB { // over 384GB
partSize = 54975582 // ≈ 52.4288MB
} else if fileSize > 256*utils.GB { // over 256GB
partSize = 41231687 // ≈ 39.3216MB
} else if fileSize > 128*utils.GB { // over 128GB
partSize = 27487791 // ≈ 26.2144MB
}
}
return partSize
}
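
These tiers exist to keep the part count under OSS's 10,000-part limit while growing the part size gradually. A quick worked example for a 900 GB file, which falls into the over-768 GB tier:

package main

import "fmt"

func main() {
	const gb = int64(1) << 30
	fileSize := 900 * gb         // falls in the >768 GB tier
	partSize := int64(109951163) // ~104.86 MB per part
	parts := (fileSize + partSize - 1) / partSize
	fmt.Println(parts) // 8790, safely below the 10,000-part cap
}
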
func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
if err != nil {
return err
}
bucket, err := ossClient.Bucket(initResp.Bucket)
if err != nil {
return err
}
err = bucket.PutObject(initResp.Object, tempF,
oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))),
oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))),
)
return err
}
// type CallbackResult struct {
// State bool `json:"state"`
// Code int `json:"code"`
// Message string `json:"message"`
// Data struct {
// PickCode string `json:"pick_code"`
// FileName string `json:"file_name"`
// FileSize int64 `json:"file_size"`
// FileID string `json:"file_id"`
// ThumbURL string `json:"thumb_url"`
// Sha1 string `json:"sha1"`
// Aid int `json:"aid"`
// Cid string `json:"cid"`
// } `json:"data"`
// }
func (d *Open115) multpartUpload(ctx context.Context, tempF model.File, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
fileSize := stream.GetSize()
chunkSize := calPartSize(fileSize)
ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
if err != nil {
return err
}
bucket, err := ossClient.Bucket(initResp.Bucket)
if err != nil {
return err
}
imur, err := bucket.InitiateMultipartUpload(initResp.Object, oss.Sequential())
if err != nil {
return err
}
partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
parts := make([]oss.UploadPart, partNum)
offset := int64(0)
for i := int64(1); i <= partNum; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
partSize := chunkSize
if i == partNum {
partSize = fileSize - (i-1)*chunkSize
}
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
err = retry.Do(func() error {
_ = rd.Reset()
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
if err != nil {
return err
}
parts[i-1] = part
return nil
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
if err != nil {
return err
}
if i == partNum {
offset = fileSize
} else {
offset += partSize
}
up(float64(offset) / float64(fileSize))
}
// callbackRespBytes := make([]byte, 1024)
_, err = bucket.CompleteMultipartUpload(
imur,
parts,
oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))),
oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))),
// oss.CallbackResult(&callbackRespBytes),
)
if err != nil {
return err
}
return nil
}
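
partNum above is the usual ceiling division, and only the final part may be short. A small worked example with the 20 MB default part size from calPartSize:

package main

import "fmt"

func main() {
	const mb = int64(1) << 20
	fileSize := 50 * mb
	chunkSize := 20 * mb // calPartSize's default for small files
	partNum := (fileSize + chunkSize - 1) / chunkSize
	for i := int64(1); i <= partNum; i++ {
		partSize := chunkSize
		if i == partNum { // only the final part carries the remainder
			partSize = fileSize - (i-1)*chunkSize
		}
		fmt.Printf("part %d: %d MB\n", i, partSize/mb)
	}
}
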

View File

@@ -1,3 +0,0 @@
package _115_open
// do others that not defined in Driver interface

View File

@@ -9,8 +9,8 @@ type Addition struct {
 	Cookie       string  `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
 	QRCodeToken  string  `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
 	QRCodeSource string  `json:"qrcode_source" type:"select" options:"web,android,ios,tv,alipaymini,wechatmini,qandroid" default:"linux" help:"select the QR code device, default linux"`
-	PageSize     int64   `json:"page_size" type:"number" default:"1000" help:"list api per page size of 115 driver"`
-	LimitRate    float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"`
+	PageSize     int64   `json:"page_size" type:"number" default:"20" help:"list api per page size of 115 driver"`
+	LimitRate    float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"`
 	ShareCode    string  `json:"share_code" type:"text" required:"true" help:"share code of 115 share link"`
 	ReceiveCode  string  `json:"receive_code" type:"text" required:"true" help:"receive code of 115 share link"`
 	driver.RootID
@@ -18,7 +18,7 @@ type Addition struct {
 
 var config = driver.Config{
 	Name:        "115 Share",
-	DefaultRoot: "0",
+	DefaultRoot: "",
 	// OnlyProxy: true,
 	// OnlyLocal: true,
 	CheckStatus: false,

View File

@@ -96,7 +96,7 @@ func (d *Pan115Share) login() error {
 		if cr, err = d.client.QRCodeLoginWithApp(s, driver115.LoginApp(d.QRCodeSource)); err != nil {
 			return errors.Wrap(err, "failed to login by qrcode")
 		}
-		d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s;KID=%s", cr.UID, cr.CID, cr.SEID, cr.KID)
+		d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s", cr.UID, cr.CID, cr.SEID)
 		d.QRCodeToken = ""
 	} else if d.Cookie != "" {
 		if err = cr.FromCookie(d.Cookie); err != nil {

View File

@@ -2,22 +2,21 @@ package _123
 
 import (
 	"context"
+	"crypto/md5"
 	"encoding/base64"
+	"encoding/hex"
 	"fmt"
+	"golang.org/x/time/rate"
+	"io"
 	"net/http"
 	"net/url"
-	"strconv"
-	"strings"
 	"sync"
 	"time"
 
-	"golang.org/x/time/rate"
-
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/credentials"
@@ -30,8 +29,7 @@ import (
 type Pan123 struct {
 	model.Storage
 	Addition
 	apiRateLimit    sync.Map
-	safeBoxUnlocked sync.Map
 }
 
 func (d *Pan123) Config() driver.Config {
@@ -43,38 +41,21 @@ func (d *Pan123) GetAddition() driver.Additional {
 }
 
 func (d *Pan123) Init(ctx context.Context) error {
-	_, err := d.Request(UserInfo, http.MethodGet, nil, nil)
+	_, err := d.request(UserInfo, http.MethodGet, nil, nil)
 	return err
 }
 
 func (d *Pan123) Drop(ctx context.Context) error {
-	_, _ = d.Request(Logout, http.MethodPost, func(req *resty.Request) {
+	_, _ = d.request(Logout, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{})
 	}, nil)
 	return nil
 }
 func (d *Pan123) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
-	if f, ok := dir.(File); ok && f.IsLock {
-		if err := d.unlockSafeBox(f.FileId); err != nil {
-			return nil, err
-		}
-	}
 	files, err := d.getFiles(ctx, dir.GetID(), dir.GetName())
 	if err != nil {
-		msg := strings.ToLower(err.Error())
-		if strings.Contains(msg, "safe box") || strings.Contains(err.Error(), "保险箱") {
-			if id, e := strconv.ParseInt(dir.GetID(), 10, 64); e == nil {
-				if e = d.unlockSafeBox(id); e == nil {
-					files, err = d.getFiles(ctx, dir.GetID(), dir.GetName())
-				} else {
-					return nil, e
-				}
-			}
-		}
-		if err != nil {
-			return nil, err
-		}
+		return nil, err
 	}
 	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
 		return src, nil
@@ -100,8 +81,7 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 		"size": f.Size,
 		"type": f.Type,
 	}
-	resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
+	resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(data).SetHeaders(headers)
 	}, nil)
 	if err != nil {
@@ -154,7 +134,7 @@ func (d *Pan123) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
 		"size":         0,
 		"type":         1,
 	}
-	_, err := d.Request(Mkdir, http.MethodPost, func(req *resty.Request) {
+	_, err := d.request(Mkdir, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(data)
 	}, nil)
 	return err
@@ -165,7 +145,7 @@ func (d *Pan123) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
 		"fileIdList":   []base.Json{{"FileId": srcObj.GetID()}},
 		"parentFileId": dstDir.GetID(),
 	}
-	_, err := d.Request(Move, http.MethodPost, func(req *resty.Request) {
+	_, err := d.request(Move, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(data)
 	}, nil)
 	return err
@@ -177,7 +157,7 @@ func (d *Pan123) Rename(ctx context.Context, srcObj model.Obj, newName string) e
 		"fileId":   srcObj.GetID(),
 		"fileName": newName,
 	}
-	_, err := d.Request(Rename, http.MethodPost, func(req *resty.Request) {
+	_, err := d.request(Rename, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(data)
 	}, nil)
 	return err
@@ -194,7 +174,7 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error {
 			"operation":         true,
 			"fileTrashInfoList": []File{f},
 		}
-		_, err := d.Request(Trash, http.MethodPost, func(req *resty.Request) {
+		_, err := d.request(Trash, http.MethodPost, func(req *resty.Request) {
 			req.SetBody(data)
 		}, nil)
 		return err
@@ -203,26 +183,36 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error {
 	}
 }
-func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
-	etag := file.GetHash().GetHash(utils.MD5)
-	var err error
-	if len(etag) < utils.MD5.Width {
-		_, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
-		if err != nil {
-			return err
-		}
-	}
+func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+	// const DEFAULT int64 = 10485760
+	h := md5.New()
+	// need to calculate md5 of the full content
+	tempFile, err := stream.CacheFullInTempFile()
+	if err != nil {
+		return err
+	}
+	defer func() {
+		_ = tempFile.Close()
+	}()
+	if _, err = utils.CopyWithBuffer(h, tempFile); err != nil {
+		return err
+	}
+	_, err = tempFile.Seek(0, io.SeekStart)
+	if err != nil {
+		return err
+	}
+	etag := hex.EncodeToString(h.Sum(nil))
 	data := base.Json{
 		"driveId":      0,
 		"duplicate":    2, // 2 -> overwrite, 1 -> rename, 0 -> default
 		"etag":         etag,
-		"fileName":     file.GetName(),
+		"fileName":     stream.GetName(),
 		"parentFileId": dstDir.GetID(),
-		"size":         file.GetSize(),
+		"size":         stream.GetSize(),
 		"type":         0,
 	}
 	var resp UploadResp
-	res, err := d.Request(UploadRequest, http.MethodPost, func(req *resty.Request) {
+	res, err := d.request(UploadRequest, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(data).SetContext(ctx)
 	}, &resp)
 	if err != nil {
@@ -233,7 +223,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
 		return nil
 	}
 	if resp.Data.AccessKeyId == "" || resp.Data.SecretAccessKey == "" || resp.Data.SessionToken == "" {
-		err = d.newUpload(ctx, &resp, file, up)
+		err = d.newUpload(ctx, &resp, stream, tempFile, up)
 		return err
 	} else {
 		cfg := &aws.Config{
@@ -247,23 +237,17 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
 			return err
 		}
 		uploader := s3manager.NewUploader(s)
-		if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
-			uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
+		if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
+			uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
 		}
 		input := &s3manager.UploadInput{
 			Bucket: &resp.Data.Bucket,
 			Key:    &resp.Data.Key,
-			Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
-				Reader:         file,
-				UpdateProgress: up,
-			}),
+			Body:   tempFile,
 		}
 		_, err = uploader.UploadWithContext(ctx, input)
-		if err != nil {
-			return err
-		}
 	}
-	_, err = d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {
+	_, err = d.request(UploadComplete, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
 			"fileId": resp.Data.FileId,
 		}).SetContext(ctx)
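
The older path on the right derives the etag by streaming the cached temp file through MD5 and seeking back to the start so the same handle can be reused as the upload body. The same shape in plain standard-library Go:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

func fileMD5(f *os.File) (string, error) {
	h := md5.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	// Seek back so the same handle can be re-read for the upload body.
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	f, err := os.Open("example.bin")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	etag, err := fileMD5(f)
	fmt.Println(etag, err)
}
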

View File

@@ -6,9 +6,8 @@ import (
 )
 
 type Addition struct {
 	Username     string `json:"username" required:"true"`
 	Password     string `json:"password" required:"true"`
-	SafePassword string `json:"safe_password"`
 	driver.RootID
 	//OrderBy        string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
 	//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`

View File

@@ -20,7 +20,6 @@ type File struct {
 	Etag        string `json:"Etag"`
 	S3KeyFlag   string `json:"S3KeyFlag"`
 	DownloadUrl string `json:"DownloadUrl"`
-	IsLock      bool   `json:"IsLock"`
 }
 
 func (f File) CreateTime() time.Time {
func (f File) CreateTime() time.Time { func (f File) CreateTime() time.Time {

View File

@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"math"
 	"net/http"
 	"strconv"
 
@@ -24,7 +25,7 @@ func (d *Pan123) getS3PreSignedUrls(ctx context.Context, upReq *UploadResp, star
 		"StorageNode": upReq.Data.StorageNode,
 	}
 	var s3PreSignedUrls S3PreSignedURLs
-	_, err := d.Request(S3PreSignedUrls, http.MethodPost, func(req *resty.Request) {
+	_, err := d.request(S3PreSignedUrls, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(data).SetContext(ctx)
 	}, &s3PreSignedUrls)
 	if err != nil {
@@ -43,7 +44,7 @@ func (d *Pan123) getS3Auth(ctx context.Context, upReq *UploadResp, start, end in
 		"uploadId": upReq.Data.UploadId,
 	}
 	var s3PreSignedUrls S3PreSignedURLs
-	_, err := d.Request(S3Auth, http.MethodPost, func(req *resty.Request) {
+	_, err := d.request(S3Auth, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(data).SetContext(ctx)
 	}, &s3PreSignedUrls)
 	if err != nil {
@@ -62,31 +63,21 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
 		"key":      upReq.Data.Key,
 		"uploadId": upReq.Data.UploadId,
 	}
-	_, err := d.Request(UploadCompleteV2, http.MethodPost, func(req *resty.Request) {
+	_, err := d.request(UploadCompleteV2, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(data).SetContext(ctx)
 	}, nil)
 	return err
 }
-func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
+func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, reader io.Reader, up driver.UpdateProgress) error {
-	tmpF, err := file.CacheFullInTempFile()
-	if err != nil {
-		return err
-	}
+	chunkSize := int64(1024 * 1024 * 16)
 	// fetch s3 pre signed urls
-	size := file.GetSize()
-	chunkSize := min(size, 16*utils.MB)
-	chunkCount := int(size / chunkSize)
-	lastChunkSize := size % chunkSize
-	if lastChunkSize > 0 {
-		chunkCount++
-	} else {
-		lastChunkSize = chunkSize
-	}
+	chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize)))
 	// only 1 batch is allowed
+	isMultipart := chunkCount > 1
 	batchSize := 1
 	getS3UploadUrl := d.getS3Auth
-	if chunkCount > 1 {
+	if isMultipart {
 		batchSize = 10
 		getS3UploadUrl = d.getS3PreSignedUrls
 	}
@@ -95,7 +86,10 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 			return ctx.Err()
 		}
 		start := i
-		end := min(i+batchSize, chunkCount+1)
+		end := i + batchSize
+		if end > chunkCount+1 {
+			end = chunkCount + 1
+		}
 		s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
 		if err != nil {
 			return err
@@ -107,9 +101,9 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 			}
 			curSize := chunkSize
 			if j == chunkCount {
-				curSize = lastChunkSize
+				curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
 			}
-			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.NewSectionReader(tmpF, chunkSize*int64(j-1), curSize), curSize, false, getS3UploadUrl)
+			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(reader, chunkSize), curSize, false, getS3UploadUrl)
 			if err != nil {
 				return err
 			}
@@ -120,12 +114,12 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 	return d.completeS3(ctx, upReq, file, chunkCount > 1)
 }
 
-func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *io.SectionReader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
+func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.Reader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
 	uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
 	if uploadUrl == "" {
 		return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
 	}
-	req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader))
+	req, err := http.NewRequest("PUT", uploadUrl, reader)
 	if err != nil {
 		return err
 	}
@@ -148,7 +142,6 @@ func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSign
 		}
 		s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
 		// retry
-		reader.Seek(0, io.SeekStart)
 		return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
 	}
 	if res.StatusCode != http.StatusOK {
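
With the fixed 16 MiB chunkSize on the right, chunkCount is a plain ceiling division and the remainder lands in the last chunk. For example, a 100 MiB file uploads as 7 chunks, the last one 4 MiB:

package main

import (
	"fmt"
	"math"
)

func main() {
	const mib = int64(1) << 20
	fileSize := 100 * mib
	chunkSize := 16 * mib
	chunkCount := int(math.Ceil(float64(fileSize) / float64(chunkSize)))
	lastChunk := fileSize - int64(chunkCount-1)*chunkSize
	fmt.Println(chunkCount, lastChunk/mib) // 7 chunks, the last one 4 MiB
}
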

View File

@@ -26,9 +26,8 @@ const (
 	Api      = "https://www.123pan.com/api"
 	AApi     = "https://www.123pan.com/a/api"
 	BApi     = "https://www.123pan.com/b/api"
-	LoginApi = "https://login.123pan.com/api"
 	MainApi  = BApi
-	SignIn   = LoginApi + "/user/sign_in"
+	SignIn   = MainApi + "/user/sign_in"
 	Logout   = MainApi + "/user/logout"
 	UserInfo = MainApi + "/user/info"
 	FileList = MainApi + "/file/list/new"
@@ -43,7 +42,6 @@ const (
 	S3Auth           = MainApi + "/file/s3_upload_object/auth"
 	UploadCompleteV2 = MainApi + "/file/upload_complete/v2"
 	S3Complete       = MainApi + "/file/s3_complete_multipart_upload"
-	SafeBoxUnlock    = MainApi + "/restful/goapi/v1/file/safe_box/auth/unlockbox"
 	//AuthKeySalt      = "8-8D$sL8gPjom7bk#cY"
 )
 
@@ -162,12 +160,12 @@ func (d *Pan123) login() error {
 	}
 	res, err := base.RestyClient.R().
 		SetHeaders(map[string]string{
 			"origin":      "https://www.123pan.com",
 			"referer":     "https://www.123pan.com/",
-			//"user-agent": "Dart/2.19(dart:io)-alist",
+			"user-agent":  "Dart/2.19(dart:io)-alist",
 			"platform":    "web",
 			"app-version": "3",
-			"user-agent":  base.UserAgent,
+			//"user-agent": base.UserAgent,
 		}).
 		SetBody(body).Post(SignIn)
 	if err != nil {
@@ -195,15 +193,13 @@ func (d *Pan123) login() error {
 //	return &authKey, nil
 //}
 
-func (d *Pan123) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
-	isRetry := false
-do:
+func (d *Pan123) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
 	req := base.RestyClient.R()
 	req.SetHeaders(map[string]string{
 		"origin":        "https://www.123pan.com",
 		"referer":       "https://www.123pan.com/",
 		"authorization": "Bearer " + d.AccessToken,
-		"user-agent":    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)",
+		"user-agent":    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) alist-client",
 		"platform":      "web",
 		"app-version":   "3",
 		//"user-agent":  base.UserAgent,
@@ -226,35 +222,18 @@ do:
 	body := res.Body()
 	code := utils.Json.Get(body, "code").ToInt()
 	if code != 0 {
-		if !isRetry && code == 401 {
+		if code == 401 {
 			err := d.login()
 			if err != nil {
 				return nil, err
 			}
-			isRetry = true
-			goto do
+			return d.request(url, method, callback, resp)
 		}
 		return nil, errors.New(jsoniter.Get(body, "message").ToString())
 	}
 	return body, nil
 }
 
-func (d *Pan123) unlockSafeBox(fileId int64) error {
-	if _, ok := d.safeBoxUnlocked.Load(fileId); ok {
-		return nil
-	}
-	data := base.Json{"password": d.SafePassword}
-	url := fmt.Sprintf("%s?fileId=%d", SafeBoxUnlock, fileId)
-	_, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
-		req.SetBody(data)
-	}, nil)
-	if err != nil {
-		return err
-	}
-	d.safeBoxUnlocked.Store(fileId, true)
-	return nil
-}
-
 func (d *Pan123) getFiles(ctx context.Context, parentId string, name string) ([]File, error) {
 	page := 1
 	total := 0
@@ -280,19 +259,10 @@ func (d *Pan123) getFiles(ctx context.Context, parentId string, name string) ([]
 		"operateType":   "4",
 		"inDirectSpace": "false",
 	}
-	_res, err := d.Request(FileList, http.MethodGet, func(req *resty.Request) {
+	_res, err := d.request(FileList, http.MethodGet, func(req *resty.Request) {
 		req.SetQueryParams(query)
 	}, &resp)
 	if err != nil {
-		msg := strings.ToLower(err.Error())
-		if strings.Contains(msg, "safe box") || strings.Contains(err.Error(), "保险箱") {
-			if fid, e := strconv.ParseInt(parentId, 10, 64); e == nil {
-				if e = d.unlockSafeBox(fid); e == nil {
-					return d.getFiles(ctx, parentId, name)
-				}
-				return nil, e
-			}
-		}
 		return nil, err
 	}
 	log.Debug(string(_res))

View File

@ -1,191 +0,0 @@
package _123Open
import (
"fmt"
"github.com/go-resty/resty/v2"
"net/http"
)
const (
// baseurl
ApiBaseURL = "https://open-api.123pan.com"
// auth
ApiToken = "/api/v1/access_token"
// file list
ApiFileList = "/api/v2/file/list"
// direct link
ApiGetDirectLink = "/api/v1/direct-link/url"
// mkdir
ApiMakeDir = "/upload/v1/file/mkdir"
// remove
ApiRemove = "/api/v1/file/trash"
// upload
ApiUploadDomainURL = "/upload/v2/file/domain"
ApiSingleUploadURL = "/upload/v2/file/single/create"
ApiCreateUploadURL = "/upload/v2/file/create"
ApiUploadSliceURL = "/upload/v2/file/slice"
ApiUploadCompleteURL = "/upload/v2/file/upload_complete"
// move
ApiMove = "/api/v1/file/move"
// rename
ApiRename = "/api/v1/file/name"
)
type Response[T any] struct {
Code int `json:"code"`
Message string `json:"message"`
Data T `json:"data"`
}
type TokenResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data TokenData `json:"data"`
}
type TokenData struct {
AccessToken string `json:"accessToken"`
ExpiredAt string `json:"expiredAt"`
}
type FileListResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data FileListData `json:"data"`
}
type FileListData struct {
LastFileId int64 `json:"lastFileId"`
FileList []File `json:"fileList"`
}
type DirectLinkResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data DirectLinkData `json:"data"`
}
type DirectLinkData struct {
URL string `json:"url"`
}
type MakeDirRequest struct {
Name string `json:"name"`
ParentID int64 `json:"parentID"`
}
type MakeDirResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data MakeDirData `json:"data"`
}
type MakeDirData struct {
DirID int64 `json:"dirID"`
}
type RemoveRequest struct {
FileIDs []int64 `json:"fileIDs"`
}
type UploadCreateResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data UploadCreateData `json:"data"`
}
type UploadCreateData struct {
FileID int64 `json:"fileId"`
Reuse bool `json:"reuse"`
PreuploadID string `json:"preuploadId"`
SliceSize int64 `json:"sliceSize"`
Servers []string `json:"servers"`
}
type UploadUrlResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data UploadUrlData `json:"data"`
}
type UploadUrlData struct {
PresignedURL string `json:"presignedUrl"`
}
type UploadCompleteResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data UploadCompleteData `json:"data"`
}
type UploadCompleteData struct {
FileID int `json:"fileID"`
Completed bool `json:"completed"`
}
func (d *Open123) Request(endpoint string, method string, setup func(*resty.Request), result any) (*resty.Response, error) {
client := resty.New()
token, err := d.tm.getToken()
if err != nil {
return nil, err
}
req := client.R().
SetHeader("Authorization", "Bearer "+token).
SetHeader("Platform", "open_platform").
SetHeader("Content-Type", "application/json").
SetResult(result)
if setup != nil {
setup(req)
}
switch method {
case http.MethodGet:
return req.Get(ApiBaseURL + endpoint)
case http.MethodPost:
return req.Post(ApiBaseURL + endpoint)
case http.MethodPut:
return req.Put(ApiBaseURL + endpoint)
default:
return nil, fmt.Errorf("unsupported method: %s", method)
}
}
func (d *Open123) RequestTo(fullURL string, method string, setup func(*resty.Request), result any) (*resty.Response, error) {
client := resty.New()
token, err := d.tm.getToken()
if err != nil {
return nil, err
}
req := client.R().
SetHeader("Authorization", "Bearer "+token).
SetHeader("Platform", "open_platform").
SetHeader("Content-Type", "application/json").
SetResult(result)
if setup != nil {
setup(req)
}
switch method {
case http.MethodGet:
return req.Get(fullURL)
case http.MethodPost:
return req.Post(fullURL)
case http.MethodPut:
return req.Put(fullURL)
default:
return nil, fmt.Errorf("unsupported method: %s", method)
}
}

View File

@ -1,294 +0,0 @@
package _123Open
import (
"context"
"fmt"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"net/http"
"strconv"
"time"
)
type Open123 struct {
model.Storage
Addition
UploadThread int
tm *tokenManager
}
func (d *Open123) Config() driver.Config {
return config
}
func (d *Open123) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Open123) Init(ctx context.Context) error {
d.tm = newTokenManager(d.ClientID, d.ClientSecret)
if _, err := d.tm.getToken(); err != nil {
return fmt.Errorf("token 初始化失败: %w", err)
}
return nil
}
func (d *Open123) Drop(ctx context.Context) error {
return nil
}
func (d *Open123) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
parentFileId, err := strconv.ParseInt(dir.GetID(), 10, 64)
if err != nil {
return nil, err
}
fileLastId := int64(0)
var results []File
for fileLastId != -1 {
files, err := d.getFiles(parentFileId, 100, fileLastId)
if err != nil {
return nil, err
}
for _, f := range files.Data.FileList {
if f.Trashed == 0 {
results = append(results, f)
}
}
fileLastId = files.Data.LastFileId
}
objs := make([]model.Obj, 0, len(results))
for _, f := range results {
objs = append(objs, f)
}
return objs, nil
}
func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if file.IsDir() {
return nil, errs.LinkIsDir
}
fileID := file.GetID()
var result DirectLinkResp
url := fmt.Sprintf("%s?fileID=%s", ApiGetDirectLink, fileID)
_, err := d.Request(url, http.MethodGet, nil, &result)
if err != nil {
return nil, err
}
if result.Code != 0 {
return nil, fmt.Errorf("get link failed: %s", result.Message)
}
linkURL := result.Data.URL
if d.PrivateKey != "" {
if d.UID == 0 {
return nil, fmt.Errorf("uid is required when private key is set")
}
duration := time.Duration(d.ValidDuration)
if duration <= 0 {
duration = 30
}
signedURL, err := SignURL(linkURL, d.PrivateKey, d.UID, duration*time.Minute)
if err != nil {
return nil, err
}
linkURL = signedURL
}
return &model.Link{
URL: linkURL,
}, nil
}
func (d *Open123) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
parentID, err := strconv.ParseInt(parentDir.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid parent ID: %w", err)
}
var result MakeDirResp
reqBody := MakeDirRequest{
Name: dirName,
ParentID: parentID,
}
_, err = d.Request(ApiMakeDir, http.MethodPost, func(r *resty.Request) {
r.SetBody(reqBody)
}, &result)
if err != nil {
return nil, err
}
if result.Code != 0 {
return nil, fmt.Errorf("mkdir failed: %s", result.Message)
}
newDir := File{
FileId: result.Data.DirID,
FileName: dirName,
Type: 1,
ParentFileId: int(parentID),
Size: 0,
Trashed: 0,
}
return newDir, nil
}
func (d *Open123) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
srcID, err := strconv.ParseInt(srcObj.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid src file ID: %w", err)
}
dstID, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid dest dir ID: %w", err)
}
var result Response[any]
reqBody := map[string]interface{}{
"fileIDs": []int64{srcID},
"toParentFileID": dstID,
}
_, err = d.Request(ApiMove, http.MethodPost, func(r *resty.Request) {
r.SetBody(reqBody)
}, &result)
if err != nil {
return nil, err
}
if result.Code != 0 {
return nil, fmt.Errorf("move failed: %s", result.Message)
}
files, err := d.getFiles(dstID, 100, 0)
if err != nil {
return nil, fmt.Errorf("move succeed but failed to get target dir: %w", err)
}
for _, f := range files.Data.FileList {
if f.FileId == srcID {
return f, nil
}
}
return nil, fmt.Errorf("move succeed but file not found in target dir")
}
func (d *Open123) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
srcID, err := strconv.ParseInt(srcObj.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid file ID: %w", err)
}
var result Response[any]
reqBody := map[string]interface{}{
"fileId": srcID,
"fileName": newName,
}
_, err = d.Request(ApiRename, http.MethodPut, func(r *resty.Request) {
r.SetBody(reqBody)
}, &result)
if err != nil {
return nil, err
}
if result.Code != 0 {
return nil, fmt.Errorf("rename failed: %s", result.Message)
}
parentID := 0
if file, ok := srcObj.(File); ok {
parentID = file.ParentFileId
}
files, err := d.getFiles(int64(parentID), 100, 0)
if err != nil {
return nil, fmt.Errorf("rename succeed but failed to get parent dir: %w", err)
}
for _, f := range files.Data.FileList {
if f.FileId == srcID {
return f, nil
}
}
return nil, fmt.Errorf("rename succeed but file not found in parent dir")
}
func (d *Open123) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return nil, errs.NotSupport
}
func (d *Open123) Remove(ctx context.Context, obj model.Obj) error {
idStr := obj.GetID()
id, err := strconv.ParseInt(idStr, 10, 64)
if err != nil {
return fmt.Errorf("invalid file ID: %w", err)
}
var result Response[any]
reqBody := RemoveRequest{
FileIDs: []int64{id},
}
_, err = d.Request(ApiRemove, http.MethodPost, func(r *resty.Request) {
r.SetBody(reqBody)
}, &result)
if err != nil {
return err
}
if result.Code != 0 {
return fmt.Errorf("remove failed: %s", result.Message)
}
return nil
}
func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
up = model.UpdateProgressWithRange(up, 50, 100)
_, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
if err != nil {
return err
}
}
createResp, err := d.create(parentFileId, file.GetName(), etag, file.GetSize(), 2, false)
if err != nil {
return err
}
if createResp.Data.Reuse {
return nil
}
return d.Upload(ctx, file, parentFileId, createResp, up)
}
func (d *Open123) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
return nil, errs.NotSupport
}
func (d *Open123) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
return nil, errs.NotSupport
}
func (d *Open123) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
return nil, errs.NotSupport
}
func (d *Open123) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
return nil, errs.NotSupport
}
//func (d *Open123) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*Open123)(nil)

View File

@ -1,36 +0,0 @@
package _123Open
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
driver.RootID
ClientID string `json:"client_id" required:"true" label:"Client ID"`
ClientSecret string `json:"client_secret" required:"true" label:"Client Secret"`
PrivateKey string `json:"private_key"`
UID uint64 `json:"uid" type:"number"`
ValidDuration int64 `json:"valid_duration" type:"number" default:"30" help:"minutes"`
}
var config = driver.Config{
Name: "123 Open",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Open123{}
})
}

View File

@ -1,27 +0,0 @@
package _123Open
import (
"crypto/md5"
"fmt"
"math/rand"
"net/url"
"time"
)
func SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (string, error) {
if privateKey == "" {
return originURL, nil
}
parsed, err := url.Parse(originURL)
if err != nil {
return "", err
}
ts := time.Now().Add(validDuration).Unix()
randInt := rand.Int()
signature := fmt.Sprintf("%d-%d-%d-%x", ts, randInt, uid, md5.Sum([]byte(fmt.Sprintf("%s-%d-%d-%d-%s",
parsed.Path, ts, randInt, uid, privateKey))))
query := parsed.Query()
query.Add("auth_key", signature)
parsed.RawQuery = query.Encode()
return parsed.String(), nil
}

View File

@ -1,85 +0,0 @@
package _123Open
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"sync"
"time"
)
const tokenURL = ApiBaseURL + ApiToken
type tokenManager struct {
clientID string
clientSecret string
mu sync.Mutex
accessToken string
expireTime time.Time
}
func newTokenManager(clientID, clientSecret string) *tokenManager {
return &tokenManager{
clientID: clientID,
clientSecret: clientSecret,
}
}
func (tm *tokenManager) getToken() (string, error) {
tm.mu.Lock()
defer tm.mu.Unlock()
if tm.accessToken != "" && time.Now().Before(tm.expireTime.Add(-5*time.Minute)) {
return tm.accessToken, nil
}
reqBody := map[string]string{
"clientID": tm.clientID,
"clientSecret": tm.clientSecret,
}
body, _ := json.Marshal(reqBody)
req, err := http.NewRequest("POST", tokenURL, bytes.NewBuffer(body))
if err != nil {
return "", err
}
req.Header.Set("Platform", "open_platform")
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
var result TokenResp
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return "", err
}
if result.Code != 0 {
return "", fmt.Errorf("get token failed: %s", result.Message)
}
tm.accessToken = result.Data.AccessToken
expireAt, err := time.Parse(time.RFC3339, result.Data.ExpiredAt)
if err != nil {
return "", fmt.Errorf("parse expire time failed: %w", err)
}
tm.expireTime = expireAt
return tm.accessToken, nil
}
func (tm *tokenManager) buildHeaders() (http.Header, error) {
token, err := tm.getToken()
if err != nil {
return nil, err
}
header := http.Header{}
header.Set("Authorization", "Bearer "+token)
header.Set("Platform", "open_platform")
header.Set("Content-Type", "application/json")
return header, nil
}

View File

@ -1,70 +0,0 @@
package _123Open
import (
"fmt"
"github.com/alist-org/alist/v3/pkg/utils"
"time"
)
type File struct {
FileName string `json:"filename"`
Size int64 `json:"size"`
CreateAt string `json:"createAt"`
UpdateAt string `json:"updateAt"`
FileId int64 `json:"fileId"`
Type int `json:"type"`
Etag string `json:"etag"`
S3KeyFlag string `json:"s3KeyFlag"`
ParentFileId int `json:"parentFileId"`
Category int `json:"category"`
Status int `json:"status"`
Trashed int `json:"trashed"`
}
func (f File) GetID() string {
return fmt.Sprint(f.FileId)
}
func (f File) GetName() string {
return f.FileName
}
func (f File) GetSize() int64 {
return f.Size
}
func (f File) IsDir() bool {
return f.Type == 1
}
func (f File) GetModified() string {
return f.UpdateAt
}
func (f File) GetThumb() string {
return ""
}
func (f File) ModTime() time.Time {
t, err := time.Parse("2006-01-02 15:04:05", f.UpdateAt)
if err != nil {
return time.Time{}
}
return t
}
func (f File) CreateTime() time.Time {
t, err := time.Parse("2006-01-02 15:04:05", f.CreateAt)
if err != nil {
return time.Time{}
}
return t
}
func (f File) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.MD5, f.Etag)
}
func (f File) GetPath() string {
return ""
}

View File

@ -1,282 +0,0 @@
package _123Open
import (
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"encoding/json"
"fmt"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"golang.org/x/sync/errgroup"
"io"
"mime/multipart"
"net/http"
"runtime"
"strconv"
"time"
)
func (d *Open123) create(parentFileID int64, filename, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
var resp UploadCreateResp
_, err := d.Request(ApiCreateUploadURL, http.MethodPost, func(req *resty.Request) {
body := base.Json{
"parentFileID": parentFileID,
"filename": filename,
"etag": etag,
"size": size,
}
if duplicate > 0 {
body["duplicate"] = duplicate
}
if containDir {
body["containDir"] = true
}
req.SetBody(body)
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Open123) GetUploadDomains() ([]string, error) {
var resp struct {
Code int `json:"code"`
Message string `json:"message"`
Data []string `json:"data"`
}
_, err := d.Request(ApiUploadDomainURL, http.MethodGet, nil, &resp)
if err != nil {
return nil, err
}
if resp.Code != 0 {
return nil, fmt.Errorf("get upload domain failed: %s", resp.Message)
}
return resp.Data, nil
}
func (d *Open123) UploadSingle(ctx context.Context, createResp *UploadCreateResp, file model.FileStreamer, parentID int64) error {
domain := createResp.Data.Servers[0]
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
_, _, err := stream.CacheFullInTempFileAndHash(file, utils.MD5)
if err != nil {
return err
}
}
reader, err := file.RangeRead(http_range.Range{Start: 0, Length: file.GetSize()})
if err != nil {
return err
}
reader = driver.NewLimitedUploadStream(ctx, reader)
var b bytes.Buffer
mw := multipart.NewWriter(&b)
mw.WriteField("parentFileID", fmt.Sprint(parentID))
mw.WriteField("filename", file.GetName())
mw.WriteField("etag", etag)
mw.WriteField("size", fmt.Sprint(file.GetSize()))
fw, _ := mw.CreateFormFile("file", file.GetName())
_, err = io.Copy(fw, reader)
mw.Close()
req, err := http.NewRequestWithContext(ctx, "POST", domain+ApiSingleUploadURL, &b)
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+d.tm.accessToken)
req.Header.Set("Platform", "open_platform")
req.Header.Set("Content-Type", mw.FormDataContentType())
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
var result struct {
Code int `json:"code"`
Message string `json:"message"`
Data struct {
FileID int64 `json:"fileID"`
Completed bool `json:"completed"`
} `json:"data"`
}
body, _ := io.ReadAll(resp.Body)
if err := json.Unmarshal(body, &result); err != nil {
return fmt.Errorf("unmarshal response error: %v, body: %s", err, string(body))
}
if result.Code != 0 {
return fmt.Errorf("upload failed: %s", result.Message)
}
if !result.Data.Completed || result.Data.FileID == 0 {
return fmt.Errorf("upload incomplete or missing fileID")
}
return nil
}
func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, parentID int64, createResp *UploadCreateResp, up driver.UpdateProgress) error {
if cacher, ok := file.(interface{ CacheFullInTempFile() (model.File, error) }); ok {
if _, err := cacher.CacheFullInTempFile(); err != nil {
return err
}
}
size := file.GetSize()
chunkSize := createResp.Data.SliceSize
uploadNums := (size + chunkSize - 1) / chunkSize
uploadDomain := createResp.Data.Servers[0]
if d.UploadThread <= 0 {
cpuCores := runtime.NumCPU()
threads := cpuCores * 2
if threads < 4 {
threads = 4
}
if threads > 16 {
threads = 16
}
d.UploadThread = threads
fmt.Printf("[Upload] Auto set upload concurrency: %d (CPU cores=%d)\n", d.UploadThread, cpuCores)
}
fmt.Printf("[Upload] File size: %d bytes, chunk size: %d bytes, total slices: %d, concurrency: %d\n",
size, chunkSize, uploadNums, d.UploadThread)
if size <= 1<<30 {
return d.UploadSingle(ctx, createResp, file, parentID)
}
if createResp.Data.Reuse {
up(100)
return nil
}
client := resty.New()
semaphore := make(chan struct{}, d.UploadThread)
threadG, _ := errgroup.WithContext(ctx)
var progressArr = make([]int64, uploadNums)
for partIndex := int64(0); partIndex < uploadNums; partIndex++ {
partIndex := partIndex
semaphore <- struct{}{}
threadG.Go(func() error {
defer func() { <-semaphore }()
offset := partIndex * chunkSize
length := min(chunkSize, size-offset)
partNumber := partIndex + 1
fmt.Printf("[Slice %d] Starting read from offset %d, length %d\n", partNumber, offset, length)
reader, err := file.RangeRead(http_range.Range{Start: offset, Length: length})
if err != nil {
return fmt.Errorf("[Slice %d] RangeRead error: %v", partNumber, err)
}
buf := make([]byte, length)
n, err := io.ReadFull(reader, buf)
if err != nil && err != io.EOF {
return fmt.Errorf("[Slice %d] Read error: %v", partNumber, err)
}
buf = buf[:n]
hash := md5.Sum(buf)
sliceMD5Str := hex.EncodeToString(hash[:])
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
writer.WriteField("preuploadID", createResp.Data.PreuploadID)
writer.WriteField("sliceNo", strconv.FormatInt(partNumber, 10))
writer.WriteField("sliceMD5", sliceMD5Str)
partName := fmt.Sprintf("%s.part%d", file.GetName(), partNumber)
fw, _ := writer.CreateFormFile("slice", partName)
fw.Write(buf)
writer.Close()
resp, err := client.R().
SetHeader("Authorization", "Bearer "+d.tm.accessToken).
SetHeader("Platform", "open_platform").
SetHeader("Content-Type", writer.FormDataContentType()).
SetBody(body.Bytes()).
Post(uploadDomain + ApiUploadSliceURL)
if err != nil {
return fmt.Errorf("[Slice %d] Upload HTTP error: %v", partNumber, err)
}
if resp.StatusCode() != 200 {
return fmt.Errorf("[Slice %d] Upload failed with status: %s, resp: %s", partNumber, resp.Status(), resp.String())
}
progressArr[partIndex] = length
var totalUploaded int64 = 0
for _, v := range progressArr {
totalUploaded += v
}
if up != nil {
percent := float64(totalUploaded) / float64(size) * 100
up(percent)
}
fmt.Printf("[Slice %d] MD5: %s\n", partNumber, sliceMD5Str)
fmt.Printf("[Slice %d] Upload finished\n", partNumber)
return nil
})
}
if err := threadG.Wait(); err != nil {
return err
}
var completeResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data struct {
Completed bool `json:"completed"`
FileID int64 `json:"fileID"`
} `json:"data"`
}
for {
reqBody := fmt.Sprintf(`{"preuploadID":"%s"}`, createResp.Data.PreuploadID)
req, err := http.NewRequestWithContext(ctx, "POST", uploadDomain+ApiUploadCompleteURL, bytes.NewBufferString(reqBody))
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+d.tm.accessToken)
req.Header.Set("Platform", "open_platform")
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
if err := json.Unmarshal(body, &completeResp); err != nil {
return fmt.Errorf("completion response unmarshal error: %v, body: %s", err, string(body))
}
if completeResp.Code != 0 {
return fmt.Errorf("completion API returned error code %d: %s", completeResp.Code, completeResp.Message)
}
if completeResp.Data.Completed && completeResp.Data.FileID != 0 {
fmt.Printf("[Upload] Upload completed successfully. FileID: %d\n", completeResp.Data.FileID)
break
}
time.Sleep(time.Second)
}
up(100)
return nil
}

View File

@ -1,20 +0,0 @@
package _123Open
import (
"fmt"
"net/http"
)
func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*FileListResp, error) {
var result FileListResp
url := fmt.Sprintf("%s?parentFileId=%d&limit=%d&lastFileId=%d", ApiFileList, parentFileId, limit, lastFileId)
_, err := d.Request(url, http.MethodGet, nil, &result)
if err != nil {
return nil, err
}
if result.Code != 0 {
return nil, fmt.Errorf("list error: %s", result.Message)
}
return &result, nil
}

View File

@ -4,14 +4,12 @@ import (
"context" "context"
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"golang.org/x/time/rate"
"net/http" "net/http"
"net/url" "net/url"
"sync" "sync"
"time" "time"
"golang.org/x/time/rate"
_123 "github.com/alist-org/alist/v3/drivers/123"
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/errs"
@ -25,7 +23,6 @@ type Pan123Share struct {
model.Storage model.Storage
Addition Addition
apiRateLimit sync.Map apiRateLimit sync.Map
ref *_123.Pan123
} }
func (d *Pan123Share) Config() driver.Config { func (d *Pan123Share) Config() driver.Config {
@ -42,17 +39,7 @@ func (d *Pan123Share) Init(ctx context.Context) error {
return nil return nil
} }
func (d *Pan123Share) InitReference(storage driver.Driver) error {
refStorage, ok := storage.(*_123.Pan123)
if ok {
d.ref = refStorage
return nil
}
return fmt.Errorf("ref: storage is not 123Pan")
}
func (d *Pan123Share) Drop(ctx context.Context) error { func (d *Pan123Share) Drop(ctx context.Context) error {
d.ref = nil
return nil return nil
} }

View File

@ -53,9 +53,6 @@ func GetApi(rawUrl string) string {
} }
func (d *Pan123Share) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { func (d *Pan123Share) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
if d.ref != nil {
return d.ref.Request(url, method, callback, resp)
}
req := base.RestyClient.R() req := base.RestyClient.R()
req.SetHeaders(map[string]string{ req.SetHeaders(map[string]string{
"origin": "https://www.123pan.com", "origin": "https://www.123pan.com",

View File

@ -2,32 +2,28 @@ package _139
import ( import (
"context" "context"
"encoding/xml" "encoding/base64"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"path"
"strconv" "strconv"
"strings"
"time" "time"
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
streamPkg "github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/cron"
"github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/pkg/utils/random" "github.com/alist-org/alist/v3/pkg/cron"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
type Yun139 struct { type Yun139 struct {
model.Storage model.Storage
Addition Addition
cron *cron.Cron cron *cron.Cron
Account string Account string
ref *Yun139
PersonalCloudHost string
} }
func (d *Yun139) Config() driver.Config { func (d *Yun139) Config() driver.Config {
@ -39,79 +35,56 @@ func (d *Yun139) GetAddition() driver.Additional {
} }
func (d *Yun139) Init(ctx context.Context) error { func (d *Yun139) Init(ctx context.Context) error {
if d.ref == nil { if d.Authorization == "" {
if len(d.Authorization) == 0 { return fmt.Errorf("authorization is empty")
return fmt.Errorf("authorization is empty") }
} d.cron = cron.NewCron(time.Hour * 24 * 7)
d.cron.Do(func() {
err := d.refreshToken() err := d.refreshToken()
if err != nil { if err != nil {
return err log.Errorf("%+v", err)
} }
})
// Query Route Policy
var resp QueryRoutePolicyResp
_, err = d.requestRoute(base.Json{
"userInfo": base.Json{
"userType": 1,
"accountType": 1,
"accountName": d.Account},
"modAddrType": 1,
}, &resp)
if err != nil {
return err
}
for _, policyItem := range resp.Data.RoutePolicyList {
if policyItem.ModName == "personal" {
d.PersonalCloudHost = policyItem.HttpsUrl
break
}
}
if len(d.PersonalCloudHost) == 0 {
return fmt.Errorf("PersonalCloudHost is empty")
}
d.cron = cron.NewCron(time.Hour * 12)
d.cron.Do(func() {
err := d.refreshToken()
if err != nil {
log.Errorf("%+v", err)
}
})
}
switch d.Addition.Type { switch d.Addition.Type {
case MetaPersonalNew: case MetaPersonalNew:
if len(d.Addition.RootFolderID) == 0 { if len(d.Addition.RootFolderID) == 0 {
d.RootFolderID = "/" d.RootFolderID = "/"
} }
return nil
case MetaPersonal: case MetaPersonal:
if len(d.Addition.RootFolderID) == 0 { if len(d.Addition.RootFolderID) == 0 {
d.RootFolderID = "root" d.RootFolderID = "root"
} }
case MetaGroup: fallthrough
if len(d.Addition.RootFolderID) == 0 {
d.RootFolderID = d.CloudID
}
case MetaFamily: case MetaFamily:
decode, err := base64.StdEncoding.DecodeString(d.Authorization)
if err != nil {
return err
}
decodeStr := string(decode)
splits := strings.Split(decodeStr, ":")
if len(splits) < 2 {
return fmt.Errorf("authorization is invalid, splits < 2")
}
d.Account = splits[1]
_, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{
"qryUserExternInfoReq": base.Json{
"commonAccountInfo": base.Json{
"account": d.Account,
"accountType": 1,
},
},
}, nil)
return err
default: default:
return errs.NotImplement return errs.NotImplement
} }
return nil
}
func (d *Yun139) InitReference(storage driver.Driver) error {
refStorage, ok := storage.(*Yun139)
if ok {
d.ref = refStorage
return nil
}
return errs.NotSupport
} }
func (d *Yun139) Drop(ctx context.Context) error { func (d *Yun139) Drop(ctx context.Context) error {
if d.cron != nil { if d.cron != nil {
d.cron.Stop() d.cron.Stop()
} }
d.ref = nil
return nil return nil
} }
@ -123,8 +96,6 @@ func (d *Yun139) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
return d.getFiles(dir.GetID()) return d.getFiles(dir.GetID())
case MetaFamily: case MetaFamily:
return d.familyGetFiles(dir.GetID()) return d.familyGetFiles(dir.GetID())
case MetaGroup:
return d.groupGetFiles(dir.GetID())
default: default:
return nil, errs.NotImplement return nil, errs.NotImplement
} }
@ -137,11 +108,9 @@ func (d *Yun139) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
case MetaPersonalNew: case MetaPersonalNew:
url, err = d.personalGetLink(file.GetID()) url, err = d.personalGetLink(file.GetID())
case MetaPersonal: case MetaPersonal:
url, err = d.getLink(file.GetID()) fallthrough
case MetaFamily: case MetaFamily:
url, err = d.familyGetLink(file.GetID(), file.GetPath()) url, err = d.getLink(file.GetID())
case MetaGroup:
url, err = d.groupGetLink(file.GetID(), file.GetPath())
default: default:
return nil, errs.NotImplement return nil, errs.NotImplement
} }
@ -162,7 +131,7 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
"type": "folder", "type": "folder",
"fileRenameMode": "force_rename", "fileRenameMode": "force_rename",
} }
pathname := "/file/create" pathname := "/hcy/file/create"
_, err = d.personalPost(pathname, data, nil) _, err = d.personalPost(pathname, data, nil)
case MetaPersonal: case MetaPersonal:
data := base.Json{ data := base.Json{
@ -170,7 +139,7 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
"parentCatalogID": parentDir.GetID(), "parentCatalogID": parentDir.GetID(),
"newCatalogName": dirName, "newCatalogName": dirName,
"commonAccountInfo": base.Json{ "commonAccountInfo": base.Json{
"account": d.getAccount(), "account": d.Account,
"accountType": 1, "accountType": 1,
}, },
}, },
@ -181,26 +150,12 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
data := base.Json{ data := base.Json{
"cloudID": d.CloudID, "cloudID": d.CloudID,
"commonAccountInfo": base.Json{ "commonAccountInfo": base.Json{
"account": d.getAccount(), "account": d.Account,
"accountType": 1, "accountType": 1,
}, },
"docLibName": dirName, "docLibName": dirName,
"path": path.Join(parentDir.GetPath(), parentDir.GetID()),
} }
pathname := "/orchestration/familyCloud-rebuild/cloudCatalog/v1.0/createCloudDoc" pathname := "/orchestration/familyCloud/cloudCatalog/v1.0/createCloudDoc"
_, err = d.post(pathname, data, nil)
case MetaGroup:
data := base.Json{
"catalogName": dirName,
"commonAccountInfo": base.Json{
"account": d.getAccount(),
"accountType": 1,
},
"groupID": d.CloudID,
"parentFileId": parentDir.GetID(),
"path": path.Join(parentDir.GetPath(), parentDir.GetID()),
}
pathname := "/orchestration/group-rebuild/catalog/v1.0/createGroupCatalog"
_, err = d.post(pathname, data, nil) _, err = d.post(pathname, data, nil)
default: default:
err = errs.NotImplement err = errs.NotImplement
@ -215,40 +170,12 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
"fileIds": []string{srcObj.GetID()}, "fileIds": []string{srcObj.GetID()},
"toParentFileId": dstDir.GetID(), "toParentFileId": dstDir.GetID(),
} }
pathname := "/file/batchMove" pathname := "/hcy/file/batchMove"
_, err := d.personalPost(pathname, data, nil) _, err := d.personalPost(pathname, data, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return srcObj, nil return srcObj, nil
case MetaGroup:
var contentList []string
var catalogList []string
if srcObj.IsDir() {
catalogList = append(catalogList, srcObj.GetID())
} else {
contentList = append(contentList, srcObj.GetID())
}
data := base.Json{
"taskType": 3,
"srcType": 2,
"srcGroupID": d.CloudID,
"destType": 2,
"destGroupID": d.CloudID,
"destPath": dstDir.GetPath(),
"contentList": contentList,
"catalogList": catalogList,
"commonAccountInfo": base.Json{
"account": d.getAccount(),
"accountType": 1,
},
}
pathname := "/orchestration/group-rebuild/task/v1.0/createBatchOprTask"
_, err := d.post(pathname, data, nil)
if err != nil {
return nil, err
}
return srcObj, nil
case MetaPersonal: case MetaPersonal:
var contentInfoList []string var contentInfoList []string
var catalogInfoList []string var catalogInfoList []string
@ -267,7 +194,7 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
"newCatalogID": dstDir.GetID(), "newCatalogID": dstDir.GetID(),
}, },
"commonAccountInfo": base.Json{ "commonAccountInfo": base.Json{
"account": d.getAccount(), "account": d.Account,
"accountType": 1, "accountType": 1,
}, },
}, },
@ -292,7 +219,7 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e
"name": newName, "name": newName,
"description": "", "description": "",
} }
pathname := "/file/update" pathname := "/hcy/file/update"
_, err = d.personalPost(pathname, data, nil) _, err = d.personalPost(pathname, data, nil)
case MetaPersonal: case MetaPersonal:
var data base.Json var data base.Json
@ -302,7 +229,7 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e
"catalogID": srcObj.GetID(), "catalogID": srcObj.GetID(),
"catalogName": newName, "catalogName": newName,
"commonAccountInfo": base.Json{ "commonAccountInfo": base.Json{
"account": d.getAccount(), "account": d.Account,
"accountType": 1, "accountType": 1,
}, },
} }
@ -312,72 +239,13 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e
"contentID": srcObj.GetID(), "contentID": srcObj.GetID(),
"contentName": newName, "contentName": newName,
"commonAccountInfo": base.Json{ "commonAccountInfo": base.Json{
"account": d.getAccount(), "account": d.Account,
"accountType": 1, "accountType": 1,
}, },
} }
pathname = "/orchestration/personalCloud/content/v1.0/updateContentInfo" pathname = "/orchestration/personalCloud/content/v1.0/updateContentInfo"
} }
_, err = d.post(pathname, data, nil) _, err = d.post(pathname, data, nil)
case MetaGroup:
var data base.Json
var pathname string
if srcObj.IsDir() {
data = base.Json{
"groupID": d.CloudID,
"modifyCatalogID": srcObj.GetID(),
"modifyCatalogName": newName,
"path": srcObj.GetPath(),
"commonAccountInfo": base.Json{
"account": d.getAccount(),
"accountType": 1,
},
}
pathname = "/orchestration/group-rebuild/catalog/v1.0/modifyGroupCatalog"
} else {
data = base.Json{
"groupID": d.CloudID,
"contentID": srcObj.GetID(),
"contentName": newName,
"path": srcObj.GetPath(),
"commonAccountInfo": base.Json{
"account": d.getAccount(),
"accountType": 1,
},
}
pathname = "/orchestration/group-rebuild/content/v1.0/modifyGroupContent"
}
_, err = d.post(pathname, data, nil)
case MetaFamily:
var data base.Json
var pathname string
if srcObj.IsDir() {
// 网页接口不支持重命名家庭云文件夹
// data = base.Json{
// "catalogType": 3,
// "catalogID": srcObj.GetID(),
// "catalogName": newName,
// "commonAccountInfo": base.Json{
// "account": d.getAccount(),
// "accountType": 1,
// },
// "path": srcObj.GetPath(),
// }
// pathname = "/orchestration/familyCloud-rebuild/photoContent/v1.0/modifyCatalogInfo"
return errs.NotImplement
} else {
data = base.Json{
"contentID": srcObj.GetID(),
"contentName": newName,
"commonAccountInfo": base.Json{
"account": d.getAccount(),
"accountType": 1,
},
"path": srcObj.GetPath(),
}
pathname = "/orchestration/familyCloud-rebuild/photoContent/v1.0/modifyContentInfo"
}
_, err = d.post(pathname, data, nil)
default: default:
err = errs.NotImplement err = errs.NotImplement
} }
@ -392,7 +260,7 @@ func (d *Yun139) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
"fileIds": []string{srcObj.GetID()}, "fileIds": []string{srcObj.GetID()},
"toParentFileId": dstDir.GetID(), "toParentFileId": dstDir.GetID(),
} }
pathname := "/file/batchCopy" pathname := "/hcy/file/batchCopy"
_, err := d.personalPost(pathname, data, nil) _, err := d.personalPost(pathname, data, nil)
return err return err
case MetaPersonal: case MetaPersonal:
@ -413,7 +281,7 @@ func (d *Yun139) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
"newCatalogID": dstDir.GetID(), "newCatalogID": dstDir.GetID(),
}, },
"commonAccountInfo": base.Json{ "commonAccountInfo": base.Json{
"account": d.getAccount(), "account": d.Account,
"accountType": 1, "accountType": 1,
}, },
}, },
@ -432,31 +300,9 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error {
data := base.Json{ data := base.Json{
"fileIds": []string{obj.GetID()}, "fileIds": []string{obj.GetID()},
} }
pathname := "/recyclebin/batchTrash" pathname := "/hcy/recyclebin/batchTrash"
_, err := d.personalPost(pathname, data, nil) _, err := d.personalPost(pathname, data, nil)
return err return err
case MetaGroup:
var contentList []string
var catalogList []string
// 必须使用完整路径删除
if obj.IsDir() {
catalogList = append(catalogList, obj.GetPath())
} else {
contentList = append(contentList, path.Join(obj.GetPath(), obj.GetID()))
}
data := base.Json{
"taskType": 2,
"srcGroupID": d.CloudID,
"contentList": contentList,
"catalogList": catalogList,
"commonAccountInfo": base.Json{
"account": d.getAccount(),
"accountType": 1,
},
}
pathname := "/orchestration/group-rebuild/task/v1.0/createBatchOprTask"
_, err := d.post(pathname, data, nil)
return err
case MetaPersonal: case MetaPersonal:
fallthrough fallthrough
case MetaFamily: case MetaFamily:
@ -477,7 +323,7 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error {
"catalogInfoList": catalogInfoList, "catalogInfoList": catalogInfoList,
}, },
"commonAccountInfo": base.Json{ "commonAccountInfo": base.Json{
"account": d.getAccount(), "account": d.Account,
"accountType": 1, "accountType": 1,
}, },
}, },
@ -488,15 +334,13 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error {
"catalogList": catalogInfoList, "catalogList": catalogInfoList,
"contentList": contentInfoList, "contentList": contentInfoList,
"commonAccountInfo": base.Json{ "commonAccountInfo": base.Json{
"account": d.getAccount(), "account": d.Account,
"accountType": 1, "accountType": 1,
}, },
"sourceCloudID": d.CloudID,
"sourceCatalogType": 1002, "sourceCatalogType": 1002,
"taskType": 2, "taskType": 2,
"path": obj.GetPath(),
} }
pathname = "/orchestration/familyCloud-rebuild/batchOprTask/v1.0/createBatchOprTask" pathname = "/orchestration/familyCloud/batchOprTask/v1.0/createBatchOprTask"
} }
_, err := d.post(pathname, data, nil) _, err := d.post(pathname, data, nil)
return err return err
@ -505,15 +349,20 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error {
} }
} }
func (d *Yun139) getPartSize(size int64) int64 { const (
if d.CustomUploadPartSize != 0 { _ = iota //ignore first value by assigning to blank identifier
return d.CustomUploadPartSize KB = 1 << (10 * iota)
} MB
GB
TB
)
func getPartSize(size int64) int64 {
// 网盘对于分片数量存在上限 // 网盘对于分片数量存在上限
if size/utils.GB > 30 { if size/GB > 30 {
return 512 * utils.MB return 512 * MB
} }
return 100 * utils.MB return 100 * MB
} }
func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
@ -521,288 +370,149 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
case MetaPersonalNew: case MetaPersonalNew:
var err error var err error
fullHash := stream.GetHash().GetHash(utils.SHA256) fullHash := stream.GetHash().GetHash(utils.SHA256)
if len(fullHash) != utils.SHA256.Width { if len(fullHash) <= 0 {
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA256) tmpF, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
fullHash, err = utils.HashFile(utils.SHA256, tmpF)
if err != nil { if err != nil {
return err return err
} }
} }
// return errs.NotImplement
size := stream.GetSize()
var partSize = d.getPartSize(size)
part := size / partSize
if size%partSize > 0 {
part++
} else if part == 0 {
part = 1
}
partInfos := make([]PartInfo, 0, part)
for i := int64(0); i < part; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
start := i * partSize
byteSize := size - start
if byteSize > partSize {
byteSize = partSize
}
partNumber := i + 1
partInfo := PartInfo{
PartNumber: partNumber,
PartSize: byteSize,
ParallelHashCtx: ParallelHashCtx{
PartOffset: start,
},
}
partInfos = append(partInfos, partInfo)
}
// 筛选出前 100 个 partInfos
firstPartInfos := partInfos
if len(firstPartInfos) > 100 {
firstPartInfos = firstPartInfos[:100]
}
// 创建任务获取上传信息和前100个分片的上传地址
data := base.Json{ data := base.Json{
"contentHash": fullHash, "contentHash": fullHash,
"contentHashAlgorithm": "SHA256", "contentHashAlgorithm": "SHA256",
"contentType": "application/octet-stream", "contentType": "application/octet-stream",
"parallelUpload": false, "parallelUpload": false,
"partInfos": firstPartInfos, "partInfos": []base.Json{{
"size": size, "parallelHashCtx": base.Json{
"parentFileId": dstDir.GetID(), "partOffset": 0,
"name": stream.GetName(), },
"type": "file", "partNumber": 1,
"fileRenameMode": "auto_rename", "partSize": stream.GetSize(),
}},
"size": stream.GetSize(),
"parentFileId": dstDir.GetID(),
"name": stream.GetName(),
"type": "file",
"fileRenameMode": "auto_rename",
} }
pathname := "/file/create" pathname := "/hcy/file/create"
var resp PersonalUploadResp var resp PersonalUploadResp
_, err = d.personalPost(pathname, data, &resp) _, err = d.personalPost(pathname, data, &resp)
if err != nil { if err != nil {
return err return err
} }
// 判断文件是否已存在 if resp.Data.Exist || resp.Data.RapidUpload {
// resp.Data.Exist: true 已存在同名文件且校验相同,云端不会重复增加文件,无需手动处理冲突
if resp.Data.Exist {
return nil return nil
} }
// 判断文件是否支持快传 // Progress
// resp.Data.RapidUpload: true 支持快传,但此处直接检测是否返回分片的上传地址 p := driver.NewProgress(stream.GetSize(), up)
// 快传的情况下同样需要手动处理冲突
if resp.Data.PartInfos != nil {
// 读取前100个分片的上传地址
uploadPartInfos := resp.Data.PartInfos
// 获取后续分片的上传地址 // Update Progress
for i := 101; i < len(partInfos); i += 100 { r := io.TeeReader(stream, p)
end := i + 100
if end > len(partInfos) {
end = len(partInfos)
}
batchPartInfos := partInfos[i:end]
moredata := base.Json{ req, err := http.NewRequest("PUT", resp.Data.PartInfos[0].UploadUrl, r)
"fileId": resp.Data.FileId, if err != nil {
"uploadId": resp.Data.UploadId, return err
"partInfos": batchPartInfos, }
"commonAccountInfo": base.Json{ req = req.WithContext(ctx)
"account": d.getAccount(), req.Header.Set("Content-Type", "application/octet-stream")
"accountType": 1, req.Header.Set("Content-Length", fmt.Sprint(stream.GetSize()))
}, req.Header.Set("Origin", "https://yun.139.com")
} req.Header.Set("Referer", "https://yun.139.com/")
pathname := "/file/getUploadUrl" req.ContentLength = stream.GetSize()
var moreresp PersonalUploadUrlResp
_, err = d.personalPost(pathname, moredata, &moreresp)
if err != nil {
return err
}
uploadPartInfos = append(uploadPartInfos, moreresp.Data.PartInfos...)
}
// Progress res, err := base.HttpClient.Do(req)
p := driver.NewProgress(size, up) if err != nil {
return err
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
// 上传所有分片
for _, uploadPartInfo := range uploadPartInfos {
index := uploadPartInfo.PartNumber - 1
partSize := partInfos[index].PartSize
log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos))
limitReader := io.LimitReader(rateLimited, partSize)
// Update Progress
r := io.TeeReader(limitReader, p)
req, err := http.NewRequest("PUT", uploadPartInfo.UploadUrl, r)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Length", fmt.Sprint(partSize))
req.Header.Set("Origin", "https://yun.139.com")
req.Header.Set("Referer", "https://yun.139.com/")
req.ContentLength = partSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
_ = res.Body.Close()
log.Debugf("[139] uploaded: %+v", res)
if res.StatusCode != http.StatusOK {
return fmt.Errorf("unexpected status code: %d", res.StatusCode)
}
}
data = base.Json{
"contentHash": fullHash,
"contentHashAlgorithm": "SHA256",
"fileId": resp.Data.FileId,
"uploadId": resp.Data.UploadId,
}
_, err = d.personalPost("/file/complete", data, nil)
if err != nil {
return err
}
} }
// 处理冲突 _ = res.Body.Close()
if resp.Data.FileName != stream.GetName() { log.Debugf("%+v", res)
log.Debugf("[139] conflict detected: %s != %s", resp.Data.FileName, stream.GetName()) if res.StatusCode != http.StatusOK {
// 给服务器一定时间处理数据,避免无法刷新文件列表 return fmt.Errorf("unexpected status code: %d", res.StatusCode)
time.Sleep(time.Millisecond * 500) }
// 刷新并获取文件列表
files, err := d.List(ctx, dstDir, model.ListArgs{Refresh: true}) data = base.Json{
if err != nil { "contentHash": fullHash,
return err "contentHashAlgorithm": "SHA256",
} "fileId": resp.Data.FileId,
// 删除旧文件 "uploadId": resp.Data.UploadId,
for _, file := range files { }
if file.GetName() == stream.GetName() { _, err = d.personalPost("/hcy/file/complete", data, nil)
log.Debugf("[139] conflict: removing old: %s", file.GetName()) if err != nil {
// 删除前重命名旧文件,避免仍旧冲突 return err
err = d.Rename(ctx, file, stream.GetName()+random.String(4))
if err != nil {
return err
}
err = d.Remove(ctx, file)
if err != nil {
return err
}
break
}
}
// 重命名新文件
for _, file := range files {
if file.GetName() == resp.Data.FileName {
log.Debugf("[139] conflict: renaming new: %s => %s", file.GetName(), stream.GetName())
err = d.Rename(ctx, file, stream.GetName())
if err != nil {
return err
}
break
}
}
} }
return nil return nil
case MetaPersonal: case MetaPersonal:
fallthrough fallthrough
case MetaFamily: case MetaFamily:
// 处理冲突
// 获取文件列表
files, err := d.List(ctx, dstDir, model.ListArgs{})
if err != nil {
return err
}
// 删除旧文件
for _, file := range files {
if file.GetName() == stream.GetName() {
log.Debugf("[139] conflict: removing old: %s", file.GetName())
// 删除前重命名旧文件,避免仍旧冲突
err = d.Rename(ctx, file, stream.GetName()+random.String(4))
if err != nil {
return err
}
err = d.Remove(ctx, file)
if err != nil {
return err
}
break
}
}
var reportSize int64
if d.ReportRealSize {
reportSize = stream.GetSize()
} else {
reportSize = 0
}
data := base.Json{ data := base.Json{
"manualRename": 2, "manualRename": 2,
"operation": 0, "operation": 0,
"fileCount": 1, "fileCount": 1,
"totalSize": reportSize, "totalSize": 0, // 去除上传大小限制
"uploadContentList": []base.Json{{ "uploadContentList": []base.Json{{
"contentName": stream.GetName(), "contentName": stream.GetName(),
"contentSize": reportSize, "contentSize": 0, // 去除上传大小限制
// "digest": "5a3231986ce7a6b46e408612d385bafa" // "digest": "5a3231986ce7a6b46e408612d385bafa"
}}, }},
"parentCatalogID": dstDir.GetID(), "parentCatalogID": dstDir.GetID(),
"newCatalogName": "", "newCatalogName": "",
"commonAccountInfo": base.Json{ "commonAccountInfo": base.Json{
"account": d.getAccount(), "account": d.Account,
"accountType": 1, "accountType": 1,
}, },
} }
pathname := "/orchestration/personalCloud/uploadAndDownload/v1.0/pcUploadFileRequest" pathname := "/orchestration/personalCloud/uploadAndDownload/v1.0/pcUploadFileRequest"
if d.isFamily() { if d.isFamily() {
data = d.newJson(base.Json{ // data = d.newJson(base.Json{
"fileCount": 1, // "fileCount": 1,
"manualRename": 2, // "manualRename": 2,
"operation": 0, // "operation": 0,
"path": path.Join(dstDir.GetPath(), dstDir.GetID()), // "path": "",
"seqNo": random.String(32), //序列号不能为空 // "seqNo": "",
"totalSize": reportSize, // "totalSize": 0,
"uploadContentList": []base.Json{{ // "uploadContentList": []base.Json{{
"contentName": stream.GetName(), // "contentName": stream.GetName(),
"contentSize": reportSize, // "contentSize": 0,
// "digest": "5a3231986ce7a6b46e408612d385bafa" // // "digest": "5a3231986ce7a6b46e408612d385bafa"
}}, // }},
}) // })
pathname = "/orchestration/familyCloud-rebuild/content/v1.0/getFileUploadURL" // pathname = "/orchestration/familyCloud/content/v1.0/getFileUploadURL"
return errs.NotImplement
} }
var resp UploadResp var resp UploadResp
_, err = d.post(pathname, data, &resp) _, err := d.post(pathname, data, &resp)
if err != nil { if err != nil {
return err return err
} }
if resp.Data.Result.ResultCode != "0" {
return fmt.Errorf("get file upload url failed with result code: %s, message: %s", resp.Data.Result.ResultCode, resp.Data.Result.ResultDesc)
}
size := stream.GetSize()
// Progress // Progress
p := driver.NewProgress(size, up) p := driver.NewProgress(stream.GetSize(), up)
var partSize = d.getPartSize(size)
part := size / partSize var partSize = getPartSize(stream.GetSize())
if size%partSize > 0 { part := (stream.GetSize() + partSize - 1) / partSize
part++ if part == 0 {
} else if part == 0 {
part = 1 part = 1
} }
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
for i := int64(0); i < part; i++ { for i := int64(0); i < part; i++ {
if utils.IsCanceled(ctx) { if utils.IsCanceled(ctx) {
return ctx.Err() return ctx.Err()
} }
start := i * partSize start := i * partSize
byteSize := min(size-start, partSize) byteSize := stream.GetSize() - start
if byteSize > partSize {
byteSize = partSize
}
limitReader := io.LimitReader(rateLimited, byteSize) limitReader := io.LimitReader(stream, byteSize)
// Update Progress // Update Progress
r := io.TeeReader(limitReader, p) r := io.TeeReader(limitReader, p)
req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r) req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r)
@ -812,7 +522,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
req = req.WithContext(ctx) req = req.WithContext(ctx)
req.Header.Set("Content-Type", "text/plain;name="+unicode(stream.GetName())) req.Header.Set("Content-Type", "text/plain;name="+unicode(stream.GetName()))
req.Header.Set("contentSize", strconv.FormatInt(size, 10)) req.Header.Set("contentSize", strconv.FormatInt(stream.GetSize(), 10))
req.Header.Set("range", fmt.Sprintf("bytes=%d-%d", start, start+byteSize-1)) req.Header.Set("range", fmt.Sprintf("bytes=%d-%d", start, start+byteSize-1))
req.Header.Set("uploadtaskID", resp.Data.UploadResult.UploadTaskID) req.Header.Set("uploadtaskID", resp.Data.UploadResult.UploadTaskID)
req.Header.Set("rangeType", "0") req.Header.Set("rangeType", "0")
@ -822,23 +532,13 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if err != nil { if err != nil {
return err return err
} }
_ = res.Body.Close()
log.Debugf("%+v", res)
if res.StatusCode != http.StatusOK { if res.StatusCode != http.StatusOK {
res.Body.Close()
return fmt.Errorf("unexpected status code: %d", res.StatusCode) return fmt.Errorf("unexpected status code: %d", res.StatusCode)
} }
bodyBytes, err := io.ReadAll(res.Body)
if err != nil {
return fmt.Errorf("error reading response body: %v", err)
}
var result InterLayerUploadResult
err = xml.Unmarshal(bodyBytes, &result)
if err != nil {
return fmt.Errorf("error parsing XML: %v", err)
}
if result.ResultCode != 0 {
return fmt.Errorf("upload failed with result code: %d, message: %s", result.ResultCode, result.Msg)
}
} }
return nil return nil
default: default:
return errs.NotImplement return errs.NotImplement
@ -856,7 +556,7 @@ func (d *Yun139) Other(ctx context.Context, args model.OtherArgs) (interface{},
} }
switch args.Method { switch args.Method {
case "video_preview": case "video_preview":
uri = "/videoPreview/getPreviewInfo" uri = "/hcy/videoPreview/getPreviewInfo"
default: default:
return nil, errs.NotSupport return nil, errs.NotSupport
} }

View File

@ -9,11 +9,8 @@ type Addition struct {
//Account string `json:"account" required:"true"` //Account string `json:"account" required:"true"`
Authorization string `json:"authorization" type:"text" required:"true"` Authorization string `json:"authorization" type:"text" required:"true"`
driver.RootID driver.RootID
Type string `json:"type" type:"select" options:"personal_new,family,group,personal" default:"personal_new"` Type string `json:"type" type:"select" options:"personal,family,personal_new" default:"personal"`
CloudID string `json:"cloud_id"` CloudID string `json:"cloud_id"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
ReportRealSize bool `json:"report_real_size" type:"bool" default:"true" help:"Enable to report the real file size during upload"`
UseLargeThumbnail bool `json:"use_large_thumbnail" type:"bool" default:"false" help:"Enable to use large thumbnail for images"`
} }
var config = driver.Config{ var config = driver.Config{

View File

@ -7,7 +7,6 @@ import (
const ( const (
MetaPersonal string = "personal" MetaPersonal string = "personal"
MetaFamily string = "family" MetaFamily string = "family"
MetaGroup string = "group"
MetaPersonalNew string = "personal_new" MetaPersonalNew string = "personal_new"
) )
@ -55,7 +54,6 @@ type Content struct {
//ContentDesc string `json:"contentDesc"` //ContentDesc string `json:"contentDesc"`
//ContentType int `json:"contentType"` //ContentType int `json:"contentType"`
//ContentOrigin int `json:"contentOrigin"` //ContentOrigin int `json:"contentOrigin"`
CreateTime string `json:"createTime"`
UpdateTime string `json:"updateTime"` UpdateTime string `json:"updateTime"`
//CommentCount int `json:"commentCount"` //CommentCount int `json:"commentCount"`
ThumbnailURL string `json:"thumbnailURL"` ThumbnailURL string `json:"thumbnailURL"`
@ -143,13 +141,6 @@ type UploadResp struct {
} `json:"data"` } `json:"data"`
} }
type InterLayerUploadResult struct {
XMLName xml.Name `xml:"result"`
Text string `xml:",chardata"`
ResultCode int `xml:"resultCode"`
Msg string `xml:"msg"`
}
type CloudContent struct { type CloudContent struct {
ContentID string `json:"contentID"` ContentID string `json:"contentID"`
//Modifier string `json:"modifier"` //Modifier string `json:"modifier"`
@ -205,37 +196,6 @@ type QueryContentListResp struct {
} `json:"data"` } `json:"data"`
} }
type QueryGroupContentListResp struct {
BaseResp
Data struct {
Result struct {
ResultCode string `json:"resultCode"`
ResultDesc string `json:"resultDesc"`
} `json:"result"`
GetGroupContentResult struct {
ParentCatalogID string `json:"parentCatalogID"` // 根目录是"0"
CatalogList []struct {
Catalog
Path string `json:"path"`
} `json:"catalogList"`
ContentList []Content `json:"contentList"`
NodeCount int `json:"nodeCount"` // 文件+文件夹数量
CtlgCnt int `json:"ctlgCnt"` // 文件夹数量
ContCnt int `json:"contCnt"` // 文件数量
} `json:"getGroupContentResult"`
} `json:"data"`
}
type ParallelHashCtx struct {
PartOffset int64 `json:"partOffset"`
}
type PartInfo struct {
PartNumber int64 `json:"partNumber"`
PartSize int64 `json:"partSize"`
ParallelHashCtx ParallelHashCtx `json:"parallelHashCtx"`
}
type PersonalThumbnail struct { type PersonalThumbnail struct {
Style string `json:"style"` Style string `json:"style"`
Url string `json:"url"` Url string `json:"url"`
@ -268,7 +228,6 @@ type PersonalUploadResp struct {
BaseResp BaseResp
Data struct { Data struct {
FileId string `json:"fileId"` FileId string `json:"fileId"`
FileName string `json:"fileName"`
PartInfos []PersonalPartInfo `json:"partInfos"` PartInfos []PersonalPartInfo `json:"partInfos"`
Exist bool `json:"exist"` Exist bool `json:"exist"`
RapidUpload bool `json:"rapidUpload"` RapidUpload bool `json:"rapidUpload"`
@ -276,39 +235,11 @@ type PersonalUploadResp struct {
} }
} }
type PersonalUploadUrlResp struct {
BaseResp
Data struct {
FileId string `json:"fileId"`
UploadId string `json:"uploadId"`
PartInfos []PersonalPartInfo `json:"partInfos"`
}
}
type QueryRoutePolicyResp struct {
Success bool `json:"success"`
Code string `json:"code"`
Message string `json:"message"`
Data struct {
RoutePolicyList []struct {
SiteID string `json:"siteID"`
SiteCode string `json:"siteCode"`
ModName string `json:"modName"`
HttpUrl string `json:"httpUrl"`
HttpsUrl string `json:"httpsUrl"`
EnvID string `json:"envID"`
ExtInfo string `json:"extInfo"`
HashName string `json:"hashName"`
ModAddrType int `json:"modAddrType"`
} `json:"routePolicyList"`
} `json:"data"`
}
type RefreshTokenResp struct { type RefreshTokenResp struct {
XMLName xml.Name `xml:"root"` XMLName xml.Name `xml:"root"`
Return string `xml:"return"` Return string `xml:"return"`
Token string `xml:"token"` Token string `xml:"token"`
Expiretime int32 `xml:"expiretime"` Expiretime int32 `xml:"expiretime"`
AccessToken string `xml:"accessToken"` AccessToken string `xml:"accessToken"`
Desc string `xml:"desc"` Desc string `xml:"desc"`
} }

View File

@ -6,7 +6,6 @@ import (
"fmt" "fmt"
"net/http" "net/http"
"net/url" "net/url"
"path"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@ -14,9 +13,9 @@ import (
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/pkg/utils/random" "github.com/alist-org/alist/v3/pkg/utils/random"
"github.com/alist-org/alist/v3/internal/op"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go" jsoniter "github.com/json-iterator/go"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
@ -55,38 +54,14 @@ func getTime(t string) time.Time {
} }
func (d *Yun139) refreshToken() error { func (d *Yun139) refreshToken() error {
if d.ref != nil { url := "https://aas.caiyun.feixin.10086.cn:443/tellin/authTokenRefresh.do"
return d.ref.refreshToken() var resp RefreshTokenResp
}
decode, err := base64.StdEncoding.DecodeString(d.Authorization) decode, err := base64.StdEncoding.DecodeString(d.Authorization)
if err != nil { if err != nil {
return fmt.Errorf("authorization decode failed: %s", err) return err
} }
decodeStr := string(decode) decodeStr := string(decode)
splits := strings.Split(decodeStr, ":") splits := strings.Split(decodeStr, ":")
if len(splits) < 3 {
return fmt.Errorf("authorization is invalid, splits < 3")
}
d.Account = splits[1]
strs := strings.Split(splits[2], "|")
if len(strs) < 4 {
return fmt.Errorf("authorization is invalid, strs < 4")
}
expiration, err := strconv.ParseInt(strs[3], 10, 64)
if err != nil {
return fmt.Errorf("authorization is invalid")
}
expiration -= time.Now().UnixMilli()
if expiration > 1000*60*60*24*15 {
// Authorization is valid for more than 15 days, no refresh needed
return nil
}
if expiration < 0 {
return fmt.Errorf("authorization has expired")
}
url := "https://aas.caiyun.feixin.10086.cn:443/tellin/authTokenRefresh.do"
var resp RefreshTokenResp
reqBody := "<root><token>" + splits[2] + "</token><account>" + splits[1] + "</account><clienttype>656</clienttype></root>" reqBody := "<root><token>" + splits[2] + "</token><account>" + splits[1] + "</account><clienttype>656</clienttype></root>"
_, err = base.RestyClient.R(). _, err = base.RestyClient.R().
ForceContentType("application/xml"). ForceContentType("application/xml").
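The left-hand refreshToken gains an early exit: the base64 Authorization decodes to colon-separated fields (the account is the second, the token the third), the token's fourth '|'-separated field is an expiry timestamp in milliseconds, and the network refresh is skipped while more than 15 days remain. A self-contained sketch of just that check, mirroring the deleted lines:

package sketch

import (
	"encoding/base64"
	"fmt"
	"strconv"
	"strings"
	"time"
)

// needsRefresh reports whether the token embedded in an Authorization
// string is within 15 days of expiry; it errors out once expired.
func needsRefresh(authorization string) (bool, error) {
	decoded, err := base64.StdEncoding.DecodeString(authorization)
	if err != nil {
		return false, fmt.Errorf("authorization decode failed: %s", err)
	}
	splits := strings.Split(string(decoded), ":")
	if len(splits) < 3 {
		return false, fmt.Errorf("authorization is invalid, splits < 3")
	}
	strs := strings.Split(splits[2], "|")
	if len(strs) < 4 {
		return false, fmt.Errorf("authorization is invalid, strs < 4")
	}
	expiration, err := strconv.ParseInt(strs[3], 10, 64)
	if err != nil {
		return false, fmt.Errorf("authorization is invalid")
	}
	remaining := expiration - time.Now().UnixMilli()
	if remaining < 0 {
		return false, fmt.Errorf("authorization has expired")
	}
	return remaining <= 1000*60*60*24*15, nil
}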
@ -124,22 +99,21 @@ func (d *Yun139) request(pathname string, method string, callback base.ReqCallba
req.SetHeaders(map[string]string{ req.SetHeaders(map[string]string{
"Accept": "application/json, text/plain, */*", "Accept": "application/json, text/plain, */*",
"CMS-DEVICE": "default", "CMS-DEVICE": "default",
"Authorization": "Basic " + d.getAuthorization(), "Authorization": "Basic " + d.Authorization,
"mcloud-channel": "1000101", "mcloud-channel": "1000101",
"mcloud-client": "10701", "mcloud-client": "10701",
//"mcloud-route": "001", //"mcloud-route": "001",
"mcloud-sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign), "mcloud-sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign),
//"mcloud-skey":"", //"mcloud-skey":"",
"mcloud-version": "7.14.0", "mcloud-version": "6.6.0",
"Origin": "https://yun.139.com", "Origin": "https://yun.139.com",
"Referer": "https://yun.139.com/w/", "Referer": "https://yun.139.com/w/",
"x-DeviceInfo": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||", "x-DeviceInfo": "||9|6.6.0|chrome|95.0.4638.69|uwIy75obnsRPIwlJSd7D9GhUvFwG96ce||macos 10.15.2||zh-CN|||",
"x-huawei-channelSrc": "10000034", "x-huawei-channelSrc": "10000034",
"x-inner-ntwk": "2", "x-inner-ntwk": "2",
"x-m4c-caller": "PC", "x-m4c-caller": "PC",
"x-m4c-src": "10002", "x-m4c-src": "10002",
"x-SvcType": svcType, "x-SvcType": svcType,
"Inner-Hcy-Router-Https": "1",
}) })
var e BaseResp var e BaseResp
@ -157,64 +131,6 @@ func (d *Yun139) request(pathname string, method string, callback base.ReqCallba
} }
return res.Body(), nil return res.Body(), nil
} }
func (d *Yun139) requestRoute(data interface{}, resp interface{}) ([]byte, error) {
url := "https://user-njs.yun.139.com/user/route/qryRoutePolicy"
req := base.RestyClient.R()
randStr := random.String(16)
ts := time.Now().Format("2006-01-02 15:04:05")
callback := func(req *resty.Request) {
req.SetBody(data)
}
if callback != nil {
callback(req)
}
body, err := utils.Json.Marshal(req.Body)
if err != nil {
return nil, err
}
sign := calSign(string(body), ts, randStr)
svcType := "1"
if d.isFamily() {
svcType = "2"
}
req.SetHeaders(map[string]string{
"Accept": "application/json, text/plain, */*",
"CMS-DEVICE": "default",
"Authorization": "Basic " + d.getAuthorization(),
"mcloud-channel": "1000101",
"mcloud-client": "10701",
//"mcloud-route": "001",
"mcloud-sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign),
//"mcloud-skey":"",
"mcloud-version": "7.14.0",
"Origin": "https://yun.139.com",
"Referer": "https://yun.139.com/w/",
"x-DeviceInfo": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||",
"x-huawei-channelSrc": "10000034",
"x-inner-ntwk": "2",
"x-m4c-caller": "PC",
"x-m4c-src": "10002",
"x-SvcType": svcType,
"Inner-Hcy-Router-Https": "1",
})
var e BaseResp
req.SetResult(&e)
res, err := req.Execute(http.MethodPost, url)
log.Debugln(res.String())
if !e.Success {
return nil, errors.New(e.Message)
}
if resp != nil {
err = utils.Json.Unmarshal(res.Body(), resp)
if err != nil {
return nil, err
}
}
return res.Body(), nil
}
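The removed requestRoute helper posts to user/route/qryRoutePolicy and, together with the QueryRoutePolicyResp struct deleted earlier, implies the newer driver resolves a per-account API host at startup. How the response is reduced to a host is not shown in this diff, so the following is a hedged sketch under that assumption:

package sketch

// trimmed copy of the fields from QueryRoutePolicyResp used here
type routePolicyResp struct {
	Data struct {
		RoutePolicyList []struct {
			ModName  string
			HttpsUrl string
		}
	}
}

// pickHost returns the HTTPS endpoint of the first policy entry; both the
// first-entry choice and the HTTPS preference are illustrative guesses,
// not behavior confirmed by the diff.
func pickHost(resp *routePolicyResp) string {
	for _, p := range resp.Data.RoutePolicyList {
		if p.HttpsUrl != "" {
			return p.HttpsUrl
		}
	}
	return ""
}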
func (d *Yun139) post(pathname string, data interface{}, resp interface{}) ([]byte, error) { func (d *Yun139) post(pathname string, data interface{}, resp interface{}) ([]byte, error) {
return d.request(pathname, http.MethodPost, func(req *resty.Request) { return d.request(pathname, http.MethodPost, func(req *resty.Request) {
req.SetBody(data) req.SetBody(data)
@ -235,7 +151,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) {
"catalogSortType": 0, "catalogSortType": 0,
"contentSortType": 0, "contentSortType": 0,
"commonAccountInfo": base.Json{ "commonAccountInfo": base.Json{
"account": d.getAccount(), "account": d.Account,
"accountType": 1, "accountType": 1,
}, },
} }
@ -283,7 +199,7 @@ func (d *Yun139) newJson(data map[string]interface{}) base.Json {
"cloudID": d.CloudID, "cloudID": d.CloudID,
"cloudType": 1, "cloudType": 1,
"commonAccountInfo": base.Json{ "commonAccountInfo": base.Json{
"account": d.getAccount(), "account": d.Account,
"accountType": 1, "accountType": 1,
}, },
} }
@ -304,11 +220,10 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
"sortDirection": 1, "sortDirection": 1,
}) })
var resp QueryContentListResp var resp QueryContentListResp
_, err := d.post("/orchestration/familyCloud-rebuild/content/v1.2/queryContentList", data, &resp) _, err := d.post("/orchestration/familyCloud/content/v1.0/queryContentList", data, &resp)
if err != nil { if err != nil {
return nil, err return nil, err
} }
path := resp.Data.Path
for _, catalog := range resp.Data.CloudCatalogList { for _, catalog := range resp.Data.CloudCatalogList {
f := model.Object{ f := model.Object{
ID: catalog.CatalogID, ID: catalog.CatalogID,
@ -317,7 +232,6 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
IsFolder: true, IsFolder: true,
Modified: getTime(catalog.LastUpdateTime), Modified: getTime(catalog.LastUpdateTime),
Ctime: getTime(catalog.CreateTime), Ctime: getTime(catalog.CreateTime),
Path: path, // Path of the folder's parent directory
} }
files = append(files, &f) files = append(files, &f)
} }
@ -329,14 +243,13 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
Size: content.ContentSize, Size: content.ContentSize,
Modified: getTime(content.LastUpdateTime), Modified: getTime(content.LastUpdateTime),
Ctime: getTime(content.CreateTime), Ctime: getTime(content.CreateTime),
Path: path, // Path of the directory containing the file
}, },
Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL}, Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
//Thumbnail: content.BigthumbnailURL, //Thumbnail: content.BigthumbnailURL,
} }
files = append(files, &f) files = append(files, &f)
} }
if resp.Data.TotalCount == 0 { if 100*pageNum > resp.Data.TotalCount {
break break
} }
pageNum++ pageNum++
@ -344,67 +257,12 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
return files, nil return files, nil
} }
func (d *Yun139) groupGetFiles(catalogID string) ([]model.Obj, error) {
pageNum := 1
files := make([]model.Obj, 0)
for {
data := d.newJson(base.Json{
"groupID": d.CloudID,
"catalogID": path.Base(catalogID),
"contentSortType": 0,
"sortDirection": 1,
"startNumber": pageNum,
"endNumber": pageNum + 99,
"path": path.Join(d.RootFolderID, catalogID),
})
var resp QueryGroupContentListResp
_, err := d.post("/orchestration/group-rebuild/content/v1.0/queryGroupContentList", data, &resp)
if err != nil {
return nil, err
}
path := resp.Data.GetGroupContentResult.ParentCatalogID
for _, catalog := range resp.Data.GetGroupContentResult.CatalogList {
f := model.Object{
ID: catalog.CatalogID,
Name: catalog.CatalogName,
Size: 0,
IsFolder: true,
Modified: getTime(catalog.UpdateTime),
Ctime: getTime(catalog.CreateTime),
Path: catalog.Path, // the folder's real Path, starting with root:/
}
files = append(files, &f)
}
for _, content := range resp.Data.GetGroupContentResult.ContentList {
f := model.ObjThumb{
Object: model.Object{
ID: content.ContentID,
Name: content.ContentName,
Size: content.ContentSize,
Modified: getTime(content.UpdateTime),
Ctime: getTime(content.CreateTime),
Path: path, // Path of the directory containing the file
},
Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
//Thumbnail: content.BigthumbnailURL,
}
files = append(files, &f)
}
if (pageNum + 99) > resp.Data.GetGroupContentResult.NodeCount {
break
}
pageNum = pageNum + 100
}
return files, nil
}
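The removed groupGetFiles paginates in fixed windows rather than page numbers: each request asks for items startNumber through endNumber, the window advances by 100, and the loop exits once the window end passes NodeCount. The window arithmetic in isolation:

package sketch

// windows lists the (start, end) pairs the removed loop would request for
// a group listing with total items, using the same exit test.
func windows(total int) [][2]int {
	var out [][2]int
	for start := 1; ; start += 100 {
		end := start + 99
		out = append(out, [2]int{start, end})
		if end > total {
			break
		}
	}
	return out
}

// windows(250) -> [[1 100] [101 200] [201 300]]: the last request always
// overshoots; presumably the server returns whatever remains.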
func (d *Yun139) getLink(contentId string) (string, error) { func (d *Yun139) getLink(contentId string) (string, error) {
data := base.Json{ data := base.Json{
"appName": "", "appName": "",
"contentID": contentId, "contentID": contentId,
"commonAccountInfo": base.Json{ "commonAccountInfo": base.Json{
"account": d.getAccount(), "account": d.Account,
"accountType": 1, "accountType": 1,
}, },
} }
@ -415,32 +273,6 @@ func (d *Yun139) getLink(contentId string) (string, error) {
} }
return jsoniter.Get(res, "data", "downloadURL").ToString(), nil return jsoniter.Get(res, "data", "downloadURL").ToString(), nil
} }
func (d *Yun139) familyGetLink(contentId string, path string) (string, error) {
data := d.newJson(base.Json{
"contentID": contentId,
"path": path,
})
res, err := d.post("/orchestration/familyCloud-rebuild/content/v1.0/getFileDownLoadURL",
data, nil)
if err != nil {
return "", err
}
return jsoniter.Get(res, "data", "downloadURL").ToString(), nil
}
func (d *Yun139) groupGetLink(contentId string, path string) (string, error) {
data := d.newJson(base.Json{
"contentID": contentId,
"groupID": d.CloudID,
"path": path,
})
res, err := d.post("/orchestration/group-rebuild/groupManage/v1.0/getGroupFileDownLoadURL",
data, nil)
if err != nil {
return "", err
}
return jsoniter.Get(res, "data", "downloadURL").ToString(), nil
}
func unicode(str string) string { func unicode(str string) string {
textQuoted := strconv.QuoteToASCII(str) textQuoted := strconv.QuoteToASCII(str)
@ -449,7 +281,7 @@ func unicode(str string) string {
} }
func (d *Yun139) personalRequest(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { func (d *Yun139) personalRequest(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
url := d.getPersonalCloudHost() + pathname url := "https://personal-kd-njs.yun.139.com" + pathname
req := base.RestyClient.R() req := base.RestyClient.R()
randStr := random.String(16) randStr := random.String(16)
ts := time.Now().Format("2006-01-02 15:04:05") ts := time.Now().Format("2006-01-02 15:04:05")
@ -467,15 +299,17 @@ func (d *Yun139) personalRequest(pathname string, method string, callback base.R
} }
req.SetHeaders(map[string]string{ req.SetHeaders(map[string]string{
"Accept": "application/json, text/plain, */*", "Accept": "application/json, text/plain, */*",
"Authorization": "Basic " + d.getAuthorization(), "Authorization": "Basic " + d.Authorization,
"Caller": "web", "Caller": "web",
"Cms-Device": "default", "Cms-Device": "default",
"Mcloud-Channel": "1000101", "Mcloud-Channel": "1000101",
"Mcloud-Client": "10701", "Mcloud-Client": "10701",
"Mcloud-Route": "001", "Mcloud-Route": "001",
"Mcloud-Sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign), "Mcloud-Sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign),
"Mcloud-Version": "7.14.0", "Mcloud-Version": "7.13.0",
"x-DeviceInfo": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||", "Origin": "https://yun.139.com",
"Referer": "https://yun.139.com/w/",
"x-DeviceInfo": "||9|7.13.0|chrome|120.0.0.0|||windows 10||zh-CN|||",
"x-huawei-channelSrc": "10000034", "x-huawei-channelSrc": "10000034",
"x-inner-ntwk": "2", "x-inner-ntwk": "2",
"x-m4c-caller": "PC", "x-m4c-caller": "PC",
@ -484,7 +318,7 @@ func (d *Yun139) personalRequest(pathname string, method string, callback base.R
"X-Yun-Api-Version": "v1", "X-Yun-Api-Version": "v1",
"X-Yun-App-Channel": "10000034", "X-Yun-App-Channel": "10000034",
"X-Yun-Channel-Source": "10000034", "X-Yun-Channel-Source": "10000034",
"X-Yun-Client-Info": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||dW5kZWZpbmVk||", "X-Yun-Client-Info": "||9|7.13.0|chrome|120.0.0.0|||windows 10||zh-CN|||dW5kZWZpbmVk||",
"X-Yun-Module-Type": "100", "X-Yun-Module-Type": "100",
"X-Yun-Svc-Type": "1", "X-Yun-Svc-Type": "1",
}) })
@ -536,7 +370,7 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) {
"parentFileId": fileId, "parentFileId": fileId,
} }
var resp PersonalListResp var resp PersonalListResp
_, err := d.personalPost("/file/list", data, &resp) _, err := d.personalPost("/hcy/file/list", data, &resp)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -556,15 +390,7 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) {
} else { } else {
var Thumbnails = item.Thumbnails var Thumbnails = item.Thumbnails
var ThumbnailUrl string var ThumbnailUrl string
if d.UseLargeThumbnail { if len(Thumbnails) > 0 {
for _, thumb := range Thumbnails {
if strings.Contains(thumb.Style, "Large") {
ThumbnailUrl = thumb.Url
break
}
}
}
if ThumbnailUrl == "" && len(Thumbnails) > 0 {
ThumbnailUrl = Thumbnails[len(Thumbnails)-1].Url ThumbnailUrl = Thumbnails[len(Thumbnails)-1].Url
} }
f = &model.ObjThumb{ f = &model.ObjThumb{
@ -592,7 +418,7 @@ func (d *Yun139) personalGetLink(fileId string) (string, error) {
data := base.Json{ data := base.Json{
"fileId": fileId, "fileId": fileId,
} }
res, err := d.personalPost("/file/getDownloadUrl", res, err := d.personalPost("/hcy/file/getDownloadUrl",
data, nil) data, nil)
if err != nil { if err != nil {
return "", err return "", err
@ -604,22 +430,3 @@ func (d *Yun139) personalGetLink(fileId string) (string, error) {
return jsoniter.Get(res, "data", "url").ToString(), nil return jsoniter.Get(res, "data", "url").ToString(), nil
} }
} }
func (d *Yun139) getAuthorization() string {
if d.ref != nil {
return d.ref.getAuthorization()
}
return d.Authorization
}
func (d *Yun139) getAccount() string {
if d.ref != nil {
return d.ref.getAccount()
}
return d.Account
}
func (d *Yun139) getPersonalCloudHost() string {
if d.ref != nil {
return d.ref.getPersonalCloudHost()
}
return d.PersonalCloudHost
}
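These three getters are the read side of the driver-reference mechanism: a Yun139 mount that was pointed at another one (via an InitReference call elsewhere in the driver) pulls its credentials and personal-cloud host from the referenced instance instead of duplicating them. The pattern in miniature:

package sketch

type store struct {
	ref  *store // non-nil when this mount reuses another mount's session
	auth string
}

// authorization delegates to the referenced store when one is set, so a
// token refreshed there is immediately visible here as well.
func (s *store) authorization() string {
	if s.ref != nil {
		return s.ref.authorization()
	}
	return s.auth
}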

View File

@ -80,10 +80,9 @@ func (d *Cloud189) Link(ctx context.Context, file model.Obj, args model.LinkArgs
} }
func (d *Cloud189) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { func (d *Cloud189) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
safeName := d.sanitizeName(dirName)
form := map[string]string{ form := map[string]string{
"parentFolderId": parentDir.GetID(), "parentFolderId": parentDir.GetID(),
"folderName": safeName, "folderName": dirName,
} }
_, err := d.request("https://cloud.189.cn/api/open/file/createFolder.action", http.MethodPost, func(req *resty.Request) { _, err := d.request("https://cloud.189.cn/api/open/file/createFolder.action", http.MethodPost, func(req *resty.Request) {
req.SetFormData(form) req.SetFormData(form)
@ -127,10 +126,9 @@ func (d *Cloud189) Rename(ctx context.Context, srcObj model.Obj, newName string)
idKey = "folderId" idKey = "folderId"
nameKey = "destFolderName" nameKey = "destFolderName"
} }
safeName := d.sanitizeName(newName)
form := map[string]string{ form := map[string]string{
idKey: srcObj.GetID(), idKey: srcObj.GetID(),
nameKey: safeName, nameKey: newName,
} }
_, err := d.request(url, http.MethodPost, func(req *resty.Request) { _, err := d.request(url, http.MethodPost, func(req *resty.Request) {
req.SetFormData(form) req.SetFormData(form)

View File

@ -6,10 +6,9 @@ import (
) )
type Addition struct { type Addition struct {
Username string `json:"username" required:"true"` Username string `json:"username" required:"true"`
Password string `json:"password" required:"true"` Password string `json:"password" required:"true"`
Cookie string `json:"cookie" help:"Fill in the cookie if need captcha"` Cookie string `json:"cookie" help:"Fill in the cookie if need captcha"`
StripEmoji bool `json:"strip_emoji" help:"Remove four-byte characters (e.g., emoji) before upload"`
driver.RootID driver.RootID
} }

View File

@ -11,11 +11,9 @@ import (
"io" "io"
"math" "math"
"net/http" "net/http"
"path"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"unicode/utf8"
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/driver"
@ -224,37 +222,13 @@ func (d *Cloud189) getFiles(fileId string) ([]model.Obj, error) {
return res, nil return res, nil
} }
func (d *Cloud189) sanitizeName(name string) string {
if !d.StripEmoji {
return name
}
b := strings.Builder{}
for _, r := range name {
if utf8.RuneLen(r) == 4 {
continue
}
b.WriteRune(r)
}
sanitized := b.String()
if sanitized == "" {
ext := path.Ext(name)
if ext != "" {
sanitized = "file" + ext
} else {
sanitized = "file"
}
}
return sanitized
}
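The removed sanitizeName drops every rune whose UTF-8 encoding is four bytes long, which is exactly the code points from U+10000 up, where emoji live; three-byte CJK characters pass through, and a name filtered down to nothing falls back to "file" plus the original extension. The filter on its own:

package sketch

import "unicode/utf8"

// stripFourByteRunes keeps every rune except those that encode to four
// UTF-8 bytes (U+10000 and above, e.g. emoji).
func stripFourByteRunes(name string) string {
	out := make([]rune, 0, len(name))
	for _, r := range name {
		if utf8.RuneLen(r) == 4 {
			continue
		}
		out = append(out, r)
	}
	return string(out)
}

// stripFourByteRunes("报告😀.docx") == "报告.docx"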
func (d *Cloud189) oldUpload(dstDir model.Obj, file model.FileStreamer) error { func (d *Cloud189) oldUpload(dstDir model.Obj, file model.FileStreamer) error {
safeName := d.sanitizeName(file.GetName())
res, err := d.client.R().SetMultipartFormData(map[string]string{ res, err := d.client.R().SetMultipartFormData(map[string]string{
"parentId": dstDir.GetID(), "parentId": dstDir.GetID(),
"sessionKey": "??", "sessionKey": "??",
"opertype": "1", "opertype": "1",
"fname": safeName, "fname": file.GetName(),
}).SetMultipartField("Filedata", safeName, file.GetMimetype(), file).Post("https://hb02.upload.cloud.189.cn/v1/DCIWebUploadAction") }).SetMultipartField("Filedata", file.GetName(), file.GetMimetype(), file).Post("https://hb02.upload.cloud.189.cn/v1/DCIWebUploadAction")
if err != nil { if err != nil {
return err return err
} }
@ -339,10 +313,9 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
const DEFAULT int64 = 10485760 const DEFAULT int64 = 10485760
var count = int64(math.Ceil(float64(file.GetSize()) / float64(DEFAULT))) var count = int64(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
safeName := d.sanitizeName(file.GetName())
res, err := d.uploadRequest("/person/initMultiUpload", map[string]string{ res, err := d.uploadRequest("/person/initMultiUpload", map[string]string{
"parentFolderId": dstDir.GetID(), "parentFolderId": dstDir.GetID(),
"fileName": encode(safeName), "fileName": encode(file.GetName()),
"fileSize": strconv.FormatInt(file.GetSize(), 10), "fileSize": strconv.FormatInt(file.GetSize(), 10),
"sliceSize": strconv.FormatInt(DEFAULT, 10), "sliceSize": strconv.FormatInt(DEFAULT, 10),
"lazyCheck": "1", "lazyCheck": "1",
@ -392,7 +365,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
log.Debugf("uploadData: %+v", uploadData) log.Debugf("uploadData: %+v", uploadData)
requestURL := uploadData.RequestURL requestURL := uploadData.RequestURL
uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&") uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))) req, err := http.NewRequest(http.MethodPut, requestURL, bytes.NewReader(byteData))
if err != nil { if err != nil {
return err return err
} }
@ -402,11 +375,11 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
req.Header.Set(v[0:i], v[i+1:]) req.Header.Set(v[0:i], v[i+1:])
} }
r, err := base.HttpClient.Do(req) r, err := base.HttpClient.Do(req)
log.Debugf("%+v %+v", r, r.Request.Header)
r.Body.Close()
if err != nil { if err != nil {
return err return err
} }
log.Debugf("%+v %+v", r, r.Request.Header)
_ = r.Body.Close()
up(float64(i) * 100 / float64(count)) up(float64(i) * 100 / float64(count))
} }
fileMd5 := hex.EncodeToString(md5Sum.Sum(nil)) fileMd5 := hex.EncodeToString(md5Sum.Sum(nil))

View File

@ -1,8 +1,8 @@
package _189pc package _189pc
import ( import (
"container/ring"
"context" "context"
"fmt"
"net/http" "net/http"
"strconv" "strconv"
"strings" "strings"
@ -14,7 +14,6 @@ import (
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
"github.com/google/uuid"
) )
type Cloud189PC struct { type Cloud189PC struct {
@ -30,11 +29,10 @@ type Cloud189PC struct {
uploadThread int uploadThread int
familyTransferFolder *Cloud189Folder familyTransferFolder *ring.Ring
cleanFamilyTransferFile func() cleanFamilyTransferFile func()
storageConfig driver.Config storageConfig driver.Config
ref *Cloud189PC
} }
func (y *Cloud189PC) Config() driver.Config { func (y *Cloud189PC) Config() driver.Config {
@ -49,18 +47,9 @@ func (y *Cloud189PC) GetAddition() driver.Additional {
} }
func (y *Cloud189PC) Init(ctx context.Context) (err error) { func (y *Cloud189PC) Init(ctx context.Context) (err error) {
y.storageConfig = config // compatibility with the legacy upload API
if y.isFamily() { y.storageConfig.NoOverwriteUpload = y.isFamily() && (y.Addition.RapidUpload || y.Addition.UploadMethod == "old")
// compatibility with the legacy upload API
if y.Addition.RapidUpload || y.Addition.UploadMethod == "old" {
y.storageConfig.NoOverwriteUpload = true
}
} else {
// family-cloud transfer does not support overwrite upload
if y.Addition.FamilyTransfer {
y.storageConfig.NoOverwriteUpload = true
}
}
// handle personal-cloud and family-cloud parameters // handle personal-cloud and family-cloud parameters
if y.isFamily() && y.RootFolderID == "-11" { if y.isFamily() && y.RootFolderID == "-11" {
y.RootFolderID = "" y.RootFolderID = ""
@ -75,22 +64,20 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
y.uploadThread, y.UploadThread = 3, "3" y.uploadThread, y.UploadThread = 3, "3"
} }
if y.ref == nil { // initialize the request client
// initialize the request client if y.client == nil {
if y.client == nil { y.client = base.NewRestyClient().SetHeaders(map[string]string{
y.client = base.NewRestyClient().SetHeaders(map[string]string{ "Accept": "application/json;charset=UTF-8",
"Accept": "application/json;charset=UTF-8", "Referer": WEB_URL,
"Referer": WEB_URL, })
}) }
}
// avoid duplicate logins // avoid duplicate logins
identity := utils.GetMD5EncodeStr(y.Username + y.Password) identity := utils.GetMD5EncodeStr(y.Username + y.Password)
if !y.isLogin() || y.identity != identity { if !y.isLogin() || y.identity != identity {
y.identity = identity y.identity = identity
if err = y.login(); err != nil { if err = y.login(); err != nil {
return return
}
} }
} }
@ -101,14 +88,13 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
} }
} }
// create the transfer folder // create the transfer folder, to prevent file-name collisions
if y.FamilyTransfer { if y.FamilyTransfer {
if err := y.createFamilyTransferFolder(); err != nil { if y.familyTransferFolder, err = y.createFamilyTransferFolder(32); err != nil {
return err return err
} }
} }
// throttle cleanup of transferred files
y.cleanFamilyTransferFile = utils.NewThrottle2(time.Minute, func() { y.cleanFamilyTransferFile = utils.NewThrottle2(time.Minute, func() {
if err := y.cleanFamilyTransfer(context.TODO()); err != nil { if err := y.cleanFamilyTransfer(context.TODO()); err != nil {
utils.Log.Errorf("cleanFamilyTransferFolderError:%s", err) utils.Log.Errorf("cleanFamilyTransferFolderError:%s", err)
@ -117,17 +103,7 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
return return
} }
func (d *Cloud189PC) InitReference(storage driver.Driver) error {
refStorage, ok := storage.(*Cloud189PC)
if ok {
d.ref = refStorage
return nil
}
return errs.NotSupport
}
func (y *Cloud189PC) Drop(ctx context.Context) error { func (y *Cloud189PC) Drop(ctx context.Context) error {
y.ref = nil
return nil return nil
} }
@ -205,11 +181,10 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
fullUrl += "/createFolder.action" fullUrl += "/createFolder.action"
var newFolder Cloud189Folder var newFolder Cloud189Folder
safeName := y.sanitizeName(dirName)
_, err := y.post(fullUrl, func(req *resty.Request) { _, err := y.post(fullUrl, func(req *resty.Request) {
req.SetContext(ctx) req.SetContext(ctx)
req.SetQueryParams(map[string]string{ req.SetQueryParams(map[string]string{
"folderName": safeName, "folderName": dirName,
"relativePath": "", "relativePath": "",
}) })
if isFamily { if isFamily {
@ -226,7 +201,6 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
if err != nil { if err != nil {
return nil, err return nil, err
} }
newFolder.Name = safeName
return &newFolder, nil return &newFolder, nil
} }
@ -260,29 +234,21 @@ func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName strin
} }
var newObj model.Obj var newObj model.Obj
safeName := y.sanitizeName(newName)
switch f := srcObj.(type) { switch f := srcObj.(type) {
case *Cloud189File: case *Cloud189File:
fullUrl += "/renameFile.action" fullUrl += "/renameFile.action"
queryParam["fileId"] = srcObj.GetID() queryParam["fileId"] = srcObj.GetID()
queryParam["destFileName"] = safeName queryParam["destFileName"] = newName
newObj = &Cloud189File{Icon: f.Icon} // reuse preview icon newObj = &Cloud189File{Icon: f.Icon} // reuse preview icon
case *Cloud189Folder: case *Cloud189Folder:
fullUrl += "/renameFolder.action" fullUrl += "/renameFolder.action"
queryParam["folderId"] = srcObj.GetID() queryParam["folderId"] = srcObj.GetID()
queryParam["destFolderName"] = safeName queryParam["destFolderName"] = newName
newObj = &Cloud189Folder{} newObj = &Cloud189Folder{}
default: default:
return nil, errs.NotSupport return nil, errs.NotSupport
} }
switch obj := newObj.(type) {
case *Cloud189File:
obj.Name = safeName
case *Cloud189Folder:
obj.Name = safeName
}
_, err := y.request(fullUrl, method, func(req *resty.Request) { _, err := y.request(fullUrl, method, func(req *resty.Request) {
req.SetContext(ctx).SetQueryParams(queryParam) req.SetContext(ctx).SetQueryParams(queryParam)
}, nil, newObj, isFamily) }, nil, newObj, isFamily)
@ -348,49 +314,35 @@ func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
if !isFamily && y.FamilyTransfer { if !isFamily && y.FamilyTransfer {
// change the upload target to the family-cloud folder // change the upload target to the family-cloud folder
transferDstDir := dstDir transferDstDir := dstDir
dstDir = y.familyTransferFolder dstDir = (y.familyTransferFolder.Value).(*Cloud189Folder)
y.familyTransferFolder = y.familyTransferFolder.Next()
// use a temporary file name
srcName := stream.GetName()
stream = &WrapFileStreamer{
FileStreamer: stream,
Name: fmt.Sprintf("0%s.transfer", uuid.NewString()),
}
// upload via the family cloud
isFamily = true isFamily = true
overwrite = false overwrite = false
defer func() { defer func() {
if newObj != nil { if newObj != nil {
// batch tasks occasionally fail to delete
// transfer the family-cloud file to the personal cloud // transfer the family-cloud file to the personal cloud
// 转存家庭云文件到个人云 // 转存家庭云文件到个人云
err = y.SaveFamilyFileToPersonCloud(context.TODO(), y.FamilyID, newObj, transferDstDir, true) err = y.SaveFamilyFileToPersonCloud(context.TODO(), y.FamilyID, newObj, transferDstDir, true)
// delete the family-cloud source file
go y.Delete(context.TODO(), y.FamilyID, newObj) task := BatchTaskInfo{
// batch tasks occasionally fail to delete FileId: newObj.GetID(),
go y.cleanFamilyTransferFile() FileName: newObj.GetName(),
// return the error if the transfer failed IsFolder: BoolToNumber(newObj.IsDir()),
if err != nil {
return
} }
// look up the transferred file // delete the source file
var file *Cloud189File if resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, task); err == nil {
file, err = y.findFileByName(context.TODO(), newObj.GetName(), transferDstDir.GetID(), false) y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
if err != nil { // permanently delete
if err == errs.ObjectNotFound { if resp, err := y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, task); err == nil {
err = fmt.Errorf("unknown error: No transfer file obtained %s", newObj.GetName()) y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
} }
return
} }
newObj = nil
// rename the transferred file
newObj, err = y.Rename(context.TODO(), file, srcName)
if err != nil {
// if the rename fails, delete the source file
_ = y.Delete(context.TODO(), "", file)
}
return
} }
}() }()
} }

View File

@ -18,7 +18,6 @@ import (
"strings" "strings"
"time" "time"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils/random" "github.com/alist-org/alist/v3/pkg/utils/random"
) )
@ -209,12 +208,3 @@ func IF[V any](o bool, t V, f V) V {
} }
return f return f
} }
type WrapFileStreamer struct {
model.FileStreamer
Name string
}
func (w *WrapFileStreamer) GetName() string {
return w.Name
}

View File

@ -6,10 +6,9 @@ import (
) )
type Addition struct { type Addition struct {
Username string `json:"username" required:"true"` Username string `json:"username" required:"true"`
Password string `json:"password" required:"true"` Password string `json:"password" required:"true"`
VCode string `json:"validate_code"` VCode string `json:"validate_code"`
StripEmoji bool `json:"strip_emoji" help:"Remove four-byte characters (e.g., emoji) before upload"`
driver.RootID driver.RootID
OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"` OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"` OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`

View File

@ -2,34 +2,30 @@ package _189pc
import ( import (
"bytes" "bytes"
"container/ring"
"context" "context"
"crypto/md5"
"encoding/base64" "encoding/base64"
"encoding/hex" "encoding/hex"
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"io" "io"
"math"
"net/http" "net/http"
"net/http/cookiejar" "net/http/cookiejar"
"net/url" "net/url"
"os"
"path"
"regexp" "regexp"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"unicode/utf8"
"golang.org/x/sync/semaphore"
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/setting" "github.com/alist-org/alist/v3/internal/setting"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/errgroup" "github.com/alist-org/alist/v3/pkg/errgroup"
"github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils"
@ -59,36 +55,13 @@ const (
CHANNEL_ID = "web_cloud.189.cn" CHANNEL_ID = "web_cloud.189.cn"
) )
func (y *Cloud189PC) sanitizeName(name string) string {
if !y.StripEmoji {
return name
}
b := strings.Builder{}
for _, r := range name {
if utf8.RuneLen(r) == 4 {
continue
}
b.WriteRune(r)
}
sanitized := b.String()
if sanitized == "" {
ext := path.Ext(name)
if ext != "" {
sanitized = "file" + ext
} else {
sanitized = "file"
}
}
return sanitized
}
func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string { func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string {
dateOfGmt := getHttpDateStr() dateOfGmt := getHttpDateStr()
sessionKey := y.getTokenInfo().SessionKey sessionKey := y.tokenInfo.SessionKey
sessionSecret := y.getTokenInfo().SessionSecret sessionSecret := y.tokenInfo.SessionSecret
if isFamily { if isFamily {
sessionKey = y.getTokenInfo().FamilySessionKey sessionKey = y.tokenInfo.FamilySessionKey
sessionSecret = y.getTokenInfo().FamilySessionSecret sessionSecret = y.tokenInfo.FamilySessionSecret
} }
header := map[string]string{ header := map[string]string{
@ -101,9 +74,9 @@ func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool)
} }
func (y *Cloud189PC) EncryptParams(params Params, isFamily bool) string { func (y *Cloud189PC) EncryptParams(params Params, isFamily bool) string {
sessionSecret := y.getTokenInfo().SessionSecret sessionSecret := y.tokenInfo.SessionSecret
if isFamily { if isFamily {
sessionSecret = y.getTokenInfo().FamilySessionSecret sessionSecret = y.tokenInfo.FamilySessionSecret
} }
if params != nil { if params != nil {
return AesECBEncrypt(params.Encode(), sessionSecret[:16]) return AesECBEncrypt(params.Encode(), sessionSecret[:16])
@ -112,7 +85,7 @@ func (y *Cloud189PC) EncryptParams(params Params, isFamily bool) string {
} }
func (y *Cloud189PC) request(url, method string, callback base.ReqCallback, params Params, resp interface{}, isFamily ...bool) ([]byte, error) { func (y *Cloud189PC) request(url, method string, callback base.ReqCallback, params Params, resp interface{}, isFamily ...bool) ([]byte, error) {
req := y.getClient().R().SetQueryParams(clientSuffix()) req := y.client.R().SetQueryParams(clientSuffix())
// set params // set params
paramsData := y.EncryptParams(params, isBool(isFamily...)) paramsData := y.EncryptParams(params, isBool(isFamily...))
@ -201,8 +174,8 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
} }
var erron RespErr var erron RespErr
_ = jsoniter.Unmarshal(body, &erron) jsoniter.Unmarshal(body, &erron)
_ = xml.Unmarshal(body, &erron) xml.Unmarshal(body, &erron)
if erron.HasError() { if erron.HasError() {
return nil, &erron return nil, &erron
} }
@ -212,9 +185,39 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
return body, nil return body, nil
} }
func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) { func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
res := make([]model.Obj, 0, 100) fullUrl := API_URL
if isFamily {
fullUrl += "/family/file"
}
fullUrl += "/listFiles.action"
res := make([]model.Obj, 0, 130)
for pageNum := 1; ; pageNum++ { for pageNum := 1; ; pageNum++ {
resp, err := y.getFilesWithPage(ctx, fileId, isFamily, pageNum, 1000, y.OrderBy, y.OrderDirection) var resp Cloud189FilesResp
_, err := y.get(fullUrl, func(r *resty.Request) {
r.SetContext(ctx)
r.SetQueryParams(map[string]string{
"folderId": fileId,
"fileType": "0",
"mediaAttr": "0",
"iconOption": "5",
"pageNum": fmt.Sprint(pageNum),
"pageSize": "130",
})
if isFamily {
r.SetQueryParams(map[string]string{
"familyId": y.FamilyID,
"orderBy": toFamilyOrderBy(y.OrderBy),
"descending": toDesc(y.OrderDirection),
})
} else {
r.SetQueryParams(map[string]string{
"recursive": "0",
"orderBy": y.OrderBy,
"descending": toDesc(y.OrderDirection),
})
}
}, &resp, isFamily)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -233,63 +236,6 @@ func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool)
return res, nil return res, nil
} }
func (y *Cloud189PC) getFilesWithPage(ctx context.Context, fileId string, isFamily bool, pageNum int, pageSize int, orderBy string, orderDirection string) (*Cloud189FilesResp, error) {
fullUrl := API_URL
if isFamily {
fullUrl += "/family/file"
}
fullUrl += "/listFiles.action"
var resp Cloud189FilesResp
_, err := y.get(fullUrl, func(r *resty.Request) {
r.SetContext(ctx)
r.SetQueryParams(map[string]string{
"folderId": fileId,
"fileType": "0",
"mediaAttr": "0",
"iconOption": "5",
"pageNum": fmt.Sprint(pageNum),
"pageSize": fmt.Sprint(pageSize),
})
if isFamily {
r.SetQueryParams(map[string]string{
"familyId": y.FamilyID,
"orderBy": toFamilyOrderBy(orderBy),
"descending": toDesc(orderDirection),
})
} else {
r.SetQueryParams(map[string]string{
"recursive": "0",
"orderBy": orderBy,
"descending": toDesc(orderDirection),
})
}
}, &resp, isFamily)
if err != nil {
return nil, err
}
return &resp, nil
}
func (y *Cloud189PC) findFileByName(ctx context.Context, searchName string, folderId string, isFamily bool) (*Cloud189File, error) {
for pageNum := 1; ; pageNum++ {
resp, err := y.getFilesWithPage(ctx, folderId, isFamily, pageNum, 10, "filename", "asc")
if err != nil {
return nil, err
}
// all pages fetched, break out
if resp.FileListAO.Count == 0 {
return nil, errs.ObjectNotFound
}
for i := 0; i < len(resp.FileListAO.FileList); i++ {
file := resp.FileListAO.FileList[i]
if file.Name == searchName {
return &file, nil
}
}
}
}
func (y *Cloud189PC) login() (err error) { func (y *Cloud189PC) login() (err error) {
// initialize the parameters required for login // initialize the parameters required for login
if y.loginParam == nil { if y.loginParam == nil {
@ -349,7 +295,7 @@ func (y *Cloud189PC) login() (err error) {
_, err = y.client.R(). _, err = y.client.R().
SetResult(&tokenInfo).SetError(&erron). SetResult(&tokenInfo).SetError(&erron).
SetQueryParams(clientSuffix()). SetQueryParams(clientSuffix()).
SetQueryParam("redirectURL", loginresp.ToUrl). SetQueryParam("redirectURL", url.QueryEscape(loginresp.ToUrl)).
Post(API_URL + "/getSessionForPC.action") Post(API_URL + "/getSessionForPC.action")
if err != nil { if err != nil {
return return
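The right-hand (older) code escapes loginresp.ToUrl before handing it to resty's SetQueryParam, which URL-encodes values itself, so the redirect URL reaches the server double-encoded; reading the left-hand change as a fix for that is an inference from the diff, not something it states. What double encoding does to a URL:

package sketch

import "net/url"

// QueryEscape("https://x/?a=1")              -> "https%3A%2F%2Fx%2F%3Fa%3D1"
// QueryEscape(QueryEscape("https://x/?a=1")) -> "https%253A%252F%252Fx%252F%253Fa%253D1"
func doubleEscape(s string) string {
	return url.QueryEscape(url.QueryEscape(s))
}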
@ -457,9 +403,6 @@ func (y *Cloud189PC) initLoginParam() error {
// refresh the session // refresh the session
func (y *Cloud189PC) refreshSession() (err error) { func (y *Cloud189PC) refreshSession() (err error) {
if y.ref != nil {
return y.ref.refreshSession()
}
var erron RespErr var erron RespErr
var userSessionResp UserSessionResp var userSessionResp UserSessionResp
_, err = y.client.R(). _, err = y.client.R().
@ -498,13 +441,16 @@ func (y *Cloud189PC) refreshSession() (err error) {
// normal upload // normal upload
// files of size 0 cannot be uploaded // files of size 0 cannot be uploaded
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) { func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
size := file.GetSize() var sliceSize = partSize(file.GetSize())
sliceSize := partSize(size) count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
safeName := y.sanitizeName(file.GetName()) lastPartSize := file.GetSize() % sliceSize
if file.GetSize() > 0 && lastPartSize == 0 {
lastPartSize = sliceSize
}
params := Params{ params := Params{
"parentFolderId": dstDir.GetID(), "parentFolderId": dstDir.GetID(),
"fileName": url.QueryEscape(safeName), "fileName": url.QueryEscape(file.GetName()),
"fileSize": fmt.Sprint(file.GetSize()), "fileSize": fmt.Sprint(file.GetSize()),
"sliceSize": fmt.Sprint(sliceSize), "sliceSize": fmt.Sprint(sliceSize),
"lazyCheck": "1", "lazyCheck": "1",
@ -532,32 +478,24 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
retry.Attempts(3), retry.Attempts(3),
retry.Delay(time.Second), retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay)) retry.DelayType(retry.BackOffDelay))
sem := semaphore.NewWeighted(3)
count := int(size / sliceSize) fileMd5 := md5.New()
lastPartSize := size % sliceSize silceMd5 := md5.New()
if lastPartSize > 0 {
count++
} else {
lastPartSize = sliceSize
}
fileMd5 := utils.MD5.NewFunc()
silceMd5 := utils.MD5.NewFunc()
silceMd5Hexs := make([]string, 0, count) silceMd5Hexs := make([]string, 0, count)
teeReader := io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5))
byteSize := sliceSize
for i := 1; i <= count; i++ { for i := 1; i <= count; i++ {
if utils.IsCanceled(upCtx) { if utils.IsCanceled(upCtx) {
break break
} }
byteData := make([]byte, sliceSize)
if i == count { if i == count {
byteSize = lastPartSize byteData = byteData[:lastPartSize]
} }
byteData := make([]byte, byteSize)
// read a chunk // read a chunk
silceMd5.Reset() silceMd5.Reset()
if _, err := io.ReadFull(teeReader, byteData); err != io.EOF && err != nil { if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil {
sem.Release(1)
return nil, err return nil, err
} }
@ -567,10 +505,6 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes)) partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
threadG.Go(func(ctx context.Context) error { threadG.Go(func(ctx context.Context) error {
if err = sem.Acquire(ctx, 1); err != nil {
return err
}
defer sem.Release(1)
uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo) uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
if err != nil { if err != nil {
return err return err
@ -578,8 +512,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
// step 4: upload the slice // step 4: upload the slice
uploadUrl := uploadUrls[0] uploadUrl := uploadUrls[0]
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData), isFamily)
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily)
if err != nil { if err != nil {
return err return err
} }
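Both versions of StreamUpload above cut the stream into sliceSize chunks and route every byte read through a TeeReader into two MD5 states, one for the whole file and one reset per slice; the two ways of computing the chunk count (integer division plus remainder versus math.Ceil) agree for any positive size. A self-contained sketch of the counting and hashing:

package sketch

import (
	"crypto/md5"
	"encoding/hex"
	"io"
	"strings"
)

// sliceCount mirrors the left-hand arithmetic: sliceCount(25<<20, 10<<20)
// returns (3, 5<<20), i.e. ceil(size/sliceSize) chunks with a short tail.
func sliceCount(size, sliceSize int64) (count, lastPartSize int64) {
	count = size / sliceSize
	lastPartSize = size % sliceSize
	if lastPartSize > 0 {
		count++
	} else {
		lastPartSize = sliceSize
	}
	return
}

// hashSlices reads chunk by chunk, keeping a running whole-file MD5 and a
// per-chunk MD5, the way the upload loop above does.
func hashSlices(r io.Reader, size, sliceSize int64) (string, []string, error) {
	count, last := sliceCount(size, sliceSize)
	fileH := md5.New()
	var sliceMd5s []string
	for i := int64(1); i <= count; i++ {
		n := sliceSize
		if i == count {
			n = last
		}
		sliceH := md5.New()
		tee := io.TeeReader(io.LimitReader(r, n), io.MultiWriter(fileH, sliceH))
		if _, err := io.Copy(io.Discard, tee); err != nil {
			return "", nil, err
		}
		sliceMd5s = append(sliceMd5s, strings.ToUpper(hex.EncodeToString(sliceH.Sum(nil))))
	}
	return strings.ToUpper(hex.EncodeToString(fileH.Sum(nil))), sliceMd5s, nil
}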
@ -622,8 +555,7 @@ func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
return nil, errors.New("invalid hash") return nil, errors.New("invalid hash")
} }
safeName := y.sanitizeName(stream.GetName()) uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, stream.GetName(), fmt.Sprint(stream.GetSize()), isFamily)
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, safeName, fmt.Sprint(stream.GetSize()), isFamily)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -637,44 +569,24 @@ func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
// fast upload // fast upload
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) { func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
var ( tempFile, err := file.CacheFullInTempFile()
cache = file.GetFile() if err != nil {
tmpF *os.File return nil, err
err error
)
safeName := y.sanitizeName(file.GetName())
size := file.GetSize()
if _, ok := cache.(io.ReaderAt); !ok && size > 0 {
tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*")
if err != nil {
return nil, err
}
defer func() {
_ = tmpF.Close()
_ = os.Remove(tmpF.Name())
}()
cache = tmpF
} }
sliceSize := partSize(size)
count := int(size / sliceSize) var sliceSize = partSize(file.GetSize())
lastSliceSize := size % sliceSize count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
if lastSliceSize > 0 { lastSliceSize := file.GetSize() % sliceSize
count++ if file.GetSize() > 0 && lastSliceSize == 0 {
} else {
lastSliceSize = sliceSize lastSliceSize = sliceSize
} }
// step 1: compute the required information first // step 1: compute the required information first
byteSize := sliceSize byteSize := sliceSize
fileMd5 := utils.MD5.NewFunc() fileMd5 := md5.New()
sliceMd5 := utils.MD5.NewFunc() silceMd5 := md5.New()
sliceMd5Hexs := make([]string, 0, count) silceMd5Hexs := make([]string, 0, count)
partInfos := make([]string, 0, count) partInfos := make([]string, 0, count)
writers := []io.Writer{fileMd5, sliceMd5}
if tmpF != nil {
writers = append(writers, tmpF)
}
written := int64(0)
for i := 1; i <= count; i++ { for i := 1; i <= count; i++ {
if utils.IsCanceled(ctx) { if utils.IsCanceled(ctx) {
return nil, ctx.Err() return nil, ctx.Err()
@ -684,31 +596,19 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
byteSize = lastSliceSize byteSize = lastSliceSize
} }
n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), file, byteSize) silceMd5.Reset()
written += n if _, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5, silceMd5), tempFile, byteSize); err != nil && err != io.EOF {
if err != nil && err != io.EOF {
return nil, err return nil, err
} }
md5Byte := sliceMd5.Sum(nil) md5Byte := silceMd5.Sum(nil)
sliceMd5Hexs = append(sliceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte))) silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte)))
partInfos = append(partInfos, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte))) partInfos = append(partInfos, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte)))
sliceMd5.Reset()
}
if tmpF != nil {
if size > 0 && written != size {
return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, size)
}
_, err = tmpF.Seek(0, io.SeekStart)
if err != nil {
return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ")
}
} }
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil))) fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
sliceMd5Hex := fileMd5Hex sliceMd5Hex := fileMd5Hex
if size > sliceSize { if file.GetSize() > sliceSize {
sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(sliceMd5Hexs, "\n"))) sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
} }
fullUrl := UPLOAD_URL fullUrl := UPLOAD_URL
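For a file that spans more than one slice, the verification hash sent to the server is not the file MD5 itself but the MD5 of the uppercase per-slice digests joined with "\n", as the lines above show. Reproduced standalone:

package sketch

import (
	"crypto/md5"
	"encoding/hex"
	"strings"
)

// compositeSliceMd5 mirrors the sliceMd5Hex computation above for the
// multi-slice case; single-slice files reuse the file MD5 instead.
func compositeSliceMd5(sliceMd5Hexs []string) string {
	sum := md5.Sum([]byte(strings.Join(sliceMd5Hexs, "\n")))
	return strings.ToUpper(hex.EncodeToString(sum[:]))
}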
@ -720,12 +620,12 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
} }
// try to resume upload progress // try to resume upload progress
uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.getTokenInfo().SessionKey, fileMd5Hex) uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.tokenInfo.SessionKey, fileMd5Hex)
if !ok { if !ok {
// step 2: pre-upload // step 2: pre-upload
params := Params{ params := Params{
"parentFolderId": dstDir.GetID(), "parentFolderId": dstDir.GetID(),
"fileName": url.QueryEscape(safeName), "fileName": url.QueryEscape(file.GetName()),
"fileSize": fmt.Sprint(file.GetSize()), "fileSize": fmt.Sprint(file.GetSize()),
"fileMd5": fileMd5Hex, "fileMd5": fileMd5Hex,
"sliceSize": fmt.Sprint(sliceSize), "sliceSize": fmt.Sprint(sliceSize),
@ -774,7 +674,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
} }
// step 4: upload the slice // step 4: upload the slice
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(cache, offset, byteSize), isFamily) _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(tempFile, offset, byteSize), isFamily)
if err != nil { if err != nil {
return err return err
} }
@ -787,7 +687,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
if err = threadG.Wait(); err != nil { if err = threadG.Wait(); err != nil {
if errors.Is(err, context.Canceled) { if errors.Is(err, context.Canceled) {
uploadProgress.UploadParts = utils.SliceFilter(uploadProgress.UploadParts, func(s string) bool { return s != "" }) uploadProgress.UploadParts = utils.SliceFilter(uploadProgress.UploadParts, func(s string) bool { return s != "" })
base.SaveUploadProgress(y, uploadProgress, y.getTokenInfo().SessionKey, fileMd5Hex) base.SaveUploadProgress(y, uploadProgress, y.tokenInfo.SessionKey, fileMd5Hex)
} }
return nil, err return nil, err
} }
@ -856,15 +756,17 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo
// legacy upload; the family cloud does not support overwrite // legacy upload; the family cloud does not support overwrite
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) { func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
tempFile, fileMd5, err := stream.CacheFullInTempFileAndHash(file, utils.MD5) tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, err
}
fileMd5, err := utils.HashFile(utils.MD5, tempFile)
if err != nil { if err != nil {
return nil, err return nil, err
} }
rateLimited := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))
safeName := y.sanitizeName(file.GetName())
// create the upload session // create the upload session
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, safeName, fmt.Sprint(file.GetSize()), isFamily) uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()), isFamily)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -888,7 +790,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId) header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
} }
_, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimited, isFamily) _, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" { if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
return nil, err return nil, err
} }
@ -997,7 +899,8 @@ func (y *Cloud189PC) isLogin() bool {
} }
// create the family-cloud transfer folder // create the family-cloud transfer folder
func (y *Cloud189PC) createFamilyTransferFolder() error { func (y *Cloud189PC) createFamilyTransferFolder(count int) (*ring.Ring, error) {
folders := ring.New(count)
var rootFolder Cloud189Folder var rootFolder Cloud189Folder
_, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) { _, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) {
req.SetQueryParams(map[string]string{ req.SetQueryParams(map[string]string{
@ -1006,61 +909,81 @@ func (y *Cloud189PC) createFamilyTransferFolder() error {
}) })
}, &rootFolder, true) }, &rootFolder, true)
if err != nil { if err != nil {
return err return nil, err
} }
y.familyTransferFolder = &rootFolder
return nil folderCount := 0
// fetch existing folders
files, err := y.getFiles(context.TODO(), rootFolder.GetID(), true)
if err != nil {
return nil, err
}
for _, file := range files {
if folder, ok := file.(*Cloud189Folder); ok {
folders.Value = folder
folders = folders.Next()
folderCount++
}
}
// create new folders
for folderCount < count {
var newFolder Cloud189Folder
_, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"folderName": uuid.NewString(),
"familyId": y.FamilyID,
"parentId": rootFolder.GetID(),
})
}, &newFolder, true)
if err != nil {
return nil, err
}
folders.Value = &newFolder
folders = folders.Next()
folderCount++
}
return folders, nil
} }
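The right-hand createFamilyTransferFolder keeps the pre-created folders in a container/ring, and Put (seen earlier) takes the current folder and calls Next(), so concurrent transfers rotate across folders instead of colliding on names. The ring idiom by itself:

package sketch

import "container/ring"

// newRotation pre-fills a ring with values; a caller reads r.Value and
// then advances with r.Next(), exactly like the transfer-folder rotation.
func newRotation(names []string) *ring.Ring {
	r := ring.New(len(names))
	for _, name := range names {
		r.Value = name
		r = r.Next()
	}
	return r
}

// r := newRotation([]string{"a", "b", "c"})
// r.Value.(string) == "a"; after r = r.Next(), r.Value.(string) == "b"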
// clean up the transfer folder // clean up the transfer folder
func (y *Cloud189PC) cleanFamilyTransfer(ctx context.Context) error { func (y *Cloud189PC) cleanFamilyTransfer(ctx context.Context) error {
transferFolderId := y.familyTransferFolder.GetID() var tasks []BatchTaskInfo
for pageNum := 1; ; pageNum++ { r := y.familyTransferFolder
resp, err := y.getFilesWithPage(ctx, transferFolderId, true, pageNum, 100, "lastOpTime", "asc") for p := r.Next(); p != r; p = p.Next() {
folder := p.Value.(*Cloud189Folder)
files, err := y.getFiles(ctx, folder.GetID(), true)
if err != nil { if err != nil {
return err return err
} }
// all pages fetched, break out for _, file := range files {
if resp.FileListAO.Count == 0 {
break
}
var tasks []BatchTaskInfo
for i := 0; i < len(resp.FileListAO.FolderList); i++ {
folder := resp.FileListAO.FolderList[i]
tasks = append(tasks, BatchTaskInfo{
FileId: folder.GetID(),
FileName: folder.GetName(),
IsFolder: BoolToNumber(folder.IsDir()),
})
}
for i := 0; i < len(resp.FileListAO.FileList); i++ {
file := resp.FileListAO.FileList[i]
tasks = append(tasks, BatchTaskInfo{ tasks = append(tasks, BatchTaskInfo{
FileId: file.GetID(), FileId: file.GetID(),
FileName: file.GetName(), FileName: file.GetName(),
IsFolder: BoolToNumber(file.IsDir()), IsFolder: BoolToNumber(file.IsDir()),
}) })
} }
}
if len(tasks) > 0 { if len(tasks) > 0 {
// delete // delete
resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, tasks...) resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, tasks...)
if err != nil { if err != nil {
return err
}
err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
if err != nil {
return err
}
// permanently delete
resp, err = y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, tasks...)
if err != nil {
return err
}
err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
return err return err
} }
err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
if err != nil {
return err
}
// permanently delete
resp, err = y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, tasks...)
if err != nil {
return err
}
err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
return err
} }
return nil return nil
} }
@ -1085,7 +1008,7 @@ func (y *Cloud189PC) getFamilyID() (string, error) {
return "", fmt.Errorf("cannot get automatically,please input family_id") return "", fmt.Errorf("cannot get automatically,please input family_id")
} }
for _, info := range infos { for _, info := range infos {
if strings.Contains(y.getTokenInfo().LoginName, info.RemarkName) { if strings.Contains(y.tokenInfo.LoginName, info.RemarkName) {
return fmt.Sprint(info.FamilyID), nil return fmt.Sprint(info.FamilyID), nil
} }
} }
@ -1137,34 +1060,6 @@ func (y *Cloud189PC) SaveFamilyFileToPersonCloud(ctx context.Context, familyId s
} }
} }
// permanently delete a file
func (y *Cloud189PC) Delete(ctx context.Context, familyId string, srcObj model.Obj) error {
task := BatchTaskInfo{
FileId: srcObj.GetID(),
FileName: srcObj.GetName(),
IsFolder: BoolToNumber(srcObj.IsDir()),
}
// delete the source file
resp, err := y.CreateBatchTask("DELETE", familyId, "", nil, task)
if err != nil {
return err
}
err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
if err != nil {
return err
}
// empty the recycle bin
resp, err = y.CreateBatchTask("CLEAR_RECYCLE", familyId, "", nil, task)
if err != nil {
return err
}
err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
if err != nil {
return err
}
return nil
}
func (y *Cloud189PC) CreateBatchTask(aType string, familyID string, targetFolderId string, other map[string]string, taskInfos ...BatchTaskInfo) (*CreateBatchTaskResp, error) { func (y *Cloud189PC) CreateBatchTask(aType string, familyID string, targetFolderId string, other map[string]string, taskInfos ...BatchTaskInfo) (*CreateBatchTaskResp, error) {
var resp CreateBatchTaskResp var resp CreateBatchTaskResp
_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) { _, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
@ -1247,17 +1142,3 @@ func (y *Cloud189PC) WaitBatchTask(aType string, taskID string, t time.Duration)
time.Sleep(t) time.Sleep(t)
} }
} }
func (y *Cloud189PC) getTokenInfo() *AppSessionResp {
if y.ref != nil {
return y.ref.getTokenInfo()
}
return y.tokenInfo
}
func (y *Cloud189PC) getClient() *resty.Client {
if y.ref != nil {
return y.ref.getClient()
}
return y.client
}

View File

@ -3,7 +3,6 @@ package alias
import ( import (
"context" "context"
"errors" "errors"
stdpath "path"
"strings" "strings"
"github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/driver"
@ -111,62 +110,14 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
for _, dst := range dsts { for _, dst := range dsts {
link, err := d.link(ctx, dst, sub, args) link, err := d.link(ctx, dst, sub, args)
if err == nil { if err == nil {
if !args.Redirect && len(link.URL) > 0 {
// 正常情况下 多并发 仅支持返回URL的驱动
// alias套娃alias 可以让crypt、mega等驱动(不返回URL的) 支持并发
if d.DownloadConcurrency > 0 {
link.Concurrency = d.DownloadConcurrency
}
if d.DownloadPartSize > 0 {
link.PartSize = d.DownloadPartSize * utils.KB
}
}
return link, nil return link, nil
} }
} }
return nil, errs.ObjectNotFound return nil, errs.ObjectNotFound
} }
func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if !d.Writable {
return errs.PermissionDenied
}
reqPath, err := d.getReqPath(ctx, parentDir, true)
if err == nil {
return fs.MakeDir(ctx, stdpath.Join(*reqPath, dirName))
}
if errs.IsNotImplement(err) {
return errors.New("same-name dirs cannot make sub-dir")
}
return err
}
func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
if !d.Writable {
return errs.PermissionDenied
}
srcPath, err := d.getReqPath(ctx, srcObj, false)
if errs.IsNotImplement(err) {
return errors.New("same-name files cannot be moved")
}
if err != nil {
return err
}
dstPath, err := d.getReqPath(ctx, dstDir, true)
if errs.IsNotImplement(err) {
return errors.New("same-name dirs cannot be moved to")
}
if err != nil {
return err
}
return fs.Move(ctx, *srcPath, *dstPath)
}
func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) error { func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if !d.Writable { reqPath, err := d.getReqPath(ctx, srcObj)
return errs.PermissionDenied
}
reqPath, err := d.getReqPath(ctx, srcObj, false)
if err == nil { if err == nil {
return fs.Rename(ctx, *reqPath, newName) return fs.Rename(ctx, *reqPath, newName)
} }
@ -176,33 +127,8 @@ func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) er
return err return err
} }
func (d *Alias) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
if !d.Writable {
return errs.PermissionDenied
}
srcPath, err := d.getReqPath(ctx, srcObj, false)
if errs.IsNotImplement(err) {
return errors.New("same-name files cannot be copied")
}
if err != nil {
return err
}
dstPath, err := d.getReqPath(ctx, dstDir, true)
if errs.IsNotImplement(err) {
return errors.New("same-name dirs cannot be copied to")
}
if err != nil {
return err
}
_, err = fs.Copy(ctx, *srcPath, *dstPath)
return err
}
func (d *Alias) Remove(ctx context.Context, obj model.Obj) error { func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
if !d.Writable { reqPath, err := d.getReqPath(ctx, obj)
return errs.PermissionDenied
}
reqPath, err := d.getReqPath(ctx, obj, false)
if err == nil { if err == nil {
return fs.Remove(ctx, *reqPath) return fs.Remove(ctx, *reqPath)
} }
@ -212,110 +138,4 @@ func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
return err return err
} }
func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
if !d.Writable {
return errs.PermissionDenied
}
reqPath, err := d.getReqPath(ctx, dstDir, true)
if err == nil {
return fs.PutDirectly(ctx, *reqPath, s)
}
if errs.IsNotImplement(err) {
return errors.New("same-name dirs cannot be Put")
}
return err
}
func (d *Alias) PutURL(ctx context.Context, dstDir model.Obj, name, url string) error {
if !d.Writable {
return errs.PermissionDenied
}
reqPath, err := d.getReqPath(ctx, dstDir, true)
if err == nil {
return fs.PutURL(ctx, *reqPath, name, url)
}
if errs.IsNotImplement(err) {
return errors.New("same-name files cannot offline download")
}
return err
}
func (d *Alias) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
root, sub := d.getRootAndPath(obj.GetPath())
dsts, ok := d.pathMap[root]
if !ok {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
meta, err := d.getArchiveMeta(ctx, dst, sub, args)
if err == nil {
return meta, nil
}
}
return nil, errs.NotImplement
}
func (d *Alias) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
root, sub := d.getRootAndPath(obj.GetPath())
dsts, ok := d.pathMap[root]
if !ok {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
l, err := d.listArchive(ctx, dst, sub, args)
if err == nil {
return l, nil
}
}
return nil, errs.NotImplement
}
func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// alias的两个驱动一个支持驱动提取一个不支持如何兼容
// 如果访问的是不支持驱动提取的驱动内的压缩文件GetArchiveMeta就会返回errs.NotImplement提取URL前缀就会是/aeExtract就不会被调用
// 如果访问的是支持驱动提取的驱动内的压缩文件GetArchiveMeta就会返回有效值提取URL前缀就会是/adExtract就会被调用
root, sub := d.getRootAndPath(obj.GetPath())
dsts, ok := d.pathMap[root]
if !ok {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
link, err := d.extract(ctx, dst, sub, args)
if err == nil {
if !args.Redirect && len(link.URL) > 0 {
if d.DownloadConcurrency > 0 {
link.Concurrency = d.DownloadConcurrency
}
if d.DownloadPartSize > 0 {
link.PartSize = d.DownloadPartSize * utils.KB
}
}
return link, nil
}
}
return nil, errs.NotImplement
}
func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error {
if !d.Writable {
return errs.PermissionDenied
}
srcPath, err := d.getReqPath(ctx, srcObj, false)
if errs.IsNotImplement(err) {
return errors.New("same-name files cannot be decompressed")
}
if err != nil {
return err
}
dstPath, err := d.getReqPath(ctx, dstDir, true)
if errs.IsNotImplement(err) {
return errors.New("same-name dirs cannot be decompressed to")
}
if err != nil {
return err
}
_, err = fs.ArchiveDecompress(ctx, *srcPath, *dstPath, args)
return err
}
var _ driver.Driver = (*Alias)(nil) var _ driver.Driver = (*Alias)(nil)

View File

@ -9,18 +9,15 @@ type Addition struct {
// Usually one of two // Usually one of two
// driver.RootPath // driver.RootPath
// define other // define other
Paths string `json:"paths" required:"true" type:"text"` Paths string `json:"paths" required:"true" type:"text"`
ProtectSameName bool `json:"protect_same_name" default:"true" required:"false" help:"Protects same-name files from Delete or Rename"` ProtectSameName bool `json:"protect_same_name" default:"true" required:"false" help:"Protects same-name files from Delete or Rename"`
DownloadConcurrency int `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"`
DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
Writable bool `json:"writable" type:"bool" default:"false"`
} }
var config = driver.Config{ var config = driver.Config{
Name: "Alias", Name: "Alias",
LocalSort: true, LocalSort: true,
NoCache: true, NoCache: true,
NoUpload: false, NoUpload: true,
DefaultRoot: "/", DefaultRoot: "/",
ProxyRangeOption: true, ProxyRangeOption: true,
} }

View File

@ -3,15 +3,12 @@ package alias
import ( import (
"context" "context"
"fmt" "fmt"
"net/url"
stdpath "path" stdpath "path"
"strings" "strings"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/fs" "github.com/alist-org/alist/v3/internal/fs"
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/sign" "github.com/alist-org/alist/v3/internal/sign"
"github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common" "github.com/alist-org/alist/v3/server/common"
@ -65,7 +62,6 @@ func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Ob
Size: obj.GetSize(), Size: obj.GetSize(),
Modified: obj.ModTime(), Modified: obj.ModTime(),
IsFolder: obj.IsDir(), IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}, nil }, nil
} }
@ -98,15 +94,10 @@ func (d *Alias) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([
func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs) (*model.Link, error) { func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs) (*model.Link, error) {
reqPath := stdpath.Join(dst, sub) reqPath := stdpath.Join(dst, sub)
// 参考 crypt 驱动 storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{})
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if _, ok := storage.(*Alias); !ok && !args.Redirect {
link, _, err := op.Link(ctx, storage, reqActualPath, args)
return link, err
}
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true}) _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err != nil { if err != nil {
return nil, err return nil, err
@ -123,13 +114,13 @@ func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs)
} }
return link, nil return link, nil
} }
link, _, err := op.Link(ctx, storage, reqActualPath, args) link, _, err := fs.Link(ctx, reqPath, args)
return link, err return link, err
} }
func (d *Alias) getReqPath(ctx context.Context, obj model.Obj, isParent bool) (*string, error) { func (d *Alias) getReqPath(ctx context.Context, obj model.Obj) (*string, error) {
root, sub := d.getRootAndPath(obj.GetPath()) root, sub := d.getRootAndPath(obj.GetPath())
if sub == "" && !isParent { if sub == "" {
return nil, errs.NotSupport return nil, errs.NotSupport
} }
dsts, ok := d.pathMap[root] dsts, ok := d.pathMap[root]
@ -158,68 +149,3 @@ func (d *Alias) getReqPath(ctx context.Context, obj model.Obj, isParent bool) (*
} }
return reqPath, nil return reqPath, nil
} }
func (d *Alias) getArchiveMeta(ctx context.Context, dst, sub string, args model.ArchiveArgs) (model.ArchiveMeta, error) {
reqPath := stdpath.Join(dst, sub)
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
}
if _, ok := storage.(driver.ArchiveReader); ok {
return op.GetArchiveMeta(ctx, storage, reqActualPath, model.ArchiveMetaArgs{
ArchiveArgs: args,
Refresh: true,
})
}
return nil, errs.NotImplement
}
func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) ([]model.Obj, error) {
reqPath := stdpath.Join(dst, sub)
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
}
if _, ok := storage.(driver.ArchiveReader); ok {
return op.ListArchive(ctx, storage, reqActualPath, model.ArchiveListArgs{
ArchiveInnerArgs: args,
Refresh: true,
})
}
return nil, errs.NotImplement
}
func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
reqPath := stdpath.Join(dst, sub)
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
}
if _, ok := storage.(driver.ArchiveReader); ok {
if _, ok := storage.(*Alias); !ok && !args.Redirect {
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
return link, err
}
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err != nil {
return nil, err
}
if common.ShouldProxy(storage, stdpath.Base(sub)) {
link := &model.Link{
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
common.GetApiUrl(args.HttpReq),
utils.EncodePath(reqPath, true),
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
sign.SignArchive(reqPath)),
}
if args.HttpReq != nil && d.ProxyRange {
link.RangeReadCloser = common.NoProxyRange
}
return link, nil
}
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
return link, err
}
return nil, errs.NotImplement
}

View File

@ -5,14 +5,12 @@ import (
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"net/url"
"path" "path"
"strings" "strings"
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common" "github.com/alist-org/alist/v3/server/common"
@ -36,7 +34,7 @@ func (d *AListV3) GetAddition() driver.Additional {
func (d *AListV3) Init(ctx context.Context) error { func (d *AListV3) Init(ctx context.Context) error {
d.Addition.Address = strings.TrimSuffix(d.Addition.Address, "/") d.Addition.Address = strings.TrimSuffix(d.Addition.Address, "/")
var resp common.Resp[MeResp] var resp common.Resp[MeResp]
_, _, err := d.request("/me", http.MethodGet, func(req *resty.Request) { _, err := d.request("/me", http.MethodGet, func(req *resty.Request) {
req.SetResult(&resp) req.SetResult(&resp)
}) })
if err != nil { if err != nil {
@ -50,15 +48,15 @@ func (d *AListV3) Init(ctx context.Context) error {
} }
} }
// re-get the user info // re-get the user info
_, _, err = d.request("/me", http.MethodGet, func(req *resty.Request) { _, err = d.request("/me", http.MethodGet, func(req *resty.Request) {
req.SetResult(&resp) req.SetResult(&resp)
}) })
if err != nil { if err != nil {
return err return err
} }
if utils.SliceContains(resp.Data.Role, model.GUEST) { if resp.Data.Role == model.GUEST {
u := d.Address + "/api/public/settings" url := d.Address + "/api/public/settings"
res, err := base.RestyClient.R().Get(u) res, err := base.RestyClient.R().Get(url)
if err != nil { if err != nil {
return err return err
} }
@ -76,7 +74,7 @@ func (d *AListV3) Drop(ctx context.Context) error {
func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
var resp common.Resp[FsListResp] var resp common.Resp[FsListResp]
_, _, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) { _, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ListReq{ req.SetResult(&resp).SetBody(ListReq{
PageReq: model.PageReq{ PageReq: model.PageReq{
Page: 1, Page: 1,
@ -118,7 +116,7 @@ func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
userAgent = base.UserAgent userAgent = base.UserAgent
} }
} }
_, _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) { _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(FsGetReq{ req.SetResult(&resp).SetBody(FsGetReq{
Path: file.GetPath(), Path: file.GetPath(),
Password: d.MetaPassword, Password: d.MetaPassword,
@ -133,7 +131,7 @@ func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
} }
func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
_, _, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) { _, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) {
req.SetBody(MkdirOrLinkReq{ req.SetBody(MkdirOrLinkReq{
Path: path.Join(parentDir.GetPath(), dirName), Path: path.Join(parentDir.GetPath(), dirName),
}) })
@ -142,7 +140,7 @@ func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName stri
} }
func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error { func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
_, _, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) { _, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) {
req.SetBody(MoveCopyReq{ req.SetBody(MoveCopyReq{
SrcDir: path.Dir(srcObj.GetPath()), SrcDir: path.Dir(srcObj.GetPath()),
DstDir: dstDir.GetPath(), DstDir: dstDir.GetPath(),
@ -153,7 +151,7 @@ func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
} }
func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string) error { func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
_, _, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) { _, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) {
req.SetBody(RenameReq{ req.SetBody(RenameReq{
Path: srcObj.GetPath(), Path: srcObj.GetPath(),
Name: newName, Name: newName,
@ -163,7 +161,7 @@ func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string)
} }
func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
_, _, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) { _, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) {
req.SetBody(MoveCopyReq{ req.SetBody(MoveCopyReq{
SrcDir: path.Dir(srcObj.GetPath()), SrcDir: path.Dir(srcObj.GetPath()),
DstDir: dstDir.GetPath(), DstDir: dstDir.GetPath(),
@ -174,7 +172,7 @@ func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
} }
func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error { func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
_, _, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) { _, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) {
req.SetBody(RemoveReq{ req.SetBody(RemoveReq{
Dir: path.Dir(obj.GetPath()), Dir: path.Dir(obj.GetPath()),
Names: []string{obj.GetName()}, Names: []string{obj.GetName()},
@ -183,29 +181,16 @@ func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
return err return err
} }
func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", stream)
Reader: s,
UpdateProgress: up,
})
req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", reader)
if err != nil { if err != nil {
return err return err
} }
req.Header.Set("Authorization", d.Token) req.Header.Set("Authorization", d.Token)
req.Header.Set("File-Path", path.Join(dstDir.GetPath(), s.GetName())) req.Header.Set("File-Path", path.Join(dstDir.GetPath(), stream.GetName()))
req.Header.Set("Password", d.MetaPassword) req.Header.Set("Password", d.MetaPassword)
if md5 := s.GetHash().GetHash(utils.MD5); len(md5) > 0 {
req.Header.Set("X-File-Md5", md5)
}
if sha1 := s.GetHash().GetHash(utils.SHA1); len(sha1) > 0 {
req.Header.Set("X-File-Sha1", sha1)
}
if sha256 := s.GetHash().GetHash(utils.SHA256); len(sha256) > 0 {
req.Header.Set("X-File-Sha256", sha256)
}
req.ContentLength = s.GetSize() req.ContentLength = stream.GetSize()
// client := base.NewHttpClient() // client := base.NewHttpClient()
// client.Timeout = time.Hour * 6 // client.Timeout = time.Hour * 6
res, err := base.HttpClient.Do(req) res, err := base.HttpClient.Do(req)
@ -234,127 +219,6 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
return nil return nil
} }
func (d *AListV3) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
if !d.ForwardArchiveReq {
return nil, errs.NotImplement
}
var resp common.Resp[ArchiveMetaResp]
_, code, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ArchiveMetaReq{
ArchivePass: args.Password,
Password: d.MetaPassword,
Path: obj.GetPath(),
Refresh: false,
})
})
if code == 202 {
return nil, errs.WrongArchivePassword
}
if err != nil {
return nil, err
}
var tree []model.ObjTree
if resp.Data.Content != nil {
tree = make([]model.ObjTree, 0, len(resp.Data.Content))
for _, content := range resp.Data.Content {
tree = append(tree, &content)
}
}
return &model.ArchiveMetaInfo{
Comment: resp.Data.Comment,
Encrypted: resp.Data.Encrypted,
Tree: tree,
}, nil
}
func (d *AListV3) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
if !d.ForwardArchiveReq {
return nil, errs.NotImplement
}
var resp common.Resp[ArchiveListResp]
_, code, err := d.request("/fs/archive/list", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ArchiveListReq{
ArchiveMetaReq: ArchiveMetaReq{
ArchivePass: args.Password,
Password: d.MetaPassword,
Path: obj.GetPath(),
Refresh: false,
},
PageReq: model.PageReq{
Page: 1,
PerPage: 0,
},
InnerPath: args.InnerPath,
})
})
if code == 202 {
return nil, errs.WrongArchivePassword
}
if err != nil {
return nil, err
}
var files []model.Obj
for _, f := range resp.Data.Content {
file := model.ObjThumb{
Object: model.Object{
Name: f.Name,
Modified: f.Modified,
Ctime: f.Created,
Size: f.Size,
IsFolder: f.IsDir,
HashInfo: utils.FromString(f.HashInfo),
},
Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
}
files = append(files, &file)
}
return files, nil
}
func (d *AListV3) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
if !d.ForwardArchiveReq {
return nil, errs.NotSupport
}
var resp common.Resp[ArchiveMetaResp]
_, _, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ArchiveMetaReq{
ArchivePass: args.Password,
Password: d.MetaPassword,
Path: obj.GetPath(),
Refresh: false,
})
})
if err != nil {
return nil, err
}
return &model.Link{
URL: fmt.Sprintf("%s?inner=%s&pass=%s&sign=%s",
resp.Data.RawURL,
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
resp.Data.Sign),
}, nil
}
func (d *AListV3) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error {
if !d.ForwardArchiveReq {
return errs.NotImplement
}
dir, name := path.Split(srcObj.GetPath())
_, _, err := d.request("/fs/archive/decompress", http.MethodPost, func(req *resty.Request) {
req.SetBody(DecompressReq{
ArchivePass: args.Password,
CacheFull: args.CacheFull,
DstDir: dstDir.GetPath(),
InnerPath: args.InnerPath,
Name: []string{name},
PutIntoNewDir: args.PutIntoNewDir,
SrcDir: dir,
})
})
return err
}
//func (d *AList) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { //func (d *AList) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport // return nil, errs.NotSupport
//} //}

View File

@ -7,13 +7,12 @@ import (
type Addition struct { type Addition struct {
driver.RootPath driver.RootPath
Address string `json:"url" required:"true"` Address string `json:"url" required:"true"`
MetaPassword string `json:"meta_password"` MetaPassword string `json:"meta_password"`
Username string `json:"username"` Username string `json:"username"`
Password string `json:"password"` Password string `json:"password"`
Token string `json:"token"` Token string `json:"token"`
PassUAToUpsteam bool `json:"pass_ua_to_upsteam" default:"true"` PassUAToUpsteam bool `json:"pass_ua_to_upsteam" default:"true"`
ForwardArchiveReq bool `json:"forward_archive_requests" default:"true"`
} }
var config = driver.Config{ var config = driver.Config{

View File

@ -1,11 +1,9 @@
package alist_v3 package alist_v3
import ( import (
"encoding/json"
"time" "time"
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
) )
type ListReq struct { type ListReq struct {
@ -73,113 +71,13 @@ type LoginResp struct {
} }
type MeResp struct { type MeResp struct {
Id int `json:"id"` Id int `json:"id"`
Username string `json:"username"` Username string `json:"username"`
Password string `json:"password"` Password string `json:"password"`
BasePath string `json:"base_path"` BasePath string `json:"base_path"`
Role IntSlice `json:"role"` Role int `json:"role"`
Disabled bool `json:"disabled"` Disabled bool `json:"disabled"`
Permission int `json:"permission"` Permission int `json:"permission"`
SsoId string `json:"sso_id"` SsoId string `json:"sso_id"`
Otp bool `json:"otp"` Otp bool `json:"otp"`
}
type ArchiveMetaReq struct {
ArchivePass string `json:"archive_pass"`
Password string `json:"password"`
Path string `json:"path"`
Refresh bool `json:"refresh"`
}
type TreeResp struct {
ObjResp
Children []TreeResp `json:"children"`
hashCache *utils.HashInfo
}
func (t *TreeResp) GetSize() int64 {
return t.Size
}
func (t *TreeResp) GetName() string {
return t.Name
}
func (t *TreeResp) ModTime() time.Time {
return t.Modified
}
func (t *TreeResp) CreateTime() time.Time {
return t.Created
}
func (t *TreeResp) IsDir() bool {
return t.ObjResp.IsDir
}
func (t *TreeResp) GetHash() utils.HashInfo {
return utils.FromString(t.HashInfo)
}
func (t *TreeResp) GetID() string {
return ""
}
func (t *TreeResp) GetPath() string {
return ""
}
func (t *TreeResp) GetChildren() []model.ObjTree {
ret := make([]model.ObjTree, 0, len(t.Children))
for _, child := range t.Children {
ret = append(ret, &child)
}
return ret
}
func (t *TreeResp) Thumb() string {
return t.ObjResp.Thumb
}
type ArchiveMetaResp struct {
Comment string `json:"comment"`
Encrypted bool `json:"encrypted"`
Content []TreeResp `json:"content"`
RawURL string `json:"raw_url"`
Sign string `json:"sign"`
}
type ArchiveListReq struct {
model.PageReq
ArchiveMetaReq
InnerPath string `json:"inner_path"`
}
type ArchiveListResp struct {
Content []ObjResp `json:"content"`
Total int64 `json:"total"`
}
type DecompressReq struct {
ArchivePass string `json:"archive_pass"`
CacheFull bool `json:"cache_full"`
DstDir string `json:"dst_dir"`
InnerPath string `json:"inner_path"`
Name []string `json:"name"`
PutIntoNewDir bool `json:"put_into_new_dir"`
SrcDir string `json:"src_dir"`
}
type IntSlice []int
func (s *IntSlice) UnmarshalJSON(data []byte) error {
if len(data) > 0 && data[0] == '[' {
return json.Unmarshal(data, (*[]int)(s))
}
var single int
if err := json.Unmarshal(data, &single); err != nil {
return err
}
*s = []int{single}
return nil
} }

View File

@ -17,7 +17,7 @@ func (d *AListV3) login() error {
return nil return nil
} }
var resp common.Resp[LoginResp] var resp common.Resp[LoginResp]
_, _, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) { _, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(base.Json{ req.SetResult(&resp).SetBody(base.Json{
"username": d.Username, "username": d.Username,
"password": d.Password, "password": d.Password,
@ -31,7 +31,7 @@ func (d *AListV3) login() error {
return nil return nil
} }
func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, int, error) { func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
url := d.Address + "/api" + api url := d.Address + "/api" + api
req := base.RestyClient.R() req := base.RestyClient.R()
req.SetHeader("Authorization", d.Token) req.SetHeader("Authorization", d.Token)
@ -40,26 +40,22 @@ func (d *AListV3) request(api, method string, callback base.ReqCallback, retry .
} }
res, err := req.Execute(method, url) res, err := req.Execute(method, url)
if err != nil { if err != nil {
code := 0 return nil, err
if res != nil {
code = res.StatusCode()
}
return nil, code, err
} }
log.Debugf("[alist_v3] response body: %s", res.String()) log.Debugf("[alist_v3] response body: %s", res.String())
if res.StatusCode() >= 400 { if res.StatusCode() >= 400 {
return nil, res.StatusCode(), fmt.Errorf("request failed, status: %s", res.Status()) return nil, fmt.Errorf("request failed, status: %s", res.Status())
} }
code := utils.Json.Get(res.Body(), "code").ToInt() code := utils.Json.Get(res.Body(), "code").ToInt()
if code != 200 { if code != 200 {
if (code == 401 || code == 403) && !utils.IsBool(retry...) { if (code == 401 || code == 403) && !utils.IsBool(retry...) {
err = d.login() err = d.login()
if err != nil { if err != nil {
return nil, code, err return nil, err
} }
return d.request(api, method, callback, true) return d.request(api, method, callback, true)
} }
return nil, code, fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString()) return nil, fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
} }
return res.Body(), 200, nil return res.Body(), nil
} }

View File

@ -14,12 +14,13 @@ import (
"os" "os"
"time" "time"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/cron" "github.com/alist-org/alist/v3/pkg/cron"
"github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
@ -55,7 +56,7 @@ func (d *AliDrive) Init(ctx context.Context) error {
if err != nil { if err != nil {
return err return err
} }
d.DriveId = d.Addition.DeviceID d.DriveId = utils.Json.Get(res, "default_drive_id").ToString()
d.UserID = utils.Json.Get(res, "user_id").ToString() d.UserID = utils.Json.Get(res, "user_id").ToString()
d.cron = cron.NewCron(time.Hour * 2) d.cron = cron.NewCron(time.Hour * 2)
d.cron.Do(func() { d.cron.Do(func() {
@ -193,10 +194,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
} }
if d.RapidUpload { if d.RapidUpload {
buf := bytes.NewBuffer(make([]byte, 0, 1024)) buf := bytes.NewBuffer(make([]byte, 0, 1024))
_, err := utils.CopyWithBufferN(buf, file, 1024) utils.CopyWithBufferN(buf, file, 1024)
if err != nil {
return err
}
reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes()) reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
if localFile != nil { if localFile != nil {
if _, err := localFile.Seek(0, io.SeekStart); err != nil { if _, err := localFile.Seek(0, io.SeekStart); err != nil {
@ -288,7 +286,6 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
file.Reader = localFile file.Reader = localFile
} }
rateLimited := driver.NewLimitedUploadStream(ctx, file)
for i, partInfo := range resp.PartInfoList { for i, partInfo := range resp.PartInfoList {
if utils.IsCanceled(ctx) { if utils.IsCanceled(ctx) {
return ctx.Err() return ctx.Err()
@ -297,7 +294,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
if d.InternalUpload { if d.InternalUpload {
url = partInfo.InternalUploadUrl url = partInfo.InternalUploadUrl
} }
req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT)) req, err := http.NewRequest("PUT", url, io.LimitReader(file, DEFAULT))
if err != nil { if err != nil {
return err return err
} }
@ -306,7 +303,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
if err != nil { if err != nil {
return err return err
} }
_ = res.Body.Close() res.Body.Close()
if count > 0 { if count > 0 {
up(float64(i) * 100 / float64(count)) up(float64(i) * 100 / float64(count))
} }

View File

@ -7,8 +7,8 @@ import (
type Addition struct { type Addition struct {
driver.RootID driver.RootID
RefreshToken string `json:"refresh_token" required:"true"` RefreshToken string `json:"refresh_token" required:"true"`
DeviceID string `json:"device_id" required:"true"` //DeviceID string `json:"device_id" required:"true"`
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"` OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"` OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
RapidUpload bool `json:"rapid_upload"` RapidUpload bool `json:"rapid_upload"`

View File

@ -5,7 +5,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"net/http" "net/http"
"path/filepath"
"time" "time"
"github.com/Xhofe/rateg" "github.com/Xhofe/rateg"
@ -15,18 +14,17 @@ import (
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
) )
type AliyundriveOpen struct { type AliyundriveOpen struct {
model.Storage model.Storage
Addition Addition
base string
DriveId string DriveId string
limitList func(ctx context.Context, data base.Json) (*Files, error) limitList func(ctx context.Context, data base.Json) (*Files, error)
limitLink func(ctx context.Context, file model.Obj) (*model.Link, error) limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
ref *AliyundriveOpen
} }
func (d *AliyundriveOpen) Config() driver.Config { func (d *AliyundriveOpen) Config() driver.Config {
@ -60,32 +58,10 @@ func (d *AliyundriveOpen) Init(ctx context.Context) error {
return nil return nil
} }
func (d *AliyundriveOpen) InitReference(storage driver.Driver) error {
refStorage, ok := storage.(*AliyundriveOpen)
if ok {
d.ref = refStorage
return nil
}
return errs.NotSupport
}
func (d *AliyundriveOpen) Drop(ctx context.Context) error { func (d *AliyundriveOpen) Drop(ctx context.Context) error {
d.ref = nil
return nil return nil
} }
// GetRoot implements the driver.GetRooter interface to properly set up the root object
func (d *AliyundriveOpen) GetRoot(ctx context.Context) (model.Obj, error) {
return &model.Object{
ID: d.RootFolderID,
Path: "/",
Name: "root",
Size: 0,
Modified: d.Modified,
IsFolder: true,
}, nil
}
func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if d.limitList == nil { if d.limitList == nil {
return nil, fmt.Errorf("driver not init") return nil, fmt.Errorf("driver not init")
@ -94,17 +70,9 @@ func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.Li
if err != nil { if err != nil {
return nil, err return nil, err
} }
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
objs, err := utils.SliceConvert(files, func(src File) (model.Obj, error) { return fileToObj(src), nil
obj := fileToObj(src)
// Set the correct path for the object
if dir.GetPath() != "" {
obj.Path = filepath.Join(dir.GetPath(), obj.GetName())
}
return obj, nil
}) })
return objs, err
} }
func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link, error) { func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link, error) {
@ -154,16 +122,7 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN
if err != nil { if err != nil {
return nil, err return nil, err
} }
obj := fileToObj(newDir) return fileToObj(newDir), nil
// Set the correct Path for the returned directory object
if parentDir.GetPath() != "" {
obj.Path = filepath.Join(parentDir.GetPath(), dirName)
} else {
obj.Path = "/" + dirName
}
return obj, nil
} }
func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) { func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
@ -173,24 +132,20 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (m
"drive_id": d.DriveId, "drive_id": d.DriveId,
"file_id": srcObj.GetID(), "file_id": srcObj.GetID(),
"to_parent_file_id": dstDir.GetID(), "to_parent_file_id": dstDir.GetID(),
"check_name_mode": "ignore", // optional:ignore,auto_rename,refuse "check_name_mode": "refuse", // optional:ignore,auto_rename,refuse
//"new_name": "newName", // The new name to use when a file of the same name exists //"new_name": "newName", // The new name to use when a file of the same name exists
}).SetResult(&resp) }).SetResult(&resp)
}) })
if err != nil { if err != nil {
return nil, err return nil, err
} }
if resp.Exist {
return nil, errors.New("existence of files with the same name")
}
if srcObj, ok := srcObj.(*model.ObjThumb); ok { if srcObj, ok := srcObj.(*model.ObjThumb); ok {
srcObj.ID = resp.FileID srcObj.ID = resp.FileID
srcObj.Modified = time.Now() srcObj.Modified = time.Now()
srcObj.Path = filepath.Join(dstDir.GetPath(), srcObj.GetName())
// Check for duplicate files in the destination directory
if err := d.removeDuplicateFiles(ctx, dstDir.GetPath(), srcObj.GetName(), srcObj.GetID()); err != nil {
// Only log a warning instead of returning an error since the move operation has already completed successfully
log.Warnf("Failed to remove duplicate files after move: %v", err)
}
return srcObj, nil return srcObj, nil
} }
return nil, nil return nil, nil
@ -208,47 +163,19 @@ func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName
if err != nil { if err != nil {
return nil, err return nil, err
} }
return fileToObj(newFile), nil
// Check for duplicate files in the parent directory
parentPath := filepath.Dir(srcObj.GetPath())
if err := d.removeDuplicateFiles(ctx, parentPath, newName, newFile.FileId); err != nil {
// Only log a warning instead of returning an error since the rename operation has already completed successfully
log.Warnf("Failed to remove duplicate files after rename: %v", err)
}
obj := fileToObj(newFile)
// Set the correct Path for the renamed object
if parentPath != "" && parentPath != "." {
obj.Path = filepath.Join(parentPath, newName)
} else {
obj.Path = "/" + newName
}
return obj, nil
} }
func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
var resp MoveOrCopyResp
_, err := d.request("/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) { _, err := d.request("/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{ req.SetBody(base.Json{
"drive_id": d.DriveId, "drive_id": d.DriveId,
"file_id": srcObj.GetID(), "file_id": srcObj.GetID(),
"to_parent_file_id": dstDir.GetID(), "to_parent_file_id": dstDir.GetID(),
"auto_rename": false, "auto_rename": true,
}).SetResult(&resp) })
}) })
if err != nil { return err
return err
}
// Check for duplicate files in the destination directory
if err := d.removeDuplicateFiles(ctx, dstDir.GetPath(), srcObj.GetName(), resp.FileID); err != nil {
// Only log a warning instead of returning an error since the copy operation has already completed successfully
log.Warnf("Failed to remove duplicate files after copy: %v", err)
}
return nil
} }
func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error { func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
@ -266,18 +193,7 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
} }
func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
obj, err := d.upload(ctx, dstDir, stream, up) return d.upload(ctx, dstDir, stream, up)
// Set the correct Path for the returned file object
if obj != nil && obj.GetPath() == "" {
if dstDir.GetPath() != "" {
if objWithPath, ok := obj.(model.SetPath); ok {
objWithPath.SetPath(filepath.Join(dstDir.GetPath(), obj.GetName()))
}
}
}
return obj, err
} }
func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
@ -309,4 +225,3 @@ var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
var _ driver.MoveResult = (*AliyundriveOpen)(nil) var _ driver.MoveResult = (*AliyundriveOpen)(nil)
var _ driver.RenameResult = (*AliyundriveOpen)(nil) var _ driver.RenameResult = (*AliyundriveOpen)(nil)
var _ driver.PutResult = (*AliyundriveOpen)(nil) var _ driver.PutResult = (*AliyundriveOpen)(nil)
var _ driver.GetRooter = (*AliyundriveOpen)(nil)

View File

@ -6,12 +6,12 @@ import (
) )
type Addition struct { type Addition struct {
DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"resource"` DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"default"`
driver.RootID driver.RootID
RefreshToken string `json:"refresh_token" required:"true"` RefreshToken string `json:"refresh_token" required:"true"`
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"` OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"` OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
OauthTokenURL string `json:"oauth_token_url" default:"https://api.alistgo.com/alist/ali_open/token"` OauthTokenURL string `json:"oauth_token_url" default:"https://api.nn.ci/alist/ali_open/token"`
ClientID string `json:"client_id" required:"false" help:"Keep it empty if you don't have one"` ClientID string `json:"client_id" required:"false" help:"Keep it empty if you don't have one"`
ClientSecret string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"` ClientSecret string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
RemoveWay string `json:"remove_way" required:"true" type:"select" options:"trash,delete"` RemoveWay string `json:"remove_way" required:"true" type:"select" options:"trash,delete"`
@ -32,10 +32,11 @@ var config = driver.Config{
DefaultRoot: "root", DefaultRoot: "root",
NoOverwriteUpload: true, NoOverwriteUpload: true,
} }
var API_URL = "https://openapi.alipan.com"
func init() { func init() {
op.RegisterDriver(func() driver.Driver { op.RegisterDriver(func() driver.Driver {
return &AliyundriveOpen{} return &AliyundriveOpen{
base: "https://openapi.alipan.com",
}
}) })
} }

View File

@ -1,6 +1,7 @@
package aliyundrive_open package aliyundrive_open
import ( import (
"bytes"
"context" "context"
"encoding/base64" "encoding/base64"
"fmt" "fmt"
@ -14,7 +15,6 @@ import (
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
streamPkg "github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/http_range" "github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils"
"github.com/avast/retry-go" "github.com/avast/retry-go"
@ -77,7 +77,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
if err != nil { if err != nil {
return err return err
} }
_ = res.Body.Close() res.Body.Close()
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict { if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
return fmt.Errorf("upload status: %d", res.StatusCode) return fmt.Errorf("upload status: %d", res.StatusCode)
} }
@ -126,24 +126,21 @@ func getProofRange(input string, size int64) (*ProofRange, error) {
} }
func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) { func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) {
proofRange, err := getProofRange(d.getAccessToken(), stream.GetSize()) proofRange, err := getProofRange(d.AccessToken, stream.GetSize())
if err != nil { if err != nil {
return "", err return "", err
} }
length := proofRange.End - proofRange.Start length := proofRange.End - proofRange.Start
buf := bytes.NewBuffer(make([]byte, 0, length))
reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length}) reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length})
if err != nil { if err != nil {
return "", err return "", err
} }
buf := make([]byte, length) _, err = utils.CopyWithBufferN(buf, reader, length)
n, err := io.ReadFull(reader, buf)
if err == io.ErrUnexpectedEOF {
return "", fmt.Errorf("can't read data, expected=%d, got=%d", len(buf), n)
}
if err != nil { if err != nil {
return "", err return "", err
} }
return base64.StdEncoding.EncodeToString(buf), nil return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
} }
func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
@ -186,18 +183,25 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) { _, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp) req.SetBody(createData).SetResult(&createResp)
}) })
var tmpF model.File
if err != nil { if err != nil {
if e.Code != "PreHashMatched" || !rapidUpload { if e.Code != "PreHashMatched" || !rapidUpload {
return nil, err return nil, err
} }
log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload") log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload")
hash := stream.GetHash().GetHash(utils.SHA1) hi := stream.GetHash()
if len(hash) != utils.SHA1.Width { hash := hi.GetHash(utils.SHA1)
_, hash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA1) if len(hash) <= 0 {
tmpF, err = stream.CacheFullInTempFile()
if err != nil { if err != nil {
return nil, err return nil, err
} }
hash, err = utils.HashFile(utils.SHA1, tmpF)
if err != nil {
return nil, err
}
} }
delete(createData, "pre_hash") delete(createData, "pre_hash")
@ -247,9 +251,8 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
rd = utils.NewMultiReadable(srd) rd = utils.NewMultiReadable(srd)
} }
err = retry.Do(func() error { err = retry.Do(func() error {
_ = rd.Reset() rd.Reset()
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd) return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
}, },
retry.Attempts(3), retry.Attempts(3),
retry.DelayType(retry.BackOffDelay), retry.DelayType(retry.BackOffDelay),

View File

@ -10,7 +10,6 @@ import (
"time" "time"
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op" "github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
@ -20,7 +19,7 @@ import (
// do others that not defined in Driver interface // do others that not defined in Driver interface
func (d *AliyundriveOpen) _refreshToken() (string, string, error) { func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
url := API_URL + "/oauth/access_token" url := d.base + "/oauth/access_token"
if d.OauthTokenURL != "" && d.ClientID == "" { if d.OauthTokenURL != "" && d.ClientID == "" {
url = d.OauthTokenURL url = d.OauthTokenURL
} }
@ -75,9 +74,6 @@ func getSub(token string) (string, error) {
} }
func (d *AliyundriveOpen) refreshToken() error { func (d *AliyundriveOpen) refreshToken() error {
if d.ref != nil {
return d.ref.refreshToken()
}
refresh, access, err := d._refreshToken() refresh, access, err := d._refreshToken()
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
if err == nil { if err == nil {
@ -104,7 +100,7 @@ func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback,
func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) { func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
req := base.RestyClient.R() req := base.RestyClient.R()
// TODO check whether access_token is expired // TODO check whether access_token is expired
req.SetHeader("Authorization", "Bearer "+d.getAccessToken()) req.SetHeader("Authorization", "Bearer "+d.AccessToken)
if method == http.MethodPost { if method == http.MethodPost {
req.SetHeader("Content-Type", "application/json") req.SetHeader("Content-Type", "application/json")
} }
@ -113,7 +109,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
} }
var e ErrResp var e ErrResp
req.SetError(&e) req.SetError(&e)
res, err := req.Execute(method, API_URL+uri) res, err := req.Execute(method, d.base+uri)
if err != nil { if err != nil {
if res != nil { if res != nil {
log.Errorf("[aliyundrive_open] request error: %s", res.String()) log.Errorf("[aliyundrive_open] request error: %s", res.String())
@ -122,7 +118,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
} }
isRetry := len(retry) > 0 && retry[0] isRetry := len(retry) > 0 && retry[0]
if e.Code != "" { if e.Code != "" {
if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.getAccessToken() == "") { if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.AccessToken == "") {
err = d.refreshToken() err = d.refreshToken()
if err != nil { if err != nil {
return nil, err, nil return nil, err, nil
@ -180,43 +176,3 @@ func getNowTime() (time.Time, string) {
nowTimeStr := nowTime.Format("2006-01-02T15:04:05.000Z") nowTimeStr := nowTime.Format("2006-01-02T15:04:05.000Z")
return nowTime, nowTimeStr return nowTime, nowTimeStr
} }
func (d *AliyundriveOpen) getAccessToken() string {
if d.ref != nil {
return d.ref.getAccessToken()
}
return d.AccessToken
}
// Remove duplicate files with the same name in the given directory path,
// preserving the file with the given skipID if provided
func (d *AliyundriveOpen) removeDuplicateFiles(ctx context.Context, parentPath string, fileName string, skipID string) error {
// Handle empty path (root directory) case
if parentPath == "" {
parentPath = "/"
}
// List all files in the parent directory
files, err := op.List(ctx, d, parentPath, model.ListArgs{})
if err != nil {
return err
}
// Find all files with the same name
var duplicates []model.Obj
for _, file := range files {
if file.GetName() == fileName && file.GetID() != skipID {
duplicates = append(duplicates, file)
}
}
// Remove all duplicates files, except the file with the given ID
for _, file := range duplicates {
err := d.Remove(ctx, file)
if err != nil {
return err
}
}
return nil
}

View File

@ -2,11 +2,9 @@ package drivers
import ( import (
_ "github.com/alist-org/alist/v3/drivers/115" _ "github.com/alist-org/alist/v3/drivers/115"
_ "github.com/alist-org/alist/v3/drivers/115_open"
_ "github.com/alist-org/alist/v3/drivers/115_share" _ "github.com/alist-org/alist/v3/drivers/115_share"
_ "github.com/alist-org/alist/v3/drivers/123" _ "github.com/alist-org/alist/v3/drivers/123"
_ "github.com/alist-org/alist/v3/drivers/123_link" _ "github.com/alist-org/alist/v3/drivers/123_link"
_ "github.com/alist-org/alist/v3/drivers/123_open"
_ "github.com/alist-org/alist/v3/drivers/123_share" _ "github.com/alist-org/alist/v3/drivers/123_share"
_ "github.com/alist-org/alist/v3/drivers/139" _ "github.com/alist-org/alist/v3/drivers/139"
_ "github.com/alist-org/alist/v3/drivers/189" _ "github.com/alist-org/alist/v3/drivers/189"
@ -17,24 +15,14 @@ import (
_ "github.com/alist-org/alist/v3/drivers/aliyundrive" _ "github.com/alist-org/alist/v3/drivers/aliyundrive"
_ "github.com/alist-org/alist/v3/drivers/aliyundrive_open" _ "github.com/alist-org/alist/v3/drivers/aliyundrive_open"
_ "github.com/alist-org/alist/v3/drivers/aliyundrive_share" _ "github.com/alist-org/alist/v3/drivers/aliyundrive_share"
_ "github.com/alist-org/alist/v3/drivers/azure_blob"
_ "github.com/alist-org/alist/v3/drivers/baidu_netdisk" _ "github.com/alist-org/alist/v3/drivers/baidu_netdisk"
_ "github.com/alist-org/alist/v3/drivers/baidu_photo" _ "github.com/alist-org/alist/v3/drivers/baidu_photo"
_ "github.com/alist-org/alist/v3/drivers/baidu_share" _ "github.com/alist-org/alist/v3/drivers/baidu_share"
_ "github.com/alist-org/alist/v3/drivers/bitqiu"
_ "github.com/alist-org/alist/v3/drivers/chaoxing" _ "github.com/alist-org/alist/v3/drivers/chaoxing"
_ "github.com/alist-org/alist/v3/drivers/cloudreve" _ "github.com/alist-org/alist/v3/drivers/cloudreve"
_ "github.com/alist-org/alist/v3/drivers/cloudreve_v4"
_ "github.com/alist-org/alist/v3/drivers/crypt" _ "github.com/alist-org/alist/v3/drivers/crypt"
_ "github.com/alist-org/alist/v3/drivers/doubao"
_ "github.com/alist-org/alist/v3/drivers/doubao_share"
_ "github.com/alist-org/alist/v3/drivers/dropbox" _ "github.com/alist-org/alist/v3/drivers/dropbox"
_ "github.com/alist-org/alist/v3/drivers/febbox"
_ "github.com/alist-org/alist/v3/drivers/ftp" _ "github.com/alist-org/alist/v3/drivers/ftp"
_ "github.com/alist-org/alist/v3/drivers/gitee"
_ "github.com/alist-org/alist/v3/drivers/github"
_ "github.com/alist-org/alist/v3/drivers/github_releases"
_ "github.com/alist-org/alist/v3/drivers/gofile"
_ "github.com/alist-org/alist/v3/drivers/google_drive" _ "github.com/alist-org/alist/v3/drivers/google_drive"
_ "github.com/alist-org/alist/v3/drivers/google_photo" _ "github.com/alist-org/alist/v3/drivers/google_photo"
_ "github.com/alist-org/alist/v3/drivers/halalcloud" _ "github.com/alist-org/alist/v3/drivers/halalcloud"
@ -44,19 +32,15 @@ import (
_ "github.com/alist-org/alist/v3/drivers/lanzou" _ "github.com/alist-org/alist/v3/drivers/lanzou"
_ "github.com/alist-org/alist/v3/drivers/lenovonas_share" _ "github.com/alist-org/alist/v3/drivers/lenovonas_share"
_ "github.com/alist-org/alist/v3/drivers/local" _ "github.com/alist-org/alist/v3/drivers/local"
_ "github.com/alist-org/alist/v3/drivers/mediafire"
_ "github.com/alist-org/alist/v3/drivers/mediatrack" _ "github.com/alist-org/alist/v3/drivers/mediatrack"
_ "github.com/alist-org/alist/v3/drivers/mega" _ "github.com/alist-org/alist/v3/drivers/mega"
_ "github.com/alist-org/alist/v3/drivers/misskey"
_ "github.com/alist-org/alist/v3/drivers/mopan" _ "github.com/alist-org/alist/v3/drivers/mopan"
_ "github.com/alist-org/alist/v3/drivers/netease_music" _ "github.com/alist-org/alist/v3/drivers/netease_music"
_ "github.com/alist-org/alist/v3/drivers/onedrive" _ "github.com/alist-org/alist/v3/drivers/onedrive"
_ "github.com/alist-org/alist/v3/drivers/onedrive_app" _ "github.com/alist-org/alist/v3/drivers/onedrive_app"
_ "github.com/alist-org/alist/v3/drivers/onedrive_sharelink" _ "github.com/alist-org/alist/v3/drivers/onedrive_sharelink"
_ "github.com/alist-org/alist/v3/drivers/pcloud"
_ "github.com/alist-org/alist/v3/drivers/pikpak" _ "github.com/alist-org/alist/v3/drivers/pikpak"
_ "github.com/alist-org/alist/v3/drivers/pikpak_share" _ "github.com/alist-org/alist/v3/drivers/pikpak_share"
_ "github.com/alist-org/alist/v3/drivers/proton_drive"
_ "github.com/alist-org/alist/v3/drivers/quark_uc" _ "github.com/alist-org/alist/v3/drivers/quark_uc"
_ "github.com/alist-org/alist/v3/drivers/quark_uc_tv" _ "github.com/alist-org/alist/v3/drivers/quark_uc_tv"
_ "github.com/alist-org/alist/v3/drivers/quqi" _ "github.com/alist-org/alist/v3/drivers/quqi"

View File

@ -1,313 +0,0 @@
package azure_blob
import (
"context"
"fmt"
"io"
"path"
"regexp"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
)
// Azure Blob Storage based on the blob APIs
// Link: https://learn.microsoft.com/rest/api/storageservices/blob-service-rest-api
type AzureBlob struct {
model.Storage
Addition
client *azblob.Client
containerClient *container.Client
config driver.Config
}
// Config returns the driver configuration.
func (d *AzureBlob) Config() driver.Config {
return d.config
}
// GetAddition returns additional settings specific to Azure Blob Storage.
func (d *AzureBlob) GetAddition() driver.Additional {
return &d.Addition
}
// Init initializes the Azure Blob Storage client using shared key authentication.
func (d *AzureBlob) Init(ctx context.Context) error {
// Validate the endpoint URL
accountName := extractAccountName(d.Addition.Endpoint)
if !regexp.MustCompile(`^[a-z0-9]+$`).MatchString(accountName) {
return fmt.Errorf("invalid storage account name: must be chars of lowercase letters or numbers only")
}
credential, err := azblob.NewSharedKeyCredential(accountName, d.Addition.AccessKey)
if err != nil {
return fmt.Errorf("failed to create credential: %w", err)
}
// Check if Endpoint is just account name
endpoint := d.Addition.Endpoint
if accountName == endpoint {
endpoint = fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
}
// Initialize Azure Blob client with retry policy
client, err := azblob.NewClientWithSharedKeyCredential(endpoint, credential,
&azblob.ClientOptions{ClientOptions: azcore.ClientOptions{
Retry: policy.RetryOptions{
MaxRetries: MaxRetries,
RetryDelay: RetryDelay,
},
}})
if err != nil {
return fmt.Errorf("failed to create client: %w", err)
}
d.client = client
// Ensure container exists or create it
containerName := strings.Trim(d.Addition.ContainerName, "/ \\")
if containerName == "" {
return fmt.Errorf("container name cannot be empty")
}
return d.createContainerIfNotExists(ctx, containerName)
}
// Drop releases resources associated with the Azure Blob client.
func (d *AzureBlob) Drop(ctx context.Context) error {
d.client = nil
return nil
}
// List retrieves blobs and directories under the specified path.
func (d *AzureBlob) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
prefix := ensureTrailingSlash(dir.GetPath())
pager := d.containerClient.NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{
Prefix: &prefix,
})
var objs []model.Obj
for pager.More() {
page, err := pager.NextPage(ctx)
if err != nil {
return nil, fmt.Errorf("failed to list blobs: %w", err)
}
// Process directories
for _, blobPrefix := range page.Segment.BlobPrefixes {
objs = append(objs, &model.Object{
Name: path.Base(strings.TrimSuffix(*blobPrefix.Name, "/")),
Path: *blobPrefix.Name,
Modified: *blobPrefix.Properties.LastModified,
Ctime: *blobPrefix.Properties.CreationTime,
IsFolder: true,
})
}
// Process files
for _, blob := range page.Segment.BlobItems {
if strings.HasSuffix(*blob.Name, "/") {
continue
}
objs = append(objs, &model.Object{
Name: path.Base(*blob.Name),
Path: *blob.Name,
Size: *blob.Properties.ContentLength,
Modified: *blob.Properties.LastModified,
Ctime: *blob.Properties.CreationTime,
IsFolder: false,
})
}
}
return objs, nil
}
// Link generates a temporary SAS URL for accessing a blob.
func (d *AzureBlob) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
blobClient := d.containerClient.NewBlobClient(file.GetPath())
expireDuration := time.Hour * time.Duration(d.SignURLExpire)
sasURL, err := blobClient.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil)
if err != nil {
return nil, fmt.Errorf("failed to generate SAS URL: %w", err)
}
return &model.Link{URL: sasURL}, nil
}
// MakeDir creates a virtual directory by uploading an empty blob as a marker.
func (d *AzureBlob) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
dirPath := path.Join(parentDir.GetPath(), dirName)
if err := d.mkDir(ctx, dirPath); err != nil {
return nil, fmt.Errorf("failed to create directory marker: %w", err)
}
return &model.Object{
Path: dirPath,
Name: dirName,
IsFolder: true,
}, nil
}
// Move relocates an object (file or directory) to a new directory.
func (d *AzureBlob) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
srcPath := srcObj.GetPath()
dstPath := path.Join(dstDir.GetPath(), srcObj.GetName())
if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil {
return nil, fmt.Errorf("move operation failed: %w", err)
}
return &model.Object{
Path: dstPath,
Name: srcObj.GetName(),
Modified: time.Now(),
IsFolder: srcObj.IsDir(),
Size: srcObj.GetSize(),
}, nil
}
// Rename changes the name of an existing object.
func (d *AzureBlob) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
srcPath := srcObj.GetPath()
dstPath := path.Join(path.Dir(srcPath), newName)
if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil {
return nil, fmt.Errorf("rename operation failed: %w", err)
}
return &model.Object{
Path: dstPath,
Name: newName,
Modified: time.Now(),
IsFolder: srcObj.IsDir(),
Size: srcObj.GetSize(),
}, nil
}
// Copy duplicates an object (file or directory) to a specified destination directory.
func (d *AzureBlob) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
dstPath := path.Join(dstDir.GetPath(), srcObj.GetName())
// Handle directory copying using flat listing
if srcObj.IsDir() {
srcPrefix := srcObj.GetPath()
srcPrefix = ensureTrailingSlash(srcPrefix)
// Get all blobs under the source directory
blobs, err := d.flattenListBlobs(ctx, srcPrefix)
if err != nil {
return nil, fmt.Errorf("failed to list source directory contents: %w", err)
}
// Process each blob - copy to destination
for _, blob := range blobs {
// Skip the directory marker itself
if *blob.Name == srcPrefix {
continue
}
// Calculate relative path from source
relPath := strings.TrimPrefix(*blob.Name, srcPrefix)
itemDstPath := path.Join(dstPath, relPath)
if strings.HasSuffix(itemDstPath, "/") || (blob.Metadata["hdi_isfolder"] != nil && *blob.Metadata["hdi_isfolder"] == "true") {
// Create directory marker at destination
err := d.mkDir(ctx, itemDstPath)
if err != nil {
return nil, fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err)
}
} else {
// Copy the blob
if err := d.copyFile(ctx, *blob.Name, itemDstPath); err != nil {
return nil, fmt.Errorf("failed to copy %s: %w", *blob.Name, err)
}
}
}
// Create directory marker at destination if needed
if len(blobs) == 0 {
err := d.mkDir(ctx, dstPath)
if err != nil {
return nil, fmt.Errorf("failed to create directory [%s]: %w", dstPath, err)
}
}
return &model.Object{
Path: dstPath,
Name: srcObj.GetName(),
Modified: time.Now(),
IsFolder: true,
}, nil
}
// Copy a single file
if err := d.copyFile(ctx, srcObj.GetPath(), dstPath); err != nil {
return nil, fmt.Errorf("failed to copy blob: %w", err)
}
return &model.Object{
Path: dstPath,
Name: srcObj.GetName(),
Size: srcObj.GetSize(),
Modified: time.Now(),
IsFolder: false,
}, nil
}
// Remove deletes a specified blob or recursively deletes a directory and its contents.
func (d *AzureBlob) Remove(ctx context.Context, obj model.Obj) error {
path := obj.GetPath()
// Handle recursive directory deletion
if obj.IsDir() {
return d.deleteFolder(ctx, path)
}
// Delete single file
return d.deleteFile(ctx, path, false)
}
// Put uploads a file stream to Azure Blob Storage with progress tracking.
func (d *AzureBlob) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
blobPath := path.Join(dstDir.GetPath(), stream.GetName())
blobClient := d.containerClient.NewBlockBlobClient(blobPath)
// Determine optimal upload options based on file size
options := optimizedUploadOptions(stream.GetSize())
// Track upload progress
progressTracker := &progressTracker{
total: stream.GetSize(),
updateProgress: up,
}
// Wrap stream to handle context cancellation and progress tracking
limitedStream := driver.NewLimitedUploadStream(ctx, io.TeeReader(stream, progressTracker))
// Upload the stream to Azure Blob Storage
_, err := blobClient.UploadStream(ctx, limitedStream, options)
if err != nil {
return nil, fmt.Errorf("failed to upload file: %w", err)
}
return &model.Object{
Path: blobPath,
Name: stream.GetName(),
Size: stream.GetSize(),
Modified: time.Now(),
IsFolder: false,
}, nil
}
// The following methods related to archive handling are not implemented yet.
// func (d *AzureBlob) GetArchiveMeta(...) {...}
// func (d *AzureBlob) ListArchive(...) {...}
// func (d *AzureBlob) Extract(...) {...}
// func (d *AzureBlob) ArchiveDecompress(...) {...}
// Ensure AzureBlob implements the driver.Driver interface.
var _ driver.Driver = (*AzureBlob)(nil)

View File

@ -1,32 +0,0 @@
package azure_blob
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
Endpoint string `json:"endpoint" required:"true" default:"https://<accountname>.blob.core.windows.net/" help:"e.g. https://accountname.blob.core.windows.net/. The full endpoint URL for Azure Storage, including the unique storage account name (3 ~ 24 numbers and lowercase letters only)."`
AccessKey string `json:"access_key" required:"true" help:"The access key for Azure Storage, used for authentication. https://learn.microsoft.com/azure/storage/common/storage-account-keys-manage"`
ContainerName string `json:"container_name" required:"true" help:"The name of the container in Azure Storage (created in the Azure portal). https://learn.microsoft.com/azure/storage/blobs/blob-containers-portal"`
SignURLExpire int `json:"sign_url_expire" type:"number" default:"4" help:"The expiration time for SAS URLs, in hours."`
}
// implement GetRootId interface
func (r Addition) GetRootId() string {
return r.ContainerName
}
var config = driver.Config{
Name: "Azure Blob Storage",
LocalSort: true,
CheckStatus: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &AzureBlob{
config: config,
}
})
}

View File

@ -1,20 +0,0 @@
package azure_blob
import "github.com/alist-org/alist/v3/internal/driver"
// progressTracker is used to track upload progress
type progressTracker struct {
total int64
current int64
updateProgress driver.UpdateProgress
}
// Write implements io.Writer to track progress
func (pt *progressTracker) Write(p []byte) (n int, err error) {
n = len(p)
pt.current += int64(n)
if pt.updateProgress != nil && pt.total > 0 {
pt.updateProgress(float64(pt.current) * 100 / float64(pt.total))
}
return n, nil
}

View File

@ -1,401 +0,0 @@
package azure_blob
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"path"
"sort"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
log "github.com/sirupsen/logrus"
)
const (
// MaxRetries defines the maximum number of retry attempts for Azure operations
MaxRetries = 3
// RetryDelay defines the base delay between retries
RetryDelay = 3 * time.Second
// MaxBatchSize defines the maximum number of operations in a single batch request
MaxBatchSize = 128
)
// extractAccountName 从 Azure 存储 Endpoint 中提取账户名
func extractAccountName(endpoint string) string {
// 移除协议前缀
endpoint = strings.TrimPrefix(endpoint, "https://")
endpoint = strings.TrimPrefix(endpoint, "http://")
// 获取第一个点之前的部分(即账户名)
parts := strings.Split(endpoint, ".")
if len(parts) > 0 {
// to lower case
return strings.ToLower(parts[0])
}
return ""
}
// isNotFoundError checks if the error is a "not found" type error
func isNotFoundError(err error) bool {
var storageErr *azcore.ResponseError
if errors.As(err, &storageErr) {
return storageErr.StatusCode == 404
}
// Fallback to string matching for backwards compatibility
return err != nil && strings.Contains(err.Error(), "BlobNotFound")
}
// flattenListBlobs - Optimize blob listing to handle pagination better
func (d *AzureBlob) flattenListBlobs(ctx context.Context, prefix string) ([]container.BlobItem, error) {
// Standardize prefix format
prefix = ensureTrailingSlash(prefix)
var blobItems []container.BlobItem
pager := d.containerClient.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
Prefix: &prefix,
Include: container.ListBlobsInclude{
Metadata: true,
},
})
for pager.More() {
page, err := pager.NextPage(ctx)
if err != nil {
return nil, fmt.Errorf("failed to list blobs: %w", err)
}
for _, blob := range page.Segment.BlobItems {
blobItems = append(blobItems, *blob)
}
}
return blobItems, nil
}
// batchDeleteBlobs - Simplify batch deletion logic
func (d *AzureBlob) batchDeleteBlobs(ctx context.Context, blobPaths []string) error {
if len(blobPaths) == 0 {
return nil
}
// Process in batches of MaxBatchSize
for i := 0; i < len(blobPaths); i += MaxBatchSize {
end := min(i+MaxBatchSize, len(blobPaths))
currentBatch := blobPaths[i:end]
// Create batch builder
batchBuilder, err := d.containerClient.NewBatchBuilder()
if err != nil {
return fmt.Errorf("failed to create batch builder: %w", err)
}
// Add delete operations
for _, blobPath := range currentBatch {
if err := batchBuilder.Delete(blobPath, nil); err != nil {
return fmt.Errorf("failed to add delete operation for %s: %w", blobPath, err)
}
}
// Submit batch
responses, err := d.containerClient.SubmitBatch(ctx, batchBuilder, nil)
if err != nil {
return fmt.Errorf("batch delete request failed: %w", err)
}
// Check responses
for _, resp := range responses.Responses {
if resp.Error != nil && !isNotFoundError(resp.Error) {
// 获取 blob 名称以提供更好的错误信息
blobName := "unknown"
if resp.BlobName != nil {
blobName = *resp.BlobName
}
return fmt.Errorf("failed to delete blob %s: %v", blobName, resp.Error)
}
}
}
return nil
}
// deleteFolder recursively deletes a directory and all its contents
func (d *AzureBlob) deleteFolder(ctx context.Context, prefix string) error {
// Ensure directory path ends with slash
prefix = ensureTrailingSlash(prefix)
// Get all blobs under the directory using flattenListBlobs
globs, err := d.flattenListBlobs(ctx, prefix)
if err != nil {
return fmt.Errorf("failed to list blobs for deletion: %w", err)
}
// If there are blobs in the directory, delete them
if len(globs) > 0 {
// 分离文件和目录标记
var filePaths []string
var dirPaths []string
for _, blob := range globs {
blobName := *blob.Name
if isDirectory(blob) {
// remove trailing slash for directory names
dirPaths = append(dirPaths, strings.TrimSuffix(blobName, "/"))
} else {
filePaths = append(filePaths, blobName)
}
}
// 先删除文件,再删除目录
if len(filePaths) > 0 {
if err := d.batchDeleteBlobs(ctx, filePaths); err != nil {
return err
}
}
if len(dirPaths) > 0 {
// 按路径深度分组
depthMap := make(map[int][]string)
for _, dir := range dirPaths {
depth := strings.Count(dir, "/") // 计算目录深度
depthMap[depth] = append(depthMap[depth], dir)
}
// 按深度从大到小排序
var depths []int
for depth := range depthMap {
depths = append(depths, depth)
}
sort.Sort(sort.Reverse(sort.IntSlice(depths)))
// 按深度逐层批量删除
for _, depth := range depths {
batch := depthMap[depth]
if err := d.batchDeleteBlobs(ctx, batch); err != nil {
return err
}
}
}
}
// 最后删除目录标记本身
return d.deleteEmptyDirectory(ctx, prefix)
}
// deleteFile deletes a single file or blob with better error handling
func (d *AzureBlob) deleteFile(ctx context.Context, path string, isDir bool) error {
blobClient := d.containerClient.NewBlobClient(path)
_, err := blobClient.Delete(ctx, nil)
if err != nil && !(isDir && isNotFoundError(err)) {
return err
}
return nil
}
// copyFile copies a single blob from source path to destination path
func (d *AzureBlob) copyFile(ctx context.Context, srcPath, dstPath string) error {
srcBlob := d.containerClient.NewBlobClient(srcPath)
dstBlob := d.containerClient.NewBlobClient(dstPath)
// Use configured expiration time for SAS URL
expireDuration := time.Hour * time.Duration(d.SignURLExpire)
srcURL, err := srcBlob.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil)
if err != nil {
return fmt.Errorf("failed to generate source SAS URL: %w", err)
}
_, err = dstBlob.StartCopyFromURL(ctx, srcURL, nil)
return err
}
// createContainerIfNotExists - Create container if not exists
// Clean up commented code
func (d *AzureBlob) createContainerIfNotExists(ctx context.Context, containerName string) error {
serviceClient := d.client.ServiceClient()
containerClient := serviceClient.NewContainerClient(containerName)
var options = service.CreateContainerOptions{}
_, err := containerClient.Create(ctx, &options)
if err != nil {
var responseErr *azcore.ResponseError
if errors.As(err, &responseErr) && responseErr.ErrorCode != "ContainerAlreadyExists" {
return fmt.Errorf("failed to create or access container [%s]: %w", containerName, err)
}
}
d.containerClient = containerClient
return nil
}
// mkDir creates a virtual directory marker by uploading an empty blob with metadata.
func (d *AzureBlob) mkDir(ctx context.Context, fullDirName string) error {
dirPath := ensureTrailingSlash(fullDirName)
blobClient := d.containerClient.NewBlockBlobClient(dirPath)
// Upload an empty blob with metadata indicating it's a directory
_, err := blobClient.Upload(ctx, struct {
*bytes.Reader
io.Closer
}{
Reader: bytes.NewReader([]byte{}),
Closer: io.NopCloser(nil),
}, &blockblob.UploadOptions{
Metadata: map[string]*string{
"hdi_isfolder": to.Ptr("true"),
},
})
return err
}
// ensureTrailingSlash ensures the provided path ends with a trailing slash.
func ensureTrailingSlash(path string) string {
if !strings.HasSuffix(path, "/") {
return path + "/"
}
return path
}
// moveOrRename moves or renames blobs or directories from source to destination.
func (d *AzureBlob) moveOrRename(ctx context.Context, srcPath, dstPath string, isDir bool, srcSize int64) error {
if isDir {
// Normalize paths for directory operations
srcPath = ensureTrailingSlash(srcPath)
dstPath = ensureTrailingSlash(dstPath)
// List all blobs under the source directory
blobs, err := d.flattenListBlobs(ctx, srcPath)
if err != nil {
return fmt.Errorf("failed to list blobs: %w", err)
}
// Iterate and copy each blob to the destination
for _, item := range blobs {
srcBlobName := *item.Name
relPath := strings.TrimPrefix(srcBlobName, srcPath)
itemDstPath := path.Join(dstPath, relPath)
if isDirectory(item) {
// Create directory marker at destination
if err := d.mkDir(ctx, itemDstPath); err != nil {
return fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err)
}
} else {
// Copy file blob to destination
if err := d.copyFile(ctx, srcBlobName, itemDstPath); err != nil {
return fmt.Errorf("failed to copy blob [%s]: %w", srcBlobName, err)
}
}
}
// Handle empty directories by creating a marker at destination
if len(blobs) == 0 {
if err := d.mkDir(ctx, dstPath); err != nil {
return fmt.Errorf("failed to create directory [%s]: %w", dstPath, err)
}
}
// Delete source directory and its contents
if err := d.deleteFolder(ctx, srcPath); err != nil {
log.Warnf("failed to delete source directory [%s]: %v\n, and try again", srcPath, err)
// Retry deletion once more and ignore the result
if err := d.deleteFolder(ctx, srcPath); err != nil {
log.Errorf("Retry deletion of source directory [%s] failed: %v", srcPath, err)
}
}
return nil
}
// Single file move or rename operation
if err := d.copyFile(ctx, srcPath, dstPath); err != nil {
return fmt.Errorf("failed to copy file: %w", err)
}
// Delete source file after successful copy
if err := d.deleteFile(ctx, srcPath, false); err != nil {
log.Errorf("Error deleting source file [%s]: %v", srcPath, err)
}
return nil
}
// optimizedUploadOptions returns the optimal upload options based on file size
func optimizedUploadOptions(fileSize int64) *azblob.UploadStreamOptions {
options := &azblob.UploadStreamOptions{
BlockSize: 4 * 1024 * 1024, // 4MB block size
Concurrency: 4, // Default concurrency
}
// For large files, increase block size and concurrency
if fileSize > 256*1024*1024 { // For files larger than 256MB
options.BlockSize = 8 * 1024 * 1024 // 8MB blocks
options.Concurrency = 8 // More concurrent uploads
}
// For very large files (>1GB)
if fileSize > 1024*1024*1024 {
options.BlockSize = 16 * 1024 * 1024 // 16MB blocks
options.Concurrency = 16 // Higher concurrency
}
return options
}
// isDirectory determines if a blob represents a directory
// Checks multiple indicators: path suffix, metadata, and content type
func isDirectory(blob container.BlobItem) bool {
// Check path suffix
if strings.HasSuffix(*blob.Name, "/") {
return true
}
// Check metadata for directory marker
if blob.Metadata != nil {
if val, ok := blob.Metadata["hdi_isfolder"]; ok && val != nil && *val == "true" {
return true
}
// Azure Storage Explorer and other tools may use different metadata keys
if val, ok := blob.Metadata["is_directory"]; ok && val != nil && strings.ToLower(*val) == "true" {
return true
}
}
// Check content type (some tools mark directories with specific content types)
if blob.Properties != nil && blob.Properties.ContentType != nil {
contentType := strings.ToLower(*blob.Properties.ContentType)
if blob.Properties.ContentLength != nil && *blob.Properties.ContentLength == 0 && (contentType == "application/directory" || contentType == "directory") {
return true
}
}
return false
}
// deleteEmptyDirectory deletes a directory only if it's empty
func (d *AzureBlob) deleteEmptyDirectory(ctx context.Context, dirPath string) error {
// Directory is empty, delete the directory marker
blobClient := d.containerClient.NewBlobClient(strings.TrimSuffix(dirPath, "/"))
_, err := blobClient.Delete(ctx, nil)
// Also try deleting with trailing slash (for different directory marker formats)
if err != nil && isNotFoundError(err) {
blobClient = d.containerClient.NewBlobClient(dirPath)
_, err = blobClient.Delete(ctx, nil)
}
// Ignore not found errors
if err != nil && isNotFoundError(err) {
log.Infof("Directory [%s] not found during deletion: %v", dirPath, err)
return nil
}
return err
}

View File

@ -6,16 +6,13 @@ import (
"encoding/hex" "encoding/hex"
"errors" "errors"
"io" "io"
"math"
"net/url" "net/url"
"os"
stdpath "path" stdpath "path"
"strconv" "strconv"
"time" "time"
"golang.org/x/sync/semaphore"
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
@ -79,8 +76,6 @@ func (d *BaiduNetdisk) List(ctx context.Context, dir model.Obj, args model.ListA
func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if d.DownloadAPI == "crack" { if d.DownloadAPI == "crack" {
return d.linkCrack(file, args) return d.linkCrack(file, args)
} else if d.DownloadAPI == "crack_video" {
return d.linkCrackVideo(file, args)
} }
return d.linkOfficial(file, args) return d.linkOfficial(file, args)
} }
@ -186,35 +181,21 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
return newObj, nil return newObj, nil
} }
var ( tempFile, err := stream.CacheFullInTempFile()
cache = stream.GetFile() if err != nil {
tmpF *os.File return nil, err
err error
)
if _, ok := cache.(io.ReaderAt); !ok {
tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*")
if err != nil {
return nil, err
}
defer func() {
_ = tmpF.Close()
_ = os.Remove(tmpF.Name())
}()
cache = tmpF
} }
streamSize := stream.GetSize() streamSize := stream.GetSize()
sliceSize := d.getSliceSize(streamSize) sliceSize := d.getSliceSize()
count := int(streamSize / sliceSize) count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1))
lastBlockSize := streamSize % sliceSize lastBlockSize := streamSize % sliceSize
if lastBlockSize > 0 { if streamSize > 0 && lastBlockSize == 0 {
count++
} else {
lastBlockSize = sliceSize lastBlockSize = sliceSize
} }
//cal md5 for first 256k data //cal md5 for first 256k data
const SliceSize int64 = 256 * utils.KB const SliceSize int64 = 256 * 1024
// cal md5 // cal md5
blockList := make([]string, 0, count) blockList := make([]string, 0, count)
byteSize := sliceSize byteSize := sliceSize
@ -222,11 +203,6 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
sliceMd5H := md5.New() sliceMd5H := md5.New()
sliceMd5H2 := md5.New() sliceMd5H2 := md5.New()
slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize) slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
writers := []io.Writer{fileMd5H, sliceMd5H, slicemd5H2Write}
if tmpF != nil {
writers = append(writers, tmpF)
}
written := int64(0)
for i := 1; i <= count; i++ { for i := 1; i <= count; i++ {
if utils.IsCanceled(ctx) { if utils.IsCanceled(ctx) {
@ -235,23 +211,13 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
if i == count { if i == count {
byteSize = lastBlockSize byteSize = lastBlockSize
} }
n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), stream, byteSize) _, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
written += n
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return nil, err return nil, err
} }
blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil))) blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil)))
sliceMd5H.Reset() sliceMd5H.Reset()
} }
if tmpF != nil {
if written != streamSize {
return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize)
}
_, err = tmpF.Seek(0, io.SeekStart)
if err != nil {
return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ")
}
}
contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil)) contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil)) sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
blockListStr, _ := utils.Json.MarshalToString(blockList) blockListStr, _ := utils.Json.MarshalToString(blockList)
@ -294,10 +260,9 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
} }
// step.2 上传分片 // step.2 上传分片
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread, threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(1), retry.Attempts(3),
retry.Delay(time.Second), retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay)) retry.DelayType(retry.BackOffDelay))
sem := semaphore.NewWeighted(3)
for i, partseq := range precreateResp.BlockList { for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(upCtx) { if utils.IsCanceled(upCtx) {
break break
@ -308,10 +273,6 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
byteSize = lastBlockSize byteSize = lastBlockSize
} }
threadG.Go(func(ctx context.Context) error { threadG.Go(func(ctx context.Context) error {
if err = sem.Acquire(ctx, 1); err != nil {
return err
}
defer sem.Release(1)
params := map[string]string{ params := map[string]string{
"method": "upload", "method": "upload",
"access_token": d.AccessToken, "access_token": d.AccessToken,
@ -320,8 +281,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
"uploadid": precreateResp.Uploadid, "uploadid": precreateResp.Uploadid,
"partseq": strconv.Itoa(partseq), "partseq": strconv.Itoa(partseq),
} }
err := d.uploadSlice(ctx, params, stream.GetName(), err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize)))
if err != nil { if err != nil {
return err return err
} }

View File

@ -8,18 +8,16 @@ import (
type Addition struct { type Addition struct {
RefreshToken string `json:"refresh_token" required:"true"` RefreshToken string `json:"refresh_token" required:"true"`
driver.RootPath driver.RootPath
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"` OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"` OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
DownloadAPI string `json:"download_api" type:"select" options:"official,crack,crack_video" default:"official"` DownloadAPI string `json:"download_api" type:"select" options:"official,crack" default:"official"`
ClientID string `json:"client_id" required:"true" default:"hq9yQ9w9kR4YHj1kyYafLygVocobh7Sf"` ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
ClientSecret string `json:"client_secret" required:"true" default:"YH2VpZcFJHYNnV6vLfHQXDBhcE7ZChyE"` ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"` CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
AccessToken string AccessToken string
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"` UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"` UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"` CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
LowBandwithUploadMode bool `json:"low_bandwith_upload_mode" default:"false"`
OnlyListVideoFile bool `json:"only_list_video_file" default:"false"`
} }
var config = driver.Config{ var config = driver.Config{

View File

@ -6,7 +6,6 @@ import (
"time" "time"
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
) )
type TokenErrResp struct { type TokenErrResp struct {
@ -17,7 +16,7 @@ type TokenErrResp struct {
type File struct { type File struct {
//TkbindId int `json:"tkbind_id"` //TkbindId int `json:"tkbind_id"`
//OwnerType int `json:"owner_type"` //OwnerType int `json:"owner_type"`
Category int `json:"category"` //Category int `json:"category"`
//RealCategory string `json:"real_category"` //RealCategory string `json:"real_category"`
FsId int64 `json:"fs_id"` FsId int64 `json:"fs_id"`
//OperId int `json:"oper_id"` //OperId int `json:"oper_id"`
@ -56,11 +55,11 @@ func fileToObj(f File) *model.ObjThumb {
if f.ServerFilename == "" { if f.ServerFilename == "" {
f.ServerFilename = path.Base(f.Path) f.ServerFilename = path.Base(f.Path)
} }
if f.ServerCtime == 0 { if f.LocalCtime == 0 {
f.ServerCtime = f.Ctime f.LocalCtime = f.Ctime
} }
if f.ServerMtime == 0 { if f.LocalMtime == 0 {
f.ServerMtime = f.Mtime f.LocalMtime = f.Mtime
} }
return &model.ObjThumb{ return &model.ObjThumb{
Object: model.Object{ Object: model.Object{
@ -68,12 +67,12 @@ func fileToObj(f File) *model.ObjThumb {
Path: f.Path, Path: f.Path,
Name: f.ServerFilename, Name: f.ServerFilename,
Size: f.Size, Size: f.Size,
Modified: time.Unix(f.ServerMtime, 0), Modified: time.Unix(f.LocalMtime, 0),
Ctime: time.Unix(f.ServerCtime, 0), Ctime: time.Unix(f.LocalCtime, 0),
IsFolder: f.Isdir == 1, IsFolder: f.Isdir == 1,
// 直接获取的MD5是错误的 // 直接获取的MD5是错误的
HashInfo: utils.NewHashInfo(utils.MD5, DecryptMd5(f.Md5)), // HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
}, },
Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3}, Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3},
} }

View File

@ -1,14 +1,11 @@
package baidu_netdisk package baidu_netdisk
import ( import (
"encoding/hex"
"errors" "errors"
"fmt" "fmt"
"net/http" "net/http"
"strconv" "strconv"
"strings"
"time" "time"
"unicode"
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/errs"
@ -79,12 +76,6 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
return retry.Unrecoverable(err2) return retry.Unrecoverable(err2)
} }
} }
if 31023 == errno && d.DownloadAPI == "crack_video" {
result = res.Body()
return nil
}
return fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno) return fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)
} }
result = res.Body() result = res.Body()
@ -137,21 +128,12 @@ func (d *BaiduNetdisk) getFiles(dir string) ([]File, error) {
if len(resp.List) == 0 { if len(resp.List) == 0 {
break break
} }
res = append(res, resp.List...)
if d.OnlyListVideoFile {
for _, file := range resp.List {
if file.Isdir == 1 || file.Category == 1 {
res = append(res, file)
}
}
} else {
res = append(res, resp.List...)
}
} }
return res, nil return res, nil
} }
func (d *BaiduNetdisk) linkOfficial(file model.Obj, _ model.LinkArgs) (*model.Link, error) { func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model.Link, error) {
var resp DownloadResp var resp DownloadResp
params := map[string]string{ params := map[string]string{
"method": "filemetas", "method": "filemetas",
@ -171,6 +153,8 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, _ model.LinkArgs) (*model.Li
u = res.Header().Get("location") u = res.Header().Get("location")
//} //}
updateObjMd5(file, "pan.baidu.com", u)
return &model.Link{ return &model.Link{
URL: u, URL: u,
Header: http.Header{ Header: http.Header{
@ -179,7 +163,7 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, _ model.LinkArgs) (*model.Li
}, nil }, nil
} }
func (d *BaiduNetdisk) linkCrack(file model.Obj, _ model.LinkArgs) (*model.Link, error) { func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Link, error) {
var resp DownloadResp2 var resp DownloadResp2
param := map[string]string{ param := map[string]string{
"target": fmt.Sprintf("[\"%s\"]", file.GetPath()), "target": fmt.Sprintf("[\"%s\"]", file.GetPath()),
@ -194,6 +178,8 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, _ model.LinkArgs) (*model.Link,
return nil, err return nil, err
} }
updateObjMd5(file, d.CustomCrackUA, resp.Info[0].Dlink)
return &model.Link{ return &model.Link{
URL: resp.Info[0].Dlink, URL: resp.Info[0].Dlink,
Header: http.Header{ Header: http.Header{
@ -202,34 +188,6 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, _ model.LinkArgs) (*model.Link,
}, nil }, nil
} }
func (d *BaiduNetdisk) linkCrackVideo(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
param := map[string]string{
"type": "VideoURL",
"path": fmt.Sprintf("%s", file.GetPath()),
"fs_id": file.GetID(),
"devuid": "0%1",
"clienttype": "1",
"channel": "android_15_25010PN30C_bd-netdisk_1523a",
"nom3u8": "1",
"dlink": "1",
"media": "1",
"origin": "dlna",
}
resp, err := d.request("https://pan.baidu.com/api/mediainfo", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(param)
}, nil)
if err != nil {
return nil, err
}
return &model.Link{
URL: utils.Json.Get(resp, "info", "dlink").ToString(),
Header: http.Header{
"User-Agent": []string{d.CustomCrackUA},
},
}, nil
}
func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) { func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
params := map[string]string{ params := map[string]string{
"method": "filemanager", "method": "filemanager",
@ -271,74 +229,37 @@ func joinTime(form map[string]string, ctime, mtime int64) {
form["local_ctime"] = strconv.FormatInt(ctime, 10) form["local_ctime"] = strconv.FormatInt(ctime, 10)
} }
func updateObjMd5(obj model.Obj, userAgent, u string) {
object := model.GetRawObject(obj)
if object != nil {
req, _ := http.NewRequest(http.MethodHead, u, nil)
req.Header.Add("User-Agent", userAgent)
resp, _ := base.HttpClient.Do(req)
if resp != nil {
contentMd5 := resp.Header.Get("Content-Md5")
object.HashInfo = utils.NewHashInfo(utils.MD5, contentMd5)
}
}
}
const ( const (
DefaultSliceSize int64 = 4 * utils.MB DefaultSliceSize int64 = 4 * utils.MB
VipSliceSize int64 = 16 * utils.MB VipSliceSize = 16 * utils.MB
SVipSliceSize int64 = 32 * utils.MB SVipSliceSize = 32 * utils.MB
MaxSliceNum = 2048 // 文档写的是 1024/没写 ,但实际测试是 2048
SliceStep int64 = 1 * utils.MB
) )
func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 { func (d *BaiduNetdisk) getSliceSize() int64 {
// 非会员固定为 4MB
if d.vipType == 0 {
if d.CustomUploadPartSize != 0 {
log.Warnf("CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
}
if filesize > MaxSliceNum*DefaultSliceSize {
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
}
return DefaultSliceSize
}
if d.CustomUploadPartSize != 0 { if d.CustomUploadPartSize != 0 {
if d.CustomUploadPartSize < DefaultSliceSize {
log.Warnf("CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
return DefaultSliceSize
}
if d.vipType == 1 && d.CustomUploadPartSize > VipSliceSize {
log.Warnf("CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
return VipSliceSize
}
if d.vipType == 2 && d.CustomUploadPartSize > SVipSliceSize {
log.Warnf("CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
return SVipSliceSize
}
return d.CustomUploadPartSize return d.CustomUploadPartSize
} }
maxSliceSize := DefaultSliceSize
switch d.vipType { switch d.vipType {
case 1: case 1:
maxSliceSize = VipSliceSize return VipSliceSize
case 2: case 2:
maxSliceSize = SVipSliceSize return SVipSliceSize
default:
return DefaultSliceSize
} }
// upload on low bandwidth
if d.LowBandwithUploadMode {
size := DefaultSliceSize
for size <= maxSliceSize {
if filesize <= MaxSliceNum*size {
return size
}
size += SliceStep
}
}
if filesize > MaxSliceNum*maxSliceSize {
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
}
return maxSliceSize
} }
// func encodeURIComponent(str string) string { // func encodeURIComponent(str string) string {
@ -346,40 +267,3 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
// r = strings.ReplaceAll(r, "+", "%20") // r = strings.ReplaceAll(r, "+", "%20")
// return r // return r
// } // }
func DecryptMd5(encryptMd5 string) string {
if _, err := hex.DecodeString(encryptMd5); err == nil {
return encryptMd5
}
var out strings.Builder
out.Grow(len(encryptMd5))
for i, n := 0, int64(0); i < len(encryptMd5); i++ {
if i == 9 {
n = int64(unicode.ToLower(rune(encryptMd5[i])) - 'g')
} else {
n, _ = strconv.ParseInt(encryptMd5[i:i+1], 16, 64)
}
out.WriteString(strconv.FormatInt(n^int64(15&i), 16))
}
encryptMd5 = out.String()
return encryptMd5[8:16] + encryptMd5[:8] + encryptMd5[24:32] + encryptMd5[16:24]
}
func EncryptMd5(originalMd5 string) string {
reversed := originalMd5[8:16] + originalMd5[:8] + originalMd5[24:32] + originalMd5[16:24]
var out strings.Builder
out.Grow(len(reversed))
for i, n := 0, int64(0); i < len(reversed); i++ {
n, _ = strconv.ParseInt(reversed[i:i+1], 16, 64)
n ^= int64(15 & i)
if i == 9 {
out.WriteRune(rune(n) + 'g')
} else {
out.WriteString(strconv.FormatInt(n, 16))
}
}
return out.String()
}

View File

@ -7,16 +7,13 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"os" "math"
"regexp" "regexp"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"golang.org/x/sync/semaphore"
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs" "github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
@ -30,10 +27,9 @@ type BaiduPhoto struct {
model.Storage model.Storage
Addition Addition
// AccessToken string AccessToken string
Uk int64 Uk int64
bdstoken string root model.Obj
root model.Obj
uploadThread int uploadThread int
} }
@ -52,9 +48,9 @@ func (d *BaiduPhoto) Init(ctx context.Context) error {
d.uploadThread, d.UploadThread = 3, "3" d.uploadThread, d.UploadThread = 3, "3"
} }
// if err := d.refreshToken(); err != nil { if err := d.refreshToken(); err != nil {
// return err return err
// } }
// root // root
if d.AlbumID != "" { if d.AlbumID != "" {
@ -77,10 +73,6 @@ func (d *BaiduPhoto) Init(ctx context.Context) error {
if err != nil { if err != nil {
return err return err
} }
d.bdstoken, err = d.getBDStoken()
if err != nil {
return err
}
d.Uk, err = strconv.ParseInt(info.YouaID, 10, 64) d.Uk, err = strconv.ParseInt(info.YouaID, 10, 64)
return err return err
} }
@ -90,7 +82,7 @@ func (d *BaiduPhoto) GetRoot(ctx context.Context) (model.Obj, error) {
} }
func (d *BaiduPhoto) Drop(ctx context.Context) error { func (d *BaiduPhoto) Drop(ctx context.Context) error {
// d.AccessToken = "" d.AccessToken = ""
d.Uk = 0 d.Uk = 0
d.root = nil d.root = nil
return nil return nil
@ -148,13 +140,14 @@ func (d *BaiduPhoto) Link(ctx context.Context, file model.Obj, args model.LinkAr
// 处理共享相册 // 处理共享相册
if d.Uk != file.Uk { if d.Uk != file.Uk {
// 有概率无法获取到链接 // 有概率无法获取到链接
// return d.linkAlbum(ctx, file, args) return d.linkAlbum(ctx, file, args)
f, err := d.CopyAlbumFile(ctx, file) // 接口被限制只能使用cookie
if err != nil { // f, err := d.CopyAlbumFile(ctx, file)
return nil, err // if err != nil {
} // return nil, err
return d.linkFile(ctx, f, args) // }
// return d.linkFile(ctx, f, args)
} }
return d.linkFile(ctx, &file.File, args) return d.linkFile(ctx, &file.File, args)
} }
@ -242,21 +235,11 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
// TODO: // TODO:
// 暂时没有找到妙传方式 // 暂时没有找到妙传方式
var (
cache = stream.GetFile() // 需要获取完整文件md5,必须支持 io.Seek
tmpF *os.File tempFile, err := stream.CacheFullInTempFile()
err error if err != nil {
) return nil, err
if _, ok := cache.(io.ReaderAt); !ok {
tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*")
if err != nil {
return nil, err
}
defer func() {
_ = tmpF.Close()
_ = os.Remove(tmpF.Name())
}()
cache = tmpF
} }
const DEFAULT int64 = 1 << 22 const DEFAULT int64 = 1 << 22
@ -264,11 +247,9 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
// 计算需要的数据 // 计算需要的数据
streamSize := stream.GetSize() streamSize := stream.GetSize()
count := int(streamSize / DEFAULT) count := int(math.Ceil(float64(streamSize) / float64(DEFAULT)))
lastBlockSize := streamSize % DEFAULT lastBlockSize := streamSize % DEFAULT
if lastBlockSize > 0 { if lastBlockSize == 0 {
count++
} else {
lastBlockSize = DEFAULT lastBlockSize = DEFAULT
} }
@ -279,11 +260,6 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
sliceMd5H := md5.New() sliceMd5H := md5.New()
sliceMd5H2 := md5.New() sliceMd5H2 := md5.New()
slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize) slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
writers := []io.Writer{fileMd5H, sliceMd5H, slicemd5H2Write}
if tmpF != nil {
writers = append(writers, tmpF)
}
written := int64(0)
for i := 1; i <= count; i++ { for i := 1; i <= count; i++ {
if utils.IsCanceled(ctx) { if utils.IsCanceled(ctx) {
return nil, ctx.Err() return nil, ctx.Err()
@ -291,23 +267,13 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
if i == count { if i == count {
byteSize = lastBlockSize byteSize = lastBlockSize
} }
n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), stream, byteSize) _, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
written += n
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return nil, err return nil, err
} }
sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5H.Sum(nil))) sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5H.Sum(nil)))
sliceMd5H.Reset() sliceMd5H.Reset()
} }
if tmpF != nil {
if written != streamSize {
return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize)
}
_, err = tmpF.Seek(0, io.SeekStart)
if err != nil {
return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ")
}
}
contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil)) contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil)) sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
blockListStr, _ := utils.Json.MarshalToString(sliceMD5List) blockListStr, _ := utils.Json.MarshalToString(sliceMD5List)
@ -319,19 +285,18 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
"rtype": "1", "rtype": "1",
"ctype": "11", "ctype": "11",
"path": fmt.Sprintf("/%s", stream.GetName()), "path": fmt.Sprintf("/%s", stream.GetName()),
"size": fmt.Sprint(streamSize), "size": fmt.Sprint(stream.GetSize()),
"slice-md5": sliceMd5, "slice-md5": sliceMd5,
"content-md5": contentMd5, "content-md5": contentMd5,
"block_list": blockListStr, "block_list": blockListStr,
} }
// 尝试获取之前的进度 // 尝试获取之前的进度
precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, strconv.FormatInt(d.Uk, 10), contentMd5) precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
if !ok { if !ok {
_, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) { _, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) {
r.SetContext(ctx) r.SetContext(ctx)
r.SetFormData(params) r.SetFormData(params)
r.SetQueryParam("bdstoken", d.bdstoken)
}, &precreateResp) }, &precreateResp)
if err != nil { if err != nil {
return nil, err return nil, err
@ -344,7 +309,6 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
retry.Attempts(3), retry.Attempts(3),
retry.Delay(time.Second), retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay)) retry.DelayType(retry.BackOffDelay))
sem := semaphore.NewWeighted(3)
for i, partseq := range precreateResp.BlockList { for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(upCtx) { if utils.IsCanceled(upCtx) {
break break
@ -356,22 +320,17 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
} }
threadG.Go(func(ctx context.Context) error { threadG.Go(func(ctx context.Context) error {
if err = sem.Acquire(ctx, 1); err != nil {
return err
}
defer sem.Release(1)
uploadParams := map[string]string{ uploadParams := map[string]string{
"method": "upload", "method": "upload",
"path": params["path"], "path": params["path"],
"partseq": fmt.Sprint(partseq), "partseq": fmt.Sprint(partseq),
"uploadid": precreateResp.UploadID, "uploadid": precreateResp.UploadID,
"app_id": "16051585",
} }
_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) { _, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
r.SetContext(ctx) r.SetContext(ctx)
r.SetQueryParams(uploadParams) r.SetQueryParams(uploadParams)
r.SetFileReader("file", stream.GetName(), r.SetFileReader("file", stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize)))
}, nil) }, nil)
if err != nil { if err != nil {
return err return err
@ -384,7 +343,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
if err = threadG.Wait(); err != nil { if err = threadG.Wait(); err != nil {
if errors.Is(err, context.Canceled) { if errors.Is(err, context.Canceled) {
precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 }) precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
base.SaveUploadProgress(d, strconv.FormatInt(d.Uk, 10), contentMd5) base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
} }
return nil, err return nil, err
} }
@ -394,7 +353,6 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
_, err = d.Post(FILE_API_URL_V1+"/create", func(r *resty.Request) { _, err = d.Post(FILE_API_URL_V1+"/create", func(r *resty.Request) {
r.SetContext(ctx) r.SetContext(ctx)
r.SetFormData(params) r.SetFormData(params)
r.SetQueryParam("bdstoken", d.bdstoken)
}, &precreateResp) }, &precreateResp)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -6,14 +6,13 @@ import (
) )
type Addition struct { type Addition struct {
// RefreshToken string `json:"refresh_token" required:"true"` RefreshToken string `json:"refresh_token" required:"true"`
Cookie string `json:"cookie" required:"true"` ShowType string `json:"show_type" type:"select" options:"root,root_only_album,root_only_file" default:"root"`
ShowType string `json:"show_type" type:"select" options:"root,root_only_album,root_only_file" default:"root"` AlbumID string `json:"album_id"`
AlbumID string `json:"album_id"`
//AlbumPassword string `json:"album_password"` //AlbumPassword string `json:"album_password"`
DeleteOrigin bool `json:"delete_origin"` DeleteOrigin bool `json:"delete_origin"`
// ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"` ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
// ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"` ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"` UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
} }

View File

@ -72,7 +72,7 @@ func (c *File) Thumb() string {
} }
func (c *File) GetHash() utils.HashInfo { func (c *File) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.MD5, DecryptMd5(c.Md5)) return utils.NewHashInfo(utils.MD5, c.Md5)
} }
/*相册部分*/ /*相册部分*/

View File

@ -2,15 +2,13 @@ package baiduphoto
import ( import (
"context" "context"
"encoding/hex"
"fmt" "fmt"
"net/http" "net/http"
"strconv"
"strings"
"unicode"
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
) )
@ -25,8 +23,7 @@ const (
func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) { func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
req := client.R(). req := client.R().
// SetQueryParam("access_token", d.AccessToken) SetQueryParam("access_token", d.AccessToken)
SetHeader("Cookie", d.Cookie)
if callback != nil { if callback != nil {
callback(req) callback(req)
} }
@ -48,10 +45,10 @@ func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, c
return nil, fmt.Errorf("no shared albums found") return nil, fmt.Errorf("no shared albums found")
case 50100: case 50100:
return nil, fmt.Errorf("illegal title, only supports 50 characters") return nil, fmt.Errorf("illegal title, only supports 50 characters")
// case -6: case -6:
// if err = d.refreshToken(); err != nil { if err = d.refreshToken(); err != nil {
// return nil, err return nil, err
// } }
default: default:
return nil, fmt.Errorf("errno: %d, refer to https://photo.baidu.com/union/doc", erron) return nil, fmt.Errorf("errno: %d, refer to https://photo.baidu.com/union/doc", erron)
} }
@ -66,29 +63,29 @@ func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, c
// return res.Body(), nil // return res.Body(), nil
//} //}
// func (d *BaiduPhoto) refreshToken() error { func (d *BaiduPhoto) refreshToken() error {
// u := "https://openapi.baidu.com/oauth/2.0/token" u := "https://openapi.baidu.com/oauth/2.0/token"
// var resp base.TokenResp var resp base.TokenResp
// var e TokenErrResp var e TokenErrResp
// _, err := base.RestyClient.R().SetResult(&resp).SetError(&e).SetQueryParams(map[string]string{ _, err := base.RestyClient.R().SetResult(&resp).SetError(&e).SetQueryParams(map[string]string{
// "grant_type": "refresh_token", "grant_type": "refresh_token",
// "refresh_token": d.RefreshToken, "refresh_token": d.RefreshToken,
// "client_id": d.ClientID, "client_id": d.ClientID,
// "client_secret": d.ClientSecret, "client_secret": d.ClientSecret,
// }).Get(u) }).Get(u)
// if err != nil { if err != nil {
// return err return err
// } }
// if e.ErrorMsg != "" { if e.ErrorMsg != "" {
// return &e return &e
// } }
// if resp.RefreshToken == "" { if resp.RefreshToken == "" {
// return errs.EmptyToken return errs.EmptyToken
// } }
// d.AccessToken, d.RefreshToken = resp.AccessToken, resp.RefreshToken d.AccessToken, d.RefreshToken = resp.AccessToken, resp.RefreshToken
// op.MustSaveDriverStorage(d) op.MustSaveDriverStorage(d)
// return nil return nil
// } }
func (d *BaiduPhoto) Get(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) { func (d *BaiduPhoto) Get(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
return d.Request(base.RestyClient, furl, http.MethodGet, callback, resp) return d.Request(base.RestyClient, furl, http.MethodGet, callback, resp)
@ -362,6 +359,10 @@ func (d *BaiduPhoto) linkAlbum(ctx context.Context, file *AlbumFile, args model.
location := resp.Header().Get("Location") location := resp.Header().Get("Location")
if err != nil {
return nil, err
}
link := &model.Link{ link := &model.Link{
URL: location, URL: location,
Header: http.Header{ Header: http.Header{
@ -383,36 +384,36 @@ func (d *BaiduPhoto) linkFile(ctx context.Context, file *File, args model.LinkAr
headers["X-Forwarded-For"] = args.IP headers["X-Forwarded-For"] = args.IP
} }
var downloadUrl struct { // var downloadUrl struct {
Dlink string `json:"dlink"` // Dlink string `json:"dlink"`
} // }
_, err := d.Get(FILE_API_URL_V2+"/download", func(r *resty.Request) { // _, err := d.Get(FILE_API_URL_V1+"/download", func(r *resty.Request) {
r.SetContext(ctx)
r.SetHeaders(headers)
r.SetQueryParams(map[string]string{
"fsid": fmt.Sprint(file.Fsid),
})
}, &downloadUrl)
// resp, err := d.Request(base.NoRedirectClient, FILE_API_URL_V1+"/download", http.MethodHead, func(r *resty.Request) {
// r.SetContext(ctx) // r.SetContext(ctx)
// r.SetHeaders(headers) // r.SetHeaders(headers)
// r.SetQueryParams(map[string]string{ // r.SetQueryParams(map[string]string{
// "fsid": fmt.Sprint(file.Fsid), // "fsid": fmt.Sprint(file.Fsid),
// }) // })
// }, nil) // }, &downloadUrl)
resp, err := d.Request(base.NoRedirectClient, FILE_API_URL_V1+"/download", http.MethodHead, func(r *resty.Request) {
r.SetContext(ctx)
r.SetHeaders(headers)
r.SetQueryParams(map[string]string{
"fsid": fmt.Sprint(file.Fsid),
})
}, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// if resp.StatusCode() != 302 { if resp.StatusCode() != 302 {
// return nil, fmt.Errorf("not found 302 redirect") return nil, fmt.Errorf("not found 302 redirect")
// } }
// location := resp.Header().Get("Location") location := resp.Header().Get("Location")
link := &model.Link{ link := &model.Link{
URL: downloadUrl.Dlink, URL: location,
Header: http.Header{ Header: http.Header{
"User-Agent": []string{headers["User-Agent"]}, "User-Agent": []string{headers["User-Agent"]},
"Referer": []string{"https://photo.baidu.com/"}, "Referer": []string{"https://photo.baidu.com/"},
@ -475,55 +476,3 @@ func (d *BaiduPhoto) uInfo() (*UInfo, error) {
} }
return &info, nil return &info, nil
} }
func (d *BaiduPhoto) getBDStoken() (string, error) {
var info struct {
Result struct {
Bdstoken string `json:"bdstoken"`
Token string `json:"token"`
Uk int64 `json:"uk"`
} `json:"result"`
}
_, err := d.Get("https://pan.baidu.com/api/gettemplatevariable?fields=[%22bdstoken%22,%22token%22,%22uk%22]", nil, &info)
if err != nil {
return "", err
}
return info.Result.Bdstoken, nil
}
func DecryptMd5(encryptMd5 string) string {
if _, err := hex.DecodeString(encryptMd5); err == nil {
return encryptMd5
}
var out strings.Builder
out.Grow(len(encryptMd5))
for i, n := 0, int64(0); i < len(encryptMd5); i++ {
if i == 9 {
n = int64(unicode.ToLower(rune(encryptMd5[i])) - 'g')
} else {
n, _ = strconv.ParseInt(encryptMd5[i:i+1], 16, 64)
}
out.WriteString(strconv.FormatInt(n^int64(15&i), 16))
}
encryptMd5 = out.String()
return encryptMd5[8:16] + encryptMd5[:8] + encryptMd5[24:32] + encryptMd5[16:24]
}
func EncryptMd5(originalMd5 string) string {
reversed := originalMd5[8:16] + originalMd5[:8] + originalMd5[24:32] + originalMd5[16:24]
var out strings.Builder
out.Grow(len(reversed))
for i, n := 0, int64(0); i < len(reversed); i++ {
n, _ = strconv.ParseInt(reversed[i:i+1], 16, 64)
n ^= int64(15 & i)
if i == 9 {
out.WriteRune(rune(n) + 'g')
} else {
out.WriteString(strconv.FormatInt(n, 16))
}
}
return out.String()
}

View File

@ -6,7 +6,6 @@ import (
"time" "time"
"github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/net"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
) )
@ -27,7 +26,7 @@ func InitClient() {
NoRedirectClient.SetHeader("user-agent", UserAgent) NoRedirectClient.SetHeader("user-agent", UserAgent)
RestyClient = NewRestyClient() RestyClient = NewRestyClient()
HttpClient = net.NewHttpClient() HttpClient = NewHttpClient()
} }
func NewRestyClient() *resty.Client { func NewRestyClient() *resty.Client {
@ -39,3 +38,13 @@ func NewRestyClient() *resty.Client {
SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify}) SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
return client return client
} }
func NewHttpClient() *http.Client {
return &http.Client{
Timeout: time.Hour * 48,
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
},
}
}

View File

@ -1,767 +0,0 @@
package bitqiu
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http/cookiejar"
"path"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
streamPkg "github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
)
const (
baseURL = "https://pan.bitqiu.com"
loginURL = baseURL + "/loginServer/login"
userInfoURL = baseURL + "/user/getInfo"
listURL = baseURL + "/apiToken/cfi/fs/resources/pages"
uploadInitializeURL = baseURL + "/apiToken/cfi/fs/upload/v2/initialize"
uploadCompleteURL = baseURL + "/apiToken/cfi/fs/upload/v2/complete"
downloadURL = baseURL + "/download/getUrl"
createDirURL = baseURL + "/resource/create"
moveResourceURL = baseURL + "/resource/remove"
renameResourceURL = baseURL + "/resource/rename"
copyResourceURL = baseURL + "/apiToken/cfi/fs/async/copy"
copyManagerURL = baseURL + "/apiToken/cfi/fs/async/manager"
deleteResourceURL = baseURL + "/resource/delete"
successCode = "10200"
uploadSuccessCode = "30010"
copySubmittedCode = "10300"
orgChannel = "default|default|default"
)
const (
copyPollInterval = time.Second
copyPollMaxAttempts = 60
chunkSize = int64(1 << 20)
)
const defaultUserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36"
type BitQiu struct {
model.Storage
Addition
client *resty.Client
userID string
}
func (d *BitQiu) Config() driver.Config {
return config
}
func (d *BitQiu) GetAddition() driver.Additional {
return &d.Addition
}
func (d *BitQiu) Init(ctx context.Context) error {
if d.Addition.UserPlatform == "" {
d.Addition.UserPlatform = uuid.NewString()
op.MustSaveDriverStorage(d)
}
if d.client == nil {
jar, err := cookiejar.New(nil)
if err != nil {
return err
}
d.client = base.NewRestyClient()
d.client.SetBaseURL(baseURL)
d.client.SetCookieJar(jar)
}
d.client.SetHeader("user-agent", d.userAgent())
return d.login(ctx)
}
func (d *BitQiu) Drop(ctx context.Context) error {
d.client = nil
d.userID = ""
return nil
}
func (d *BitQiu) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if d.userID == "" {
if err := d.login(ctx); err != nil {
return nil, err
}
}
parentID := d.resolveParentID(dir)
dirPath := ""
if dir != nil {
dirPath = dir.GetPath()
}
pageSize := d.pageSize()
orderType := d.orderType()
desc := d.orderDesc()
var results []model.Obj
page := 1
for {
form := map[string]string{
"parentId": parentID,
"limit": strconv.Itoa(pageSize),
"orderType": orderType,
"desc": desc,
"model": "1",
"userId": d.userID,
"currentPage": strconv.Itoa(page),
"page": strconv.Itoa(page),
"org_channel": orgChannel,
}
var resp Response[ResourcePage]
if err := d.postForm(ctx, listURL, form, &resp); err != nil {
return nil, err
}
if resp.Code != successCode {
if resp.Code == "10401" || resp.Code == "10404" {
if err := d.login(ctx); err != nil {
return nil, err
}
continue
}
return nil, fmt.Errorf("list failed: %s", resp.Message)
}
objs, err := utils.SliceConvert(resp.Data.Data, func(item Resource) (model.Obj, error) {
return item.toObject(parentID, dirPath)
})
if err != nil {
return nil, err
}
results = append(results, objs...)
if !resp.Data.HasNext || len(resp.Data.Data) == 0 {
break
}
page++
}
return results, nil
}
func (d *BitQiu) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if file.IsDir() {
return nil, errs.NotFile
}
if d.userID == "" {
if err := d.login(ctx); err != nil {
return nil, err
}
}
form := map[string]string{
"fileIds": file.GetID(),
"org_channel": orgChannel,
}
for attempt := 0; attempt < 2; attempt++ {
var resp Response[DownloadData]
if err := d.postForm(ctx, downloadURL, form, &resp); err != nil {
return nil, err
}
switch resp.Code {
case successCode:
if resp.Data.URL == "" {
return nil, fmt.Errorf("empty download url returned")
}
return &model.Link{URL: resp.Data.URL}, nil
case "10401", "10404":
if err := d.login(ctx); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("get link failed: %s", resp.Message)
}
}
return nil, fmt.Errorf("get link failed: retry limit reached")
}
func (d *BitQiu) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if d.userID == "" {
if err := d.login(ctx); err != nil {
return nil, err
}
}
parentID := d.resolveParentID(parentDir)
parentPath := ""
if parentDir != nil {
parentPath = parentDir.GetPath()
}
form := map[string]string{
"parentId": parentID,
"name": dirName,
"org_channel": orgChannel,
}
for attempt := 0; attempt < 2; attempt++ {
var resp Response[CreateDirData]
if err := d.postForm(ctx, createDirURL, form, &resp); err != nil {
return nil, err
}
switch resp.Code {
case successCode:
newParentID := parentID
if resp.Data.ParentID != "" {
newParentID = resp.Data.ParentID
}
name := resp.Data.Name
if name == "" {
name = dirName
}
resource := Resource{
ResourceID: resp.Data.DirID,
ResourceType: 1,
Name: name,
ParentID: newParentID,
}
obj, err := resource.toObject(newParentID, parentPath)
if err != nil {
return nil, err
}
if o, ok := obj.(*Object); ok {
o.ParentID = newParentID
}
return obj, nil
case "10401", "10404":
if err := d.login(ctx); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("create folder failed: %s", resp.Message)
}
}
return nil, fmt.Errorf("create folder failed: retry limit reached")
}
func (d *BitQiu) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if d.userID == "" {
if err := d.login(ctx); err != nil {
return nil, err
}
}
targetParentID := d.resolveParentID(dstDir)
form := map[string]string{
"dirIds": "",
"fileIds": "",
"parentId": targetParentID,
"org_channel": orgChannel,
}
if srcObj.IsDir() {
form["dirIds"] = srcObj.GetID()
} else {
form["fileIds"] = srcObj.GetID()
}
for attempt := 0; attempt < 2; attempt++ {
var resp Response[any]
if err := d.postForm(ctx, moveResourceURL, form, &resp); err != nil {
return nil, err
}
switch resp.Code {
case successCode:
dstPath := ""
if dstDir != nil {
dstPath = dstDir.GetPath()
}
if setter, ok := srcObj.(model.SetPath); ok {
setter.SetPath(path.Join(dstPath, srcObj.GetName()))
}
if o, ok := srcObj.(*Object); ok {
o.ParentID = targetParentID
}
return srcObj, nil
case "10401", "10404":
if err := d.login(ctx); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("move failed: %s", resp.Message)
}
}
return nil, fmt.Errorf("move failed: retry limit reached")
}
func (d *BitQiu) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
if d.userID == "" {
if err := d.login(ctx); err != nil {
return nil, err
}
}
form := map[string]string{
"resourceId": srcObj.GetID(),
"name": newName,
"type": "0",
"org_channel": orgChannel,
}
if srcObj.IsDir() {
form["type"] = "1"
}
for attempt := 0; attempt < 2; attempt++ {
var resp Response[any]
if err := d.postForm(ctx, renameResourceURL, form, &resp); err != nil {
return nil, err
}
switch resp.Code {
case successCode:
return updateObjectName(srcObj, newName), nil
case "10401", "10404":
if err := d.login(ctx); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("rename failed: %s", resp.Message)
}
}
return nil, fmt.Errorf("rename failed: retry limit reached")
}
func (d *BitQiu) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if d.userID == "" {
if err := d.login(ctx); err != nil {
return nil, err
}
}
targetParentID := d.resolveParentID(dstDir)
form := map[string]string{
"dirIds": "",
"fileIds": "",
"parentId": targetParentID,
"org_channel": orgChannel,
}
if srcObj.IsDir() {
form["dirIds"] = srcObj.GetID()
} else {
form["fileIds"] = srcObj.GetID()
}
for attempt := 0; attempt < 2; attempt++ {
var resp Response[any]
if err := d.postForm(ctx, copyResourceURL, form, &resp); err != nil {
return nil, err
}
switch resp.Code {
case successCode, copySubmittedCode:
return d.waitForCopiedObject(ctx, srcObj, dstDir)
case "10401", "10404":
if err := d.login(ctx); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("copy failed: %s", resp.Message)
}
}
return nil, fmt.Errorf("copy failed: retry limit reached")
}
func (d *BitQiu) Remove(ctx context.Context, obj model.Obj) error {
if d.userID == "" {
if err := d.login(ctx); err != nil {
return err
}
}
form := map[string]string{
"dirIds": "",
"fileIds": "",
"org_channel": orgChannel,
}
if obj.IsDir() {
form["dirIds"] = obj.GetID()
} else {
form["fileIds"] = obj.GetID()
}
for attempt := 0; attempt < 2; attempt++ {
var resp Response[any]
if err := d.postForm(ctx, deleteResourceURL, form, &resp); err != nil {
return err
}
switch resp.Code {
case successCode:
return nil
case "10401", "10404":
if err := d.login(ctx); err != nil {
return err
}
default:
return fmt.Errorf("remove failed: %s", resp.Message)
}
}
return fmt.Errorf("remove failed: retry limit reached")
}
func (d *BitQiu) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
if d.userID == "" {
if err := d.login(ctx); err != nil {
return nil, err
}
}
up(0)
tmpFile, md5sum, err := streamPkg.CacheFullInTempFileAndHash(file, utils.MD5)
if err != nil {
return nil, err
}
defer tmpFile.Close()
parentID := d.resolveParentID(dstDir)
parentPath := ""
if dstDir != nil {
parentPath = dstDir.GetPath()
}
form := map[string]string{
"parentId": parentID,
"name": file.GetName(),
"size": strconv.FormatInt(file.GetSize(), 10),
"hash": md5sum,
"sampleMd5": md5sum,
"org_channel": orgChannel,
}
var resp Response[json.RawMessage]
if err = d.postForm(ctx, uploadInitializeURL, form, &resp); err != nil {
return nil, err
}
if resp.Code != uploadSuccessCode {
switch resp.Code {
case successCode:
var initData UploadInitData
if err := json.Unmarshal(resp.Data, &initData); err != nil {
return nil, fmt.Errorf("parse upload init response failed: %w", err)
}
serverCode, err := d.uploadFileInChunks(ctx, tmpFile, file.GetSize(), md5sum, initData, up)
if err != nil {
return nil, err
}
obj, err := d.completeChunkUpload(ctx, initData, parentID, parentPath, file.GetName(), file.GetSize(), md5sum, serverCode)
if err != nil {
return nil, err
}
up(100)
return obj, nil
default:
return nil, fmt.Errorf("upload failed: %s", resp.Message)
}
}
var resource Resource
if err := json.Unmarshal(resp.Data, &resource); err != nil {
return nil, fmt.Errorf("parse upload response failed: %w", err)
}
obj, err := resource.toObject(parentID, parentPath)
if err != nil {
return nil, err
}
up(100)
return obj, nil
}
func (d *BitQiu) uploadFileInChunks(ctx context.Context, tmpFile model.File, size int64, md5sum string, initData UploadInitData, up driver.UpdateProgress) (string, error) {
if d.client == nil {
return "", fmt.Errorf("client not initialized")
}
if size <= 0 {
return "", fmt.Errorf("invalid file size")
}
buf := make([]byte, chunkSize)
offset := int64(0)
var finishedFlag string
for offset < size {
chunkLen := chunkSize
remaining := size - offset
if remaining < chunkLen {
chunkLen = remaining
}
reader := io.NewSectionReader(tmpFile, offset, chunkLen)
chunkBuf := buf[:chunkLen]
if _, err := io.ReadFull(reader, chunkBuf); err != nil {
return "", fmt.Errorf("read chunk failed: %w", err)
}
headers := map[string]string{
"accept": "*/*",
"content-type": "application/octet-stream",
"appid": initData.AppID,
"token": initData.Token,
"userid": strconv.FormatInt(initData.UserID, 10),
"serialnumber": initData.SerialNumber,
"hash": md5sum,
"len": strconv.FormatInt(chunkLen, 10),
"offset": strconv.FormatInt(offset, 10),
"user-agent": d.userAgent(),
}
var chunkResp ChunkUploadResponse
req := d.client.R().
SetContext(ctx).
SetHeaders(headers).
SetBody(chunkBuf).
SetResult(&chunkResp)
if _, err := req.Post(initData.UploadURL); err != nil {
return "", err
}
if chunkResp.ErrCode != 0 {
return "", fmt.Errorf("chunk upload failed with code %d", chunkResp.ErrCode)
}
finishedFlag = chunkResp.FinishedFlag
offset += chunkLen
up(float64(offset) * 100 / float64(size))
}
if finishedFlag == "" {
return "", fmt.Errorf("upload finished without server code")
}
return finishedFlag, nil
}
func (d *BitQiu) completeChunkUpload(ctx context.Context, initData UploadInitData, parentID, parentPath, name string, size int64, md5sum, serverCode string) (model.Obj, error) {
form := map[string]string{
"currentPage": "1",
"limit": "1",
"userId": strconv.FormatInt(initData.UserID, 10),
"status": "0",
"parentId": parentID,
"name": name,
"fileUid": initData.FileUID,
"fileSid": initData.FileSID,
"size": strconv.FormatInt(size, 10),
"serverCode": serverCode,
"snapTime": "",
"hash": md5sum,
"sampleMd5": md5sum,
"org_channel": orgChannel,
}
var resp Response[Resource]
if err := d.postForm(ctx, uploadCompleteURL, form, &resp); err != nil {
return nil, err
}
if resp.Code != successCode {
return nil, fmt.Errorf("complete upload failed: %s", resp.Message)
}
return resp.Data.toObject(parentID, parentPath)
}
func (d *BitQiu) login(ctx context.Context) error {
if d.client == nil {
return fmt.Errorf("client not initialized")
}
form := map[string]string{
"passport": d.Username,
"password": utils.GetMD5EncodeStr(d.Password),
"remember": "0",
"captcha": "",
"org_channel": orgChannel,
}
var resp Response[LoginData]
if err := d.postForm(ctx, loginURL, form, &resp); err != nil {
return err
}
if resp.Code != successCode {
return fmt.Errorf("login failed: %s", resp.Message)
}
d.userID = strconv.FormatInt(resp.Data.UserID, 10)
return d.ensureRootFolderID(ctx)
}
func (d *BitQiu) ensureRootFolderID(ctx context.Context) error {
rootID := d.Addition.GetRootId()
if rootID != "" && rootID != "0" {
return nil
}
form := map[string]string{
"org_channel": orgChannel,
}
var resp Response[UserInfoData]
if err := d.postForm(ctx, userInfoURL, form, &resp); err != nil {
return err
}
if resp.Code != successCode {
return fmt.Errorf("get user info failed: %s", resp.Message)
}
if resp.Data.RootDirID == "" {
return fmt.Errorf("get user info failed: empty root dir id")
}
if d.Addition.RootFolderID != resp.Data.RootDirID {
d.Addition.RootFolderID = resp.Data.RootDirID
op.MustSaveDriverStorage(d)
}
return nil
}
func (d *BitQiu) postForm(ctx context.Context, url string, form map[string]string, result interface{}) error {
if d.client == nil {
return fmt.Errorf("client not initialized")
}
req := d.client.R().
SetContext(ctx).
SetHeaders(d.commonHeaders()).
SetFormData(form)
if result != nil {
req = req.SetResult(result)
}
_, err := req.Post(url)
return err
}
func (d *BitQiu) waitForCopiedObject(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
expectedName := srcObj.GetName()
expectedIsDir := srcObj.IsDir()
var lastListErr error
for attempt := 0; attempt < copyPollMaxAttempts; attempt++ {
if attempt > 0 {
if err := waitWithContext(ctx, copyPollInterval); err != nil {
return nil, err
}
}
if err := d.checkCopyFailure(ctx); err != nil {
return nil, err
}
obj, err := d.findObjectInDir(ctx, dstDir, expectedName, expectedIsDir)
if err != nil {
lastListErr = err
continue
}
if obj != nil {
return obj, nil
}
}
if lastListErr != nil {
return nil, lastListErr
}
return nil, fmt.Errorf("copy task timed out waiting for completion")
}
func (d *BitQiu) checkCopyFailure(ctx context.Context) error {
form := map[string]string{
"org_channel": orgChannel,
}
for attempt := 0; attempt < 2; attempt++ {
var resp Response[AsyncManagerData]
if err := d.postForm(ctx, copyManagerURL, form, &resp); err != nil {
return err
}
switch resp.Code {
case successCode:
if len(resp.Data.FailTasks) > 0 {
return fmt.Errorf("copy failed: %s", resp.Data.FailTasks[0].ErrorMessage())
}
return nil
case "10401", "10404":
if err := d.login(ctx); err != nil {
return err
}
default:
return fmt.Errorf("query copy status failed: %s", resp.Message)
}
}
return fmt.Errorf("query copy status failed: retry limit reached")
}
func (d *BitQiu) findObjectInDir(ctx context.Context, dir model.Obj, name string, isDir bool) (model.Obj, error) {
objs, err := d.List(ctx, dir, model.ListArgs{})
if err != nil {
return nil, err
}
for _, obj := range objs {
if obj.GetName() == name && obj.IsDir() == isDir {
return obj, nil
}
}
return nil, nil
}
func waitWithContext(ctx context.Context, d time.Duration) error {
timer := time.NewTimer(d)
defer timer.Stop()
select {
case <-ctx.Done():
return ctx.Err()
case <-timer.C:
return nil
}
}
func (d *BitQiu) commonHeaders() map[string]string {
headers := map[string]string{
"accept": "application/json, text/plain, */*",
"accept-language": "en-US,en;q=0.9",
"cache-control": "no-cache",
"pragma": "no-cache",
"user-platform": d.Addition.UserPlatform,
"x-kl-saas-ajax-request": "Ajax_Request",
"x-requested-with": "XMLHttpRequest",
"referer": baseURL + "/",
"origin": baseURL,
"user-agent": d.userAgent(),
}
return headers
}
func (d *BitQiu) userAgent() string {
if ua := strings.TrimSpace(d.Addition.UserAgent); ua != "" {
return ua
}
return defaultUserAgent
}
func (d *BitQiu) resolveParentID(dir model.Obj) string {
if dir != nil && dir.GetID() != "" {
return dir.GetID()
}
if root := d.Addition.GetRootId(); root != "" {
return root
}
return config.DefaultRoot
}
func (d *BitQiu) pageSize() int {
if size, err := strconv.Atoi(d.Addition.PageSize); err == nil && size > 0 {
return size
}
return 24
}
func (d *BitQiu) orderType() string {
if d.Addition.OrderType != "" {
return d.Addition.OrderType
}
return "updateTime"
}
func (d *BitQiu) orderDesc() string {
if d.Addition.OrderDesc {
return "1"
}
return "0"
}
var _ driver.Driver = (*BitQiu)(nil)
var _ driver.PutResult = (*BitQiu)(nil)

View File

@ -1,28 +0,0 @@
package bitqiu
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
driver.RootID
Username string `json:"username" required:"true"`
Password string `json:"password" required:"true"`
UserPlatform string `json:"user_platform" help:"Optional device identifier; auto-generated if empty."`
OrderType string `json:"order_type" type:"select" options:"updateTime,createTime,name,size" default:"updateTime"`
OrderDesc bool `json:"order_desc"`
PageSize string `json:"page_size" default:"24" help:"Number of entries to request per page."`
UserAgent string `json:"user_agent" default:"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36"`
}
var config = driver.Config{
Name: "BitQiu",
DefaultRoot: "0",
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &BitQiu{}
})
}

View File

@ -1,107 +0,0 @@
package bitqiu
import "encoding/json"
type Response[T any] struct {
Code string `json:"code"`
Message string `json:"message"`
Data T `json:"data"`
}
type LoginData struct {
UserID int64 `json:"userId"`
}
type ResourcePage struct {
CurrentPage int `json:"currentPage"`
PageSize int `json:"pageSize"`
TotalCount int `json:"totalCount"`
TotalPageCount int `json:"totalPageCount"`
Data []Resource `json:"data"`
HasNext bool `json:"hasNext"`
}
type Resource struct {
ResourceID string `json:"resourceId"`
ResourceUID string `json:"resourceUid"`
ResourceType int `json:"resourceType"`
ParentID string `json:"parentId"`
Name string `json:"name"`
ExtName string `json:"extName"`
Size *json.Number `json:"size"`
CreateTime *string `json:"createTime"`
UpdateTime *string `json:"updateTime"`
FileMD5 string `json:"fileMd5"`
}
type DownloadData struct {
URL string `json:"url"`
MD5 string `json:"md5"`
Size int64 `json:"size"`
}
type UserInfoData struct {
RootDirID string `json:"rootDirId"`
}
type CreateDirData struct {
DirID string `json:"dirId"`
Name string `json:"name"`
ParentID string `json:"parentId"`
}
type AsyncManagerData struct {
WaitTasks []AsyncTask `json:"waitTaskList"`
RunningTasks []AsyncTask `json:"runningTaskList"`
SuccessTasks []AsyncTask `json:"successTaskList"`
FailTasks []AsyncTask `json:"failTaskList"`
TaskList []AsyncTask `json:"taskList"`
}
type AsyncTask struct {
TaskID string `json:"taskId"`
Status int `json:"status"`
ErrorMsg string `json:"errorMsg"`
Message string `json:"message"`
Result *AsyncTaskInfo `json:"result"`
TargetName string `json:"targetName"`
TargetDirID string `json:"parentId"`
}
type AsyncTaskInfo struct {
Resource Resource `json:"resource"`
DirID string `json:"dirId"`
FileID string `json:"fileId"`
Name string `json:"name"`
ParentID string `json:"parentId"`
}
func (t AsyncTask) ErrorMessage() string {
if t.ErrorMsg != "" {
return t.ErrorMsg
}
if t.Message != "" {
return t.Message
}
return "unknown error"
}
type UploadInitData struct {
Name string `json:"name"`
Size int64 `json:"size"`
Token string `json:"token"`
FileUID string `json:"fileUid"`
FileSID string `json:"fileSid"`
ParentID string `json:"parentId"`
UserID int64 `json:"userId"`
SerialNumber string `json:"serialNumber"`
UploadURL string `json:"uploadUrl"`
AppID string `json:"appId"`
}
type ChunkUploadResponse struct {
ErrCode int `json:"errCode"`
Offset int64 `json:"offset"`
Finished int `json:"finished"`
FinishedFlag string `json:"finishedFlag"`
}

View File

@ -1,102 +0,0 @@
package bitqiu
import (
"path"
"strings"
"time"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
)
type Object struct {
model.Object
ParentID string
}
func (r Resource) toObject(parentID, parentPath string) (model.Obj, error) {
id := r.ResourceID
if id == "" {
id = r.ResourceUID
}
obj := &Object{
Object: model.Object{
ID: id,
Name: r.Name,
IsFolder: r.ResourceType == 1,
},
ParentID: parentID,
}
if r.Size != nil {
if size, err := (*r.Size).Int64(); err == nil {
obj.Size = size
}
}
if ct := parseBitQiuTime(r.CreateTime); !ct.IsZero() {
obj.Ctime = ct
}
if mt := parseBitQiuTime(r.UpdateTime); !mt.IsZero() {
obj.Modified = mt
}
if r.FileMD5 != "" {
obj.HashInfo = utils.NewHashInfo(utils.MD5, strings.ToLower(r.FileMD5))
}
obj.SetPath(path.Join(parentPath, obj.Name))
return obj, nil
}
func parseBitQiuTime(value *string) time.Time {
if value == nil {
return time.Time{}
}
trimmed := strings.TrimSpace(*value)
if trimmed == "" {
return time.Time{}
}
if ts, err := time.ParseInLocation("2006-01-02 15:04:05", trimmed, time.Local); err == nil {
return ts
}
return time.Time{}
}
func updateObjectName(obj model.Obj, newName string) model.Obj {
newPath := path.Join(parentPathOf(obj.GetPath()), newName)
switch o := obj.(type) {
case *Object:
o.Name = newName
o.Object.Name = newName
o.SetPath(newPath)
return o
case *model.Object:
o.Name = newName
o.SetPath(newPath)
return o
}
if setter, ok := obj.(model.SetPath); ok {
setter.SetPath(newPath)
}
return &model.Object{
ID: obj.GetID(),
Path: newPath,
Name: newName,
Size: obj.GetSize(),
Modified: obj.ModTime(),
Ctime: obj.CreateTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}
}
func parentPathOf(p string) string {
if p == "" {
return ""
}
dir := path.Dir(p)
if dir == "." {
return ""
}
return dir
}

View File

@ -67,9 +67,7 @@ func (d *ChaoXing) Init(ctx context.Context) error {
} }
func (d *ChaoXing) Drop(ctx context.Context) error { func (d *ChaoXing) Drop(ctx context.Context) error {
if d.cron != nil { d.cron.Stop()
d.cron.Stop()
}
return nil return nil
} }
@ -215,7 +213,7 @@ func (d *ChaoXing) Remove(ctx context.Context, obj model.Obj) error {
return nil return nil
} }
func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
var resp UploadDataRsp var resp UploadDataRsp
_, err := d.request("https://noteyd.chaoxing.com/pc/files/getUploadConfig", http.MethodGet, func(req *resty.Request) { _, err := d.request("https://noteyd.chaoxing.com/pc/files/getUploadConfig", http.MethodGet, func(req *resty.Request) {
}, &resp) }, &resp)
@ -227,11 +225,11 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
} }
body := &bytes.Buffer{} body := &bytes.Buffer{}
writer := multipart.NewWriter(body) writer := multipart.NewWriter(body)
filePart, err := writer.CreateFormFile("file", file.GetName()) filePart, err := writer.CreateFormFile("file", stream.GetName())
if err != nil { if err != nil {
return err return err
} }
_, err = utils.CopyWithBuffer(filePart, file) _, err = utils.CopyWithBuffer(filePart, stream)
if err != nil { if err != nil {
return err return err
} }
@ -248,14 +246,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
if err != nil { if err != nil {
return err return err
} }
r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{ req, err := http.NewRequest("POST", "https://pan-yz.chaoxing.com/upload", body)
Reader: &driver.SimpleReaderWithSize{
Reader: body,
Size: int64(body.Len()),
},
UpdateProgress: up,
})
req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r)
if err != nil { if err != nil {
return err return err
} }

View File

@ -4,12 +4,11 @@ import (
"context" "context"
"io" "io"
"net/http" "net/http"
"path" "strconv"
"strings" "strings"
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver" "github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils" "github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
@ -18,7 +17,6 @@ import (
type Cloudreve struct { type Cloudreve struct {
model.Storage model.Storage
Addition Addition
ref *Cloudreve
} }
func (d *Cloudreve) Config() driver.Config { func (d *Cloudreve) Config() driver.Config {
@ -38,18 +36,8 @@ func (d *Cloudreve) Init(ctx context.Context) error {
return d.login() return d.login()
} }
func (d *Cloudreve) InitReference(storage driver.Driver) error {
refStorage, ok := storage.(*Cloudreve)
if ok {
d.ref = refStorage
return nil
}
return errs.NotSupport
}
func (d *Cloudreve) Drop(ctx context.Context) error { func (d *Cloudreve) Drop(ctx context.Context) error {
d.Cookie = "" d.Cookie = ""
d.ref = nil
return nil return nil
} }
@ -102,7 +90,7 @@ func (d *Cloudreve) MakeDir(ctx context.Context, parentDir model.Obj, dirName st
func (d *Cloudreve) Move(ctx context.Context, srcObj, dstDir model.Obj) error { func (d *Cloudreve) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
body := base.Json{ body := base.Json{
"action": "move", "action": "move",
"src_dir": path.Dir(srcObj.GetPath()), "src_dir": srcObj.GetPath(),
"dst": dstDir.GetPath(), "dst": dstDir.GetPath(),
"src": convertSrc(srcObj), "src": convertSrc(srcObj),
} }
@ -124,7 +112,7 @@ func (d *Cloudreve) Rename(ctx context.Context, srcObj model.Obj, newName string
func (d *Cloudreve) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { func (d *Cloudreve) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
body := base.Json{ body := base.Json{
"src_dir": path.Dir(srcObj.GetPath()), "src_dir": srcObj.GetPath(),
"dst": dstDir.GetPath(), "dst": dstDir.GetPath(),
"src": convertSrc(srcObj), "src": convertSrc(srcObj),
} }
@ -145,8 +133,6 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
if io.ReadCloser(stream) == http.NoBody { if io.ReadCloser(stream) == http.NoBody {
return d.create(ctx, dstDir, stream) return d.create(ctx, dstDir, stream)
} }
// 获取存储策略
var r DirectoryResp var r DirectoryResp
err := d.request(http.MethodGet, "/directory"+dstDir.GetPath(), nil, &r) err := d.request(http.MethodGet, "/directory"+dstDir.GetPath(), nil, &r)
if err != nil { if err != nil {
@ -157,10 +143,8 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
"size": stream.GetSize(), "size": stream.GetSize(),
"name": stream.GetName(), "name": stream.GetName(),
"policy_id": r.Policy.Id, "policy_id": r.Policy.Id,
"last_modified": stream.ModTime().UnixMilli(), "last_modified": stream.ModTime().Unix(),
} }
// 获取上传会话信息
var u UploadInfo var u UploadInfo
err = d.request(http.MethodPut, "/file/upload", func(req *resty.Request) { err = d.request(http.MethodPut, "/file/upload", func(req *resty.Request) {
req.SetBody(uploadBody) req.SetBody(uploadBody)
@ -168,26 +152,36 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
if err != nil { if err != nil {
return err return err
} }
var chunkSize = u.ChunkSize
var buf []byte
var chunk int
for {
var n int
buf = make([]byte, chunkSize)
n, err = io.ReadAtLeast(stream, buf, chunkSize)
if err != nil && err != io.ErrUnexpectedEOF {
if err == io.EOF {
return nil
}
return err
}
if n == 0 {
break
}
buf = buf[:n]
err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
req.SetHeader("Content-Type", "application/octet-stream")
req.SetHeader("Content-Length", strconv.Itoa(n))
req.SetBody(buf)
}, nil)
if err != nil {
break
}
chunk++
// 根据存储方式选择分片上传的方法
switch r.Policy.Type {
case "onedrive":
err = d.upOneDrive(ctx, stream, u, up)
case "s3":
err = d.upS3(ctx, stream, u, up)
case "remote": // 从机存储
err = d.upRemote(ctx, stream, u, up)
case "local": // 本机存储
err = d.upLocal(ctx, stream, u, up)
default:
err = errs.NotImplement
} }
if err != nil { return err
// 删除失败的会话
_ = d.request(http.MethodDelete, "/file/upload/"+u.SessionID, nil, nil)
return err
}
return nil
} }
func (d *Cloudreve) create(ctx context.Context, dir model.Obj, file model.Obj) error { func (d *Cloudreve) create(ctx context.Context, dir model.Obj, file model.Obj) error {

View File

@ -21,12 +21,9 @@ type Policy struct {
} }
type UploadInfo struct { type UploadInfo struct {
SessionID string `json:"sessionID"` SessionID string `json:"sessionID"`
ChunkSize int `json:"chunkSize"` ChunkSize int `json:"chunkSize"`
Expires int `json:"expires"` Expires int `json:"expires"`
UploadURLs []string `json:"uploadURLs"`
Credential string `json:"credential,omitempty"` // local
CompleteURL string `json:"completeURL,omitempty"` // s3
} }
type DirectoryResp struct { type DirectoryResp struct {

View File

@ -1,26 +1,18 @@
package cloudreve package cloudreve
import ( import (
"bytes"
"context"
"encoding/base64" "encoding/base64"
"encoding/json"
"errors" "errors"
"fmt"
"io"
"net/http" "net/http"
"strconv"
"strings" "strings"
"time"
"github.com/alist-org/alist/v3/drivers/base" "github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf" "github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model" "github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/setting" "github.com/alist-org/alist/v3/internal/setting"
"github.com/alist-org/alist/v3/pkg/cookie" "github.com/alist-org/alist/v3/pkg/cookie"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2" "github.com/go-resty/resty/v2"
json "github.com/json-iterator/go"
jsoniter "github.com/json-iterator/go" jsoniter "github.com/json-iterator/go"
) )
@ -28,23 +20,17 @@ import (
const loginPath = "/user/session" const loginPath = "/user/session"
func (d *Cloudreve) getUA() string {
if d.CustomUA != "" {
return d.CustomUA
}
return base.UserAgent
}
func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error { func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
if d.ref != nil {
return d.ref.request(method, path, callback, out)
}
u := d.Address + "/api/v3" + path u := d.Address + "/api/v3" + path
ua := d.CustomUA
if ua == "" {
ua = base.UserAgent
}
req := base.RestyClient.R() req := base.RestyClient.R()
req.SetHeaders(map[string]string{ req.SetHeaders(map[string]string{
"Cookie": "cloudreve-session=" + d.Cookie, "Cookie": "cloudreve-session=" + d.Cookie,
"Accept": "application/json, text/plain, */*", "Accept": "application/json, text/plain, */*",
"User-Agent": d.getUA(), "User-Agent": ua,
}) })
var r Resp var r Resp
@ -83,11 +69,11 @@ func (d *Cloudreve) request(method string, path string, callback base.ReqCallbac
} }
if out != nil && r.Data != nil { if out != nil && r.Data != nil {
var marshal []byte var marshal []byte
marshal, err = jsoniter.Marshal(r.Data) marshal, err = json.Marshal(r.Data)
if err != nil { if err != nil {
return err return err
} }
err = jsoniter.Unmarshal(marshal, out) err = json.Unmarshal(marshal, out)
if err != nil { if err != nil {
return err return err
} }
@ -107,7 +93,7 @@ func (d *Cloudreve) login() error {
if err == nil { if err == nil {
break break
} }
if err.Error() != "CAPTCHA not match." { if err != nil && err.Error() != "CAPTCHA not match." {
break break
} }
} }
@ -168,11 +154,15 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
if !d.Addition.EnableThumbAndFolderSize { if !d.Addition.EnableThumbAndFolderSize {
return model.Thumbnail{}, nil return model.Thumbnail{}, nil
} }
ua := d.CustomUA
if ua == "" {
ua = base.UserAgent
}
req := base.NoRedirectClient.R() req := base.NoRedirectClient.R()
req.SetHeaders(map[string]string{ req.SetHeaders(map[string]string{
"Cookie": "cloudreve-session=" + d.Cookie, "Cookie": "cloudreve-session=" + d.Cookie,
"Accept": "image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8", "Accept": "image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
"User-Agent": d.getUA(), "User-Agent": ua,
}) })
resp, err := req.Execute(http.MethodGet, d.Address+"/api/v3/file/thumb/"+file.Id) resp, err := req.Execute(http.MethodGet, d.Address+"/api/v3/file/thumb/"+file.Id)
if err != nil { if err != nil {
@ -182,281 +172,3 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
Thumbnail: resp.Header().Get("Location"), Thumbnail: resp.Header().Get("Location"),
}, nil }, nil
} }
func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
req.SetHeader("Content-Type", "application/octet-stream")
req.SetContentLength(true)
req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
req.SetHeader("User-Agent", d.getUA())
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
req.AddRetryCondition(func(r *resty.Response, err error) bool {
if err != nil {
return true
}
if r.IsError() {
return true
}
var retryResp Resp
jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp)
if jErr != nil {
return true
}
if retryResp.Code != 0 {
return true
}
return false
})
}, nil)
if err != nil {
return err
}
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
}
return nil
}
func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
uploadUrl := u.UploadURLs[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA())
err = func() error {
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return errors.New(res.Status)
}
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
var up Resp
err = json.Unmarshal(body, &up)
if err != nil {
return err
}
if up.Code != 0 {
return errors.New(up.Msg)
}
return nil
}()
if err == nil {
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
} else {
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
time.Sleep(backoff)
}
}
return nil
}
func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
uploadUrl := u.UploadURLs[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
req.Header.Set("User-Agent", d.getUA())
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
data, _ := io.ReadAll(res.Body)
res.Body.Close()
return errors.New(string(data))
default:
res.Body.Close()
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
}
}
// 上传成功发送回调请求
return d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
req.SetBody("{}")
}, nil)
}
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", u.UploadURLs[chunk],
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
etag := res.Header.Get("ETag")
res.Body.Close()
switch {
case res.StatusCode != 200:
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-S3] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case etag == "":
return errors.New("faild to get ETag from header")
default:
retryCount = 0
etags = append(etags, etag)
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
}
}
// s3LikeFinishUpload
// https://github.com/cloudreve/frontend/blob/b485bf297974cbe4834d2e8e744ae7b7e5b2ad39/src/component/Uploader/core/api/index.ts#L204-L252
bodyBuilder := &strings.Builder{}
bodyBuilder.WriteString("<CompleteMultipartUpload>")
for i, etag := range etags {
bodyBuilder.WriteString(fmt.Sprintf(
`<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>`,
i+1, // PartNumber 从 1 开始
etag,
))
}
bodyBuilder.WriteString("</CompleteMultipartUpload>")
req, err := http.NewRequest(
"POST",
u.CompleteURL,
strings.NewReader(bodyBuilder.String()),
)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/xml")
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
body, _ := io.ReadAll(res.Body)
return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body))
}
// 上传成功发送回调请求
err = d.request(http.MethodGet, "/callback/s3/"+u.SessionID, nil, nil)
if err != nil {
return err
}
return nil
}

View File

@ -1,305 +0,0 @@
package cloudreve_v4
import (
"context"
"errors"
"net/http"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
)
type CloudreveV4 struct {
model.Storage
Addition
ref *CloudreveV4
}
func (d *CloudreveV4) Config() driver.Config {
if d.ref != nil {
return d.ref.Config()
}
if d.EnableVersionUpload {
config.NoOverwriteUpload = false
}
return config
}
func (d *CloudreveV4) GetAddition() driver.Additional {
return &d.Addition
}
func (d *CloudreveV4) Init(ctx context.Context) error {
// removing trailing slash
d.Address = strings.TrimSuffix(d.Address, "/")
op.MustSaveDriverStorage(d)
if d.ref != nil {
return nil
}
if d.AccessToken == "" && d.RefreshToken != "" {
return d.refreshToken()
}
if d.Username != "" {
return d.login()
}
return nil
}
func (d *CloudreveV4) InitReference(storage driver.Driver) error {
refStorage, ok := storage.(*CloudreveV4)
if ok {
d.ref = refStorage
return nil
}
return errs.NotSupport
}
func (d *CloudreveV4) Drop(ctx context.Context) error {
d.ref = nil
return nil
}
func (d *CloudreveV4) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
const pageSize int = 100
var f []File
var r FileResp
params := map[string]string{
"page_size": strconv.Itoa(pageSize),
"uri": dir.GetPath(),
"order_by": d.OrderBy,
"order_direction": d.OrderDirection,
"page": "0",
}
for {
err := d.request(http.MethodGet, "/file", func(req *resty.Request) {
req.SetQueryParams(params)
}, &r)
if err != nil {
return nil, err
}
f = append(f, r.Files...)
if r.Pagination.NextToken == "" || len(r.Files) < pageSize {
break
}
params["next_page_token"] = r.Pagination.NextToken
}
return utils.SliceConvert(f, func(src File) (model.Obj, error) {
if d.EnableFolderSize && src.Type == 1 {
var ds FolderSummaryResp
err := d.request(http.MethodGet, "/file/info", func(req *resty.Request) {
req.SetQueryParam("uri", src.Path)
req.SetQueryParam("folder_summary", "true")
}, &ds)
if err == nil && ds.FolderSummary.Size > 0 {
src.Size = ds.FolderSummary.Size
}
}
var thumb model.Thumbnail
if d.EnableThumb && src.Type == 0 {
var t FileThumbResp
err := d.request(http.MethodGet, "/file/thumb", func(req *resty.Request) {
req.SetQueryParam("uri", src.Path)
}, &t)
if err == nil && t.URL != "" {
thumb = model.Thumbnail{
Thumbnail: t.URL,
}
}
}
return &model.ObjThumb{
Object: model.Object{
ID: src.ID,
Path: src.Path,
Name: src.Name,
Size: src.Size,
Modified: src.UpdatedAt,
Ctime: src.CreatedAt,
IsFolder: src.Type == 1,
},
Thumbnail: thumb,
}, nil
})
}
func (d *CloudreveV4) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
var url FileUrlResp
err := d.request(http.MethodPost, "/file/url", func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{file.GetPath()},
"download": true,
})
}, &url)
if err != nil {
return nil, err
}
if len(url.Urls) == 0 {
return nil, errors.New("server returns no url")
}
exp := time.Until(url.Expires)
return &model.Link{
URL: url.Urls[0].URL,
Expiration: &exp,
}, nil
}
func (d *CloudreveV4) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
req.SetBody(base.Json{
"type": "folder",
"uri": parentDir.GetPath() + "/" + dirName,
"error_on_conflict": true,
})
}, nil)
}
func (d *CloudreveV4) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
return d.request(http.MethodPost, "/file/move", func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{srcObj.GetPath()},
"dst": dstDir.GetPath(),
"copy": false,
})
}, nil)
}
func (d *CloudreveV4) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
req.SetBody(base.Json{
"new_name": newName,
"uri": srcObj.GetPath(),
})
}, nil)
}
func (d *CloudreveV4) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
return d.request(http.MethodPost, "/file/move", func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{srcObj.GetPath()},
"dst": dstDir.GetPath(),
"copy": true,
})
}, nil)
}
func (d *CloudreveV4) Remove(ctx context.Context, obj model.Obj) error {
return d.request(http.MethodDelete, "/file", func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{obj.GetPath()},
"unlink": false,
"skip_soft_delete": true,
})
}, nil)
}
func (d *CloudreveV4) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
if file.GetSize() == 0 {
// 空文件使用新建文件方法,避免上传卡锁
return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
req.SetBody(base.Json{
"type": "file",
"uri": dstDir.GetPath() + "/" + file.GetName(),
"error_on_conflict": true,
})
}, nil)
}
var p StoragePolicy
var r FileResp
var u FileUploadResp
var err error
params := map[string]string{
"page_size": "10",
"uri": dstDir.GetPath(),
"order_by": "created_at",
"order_direction": "asc",
"page": "0",
}
err = d.request(http.MethodGet, "/file", func(req *resty.Request) {
req.SetQueryParams(params)
}, &r)
if err != nil {
return err
}
p = r.StoragePolicy
body := base.Json{
"uri": dstDir.GetPath() + "/" + file.GetName(),
"size": file.GetSize(),
"policy_id": p.ID,
"last_modified": file.ModTime().UnixMilli(),
"mime_type": "",
}
if d.EnableVersionUpload {
body["entity_type"] = "version"
}
err = d.request(http.MethodPut, "/file/upload", func(req *resty.Request) {
req.SetBody(body)
}, &u)
if err != nil {
return err
}
if u.StoragePolicy.Relay {
err = d.upLocal(ctx, file, u, up)
} else {
switch u.StoragePolicy.Type {
case "local":
err = d.upLocal(ctx, file, u, up)
case "remote":
err = d.upRemote(ctx, file, u, up)
case "onedrive":
err = d.upOneDrive(ctx, file, u, up)
case "s3":
err = d.upS3(ctx, file, u, up)
default:
return errs.NotImplement
}
}
if err != nil {
// 删除失败的会话
_ = d.request(http.MethodDelete, "/file/upload", func(req *resty.Request) {
req.SetBody(base.Json{
"id": u.SessionID,
"uri": u.URI,
})
}, nil)
return err
}
return nil
}
func (d *CloudreveV4) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *CloudreveV4) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *CloudreveV4) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// return errs.NotImplement to use an internal archive tool
return nil, errs.NotImplement
}
//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*CloudreveV4)(nil)

View File

@ -1,44 +0,0 @@
package cloudreve_v4
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
// Usually one of two
driver.RootPath
// driver.RootID
// define other
Address string `json:"address" required:"true"`
Username string `json:"username"`
Password string `json:"password"`
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
CustomUA string `json:"custom_ua"`
EnableFolderSize bool `json:"enable_folder_size"`
EnableThumb bool `json:"enable_thumb"`
EnableVersionUpload bool `json:"enable_version_upload"`
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at" default:"name" required:"true"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc" required:"true"`
}
var config = driver.Config{
Name: "Cloudreve V4",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "cloudreve://my",
CheckStatus: true,
Alert: "",
NoOverwriteUpload: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &CloudreveV4{}
})
}

View File

@ -1,164 +0,0 @@
package cloudreve_v4
import (
"time"
"github.com/alist-org/alist/v3/internal/model"
)
type Object struct {
model.Object
StoragePolicy StoragePolicy
}
type Resp struct {
Code int `json:"code"`
Msg string `json:"msg"`
Data any `json:"data"`
}
type BasicConfigResp struct {
InstanceID string `json:"instance_id"`
// Title string `json:"title"`
// Themes string `json:"themes"`
// DefaultTheme string `json:"default_theme"`
User struct {
ID string `json:"id"`
// Nickname string `json:"nickname"`
// CreatedAt time.Time `json:"created_at"`
// Anonymous bool `json:"anonymous"`
Group struct {
ID string `json:"id"`
Name string `json:"name"`
Permission string `json:"permission"`
} `json:"group"`
} `json:"user"`
// Logo string `json:"logo"`
// LogoLight string `json:"logo_light"`
// CaptchaReCaptchaKey string `json:"captcha_ReCaptchaKey"`
CaptchaType string `json:"captcha_type"` // support 'normal' only
// AppPromotion bool `json:"app_promotion"`
}
type SiteLoginConfigResp struct {
LoginCaptcha bool `json:"login_captcha"`
Authn bool `json:"authn"`
}
type PrepareLoginResp struct {
WebauthnEnabled bool `json:"webauthn_enabled"`
PasswordEnabled bool `json:"password_enabled"`
}
type CaptchaResp struct {
Image string `json:"image"`
Ticket string `json:"ticket"`
}
type Token struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
AccessExpires time.Time `json:"access_expires"`
RefreshExpires time.Time `json:"refresh_expires"`
}
type TokenResponse struct {
User struct {
ID string `json:"id"`
// Email string `json:"email"`
// Nickname string `json:"nickname"`
Status string `json:"status"`
// CreatedAt time.Time `json:"created_at"`
Group struct {
ID string `json:"id"`
Name string `json:"name"`
Permission string `json:"permission"`
// DirectLinkBatchSize int `json:"direct_link_batch_size"`
// TrashRetention int `json:"trash_retention"`
} `json:"group"`
// Language string `json:"language"`
} `json:"user"`
Token Token `json:"token"`
}
type File struct {
Type int `json:"type"` // 0: file, 1: folder
ID string `json:"id"`
Name string `json:"name"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
Size int64 `json:"size"`
Metadata interface{} `json:"metadata"`
Path string `json:"path"`
Capability string `json:"capability"`
Owned bool `json:"owned"`
PrimaryEntity string `json:"primary_entity"`
}
type StoragePolicy struct {
ID string `json:"id"`
Name string `json:"name"`
Type string `json:"type"`
MaxSize int64 `json:"max_size"`
Relay bool `json:"relay,omitempty"`
}
type Pagination struct {
Page int `json:"page"`
PageSize int `json:"page_size"`
IsCursor bool `json:"is_cursor"`
NextToken string `json:"next_token,omitempty"`
}
type Props struct {
Capability string `json:"capability"`
MaxPageSize int `json:"max_page_size"`
OrderByOptions []string `json:"order_by_options"`
OrderDirectionOptions []string `json:"order_direction_options"`
}
type FileResp struct {
Files []File `json:"files"`
Parent File `json:"parent"`
Pagination Pagination `json:"pagination"`
Props Props `json:"props"`
ContextHint string `json:"context_hint"`
MixedType bool `json:"mixed_type"`
StoragePolicy StoragePolicy `json:"storage_policy"`
}
type FileUrlResp struct {
Urls []struct {
URL string `json:"url"`
} `json:"urls"`
Expires time.Time `json:"expires"`
}
type FileUploadResp struct {
// UploadID string `json:"upload_id"`
SessionID string `json:"session_id"`
ChunkSize int64 `json:"chunk_size"`
Expires int64 `json:"expires"`
StoragePolicy StoragePolicy `json:"storage_policy"`
URI string `json:"uri"`
CompleteURL string `json:"completeURL,omitempty"` // for S3-like
CallbackSecret string `json:"callback_secret,omitempty"` // for S3-like, OneDrive
UploadUrls []string `json:"upload_urls,omitempty"` // for not-local
Credential string `json:"credential,omitempty"` // for local
}
type FileThumbResp struct {
URL string `json:"url"`
Expires time.Time `json:"expires"`
}
type FolderSummaryResp struct {
File
FolderSummary struct {
Size int64 `json:"size"`
Files int64 `json:"files"`
Folders int64 `json:"folders"`
Completed bool `json:"completed"`
CalculatedAt time.Time `json:"calculated_at"`
} `json:"folder_summary"`
}

View File

@ -1,476 +0,0 @@
package cloudreve_v4
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/setting"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go"
)
// do others that not defined in Driver interface
func (d *CloudreveV4) getUA() string {
if d.CustomUA != "" {
return d.CustomUA
}
return base.UserAgent
}
func (d *CloudreveV4) request(method string, path string, callback base.ReqCallback, out any) error {
if d.ref != nil {
return d.ref.request(method, path, callback, out)
}
u := d.Address + "/api/v4" + path
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
"Accept": "application/json, text/plain, */*",
"User-Agent": d.getUA(),
})
if d.AccessToken != "" {
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
}
var r Resp
req.SetResult(&r)
if callback != nil {
callback(req)
}
resp, err := req.Execute(method, u)
if err != nil {
return err
}
if !resp.IsSuccess() {
return errors.New(resp.String())
}
if r.Code != 0 {
if r.Code == 401 && d.RefreshToken != "" && path != "/session/token/refresh" {
// try to refresh token
err = d.refreshToken()
if err != nil {
return err
}
return d.request(method, path, callback, out)
}
return errors.New(r.Msg)
}
if out != nil && r.Data != nil {
var marshal []byte
marshal, err = json.Marshal(r.Data)
if err != nil {
return err
}
err = json.Unmarshal(marshal, out)
if err != nil {
return err
}
}
return nil
}
func (d *CloudreveV4) login() error {
var siteConfig SiteLoginConfigResp
err := d.request(http.MethodGet, "/site/config/login", nil, &siteConfig)
if err != nil {
return err
}
if !siteConfig.Authn {
return errors.New("authn not support")
}
var prepareLogin PrepareLoginResp
err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
if err != nil {
return err
}
if !prepareLogin.PasswordEnabled {
return errors.New("password not enabled")
}
if prepareLogin.WebauthnEnabled {
return errors.New("webauthn not support")
}
for range 5 {
err = d.doLogin(siteConfig.LoginCaptcha)
if err == nil {
break
}
if err.Error() != "CAPTCHA not match." {
break
}
}
return err
}
func (d *CloudreveV4) doLogin(needCaptcha bool) error {
var err error
loginBody := base.Json{
"email": d.Username,
"password": d.Password,
}
if needCaptcha {
var config BasicConfigResp
err = d.request(http.MethodGet, "/site/config/basic", nil, &config)
if err != nil {
return err
}
if config.CaptchaType != "normal" {
return fmt.Errorf("captcha type %s not support", config.CaptchaType)
}
var captcha CaptchaResp
err = d.request(http.MethodGet, "/site/captcha", nil, &captcha)
if err != nil {
return err
}
if !strings.HasPrefix(captcha.Image, "data:image/png;base64,") {
return errors.New("can not get captcha")
}
loginBody["ticket"] = captcha.Ticket
i := strings.Index(captcha.Image, ",")
dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(captcha.Image[i+1:]))
vRes, err := base.RestyClient.R().SetMultipartField(
"image", "validateCode.png", "image/png", dec).
Post(setting.GetStr(conf.OcrApi))
if err != nil {
return err
}
if jsoniter.Get(vRes.Body(), "status").ToInt() != 200 {
return errors.New("ocr error:" + jsoniter.Get(vRes.Body(), "msg").ToString())
}
captchaCode := jsoniter.Get(vRes.Body(), "result").ToString()
if captchaCode == "" {
return errors.New("ocr error: empty result")
}
loginBody["captcha"] = captchaCode
}
var token TokenResponse
err = d.request(http.MethodPost, "/session/token", func(req *resty.Request) {
req.SetBody(loginBody)
}, &token)
if err != nil {
return err
}
d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken
op.MustSaveDriverStorage(d)
return nil
}
func (d *CloudreveV4) refreshToken() error {
var token Token
if token.RefreshToken == "" {
if d.Username != "" {
err := d.login()
if err != nil {
return fmt.Errorf("cannot login to get refresh token, error: %s", err)
}
}
return nil
}
err := d.request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
req.SetBody(base.Json{
"refresh_token": d.RefreshToken,
})
}, &token)
if err != nil {
return err
}
d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken
op.MustSaveDriverStorage(d)
return nil
}
func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
if DEFAULT == 0 {
// support relay
DEFAULT = file.GetSize()
}
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
req.SetHeader("Content-Type", "application/octet-stream")
req.SetContentLength(true)
req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
req.AddRetryCondition(func(r *resty.Response, err error) bool {
if err != nil {
return true
}
if r.IsError() {
return true
}
var retryResp Resp
jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp)
if jErr != nil {
return true
}
if retryResp.Code != 0 {
return true
}
return false
})
}, nil)
if err != nil {
return err
}
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
chunk++
}
return nil
}
func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
uploadUrl := u.UploadUrls[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA())
err = func() error {
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return errors.New(res.Status)
}
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
var up Resp
err = json.Unmarshal(body, &up)
if err != nil {
return err
}
if up.Code != 0 {
return errors.New(up.Msg)
}
return nil
}()
if err == nil {
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
chunk++
} else {
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
time.Sleep(backoff)
}
}
return nil
}
func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
uploadUrl := u.UploadUrls[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest(http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, file.GetSize()))
req.Header.Set("User-Agent", d.getUA())
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[CloudreveV4-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
data, _ := io.ReadAll(res.Body)
res.Body.Close()
return errors.New(string(data))
default:
res.Body.Close()
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
}
}
// 上传成功发送回调请求
return d.request(http.MethodPost, "/callback/onedrive/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
req.SetBody("{}")
}, nil)
}
func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest(http.MethodPut, u.UploadUrls[chunk],
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
etag := res.Header.Get("ETag")
res.Body.Close()
switch {
case res.StatusCode != 200:
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors", maxRetries)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("server error %d, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case etag == "":
return errors.New("faild to get ETag from header")
default:
retryCount = 0
etags = append(etags, etag)
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
chunk++
}
}
// s3LikeFinishUpload
bodyBuilder := &strings.Builder{}
bodyBuilder.WriteString("<CompleteMultipartUpload>")
for i, etag := range etags {
bodyBuilder.WriteString(fmt.Sprintf(
`<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>`,
i+1, // PartNumber 从 1 开始
etag,
))
}
bodyBuilder.WriteString("</CompleteMultipartUpload>")
req, err := http.NewRequest(
"POST",
u.CompleteURL,
strings.NewReader(bodyBuilder.String()),
)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/xml")
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
body, _ := io.ReadAll(res.Body)
return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body))
}
// 上传成功发送回调请求
return d.request(http.MethodPost, "/callback/s3/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
req.SetBody("{}")
}, nil)
}

Some files were not shown because too many files have changed in this diff Show More