feat: file blob encryption
@@ -17,6 +17,7 @@ import (
    "github.com/cloudreve/Cloudreve/v4/pkg/conf"
    "github.com/cloudreve/Cloudreve/v4/pkg/credmanager"
    "github.com/cloudreve/Cloudreve/v4/pkg/email"
+   "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
    "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
    "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
    "github.com/cloudreve/Cloudreve/v4/pkg/hashid"
@@ -129,50 +130,55 @@ type Dep interface {
    WebAuthn(ctx context.Context) (*webauthn.WebAuthn, error)
    // UAParser Get a singleton uaparser.Parser instance for user agent parsing.
    UAParser() *uaparser.Parser
+   // MasterEncryptKeyVault Get a singleton encrypt.MasterEncryptKeyVault instance for the master encrypt key vault.
+   MasterEncryptKeyVault() encrypt.MasterEncryptKeyVault
+   // EncryptorFactory Get a new encrypt.CryptorFactory instance.
+   EncryptorFactory() encrypt.CryptorFactory
}

type dependency struct {
    configProvider      conf.ConfigProvider
    logger              logging.Logger
    statics             iofs.FS
    serverStaticFS      static.ServeFileSystem
    dbClient            *ent.Client
    rawEntClient        *ent.Client
    kv                  cache.Driver
    navigatorStateKv    cache.Driver
    settingClient       inventory.SettingClient
    fileClient          inventory.FileClient
    shareClient         inventory.ShareClient
    settingProvider     setting.Provider
    userClient          inventory.UserClient
    groupClient         inventory.GroupClient
    storagePolicyClient inventory.StoragePolicyClient
    taskClient          inventory.TaskClient
    nodeClient          inventory.NodeClient
    davAccountClient    inventory.DavAccountClient
    directLinkClient    inventory.DirectLinkClient
    emailClient         email.Driver
    generalAuth         auth.Auth
    hashidEncoder       hashid.Encoder
    tokenAuth           auth.TokenAuth
    lockSystem          lock.LockSystem
    requestClient       request.Client
    ioIntenseQueue      queue.Queue
    thumbQueue          queue.Queue
    mediaMetaQueue      queue.Queue
    entityRecycleQueue  queue.Queue
    slaveQueue          queue.Queue
    remoteDownloadQueue queue.Queue
    ioIntenseQueueTask  queue.Task
    mediaMeta           mediameta.Extractor
    thumbPipeline       thumb.Generator
    mimeDetector        mime.MimeDetector
    credManager         credmanager.CredManager
    nodePool            cluster.NodePool
    taskRegistry        queue.TaskRegistry
    webauthn            *webauthn.WebAuthn
    parser              *uaparser.Parser
    cron                *cron.Cron
+   masterEncryptKeyVault encrypt.MasterEncryptKeyVault

    configPath string
    isPro      bool
@@ -206,6 +212,19 @@ func (d *dependency) RequestClient(opts ...request.Option) request.Client {
    return request.NewClient(d.ConfigProvider(), opts...)
}

+func (d *dependency) MasterEncryptKeyVault() encrypt.MasterEncryptKeyVault {
+   if d.masterEncryptKeyVault != nil {
+       return d.masterEncryptKeyVault
+   }
+
+   d.masterEncryptKeyVault = encrypt.NewMasterEncryptKeyVault(d.SettingProvider())
+   return d.masterEncryptKeyVault
+}
+
+func (d *dependency) EncryptorFactory() encrypt.CryptorFactory {
+   return encrypt.NewCryptorFactory(d.MasterEncryptKeyVault())
+}
+
func (d *dependency) WebAuthn(ctx context.Context) (*webauthn.WebAuthn, error) {
    if d.webauthn != nil {
        return d.webauthn, nil
Submodule assets updated: 1c38544ef7...1c9dd8d9ad
@@ -42,8 +42,8 @@ type Entity struct {
    CreatedBy int `json:"created_by,omitempty"`
    // UploadSessionID holds the value of the "upload_session_id" field.
    UploadSessionID *uuid.UUID `json:"upload_session_id,omitempty"`
-   // RecycleOptions holds the value of the "recycle_options" field.
-   RecycleOptions *types.EntityRecycleOption `json:"recycle_options,omitempty"`
+   // Props holds the value of the "props" field.
+   Props *types.EntityProps `json:"props,omitempty"`
    // Edges holds the relations/edges for other nodes in the graph.
    // The values are being populated by the EntityQuery when eager-loading is set.
    Edges EntityEdges `json:"edges"`
@@ -105,7 +105,7 @@ func (*Entity) scanValues(columns []string) ([]any, error) {
    switch columns[i] {
    case entity.FieldUploadSessionID:
        values[i] = &sql.NullScanner{S: new(uuid.UUID)}
-   case entity.FieldRecycleOptions:
+   case entity.FieldProps:
        values[i] = new([]byte)
    case entity.FieldID, entity.FieldType, entity.FieldSize, entity.FieldReferenceCount, entity.FieldStoragePolicyEntities, entity.FieldCreatedBy:
        values[i] = new(sql.NullInt64)
@@ -196,12 +196,12 @@ func (e *Entity) assignValues(columns []string, values []any) error {
            e.UploadSessionID = new(uuid.UUID)
            *e.UploadSessionID = *value.S.(*uuid.UUID)
        }
-   case entity.FieldRecycleOptions:
+   case entity.FieldProps:
        if value, ok := values[i].(*[]byte); !ok {
-           return fmt.Errorf("unexpected type %T for field recycle_options", values[i])
+           return fmt.Errorf("unexpected type %T for field props", values[i])
        } else if value != nil && len(*value) > 0 {
-           if err := json.Unmarshal(*value, &e.RecycleOptions); err != nil {
-               return fmt.Errorf("unmarshal field recycle_options: %w", err)
+           if err := json.Unmarshal(*value, &e.Props); err != nil {
+               return fmt.Errorf("unmarshal field props: %w", err)
            }
        }
    default:
@@ -289,8 +289,8 @@ func (e *Entity) String() string {
        builder.WriteString(fmt.Sprintf("%v", *v))
    }
    builder.WriteString(", ")
-   builder.WriteString("recycle_options=")
-   builder.WriteString(fmt.Sprintf("%v", e.RecycleOptions))
+   builder.WriteString("props=")
+   builder.WriteString(fmt.Sprintf("%v", e.Props))
    builder.WriteByte(')')
    return builder.String()
}
@@ -35,8 +35,8 @@ const (
    FieldCreatedBy = "created_by"
    // FieldUploadSessionID holds the string denoting the upload_session_id field in the database.
    FieldUploadSessionID = "upload_session_id"
-   // FieldRecycleOptions holds the string denoting the recycle_options field in the database.
-   FieldRecycleOptions = "recycle_options"
+   // FieldProps holds the string denoting the props field in the database.
+   FieldProps = "recycle_options"
    // EdgeFile holds the string denoting the file edge name in mutations.
    EdgeFile = "file"
    // EdgeUser holds the string denoting the user edge name in mutations.
@@ -79,7 +79,7 @@ var Columns = []string{
    FieldStoragePolicyEntities,
    FieldCreatedBy,
    FieldUploadSessionID,
-   FieldRecycleOptions,
+   FieldProps,
}

var (
@@ -521,14 +521,14 @@ func UploadSessionIDNotNil() predicate.Entity {
    return predicate.Entity(sql.FieldNotNull(FieldUploadSessionID))
}

-// RecycleOptionsIsNil applies the IsNil predicate on the "recycle_options" field.
-func RecycleOptionsIsNil() predicate.Entity {
-   return predicate.Entity(sql.FieldIsNull(FieldRecycleOptions))
+// PropsIsNil applies the IsNil predicate on the "props" field.
+func PropsIsNil() predicate.Entity {
+   return predicate.Entity(sql.FieldIsNull(FieldProps))
}

-// RecycleOptionsNotNil applies the NotNil predicate on the "recycle_options" field.
-func RecycleOptionsNotNil() predicate.Entity {
-   return predicate.Entity(sql.FieldNotNull(FieldRecycleOptions))
+// PropsNotNil applies the NotNil predicate on the "props" field.
+func PropsNotNil() predicate.Entity {
+   return predicate.Entity(sql.FieldNotNull(FieldProps))
}

// HasFile applies the HasEdge predicate on the "file" edge.
@@ -135,9 +135,9 @@ func (ec *EntityCreate) SetNillableUploadSessionID(u *uuid.UUID) *EntityCreate {
    return ec
}

-// SetRecycleOptions sets the "recycle_options" field.
-func (ec *EntityCreate) SetRecycleOptions(tro *types.EntityRecycleOption) *EntityCreate {
-   ec.mutation.SetRecycleOptions(tro)
+// SetProps sets the "props" field.
+func (ec *EntityCreate) SetProps(tp *types.EntityProps) *EntityCreate {
+   ec.mutation.SetProps(tp)
    return ec
}
@@ -336,9 +336,9 @@ func (ec *EntityCreate) createSpec() (*Entity, *sqlgraph.CreateSpec) {
        _spec.SetField(entity.FieldUploadSessionID, field.TypeUUID, value)
        _node.UploadSessionID = &value
    }
-   if value, ok := ec.mutation.RecycleOptions(); ok {
-       _spec.SetField(entity.FieldRecycleOptions, field.TypeJSON, value)
-       _node.RecycleOptions = value
+   if value, ok := ec.mutation.Props(); ok {
+       _spec.SetField(entity.FieldProps, field.TypeJSON, value)
+       _node.Props = value
    }
    if nodes := ec.mutation.FileIDs(); len(nodes) > 0 {
        edge := &sqlgraph.EdgeSpec{
@@ -586,21 +586,21 @@ func (u *EntityUpsert) ClearUploadSessionID() *EntityUpsert {
    return u
}

-// SetRecycleOptions sets the "recycle_options" field.
-func (u *EntityUpsert) SetRecycleOptions(v *types.EntityRecycleOption) *EntityUpsert {
-   u.Set(entity.FieldRecycleOptions, v)
+// SetProps sets the "props" field.
+func (u *EntityUpsert) SetProps(v *types.EntityProps) *EntityUpsert {
+   u.Set(entity.FieldProps, v)
    return u
}

-// UpdateRecycleOptions sets the "recycle_options" field to the value that was provided on create.
-func (u *EntityUpsert) UpdateRecycleOptions() *EntityUpsert {
-   u.SetExcluded(entity.FieldRecycleOptions)
+// UpdateProps sets the "props" field to the value that was provided on create.
+func (u *EntityUpsert) UpdateProps() *EntityUpsert {
+   u.SetExcluded(entity.FieldProps)
    return u
}

-// ClearRecycleOptions clears the value of the "recycle_options" field.
-func (u *EntityUpsert) ClearRecycleOptions() *EntityUpsert {
-   u.SetNull(entity.FieldRecycleOptions)
+// ClearProps clears the value of the "props" field.
+func (u *EntityUpsert) ClearProps() *EntityUpsert {
+   u.SetNull(entity.FieldProps)
    return u
}
@@ -817,24 +817,24 @@ func (u *EntityUpsertOne) ClearUploadSessionID() *EntityUpsertOne {
    })
}

-// SetRecycleOptions sets the "recycle_options" field.
-func (u *EntityUpsertOne) SetRecycleOptions(v *types.EntityRecycleOption) *EntityUpsertOne {
+// SetProps sets the "props" field.
+func (u *EntityUpsertOne) SetProps(v *types.EntityProps) *EntityUpsertOne {
    return u.Update(func(s *EntityUpsert) {
-       s.SetRecycleOptions(v)
+       s.SetProps(v)
    })
}

-// UpdateRecycleOptions sets the "recycle_options" field to the value that was provided on create.
-func (u *EntityUpsertOne) UpdateRecycleOptions() *EntityUpsertOne {
+// UpdateProps sets the "props" field to the value that was provided on create.
+func (u *EntityUpsertOne) UpdateProps() *EntityUpsertOne {
    return u.Update(func(s *EntityUpsert) {
-       s.UpdateRecycleOptions()
+       s.UpdateProps()
    })
}

-// ClearRecycleOptions clears the value of the "recycle_options" field.
-func (u *EntityUpsertOne) ClearRecycleOptions() *EntityUpsertOne {
+// ClearProps clears the value of the "props" field.
+func (u *EntityUpsertOne) ClearProps() *EntityUpsertOne {
    return u.Update(func(s *EntityUpsert) {
-       s.ClearRecycleOptions()
+       s.ClearProps()
    })
}
@@ -1222,24 +1222,24 @@ func (u *EntityUpsertBulk) ClearUploadSessionID() *EntityUpsertBulk {
    })
}

-// SetRecycleOptions sets the "recycle_options" field.
-func (u *EntityUpsertBulk) SetRecycleOptions(v *types.EntityRecycleOption) *EntityUpsertBulk {
+// SetProps sets the "props" field.
+func (u *EntityUpsertBulk) SetProps(v *types.EntityProps) *EntityUpsertBulk {
    return u.Update(func(s *EntityUpsert) {
-       s.SetRecycleOptions(v)
+       s.SetProps(v)
    })
}

-// UpdateRecycleOptions sets the "recycle_options" field to the value that was provided on create.
-func (u *EntityUpsertBulk) UpdateRecycleOptions() *EntityUpsertBulk {
+// UpdateProps sets the "props" field to the value that was provided on create.
+func (u *EntityUpsertBulk) UpdateProps() *EntityUpsertBulk {
    return u.Update(func(s *EntityUpsert) {
-       s.UpdateRecycleOptions()
+       s.UpdateProps()
    })
}

-// ClearRecycleOptions clears the value of the "recycle_options" field.
-func (u *EntityUpsertBulk) ClearRecycleOptions() *EntityUpsertBulk {
+// ClearProps clears the value of the "props" field.
+func (u *EntityUpsertBulk) ClearProps() *EntityUpsertBulk {
    return u.Update(func(s *EntityUpsert) {
-       s.ClearRecycleOptions()
+       s.ClearProps()
    })
}
|
||||
@@ -190,15 +190,15 @@ func (eu *EntityUpdate) ClearUploadSessionID() *EntityUpdate {
|
||||
return eu
|
||||
}
|
||||
|
||||
// SetRecycleOptions sets the "recycle_options" field.
|
||||
func (eu *EntityUpdate) SetRecycleOptions(tro *types.EntityRecycleOption) *EntityUpdate {
|
||||
eu.mutation.SetRecycleOptions(tro)
|
||||
// SetProps sets the "props" field.
|
||||
func (eu *EntityUpdate) SetProps(tp *types.EntityProps) *EntityUpdate {
|
||||
eu.mutation.SetProps(tp)
|
||||
return eu
|
||||
}
|
||||
|
||||
// ClearRecycleOptions clears the value of the "recycle_options" field.
|
||||
func (eu *EntityUpdate) ClearRecycleOptions() *EntityUpdate {
|
||||
eu.mutation.ClearRecycleOptions()
|
||||
// ClearProps clears the value of the "props" field.
|
||||
func (eu *EntityUpdate) ClearProps() *EntityUpdate {
|
||||
eu.mutation.ClearProps()
|
||||
return eu
|
||||
}
|
||||
|
||||
@@ -383,11 +383,11 @@ func (eu *EntityUpdate) sqlSave(ctx context.Context) (n int, err error) {
    if eu.mutation.UploadSessionIDCleared() {
        _spec.ClearField(entity.FieldUploadSessionID, field.TypeUUID)
    }
-   if value, ok := eu.mutation.RecycleOptions(); ok {
-       _spec.SetField(entity.FieldRecycleOptions, field.TypeJSON, value)
+   if value, ok := eu.mutation.Props(); ok {
+       _spec.SetField(entity.FieldProps, field.TypeJSON, value)
    }
-   if eu.mutation.RecycleOptionsCleared() {
-       _spec.ClearField(entity.FieldRecycleOptions, field.TypeJSON)
+   if eu.mutation.PropsCleared() {
+       _spec.ClearField(entity.FieldProps, field.TypeJSON)
    }
    if eu.mutation.FileCleared() {
        edge := &sqlgraph.EdgeSpec{
@@ -669,15 +669,15 @@ func (euo *EntityUpdateOne) ClearUploadSessionID() *EntityUpdateOne {
    return euo
}

-// SetRecycleOptions sets the "recycle_options" field.
-func (euo *EntityUpdateOne) SetRecycleOptions(tro *types.EntityRecycleOption) *EntityUpdateOne {
-   euo.mutation.SetRecycleOptions(tro)
+// SetProps sets the "props" field.
+func (euo *EntityUpdateOne) SetProps(tp *types.EntityProps) *EntityUpdateOne {
+   euo.mutation.SetProps(tp)
    return euo
}

-// ClearRecycleOptions clears the value of the "recycle_options" field.
-func (euo *EntityUpdateOne) ClearRecycleOptions() *EntityUpdateOne {
-   euo.mutation.ClearRecycleOptions()
+// ClearProps clears the value of the "props" field.
+func (euo *EntityUpdateOne) ClearProps() *EntityUpdateOne {
+   euo.mutation.ClearProps()
    return euo
}
@@ -892,11 +892,11 @@ func (euo *EntityUpdateOne) sqlSave(ctx context.Context) (_node *Entity, err err
    if euo.mutation.UploadSessionIDCleared() {
        _spec.ClearField(entity.FieldUploadSessionID, field.TypeUUID)
    }
-   if value, ok := euo.mutation.RecycleOptions(); ok {
-       _spec.SetField(entity.FieldRecycleOptions, field.TypeJSON, value)
+   if value, ok := euo.mutation.Props(); ok {
+       _spec.SetField(entity.FieldProps, field.TypeJSON, value)
    }
-   if euo.mutation.RecycleOptionsCleared() {
-       _spec.ClearField(entity.FieldRecycleOptions, field.TypeJSON)
+   if euo.mutation.PropsCleared() {
+       _spec.ClearField(entity.FieldProps, field.TypeJSON)
    }
    if euo.mutation.FileCleared() {
        edge := &sqlgraph.EdgeSpec{
File diff suppressed because one or more lines are too long
@@ -1723,7 +1723,7 @@ type EntityMutation struct {
    reference_count    *int
    addreference_count *int
    upload_session_id  *uuid.UUID
-   recycle_options    **types.EntityRecycleOption
+   props              **types.EntityProps
    clearedFields      map[string]struct{}
    file               map[int]struct{}
    removedfile        map[int]struct{}
@@ -2294,53 +2294,53 @@ func (m *EntityMutation) ResetUploadSessionID() {
    delete(m.clearedFields, entity.FieldUploadSessionID)
}

-// SetRecycleOptions sets the "recycle_options" field.
-func (m *EntityMutation) SetRecycleOptions(tro *types.EntityRecycleOption) {
-   m.recycle_options = &tro
+// SetProps sets the "props" field.
+func (m *EntityMutation) SetProps(tp *types.EntityProps) {
+   m.props = &tp
}

-// RecycleOptions returns the value of the "recycle_options" field in the mutation.
-func (m *EntityMutation) RecycleOptions() (r *types.EntityRecycleOption, exists bool) {
-   v := m.recycle_options
+// Props returns the value of the "props" field in the mutation.
+func (m *EntityMutation) Props() (r *types.EntityProps, exists bool) {
+   v := m.props
    if v == nil {
        return
    }
    return *v, true
}

-// OldRecycleOptions returns the old "recycle_options" field's value of the Entity entity.
+// OldProps returns the old "props" field's value of the Entity entity.
// If the Entity object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
-func (m *EntityMutation) OldRecycleOptions(ctx context.Context) (v *types.EntityRecycleOption, err error) {
+func (m *EntityMutation) OldProps(ctx context.Context) (v *types.EntityProps, err error) {
    if !m.op.Is(OpUpdateOne) {
-       return v, errors.New("OldRecycleOptions is only allowed on UpdateOne operations")
+       return v, errors.New("OldProps is only allowed on UpdateOne operations")
    }
    if m.id == nil || m.oldValue == nil {
-       return v, errors.New("OldRecycleOptions requires an ID field in the mutation")
+       return v, errors.New("OldProps requires an ID field in the mutation")
    }
    oldValue, err := m.oldValue(ctx)
    if err != nil {
-       return v, fmt.Errorf("querying old value for OldRecycleOptions: %w", err)
+       return v, fmt.Errorf("querying old value for OldProps: %w", err)
    }
-   return oldValue.RecycleOptions, nil
+   return oldValue.Props, nil
}

-// ClearRecycleOptions clears the value of the "recycle_options" field.
-func (m *EntityMutation) ClearRecycleOptions() {
-   m.recycle_options = nil
-   m.clearedFields[entity.FieldRecycleOptions] = struct{}{}
+// ClearProps clears the value of the "props" field.
+func (m *EntityMutation) ClearProps() {
+   m.props = nil
+   m.clearedFields[entity.FieldProps] = struct{}{}
}

-// RecycleOptionsCleared returns if the "recycle_options" field was cleared in this mutation.
-func (m *EntityMutation) RecycleOptionsCleared() bool {
-   _, ok := m.clearedFields[entity.FieldRecycleOptions]
+// PropsCleared returns if the "props" field was cleared in this mutation.
+func (m *EntityMutation) PropsCleared() bool {
+   _, ok := m.clearedFields[entity.FieldProps]
    return ok
}

-// ResetRecycleOptions resets all changes to the "recycle_options" field.
-func (m *EntityMutation) ResetRecycleOptions() {
-   m.recycle_options = nil
-   delete(m.clearedFields, entity.FieldRecycleOptions)
+// ResetProps resets all changes to the "props" field.
+func (m *EntityMutation) ResetProps() {
+   m.props = nil
+   delete(m.clearedFields, entity.FieldProps)
}

// AddFileIDs adds the "file" edge to the File entity by ids.
@@ -2542,8 +2542,8 @@ func (m *EntityMutation) Fields() []string {
    if m.upload_session_id != nil {
        fields = append(fields, entity.FieldUploadSessionID)
    }
-   if m.recycle_options != nil {
-       fields = append(fields, entity.FieldRecycleOptions)
+   if m.props != nil {
+       fields = append(fields, entity.FieldProps)
    }
    return fields
}
@@ -2573,8 +2573,8 @@ func (m *EntityMutation) Field(name string) (ent.Value, bool) {
        return m.CreatedBy()
    case entity.FieldUploadSessionID:
        return m.UploadSessionID()
-   case entity.FieldRecycleOptions:
-       return m.RecycleOptions()
+   case entity.FieldProps:
+       return m.Props()
    }
    return nil, false
}
@@ -2604,8 +2604,8 @@ func (m *EntityMutation) OldField(ctx context.Context, name string) (ent.Value,
        return m.OldCreatedBy(ctx)
    case entity.FieldUploadSessionID:
        return m.OldUploadSessionID(ctx)
-   case entity.FieldRecycleOptions:
-       return m.OldRecycleOptions(ctx)
+   case entity.FieldProps:
+       return m.OldProps(ctx)
    }
    return nil, fmt.Errorf("unknown Entity field %s", name)
}
@@ -2685,12 +2685,12 @@ func (m *EntityMutation) SetField(name string, value ent.Value) error {
        }
        m.SetUploadSessionID(v)
        return nil
-   case entity.FieldRecycleOptions:
-       v, ok := value.(*types.EntityRecycleOption)
+   case entity.FieldProps:
+       v, ok := value.(*types.EntityProps)
        if !ok {
            return fmt.Errorf("unexpected type %T for field %s", value, name)
        }
-       m.SetRecycleOptions(v)
+       m.SetProps(v)
        return nil
    }
    return fmt.Errorf("unknown Entity field %s", name)
@@ -2770,8 +2770,8 @@ func (m *EntityMutation) ClearedFields() []string {
    if m.FieldCleared(entity.FieldUploadSessionID) {
        fields = append(fields, entity.FieldUploadSessionID)
    }
-   if m.FieldCleared(entity.FieldRecycleOptions) {
-       fields = append(fields, entity.FieldRecycleOptions)
+   if m.FieldCleared(entity.FieldProps) {
+       fields = append(fields, entity.FieldProps)
    }
    return fields
}
@@ -2796,8 +2796,8 @@ func (m *EntityMutation) ClearField(name string) error {
    case entity.FieldUploadSessionID:
        m.ClearUploadSessionID()
        return nil
-   case entity.FieldRecycleOptions:
-       m.ClearRecycleOptions()
+   case entity.FieldProps:
+       m.ClearProps()
        return nil
    }
    return fmt.Errorf("unknown Entity nullable field %s", name)
@@ -2837,8 +2837,8 @@ func (m *EntityMutation) ResetField(name string) error {
    case entity.FieldUploadSessionID:
        m.ResetUploadSessionID()
        return nil
-   case entity.FieldRecycleOptions:
-       m.ResetRecycleOptions()
+   case entity.FieldProps:
+       m.ResetProps()
        return nil
    }
    return fmt.Errorf("unknown Entity field %s", name)
@@ -25,8 +25,9 @@ func (Entity) Fields() []ent.Field {
        field.UUID("upload_session_id", uuid.Must(uuid.NewV4())).
            Optional().
            Nillable(),
-       field.JSON("recycle_options", &types.EntityRecycleOption{}).
-           Optional(),
+       field.JSON("props", &types.EntityProps{}).
+           Optional().
+           StorageKey("recycle_options"),
    }
}
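The `StorageKey("recycle_options")` call is what makes this rename migration-free: the new `props` field keeps reading and writing the old `recycle_options` column, so existing recycle options deserialize into `EntityProps.UnlinkOnly` untouched. A minimal sketch using the regenerated accessors from this commit (the `client`/`ctx` wiring is assumed):

```go
// Tag all entities that still lack props with an unlink-only marker.
// PropsIsNil and SetProps are the accessors regenerated in this commit.
props := &types.EntityProps{UnlinkOnly: true}
err := client.Entity.Update().
    Where(entity.PropsIsNil()).
    SetProps(props).
    Exec(ctx)
```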
@@ -130,6 +130,7 @@ type (
        Size            int64
        UploadSessionID uuid.UUID
        Importing       bool
+       EncryptMetadata *types.EncryptMetadata
    }

    RelocateEntityParameter struct {
@@ -188,7 +189,7 @@ type FileClient interface {
    // Copy copies a layer of file to its corresponding destination folder. dstMap is a map from src parent ID to dst parent Files.
    Copy(ctx context.Context, files []*ent.File, dstMap map[int][]*ent.File) (map[int][]*ent.File, StorageDiff, error)
    // Delete deletes a group of files (and related models) with given entity recycle option
-   Delete(ctx context.Context, files []*ent.File, options *types.EntityRecycleOption) ([]*ent.Entity, StorageDiff, error)
+   Delete(ctx context.Context, files []*ent.File, options *types.EntityProps) ([]*ent.Entity, StorageDiff, error)
    // StaleEntities returns stale entities of a given file. If ID is not provided, all entities
    // will be examined.
    StaleEntities(ctx context.Context, ids ...int) ([]*ent.Entity, error)
@@ -469,7 +470,7 @@ func (f *fileClient) DeleteByUser(ctx context.Context, uid int) error {
    return nil
}

-func (f *fileClient) Delete(ctx context.Context, files []*ent.File, options *types.EntityRecycleOption) ([]*ent.Entity, StorageDiff, error) {
+func (f *fileClient) Delete(ctx context.Context, files []*ent.File, options *types.EntityProps) ([]*ent.Entity, StorageDiff, error) {
    // 1. Decrease reference count for all entities;
    // entities stores the relation between its reference count in `files` and entity ID.
    entities := make(map[int]int)
@@ -525,7 +526,7 @@ func (f *fileClient) Delete(ctx context.Context, files []*ent.File, options *typ
    for _, chunk := range chunks {
        if err := f.client.Entity.Update().
            Where(entity.IDIn(chunk...)).
-           SetRecycleOptions(options).
+           SetProps(options).
            Exec(ctx); err != nil {
            return nil, nil, fmt.Errorf("failed to update recycle options for entities %v: %w", chunk, err)
        }
@@ -884,6 +885,17 @@ func (f *fileClient) RemoveStaleEntities(ctx context.Context, file *ent.File) (S

func (f *fileClient) CreateEntity(ctx context.Context, file *ent.File, args *EntityParameters) (*ent.Entity, StorageDiff, error) {
    createdBy := UserFromContext(ctx)
+   var opt *types.EntityProps
+   if args.EncryptMetadata != nil {
+       opt = &types.EntityProps{
+           EncryptMetadata: &types.EncryptMetadata{
+               Algorithm: args.EncryptMetadata.Algorithm,
+               Key:       args.EncryptMetadata.Key,
+               IV:        args.EncryptMetadata.IV,
+           },
+       }
+   }

    stm := f.client.Entity.
        Create().
        SetType(int(args.EntityType)).
@@ -891,6 +903,10 @@ func (f *fileClient) CreateEntity(ctx context.Context, file *ent.File, args *Ent
        SetSize(args.Size).
        SetStoragePolicyID(args.StoragePolicyID)

+   if opt != nil {
+       stm.SetProps(opt)
+   }
+
    if createdBy != nil && !IsAnonymousUser(createdBy) {
        stm.SetUser(createdBy)
    }
@@ -2,8 +2,11 @@ package inventory

import (
    "context"
+   "crypto/rand"
+   "encoding/base64"
    "encoding/json"
    "fmt"
+   "io"

    "github.com/cloudreve/Cloudreve/v4/ent"
    "github.com/cloudreve/Cloudreve/v4/ent/setting"
@@ -661,6 +664,7 @@ var DefaultSettings = map[string]string{
    "headless_footer_html": "",
    "headless_bottom_html": "",
    "sidebar_bottom_html":  "",
+   "encrypt_master_key":   "",
}

func init() {
@@ -721,4 +725,10 @@ func init() {
        panic(err)
    }
    DefaultSettings["mail_reset_template"] = string(mailResetTemplates)
+
+   key := make([]byte, 32)
+   if _, err := io.ReadFull(rand.Reader, key); err != nil {
+       panic(err)
+   }
+   DefaultSettings["encrypt_master_key"] = base64.StdEncoding.EncodeToString(key)
}
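Each fresh installation thus seeds `encrypt_master_key` with a random 32-byte key, base64-encoded for the settings KV. A hedged sketch of recovering the raw key bytes on the consumer side (the helper name is ours, not the commit's):

```go
// decodeMasterKey reverses the encoding used in init() above.
func decodeMasterKey(stored string) ([]byte, error) {
    key, err := base64.StdEncoding.DecodeString(stored)
    if err != nil {
        return nil, fmt.Errorf("malformed master key: %w", err)
    }
    if len(key) != 32 { // AES-256 requires exactly 32 bytes
        return nil, fmt.Errorf("expected 32-byte master key, got %d bytes", len(key))
    }
    return key, nil
}
```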
@@ -103,6 +103,8 @@ type (
        QiniuUploadCdn bool `json:"qiniu_upload_cdn,omitempty"`
        // ChunkConcurrency the number of chunks to upload concurrently.
        ChunkConcurrency int `json:"chunk_concurrency,omitempty"`
+       // Whether to enable file encryption.
+       Encryption bool `json:"encryption,omitempty"`
    }

    FileType int
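Encryption is opted into per storage policy via this flag. A hypothetical settings fragment (the `PolicySetting` type name is assumed from context, not shown in this diff; only relevant fields appear):

```go
settings := &types.PolicySetting{ // type name assumed
    Relay:      true, // relayed uploads are always encrypted (see generateEncryptMetadata below)
    Encryption: true, // allow encryption for direct uploads from capable clients
}
```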
@@ -154,8 +156,18 @@ type (
        MasterSiteVersion string `json:"master_site_version,omitempty"`
    }

-   EntityRecycleOption struct {
-       UnlinkOnly bool `json:"unlink_only,omitempty"`
+   EntityProps struct {
+       UnlinkOnly      bool             `json:"unlink_only,omitempty"`
+       EncryptMetadata *EncryptMetadata `json:"encrypt_metadata,omitempty"`
    }

+   Algorithm string
+
+   EncryptMetadata struct {
+       Algorithm    Algorithm `json:"algorithm"`
+       Key          []byte    `json:"key"`
+       KeyPlainText []byte    `json:"key_plain_text,omitempty"`
+       IV           []byte    `json:"iv"`
+   }
+
    DavAccountProps struct {

@@ -347,3 +359,7 @@ const (
    ProfileAllShare  = ShareLinksInProfileLevel("all_share")
    ProfileHideShare = ShareLinksInProfileLevel("hide_share")
)

+const (
+   AlgorithmAES256CTR Algorithm = "aes-256-ctr"
+)
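`Key` carries the per-file key wrapped by the master key, while `KeyPlainText` exists only for in-memory hand-off; its `omitempty` tag alone would not keep a populated value out of the database, which is why `CreateEntity` above copies only `Algorithm`, `Key`, and `IV` into the stored props. A sketch of the JSON that ends up in the entity row (byte slices marshal to base64; values illustrative):

```go
props := &types.EntityProps{
    EncryptMetadata: &types.EncryptMetadata{
        Algorithm: types.AlgorithmAES256CTR,
        Key:       wrappedKey, // [16-byte IV] + ciphertext of the file key
        IV:        fileIV,     // 16-byte CTR IV for the file stream
    },
}
raw, _ := json.Marshal(props)
// => {"encrypt_metadata":{"algorithm":"aes-256-ctr","key":"...","iv":"..."}}
```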
@@ -1,13 +1,14 @@
package local

import (
+   "os"
+   "time"
+
    "github.com/cloudreve/Cloudreve/v4/ent"
+   "github.com/cloudreve/Cloudreve/v4/inventory/types"
    "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
    "github.com/cloudreve/Cloudreve/v4/pkg/util"
    "github.com/gofrs/uuid"
-   "os"
-   "time"
)

// NewLocalFileEntity creates a new local file entity.
@@ -73,3 +74,11 @@ func (l *localFileEntity) UploadSessionID() *uuid.UUID {

func (l *localFileEntity) Model() *ent.Entity {
    return nil
}
+
+func (l *localFileEntity) Props() *types.EntityProps {
+   return nil
+}
+
+func (l *localFileEntity) Encrypted() bool {
+   return false
+}
pkg/filemanager/encrypt/aes256ctr.go (new file, 360 lines)
@@ -0,0 +1,360 @@
// Package encrypt provides AES-256-CTR encryption and decryption functionality
// compatible with the JavaScript EncryptedBlob implementation.
//
// # Usage Example
//
// Basic usage with encrypted metadata:
//
//	// Create AES256CTR instance
//	aes := NewAES256CTR(masterKeyVault)
//
//	// Load encrypted metadata (key is encrypted with master key)
//	err := aes.LoadMetadata(ctx, encryptedMetadata)
//	if err != nil {
//		return err
//	}
//
//	// Set encrypted source stream (no seeker, unknown size)
//	err = aes.SetSource(encryptedStream, nil, -1, 0)
//	if err != nil {
//		return err
//	}
//
//	// Read decrypted data
//	decryptedData, err := io.ReadAll(aes)
//	if err != nil {
//		return err
//	}
//	aes.Close()
//
// Usage with plain metadata (KeyPlainText already populated; LoadMetadata
// skips key unwrapping in that case):
//
//	aes := NewAES256CTR(masterKeyVault)
//	err := aes.LoadMetadata(ctx, plainMetadata)
//	err = aes.SetSource(encryptedStream, nil, -1, 0)
//	// Read decrypted data...
//
// Usage with counter offset (for chunked/sliced streams):
//
//	// If reading from byte offset 1048576 (1MB) of the encrypted file
//	aes := NewAES256CTR(masterKeyVault)
//	err := aes.LoadMetadata(ctx, metadata)
//	err = aes.SetSource(encryptedStreamStartingAt1MB, nil, -1, 1048576)
//	// This ensures proper counter alignment for correct decryption
//
// Using the Seeker interface (requires a seekable source):
//
//	aes := NewAES256CTR(masterKeyVault)
//	err := aes.LoadMetadata(ctx, metadata)
//	err = aes.SetSource(seekableEncryptedStream, seekableEncryptedStream, totalFileSize, 0) // size is required for io.SeekEnd
//
//	// Seek to position 1048576
//	newPos, err := aes.Seek(1048576, io.SeekStart)
//	// Read from that position...
//
//	// Seek relative to current position
//	newPos, err = aes.Seek(100, io.SeekCurrent)
//
//	// Seek from end (requires a known size from SetSource)
//	newPos, err = aes.Seek(-1024, io.SeekEnd)
//
// Using the factory pattern:
//
//	factory := NewCryptorFactory(masterKeyVault)
//	cryptor, err := factory(types.AlgorithmAES256CTR)
//	if err != nil {
//		return err
//	}
//	err = cryptor.LoadMetadata(ctx, encryptedMetadata)
//	err = cryptor.SetSource(encryptedStream, nil, -1, 0)
//	defer cryptor.Close()
package encrypt
import (
    "context"
    "crypto/aes"
    "crypto/cipher"
    "crypto/rand"
    "fmt"
    "io"

    "github.com/cloudreve/Cloudreve/v4/inventory/types"
)

// AES256CTR provides both encryption and decryption for AES-256-CTR.
// It implements both Cryptor and Decrypter interfaces.
type AES256CTR struct {
    masterKeyVault MasterEncryptKeyVault

    // Decryption fields
    src           io.ReadCloser // Source encrypted stream
    seeker        io.Seeker     // Seeker for the source stream
    stream        cipher.Stream // AES-CTR cipher stream
    metadata      *types.EncryptMetadata
    counterOffset int64 // Byte offset for sliced streams
    pos           int64 // Current read position relative to counterOffset
    size          int64 // Total size of encrypted data (for SeekEnd support, -1 if unknown)
    eof           bool  // EOF flag
}

func NewAES256CTR(masterKeyVault MasterEncryptKeyVault) *AES256CTR {
    return &AES256CTR{
        masterKeyVault: masterKeyVault,
        size:           -1, // Unknown by default
    }
}
func (e *AES256CTR) GenerateMetadata(ctx context.Context) (*types.EncryptMetadata, error) {
    // Generate random 32-byte key for AES-256
    key := make([]byte, 32)
    if _, err := io.ReadFull(rand.Reader, key); err != nil {
        return nil, err
    }

    // Generate random 16-byte IV for CTR mode
    iv := make([]byte, 16)
    if _, err := io.ReadFull(rand.Reader, iv); err != nil {
        return nil, err
    }

    // Get master key from vault
    masterKey, err := e.masterKeyVault.GetMasterKey(ctx)
    if err != nil {
        return nil, err
    }

    // Encrypt the key with master key
    encryptedKey, err := EncryptWithMasterKey(masterKey, key)
    if err != nil {
        return nil, err
    }

    return &types.EncryptMetadata{
        Algorithm:    types.AlgorithmAES256CTR,
        Key:          encryptedKey,
        KeyPlainText: key,
        IV:           iv,
    }, nil
}
// LoadMetadata loads and decrypts the encryption metadata using the master key.
func (e *AES256CTR) LoadMetadata(ctx context.Context, encryptedMetadata *types.EncryptMetadata) error {
    if encryptedMetadata == nil {
        return fmt.Errorf("encryption metadata is nil")
    }

    if encryptedMetadata.Algorithm != types.AlgorithmAES256CTR {
        return fmt.Errorf("unsupported algorithm: %s", encryptedMetadata.Algorithm)
    }

    if len(encryptedMetadata.KeyPlainText) > 0 {
        e.metadata = encryptedMetadata
        return nil
    }

    // Decrypt the encryption key
    decryptedKey, err := DecriptKey(ctx, e.masterKeyVault, encryptedMetadata.Key)
    if err != nil {
        return fmt.Errorf("failed to decrypt encryption key: %w", err)
    }

    // Store decrypted metadata
    e.metadata = &types.EncryptMetadata{
        Algorithm:    encryptedMetadata.Algorithm,
        KeyPlainText: decryptedKey,
        IV:           encryptedMetadata.IV,
    }

    return nil
}
// SetSource sets the encrypted data source and initializes the cipher stream.
// The counterOffset parameter allows for proper decryption of sliced streams,
// where the stream doesn't start at byte 0 of the original file.
//
// For non-block-aligned offsets (offset % 16 != 0), this method advances the
// cipher stream to the correct position within the block to ensure proper decryption.
func (e *AES256CTR) SetSource(src io.ReadCloser, seeker io.Seeker, size, counterOffset int64) error {
    if e.metadata == nil {
        return fmt.Errorf("metadata not loaded, call LoadMetadata first")
    }

    e.src = src
    e.seeker = seeker
    e.counterOffset = counterOffset
    e.pos = 0     // Reset position to start
    e.eof = false // Reset EOF flag
    e.size = size

    // Initialize cipher stream at counterOffset position
    return e.initCipherStream(counterOffset)
}
// Read implements io.Reader interface to read decrypted data.
// It reads encrypted data from the source and decrypts it on-the-fly.
func (e *AES256CTR) Read(p []byte) (int, error) {
    if e.src == nil {
        return 0, fmt.Errorf("source not set, call SetSource first")
    }

    if e.eof {
        return 0, io.EOF
    }

    // Read encrypted data from source
    n, err := e.src.Read(p)
    if err != nil {
        if err == io.EOF {
            e.eof = true
            if n == 0 {
                return 0, io.EOF
            }
        } else {
            return n, err
        }
    }

    // Decrypt data in place
    if n > 0 {
        e.stream.XORKeyStream(p[:n], p[:n])
        e.pos += int64(n) // Update current position
    }

    return n, err
}

// Close implements io.Closer interface.
func (e *AES256CTR) Close() error {
    if e.src != nil {
        return e.src.Close()
    }
    return nil
}
// Seek implements io.Seeker interface for seeking within the encrypted stream.
// It properly adjusts the AES-CTR counter based on the seek position.
//
// Parameters:
//   - offset: byte offset relative to whence
//   - whence: io.SeekStart, io.SeekCurrent, or io.SeekEnd
//
// Returns the new absolute position (relative to counterOffset start).
//
// Note: For io.SeekEnd to work, a size must have been provided via SetSource, otherwise it returns an error.
// Also note that seeking requires the underlying source to support seeking (io.Seeker).
func (e *AES256CTR) Seek(offset int64, whence int) (int64, error) {
    if e.metadata == nil {
        return 0, fmt.Errorf("metadata not loaded, call LoadMetadata first")
    }

    if e.src == nil {
        return 0, fmt.Errorf("source not set, call SetSource first")
    }

    // Check if source supports seeking
    if e.seeker == nil {
        return 0, fmt.Errorf("source does not support seeking")
    }

    // Calculate new absolute position
    var newPos int64
    switch whence {
    case io.SeekStart:
        newPos = offset
    case io.SeekCurrent:
        newPos = e.pos + offset
    case io.SeekEnd:
        if e.size < 0 {
            return 0, fmt.Errorf("size unknown, provide a size via SetSource before using SeekEnd")
        }
        newPos = e.size + offset
    default:
        return 0, fmt.Errorf("invalid whence: %d", whence)
    }

    // Validate new position
    if newPos < 0 {
        return 0, fmt.Errorf("negative position: %d", newPos)
    }

    // Seek in the underlying source stream
    // The absolute position in the source is counterOffset + newPos
    absPos := e.counterOffset + newPos
    _, err := e.seeker.Seek(absPos, io.SeekStart)
    if err != nil {
        return 0, fmt.Errorf("failed to seek source: %w", err)
    }

    // Reinitialize cipher stream with new counter position
    if err := e.initCipherStream(absPos); err != nil {
        return 0, fmt.Errorf("failed to reinitialize cipher stream: %w", err)
    }

    // Update position and reset EOF flag
    e.pos = newPos
    e.eof = false

    return newPos, nil
}
// initCipherStream initializes the cipher stream with proper counter alignment
// for the given absolute byte position.
func (e *AES256CTR) initCipherStream(absolutePosition int64) error {
    // Create AES cipher block
    block, err := aes.NewCipher(e.metadata.KeyPlainText)
    if err != nil {
        return fmt.Errorf("failed to create AES cipher: %w", err)
    }

    // Create counter value (16 bytes IV) and apply offset for position
    counter := make([]byte, 16)
    copy(counter, e.metadata.IV)

    // Apply counter offset based on byte position (each block is 16 bytes)
    if absolutePosition > 0 {
        blockOffset := absolutePosition / 16
        incrementCounter(counter, blockOffset)
    }

    // Create CTR cipher stream
    e.stream = cipher.NewCTR(block, counter)

    // For non-block-aligned offsets, we need to advance the stream position
    // within the current block to match the offset
    offsetInBlock := absolutePosition % 16
    if offsetInBlock > 0 {
        // Create a dummy buffer to advance the stream
        dummy := make([]byte, offsetInBlock)
        e.stream.XORKeyStream(dummy, dummy)
    }

    return nil
}
// incrementCounter increments a counter ([]byte) by a given number of blocks.
// This matches the JavaScript implementation's incrementCounter function.
// The counter is treated as a big-endian 128-bit integer.
func incrementCounter(counter []byte, blocks int64) {
    // Convert blocks to add into bytes (big-endian)
    // We only need to handle the lower 64 bits since blocks is int64
    for i := 15; i >= 0 && blocks > 0; i-- {
        // Add the lowest byte of blocks to current counter byte
        sum := uint64(counter[i]) + uint64(blocks&0xff)
        counter[i] = byte(sum & 0xff)

        // Shift blocks right by 8 bits for next iteration
        blocks = blocks >> 8

        // Add carry from this position to the next
        if sum > 0xff {
            carry := sum >> 8
            // Propagate carry to higher bytes
            for j := i - 1; j >= 0 && carry > 0; j-- {
                sum = uint64(counter[j]) + carry
                counter[j] = byte(sum & 0xff)
                carry = sum >> 8
            }
        }
    }
}
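The block/offset split in `initCipherStream` plus `incrementCounter` is what lets `Seek` and sliced downloads decrypt from any byte position without rewinding to byte 0. A standalone sanity check of that math (a sketch; `incrementCounter` is copied verbatim from above):

```go
package main

import (
    "bytes"
    "crypto/aes"
    "crypto/cipher"
    "fmt"
)

// incrementCounter is copied verbatim from the diff above.
func incrementCounter(counter []byte, blocks int64) {
    for i := 15; i >= 0 && blocks > 0; i-- {
        sum := uint64(counter[i]) + uint64(blocks&0xff)
        counter[i] = byte(sum & 0xff)
        blocks = blocks >> 8
        if sum > 0xff {
            carry := sum >> 8
            for j := i - 1; j >= 0 && carry > 0; j-- {
                sum = uint64(counter[j]) + carry
                counter[j] = byte(sum & 0xff)
                carry = sum >> 8
            }
        }
    }
}

func main() {
    key := make([]byte, 32) // zero key/IV are fine for a demo
    iv := make([]byte, 16)
    plain := bytes.Repeat([]byte("0123456789abcdef"), 8) // 128 bytes

    block, _ := aes.NewCipher(key)
    full := make([]byte, len(plain))
    cipher.NewCTR(block, iv).XORKeyStream(full, plain) // encrypt everything

    // Decrypt only bytes [37:) the way initCipherStream does:
    // advance whole blocks, then discard the partial-block keystream.
    offset := int64(37)
    counter := make([]byte, 16)
    copy(counter, iv)
    incrementCounter(counter, offset/16)
    stream := cipher.NewCTR(block, counter)
    dummy := make([]byte, offset%16)
    stream.XORKeyStream(dummy, dummy)

    out := make([]byte, int64(len(full))-offset)
    stream.XORKeyStream(out, full[offset:])
    fmt.Println(bytes.Equal(out, plain[offset:])) // true
}
```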
pkg/filemanager/encrypt/encrypt.go (new file, 97 lines)
@@ -0,0 +1,97 @@
package encrypt

import (
    "context"
    "crypto/aes"
    "crypto/cipher"
    "crypto/rand"
    "fmt"
    "io"

    "github.com/cloudreve/Cloudreve/v4/inventory/types"
)

type (
    Cryptor interface {
        io.ReadCloser
        io.Seeker
        // LoadMetadata loads and decrypts the encryption metadata using the master key
        LoadMetadata(ctx context.Context, encryptedMetadata *types.EncryptMetadata) error
        // SetSource sets the encrypted data source and initializes the cipher stream
        SetSource(src io.ReadCloser, seeker io.Seeker, size, counterOffset int64) error
        // GenerateMetadata generates new encryption metadata
        GenerateMetadata(ctx context.Context) (*types.EncryptMetadata, error)
    }

    CryptorFactory func(algorithm types.Algorithm) (Cryptor, error)
)

func NewCryptorFactory(masterKeyVault MasterEncryptKeyVault) CryptorFactory {
    return func(algorithm types.Algorithm) (Cryptor, error) {
        switch algorithm {
        case types.AlgorithmAES256CTR:
            return NewAES256CTR(masterKeyVault), nil
        default:
            return nil, fmt.Errorf("unknown algorithm: %s", algorithm)
        }
    }
}
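Putting the pieces together, a download path would obtain a `Cryptor` through the factory (typically via `dep.EncryptorFactory()` from the first hunk) and wrap the encrypted blob stream. A hedged sketch; `encryptedBlob` and `entityProps` stand in for whatever the caller actually has:

```go
cryptor, err := factory(types.AlgorithmAES256CTR)
if err != nil {
    return err
}
defer cryptor.Close()

// Metadata comes from the entity's props; the wrapped per-file key is
// decrypted with the master key inside LoadMetadata.
if err := cryptor.LoadMetadata(ctx, entityProps.EncryptMetadata); err != nil {
    return err
}
// No seeker and unknown size (-1): sequential full-stream decryption.
if err := cryptor.SetSource(encryptedBlob, nil, -1, 0); err != nil {
    return err
}
plain, err := io.ReadAll(cryptor) // decrypted bytes
```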
// EncryptWithMasterKey encrypts data using the master key with AES-256-CTR
// Returns: [16-byte IV] + [encrypted data]
func EncryptWithMasterKey(masterKey, data []byte) ([]byte, error) {
    // Create AES cipher with master key
    block, err := aes.NewCipher(masterKey)
    if err != nil {
        return nil, err
    }

    // Generate random IV for encryption
    iv := make([]byte, 16)
    if _, err := io.ReadFull(rand.Reader, iv); err != nil {
        return nil, err
    }

    // Encrypt data
    stream := cipher.NewCTR(block, iv)
    encrypted := make([]byte, len(data))
    stream.XORKeyStream(encrypted, data)

    // Return IV + encrypted data
    result := append(iv, encrypted...)
    return result, nil
}
func DecriptKey(ctx context.Context, keyVault MasterEncryptKeyVault, encryptedKey []byte) ([]byte, error) {
    masterKey, err := keyVault.GetMasterKey(ctx)
    if err != nil {
        return nil, fmt.Errorf("failed to get master key: %w", err)
    }
    return DecryptWithMasterKey(masterKey, encryptedKey)
}
// DecryptWithMasterKey decrypts data using the master key with AES-256-CTR
// Input format: [16-byte IV] + [encrypted data]
func DecryptWithMasterKey(masterKey, encryptedData []byte) ([]byte, error) {
    // Validate input length
    if len(encryptedData) < 16 {
        return nil, aes.KeySizeError(len(encryptedData))
    }

    // Extract IV and encrypted data
    iv := encryptedData[:16]
    encrypted := encryptedData[16:]

    // Create AES cipher with master key
    block, err := aes.NewCipher(masterKey)
    if err != nil {
        return nil, err
    }

    // Decrypt data
    stream := cipher.NewCTR(block, iv)
    decrypted := make([]byte, len(encrypted))
    stream.XORKeyStream(decrypted, encrypted)

    return decrypted, nil
}
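A quick round-trip check of the two key-wrapping helpers above (a sketch; the zeroed master key stands in for the decoded `encrypt_master_key`):

```go
masterKey := make([]byte, 32)                         // stand-in master key
fileKey := []byte("0123456789abcdef0123456789abcdef") // 32-byte per-file key

wrapped, err := EncryptWithMasterKey(masterKey, fileKey) // [16-byte IV] + ciphertext
if err != nil {
    panic(err)
}
unwrapped, err := DecryptWithMasterKey(masterKey, wrapped)
if err != nil {
    panic(err)
}
fmt.Println(bytes.Equal(fileKey, unwrapped)) // true
```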
pkg/filemanager/encrypt/masterkey.go (new file, 30 lines)
@@ -0,0 +1,30 @@
package encrypt

import (
    "context"
    "errors"

    "github.com/cloudreve/Cloudreve/v4/pkg/setting"
)

// MasterEncryptKeyVault is a vault for the master encrypt key.
type MasterEncryptKeyVault interface {
    GetMasterKey(ctx context.Context) ([]byte, error)
}

func NewMasterEncryptKeyVault(setting setting.Provider) MasterEncryptKeyVault {
    return &settingMasterEncryptKeyVault{setting: setting}
}

// settingMasterEncryptKeyVault is a vault for the master encrypt key that gets the key from the setting KV.
type settingMasterEncryptKeyVault struct {
    setting setting.Provider
}

func (v *settingMasterEncryptKeyVault) GetMasterKey(ctx context.Context) ([]byte, error) {
    key := v.setting.MasterEncryptKey(ctx)
    if key == nil {
        return nil, errors.New("master encrypt key is not set")
    }
    return key, nil
}
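Since `MasterEncryptKeyVault` is a single-method interface, alternative key sources (an external KMS, an environment variable, a fixed key in tests) can be swapped in without touching the cryptors. A hypothetical test double:

```go
// staticKeyVault returns a fixed key; useful in unit tests.
type staticKeyVault struct{ key []byte }

func (v *staticKeyVault) GetMasterKey(ctx context.Context) ([]byte, error) {
    if len(v.key) != 32 {
        return nil, errors.New("static master key must be 32 bytes")
    }
    return v.key, nil
}

// usage: cryptor := NewAES256CTR(&staticKeyVault{key: testKey})
```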
@@ -18,6 +18,7 @@ import (
    "github.com/cloudreve/Cloudreve/v4/inventory"
    "github.com/cloudreve/Cloudreve/v4/inventory/types"
    "github.com/cloudreve/Cloudreve/v4/pkg/cache"
+   "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
    "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
    "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
    "github.com/cloudreve/Cloudreve/v4/pkg/hashid"
@@ -47,7 +48,7 @@ type (
func NewDatabaseFS(u *ent.User, fileClient inventory.FileClient, shareClient inventory.ShareClient,
    l logging.Logger, ls lock.LockSystem, settingClient setting.Provider,
    storagePolicyClient inventory.StoragePolicyClient, hasher hashid.Encoder, userClient inventory.UserClient,
-   cache, stateKv cache.Driver, directLinkClient inventory.DirectLinkClient) fs.FileSystem {
+   cache, stateKv cache.Driver, directLinkClient inventory.DirectLinkClient, encryptorFactory encrypt.CryptorFactory) fs.FileSystem {
    return &DBFS{
        user:       u,
        navigators: make(map[string]Navigator),
@@ -62,6 +63,7 @@ func NewDatabaseFS(u *ent.User, fileClient inventory.FileClient, shareClient inv
        cache:            cache,
        stateKv:          stateKv,
        directLinkClient: directLinkClient,
+       encryptorFactory: encryptorFactory,
    }
}
@@ -80,6 +82,7 @@ type DBFS struct {
    cache            cache.Driver
    stateKv          cache.Driver
    mu               sync.Mutex
+   encryptorFactory encrypt.CryptorFactory
}

func (f *DBFS) Recycle() {
@@ -287,6 +290,7 @@ func (f *DBFS) CreateEntity(ctx context.Context, file fs.File, policy *ent.Stora
        Source:          req.Props.SavePath,
        Size:            req.Props.Size,
        UploadSessionID: uuid.FromStringOrNil(o.UploadRequest.Props.UploadSessionID),
+       EncryptMetadata: o.encryptMetadata,
    })
    if err != nil {
        _ = inventory.Rollback(tx)
@@ -617,6 +621,7 @@ func (f *DBFS) createFile(ctx context.Context, parent *File, name string, fileTy
            ModifiedAt:      o.UploadRequest.Props.LastModified,
            UploadSessionID: uuid.FromStringOrNil(o.UploadRequest.Props.UploadSessionID),
            Importing:       o.UploadRequest.ImportFrom != nil,
+           EncryptMetadata: o.encryptMetadata,
        }
    }
@@ -645,6 +650,20 @@ func (f *DBFS) createFile(ctx context.Context, parent *File, name string, fileTy
    return newFile(parent, file), nil
}

+func (f *DBFS) generateEncryptMetadata(ctx context.Context, uploadRequest *fs.UploadRequest, policy *ent.StoragePolicy) (*types.EncryptMetadata, error) {
+   relayEnabled := policy.Settings != nil && policy.Settings.Relay
+   if (len(uploadRequest.Props.EncryptionSupported) > 0 && uploadRequest.Props.EncryptionSupported[0] == types.AlgorithmAES256CTR) || relayEnabled {
+       encryptor, err := f.encryptorFactory(types.AlgorithmAES256CTR)
+       if err != nil {
+           return nil, fmt.Errorf("failed to get encryptor: %w", err)
+       }
+
+       return encryptor.GenerateMetadata(ctx)
+   }
+
+   return nil, nil
+}
+
// getPreferredPolicy tries to get the preferred storage policy for the given file.
func (f *DBFS) getPreferredPolicy(ctx context.Context, file *File) (*ent.StoragePolicy, error) {
    ownerGroup := file.Owner().Edges.Group
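So server-side metadata generation fires in exactly two cases: the client advertises aes-256-ctr as its first supported algorithm, or the policy relays uploads through the server. A hypothetical distillation of that guard (not part of the commit; `EncryptionSupported` is assumed to be a `[]types.Algorithm`):

```go
// shouldEncrypt mirrors the condition in generateEncryptMetadata.
func shouldEncrypt(supported []types.Algorithm, relayEnabled bool) bool {
    clientOK := len(supported) > 0 && supported[0] == types.AlgorithmAES256CTR
    return clientOK || relayEnabled
}

// shouldEncrypt(nil, false)                                         => false
// shouldEncrypt([]types.Algorithm{types.AlgorithmAES256CTR}, false) => true
// shouldEncrypt(nil, true)                                          => true (relayed uploads)
```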
@@ -312,9 +312,9 @@ func (f *DBFS) Delete(ctx context.Context, path []*fs.URI, opts ...fs.Option) ([
        o.apply(opt)
    }

-   var opt *types.EntityRecycleOption
+   var opt *types.EntityProps
    if o.UnlinkOnly {
-       opt = &types.EntityRecycleOption{
+       opt = &types.EntityProps{
            UnlinkOnly: true,
        }
    }
@@ -756,7 +756,7 @@ func (f *DBFS) setCurrentVersion(ctx context.Context, target *File, versionId in
    return nil
}

-func (f *DBFS) deleteFiles(ctx context.Context, targets map[Navigator][]*File, fc inventory.FileClient, opt *types.EntityRecycleOption) ([]fs.Entity, inventory.StorageDiff, error) {
+func (f *DBFS) deleteFiles(ctx context.Context, targets map[Navigator][]*File, fc inventory.FileClient, opt *types.EntityProps) ([]fs.Entity, inventory.StorageDiff, error) {
    if f.user.Edges.Group == nil {
        return nil, nil, fmt.Errorf("user group not loaded")
    }
@@ -2,6 +2,7 @@ package dbfs

import (
    "github.com/cloudreve/Cloudreve/v4/ent"
+   "github.com/cloudreve/Cloudreve/v4/inventory/types"
    "github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
)

@@ -26,6 +27,7 @@ type dbfsOption struct {
    streamListResponseCallback func(parent fs.File, file []fs.File)
    ancestor                   *File
    notRoot                    bool
+   encryptMetadata            *types.EncryptMetadata
}

func newDbfsOption() *dbfsOption {
@@ -50,6 +52,13 @@ func (f optionFunc) Apply(o any) {
    }
}

+// WithEncryptMetadata sets the encrypt metadata for the upload operation.
+func WithEncryptMetadata(encryptMetadata *types.EncryptMetadata) fs.Option {
+   return optionFunc(func(o *dbfsOption) {
+       o.encryptMetadata = encryptMetadata
+   })
+}
+
// WithFilePublicMetadata enables loading file public metadata.
func WithFilePublicMetadata() fs.Option {
    return optionFunc(func(o *dbfsOption) {
@@ -129,6 +129,20 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
        return nil, err
    }

+   // Encryption setting
+   var encryptMetadata *types.EncryptMetadata
+   if !policy.Settings.Encryption || req.ImportFrom != nil || len(req.Props.EncryptionSupported) == 0 {
+       req.Props.EncryptionSupported = nil
+   } else {
+       res, err := f.generateEncryptMetadata(ctx, req, policy)
+       if err != nil {
+           return nil, serializer.NewError(serializer.CodeInternalSetting, "Failed to generate encrypt metadata", err)
+       }
+       encryptMetadata = res
+   }
+
    // validate upload request
    if err := validateNewFile(req.Props.Uri.Name(), req.Props.Size, policy); err != nil {
        return nil, err
@@ -170,6 +184,7 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
    entity, err := f.CreateEntity(ctx, ancestor, policy, entityType, req,
        WithPreviousVersion(req.Props.PreviousVersion),
        fs.WithUploadRequest(req),
+       WithEncryptMetadata(encryptMetadata),
        WithRemoveStaleEntities(),
    )
    if err != nil {
@@ -185,6 +200,7 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
        WithPreferredStoragePolicy(policy),
        WithErrorOnConflict(),
        WithAncestor(ancestor),
+       WithEncryptMetadata(encryptMetadata),
    )
    if err != nil {
        _ = inventory.Rollback(dbTx)
@@ -215,14 +231,15 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..

    session := &fs.UploadSession{
        Props: &fs.UploadProps{
            Uri:                 req.Props.Uri,
            Size:                req.Props.Size,
            SavePath:            req.Props.SavePath,
            LastModified:        req.Props.LastModified,
            UploadSessionID:     req.Props.UploadSessionID,
            ExpireAt:            req.Props.ExpireAt,
            EntityType:          req.Props.EntityType,
            Metadata:            req.Props.Metadata,
+           ClientSideEncrypted: req.Props.ClientSideEncrypted,
        },
        FileID:         fileId,
        NewFileCreated: !fileExisted,
@@ -234,6 +251,10 @@ func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ..
        LockToken: lockToken, // Prevent lock being released.
    }

+   if encryptMetadata != nil {
+       session.EncryptMetadata = encryptMetadata
+   }
+
    // TODO: frontend should create new upload session if resumed session does not exist.
    return session, nil
}
|
||||
@@ -183,6 +183,8 @@ type (
 		UploadSessionID() *uuid.UUID
 		CreatedBy() *ent.User
 		Model() *ent.Entity
+		Props() *types.EntityProps
+		Encrypted() bool
 	}

 	FileExtendedInfo struct {
@@ -238,38 +240,40 @@ type (

 	// UploadCredential for uploading files in client side.
 	UploadCredential struct {
-		SessionID      string   `json:"session_id"`
-		ChunkSize      int64    `json:"chunk_size"` // Chunk size; 0 means no chunking
-		Expires        int64    `json:"expires"`    // Upload credential expiration time, Unix timestamp
-		UploadURLs     []string `json:"upload_urls,omitempty"`
-		Credential     string   `json:"credential,omitempty"`
-		UploadID       string   `json:"uploadID,omitempty"`
-		Callback       string   `json:"callback,omitempty"`
-		Uri            string   `json:"uri,omitempty"` // Storage path
-		AccessKey      string   `json:"ak,omitempty"`
-		KeyTime        string   `json:"keyTime,omitempty"` // Validity period for COS
-		CompleteURL    string   `json:"completeURL,omitempty"`
-		StoragePolicy  *ent.StoragePolicy
-		CallbackSecret string `json:"callback_secret,omitempty"`
-		MimeType       string `json:"mime_type,omitempty"`     // Expected mimetype
-		UploadPolicy   string `json:"upload_policy,omitempty"` // Upyun upload policy
+		SessionID       string   `json:"session_id"`
+		ChunkSize       int64    `json:"chunk_size"` // Chunk size; 0 means no chunking
+		Expires         int64    `json:"expires"`    // Upload credential expiration time, Unix timestamp
+		UploadURLs      []string `json:"upload_urls,omitempty"`
+		Credential      string   `json:"credential,omitempty"`
+		UploadID        string   `json:"uploadID,omitempty"`
+		Callback        string   `json:"callback,omitempty"`
+		Uri             string   `json:"uri,omitempty"` // Storage path
+		AccessKey       string   `json:"ak,omitempty"`
+		KeyTime         string   `json:"keyTime,omitempty"` // Validity period for COS
+		CompleteURL     string   `json:"completeURL,omitempty"`
+		StoragePolicy   *ent.StoragePolicy
+		CallbackSecret  string                 `json:"callback_secret,omitempty"`
+		MimeType        string                 `json:"mime_type,omitempty"`     // Expected mimetype
+		UploadPolicy    string                 `json:"upload_policy,omitempty"` // Upyun upload policy
+		EncryptMetadata *types.EncryptMetadata `json:"encrypt_metadata,omitempty"`
 	}

 	// UploadSession stores the information of an upload session, used in server side.
 	UploadSession struct {
-		UID            int // Initiator
-		Policy         *ent.StoragePolicy
-		FileID         int    // ID of the placeholder file
-		EntityID       int    // ID of the new entity
-		Callback       string // Callback URL
-		CallbackSecret string // Callback secret
-		UploadID       string // Multi-part upload ID
-		UploadURL      string
-		Credential     string
-		ChunkSize      int64
-		SentinelTaskID int
-		NewFileCreated bool // If new file is created for this session
-		Importing      bool // If the upload is importing from another file
+		UID             int // Initiator
+		Policy          *ent.StoragePolicy
+		FileID          int    // ID of the placeholder file
+		EntityID        int    // ID of the new entity
+		Callback        string // Callback URL
+		CallbackSecret  string // Callback secret
+		UploadID        string // Multi-part upload ID
+		UploadURL       string
+		Credential      string
+		ChunkSize       int64
+		SentinelTaskID  int
+		NewFileCreated  bool // If new file is created for this session
+		Importing       bool // If the upload is importing from another file
+		EncryptMetadata *types.EncryptMetadata

 		LockToken string // Token of the locked placeholder file
 		Props     *UploadProps
@@ -288,8 +292,10 @@ type (
 		PreviousVersion string
 		// EntityType is the type of the entity to be created. If not set, a new file will be created
 		// with a default version entity. This will be set in update request for existing files.
-		EntityType *types.EntityType
-		ExpireAt   time.Time
+		EntityType          *types.EntityType
+		ExpireAt            time.Time
+		EncryptionSupported []types.Algorithm
+		ClientSideEncrypted bool // Whether the file stream is already encrypted on the client side.
 	}

 	// FsOption options for underlying file system.
@@ -782,6 +788,14 @@ func (e *DbEntity) Model() *ent.Entity {
 	return e.model
 }

+func (e *DbEntity) Props() *types.EntityProps {
+	return e.model.Props
+}
+
+func (e *DbEntity) Encrypted() bool {
+	return e.model.Props != nil && e.model.Props.EncryptMetadata != nil
+}
+
 func NewEmptyEntity(u *ent.User) Entity {
 	return &DbEntity{
 		model: &ent.Entity{
@@ -120,7 +120,7 @@ func (m *manager) GetDirectLink(ctx context.Context, urls ...*fs.URI) ([]DirectL
 	}

 	source := entitysource.NewEntitySource(target, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
-		m.l, m.config, m.dep.MimeDetector(ctx))
+		m.l, m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory())
 	sourceUrl, err := source.Url(ctx,
 		entitysource.WithSpeedLimit(int64(m.user.Edges.Group.SpeedLimit)),
 		entitysource.WithDisplayName(file.Name()),
@@ -182,7 +182,7 @@ func (m *manager) GetUrlForRedirectedDirectLink(ctx context.Context, dl *ent.Dir
 	}

 	source := entitysource.NewEntitySource(primaryEntity, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
-		m.l, m.config, m.dep.MimeDetector(ctx))
+		m.l, m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory())
 	downloadUrl, err := source.Url(ctx,
 		entitysource.WithExpire(o.Expire),
 		entitysource.WithDownload(o.IsDownload),
@@ -282,7 +282,7 @@ func (m *manager) GetEntityUrls(ctx context.Context, args []GetEntityUrlArgs, op

 	// Cache miss, Generate new url
 	source := entitysource.NewEntitySource(target, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
-		m.l, m.config, m.dep.MimeDetector(ctx))
+		m.l, m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory())
 	downloadUrl, err := source.Url(ctx,
 		entitysource.WithExpire(o.Expire),
 		entitysource.WithDownload(o.IsDownload),
@@ -349,7 +349,7 @@ func (m *manager) GetEntitySource(ctx context.Context, entityID int, opts ...fs.
 	}

 	return entitysource.NewEntitySource(entity, handler, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(), m.l,
-		m.config, m.dep.MimeDetector(ctx), entitysource.WithContext(ctx), entitysource.WithThumb(o.IsThumb)), nil
+		m.config, m.dep.MimeDetector(ctx), m.dep.EncryptorFactory(), entitysource.WithContext(ctx), entitysource.WithThumb(o.IsThumb)), nil
 }

 func (l *manager) SetCurrentVersion(ctx context.Context, path *fs.URI, version int) error {
@@ -22,6 +22,7 @@ import (
 	"github.com/cloudreve/Cloudreve/v4/pkg/conf"
 	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
 	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/local"
+	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
 	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
 	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
 	"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
@@ -83,6 +84,7 @@ type EntitySourceOptions struct {
 	OneTimeDownloadKey string
 	Ctx                context.Context
 	IsThumb            bool
+	DisableCryptor     bool
 }

 type EntityUrl struct {
@@ -143,22 +145,31 @@ func WithThumb(isThumb bool) EntitySourceOption {
 	})
 }

+// WithDisableCryptor disables the cryptor for the file source; the file
+// stream will be presented as is.
+func WithDisableCryptor() EntitySourceOption {
+	return EntitySourceOptionFunc(func(option any) {
+		option.(*EntitySourceOptions).DisableCryptor = true
+	})
+}
+
 func (f EntitySourceOptionFunc) Apply(option any) {
 	f(option)
 }
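Callers that need the stored bytes verbatim, for example to hand ciphertext to another component that performs its own decryption, can opt out of transparent decryption with this option. A hypothetical call site (variable names abbreviated; not taken from this diff):

```go
// Read the blob exactly as stored, skipping the decryption wrapper.
src := entitysource.NewEntitySource(entity, handler, policy, auth, settings, hasher,
	client, logger, config, mimeDetector, encryptorFactory,
	entitysource.WithDisableCryptor())
```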
 type (
 	entitySource struct {
-		e           fs.Entity
-		handler     driver.Handler
-		policy      *ent.StoragePolicy
-		generalAuth auth.Auth
-		settings    setting.Provider
-		hasher      hashid.Encoder
-		c           request.Client
-		l           logging.Logger
-		config      conf.ConfigProvider
-		mime        mime.MimeDetector
+		e                fs.Entity
+		handler          driver.Handler
+		policy           *ent.StoragePolicy
+		generalAuth      auth.Auth
+		settings         setting.Provider
+		hasher           hashid.Encoder
+		c                request.Client
+		l                logging.Logger
+		config           conf.ConfigProvider
+		mime             mime.MimeDetector
+		encryptorFactory encrypt.CryptorFactory

 		rsc io.ReadCloser
 		pos int64
@@ -197,20 +208,22 @@ func NewEntitySource(
 	l logging.Logger,
 	config conf.ConfigProvider,
 	mime mime.MimeDetector,
+	encryptorFactory encrypt.CryptorFactory,
 	opts ...EntitySourceOption,
 ) EntitySource {
 	s := &entitySource{
-		e:           e,
-		handler:     handler,
-		policy:      policy,
-		generalAuth: generalAuth,
-		settings:    settings,
-		hasher:      hasher,
-		c:           c,
-		config:      config,
-		l:           l,
-		mime:        mime,
-		o:           &EntitySourceOptions{},
+		e:                e,
+		handler:          handler,
+		policy:           policy,
+		generalAuth:      generalAuth,
+		settings:         settings,
+		hasher:           hasher,
+		c:                c,
+		config:           config,
+		l:                l,
+		mime:             mime,
+		encryptorFactory: encryptorFactory,
+		o:                &EntitySourceOptions{},
 	}
 	for _, opt := range opts {
 		opt.Apply(s.o)
@@ -237,7 +250,7 @@ func (f *entitySource) CloneToLocalSrc(t types.EntityType, src string) (EntitySo
 	policy := &ent.StoragePolicy{Type: types.PolicyTypeLocal}
 	handler := local.New(policy, f.l, f.config)

-	newSrc := NewEntitySource(e, handler, policy, f.generalAuth, f.settings, f.hasher, f.c, f.l, f.config, f.mime).(*entitySource)
+	newSrc := NewEntitySource(e, handler, policy, f.generalAuth, f.settings, f.hasher, f.c, f.l, f.config, f.mime, f.encryptorFactory).(*entitySource)
 	newSrc.o = f.o
 	return newSrc, nil
 }
@@ -328,6 +341,20 @@ func (f *entitySource) Serve(w http.ResponseWriter, r *http.Request, opts ...Ent
 	response.Header.Del("ETag")
 	response.Header.Del("Content-Disposition")
 	response.Header.Del("Cache-Control")

+	// If the response is successful, decrypt the body if needed
+	if response.StatusCode >= 200 && response.StatusCode < 300 {
+		// Parse offset from Content-Range header if present
+		offset := parseContentRangeOffset(response.Header.Get("Content-Range"))
+
+		body, err := f.getDecryptedRsc(response.Body, offset)
+		if err != nil {
+			return fmt.Errorf("failed to get decrypted rsc: %w", err)
+		}
+
+		response.Body = body
+	}
+
 	logging.Request(f.l,
 		false,
 		response.StatusCode,
@@ -554,7 +581,7 @@ func (f *entitySource) ShouldInternalProxy(opts ...EntitySourceOption) bool {
 	}
 	handlerCapability := f.handler.Capabilities()
 	return f.e.ID() == 0 || handlerCapability.StaticFeatures.Enabled(int(driver.HandlerCapabilityProxyRequired)) ||
-		f.policy.Settings.InternalProxy && !f.o.NoInternalProxy
+		(f.policy.Settings.InternalProxy || f.e.Encrypted()) && !f.o.NoInternalProxy
 }

 func (f *entitySource) Url(ctx context.Context, opts ...EntitySourceOption) (*EntityUrl, error) {
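The extra `f.e.Encrypted()` term is the load-bearing change here: blobs are stored as ciphertext, so a pre-signed URL straight to the backing store would hand the client unreadable bytes. Routing encrypted entities through the internal proxy lets the server decrypt in transit. A reduced model of the decision, for a quick sanity check (not the actual method):

```go
package main

import "fmt"

// shouldInternalProxy mirrors the boolean structure of the changed return
// statement; the four inputs stand in for the real capability and option checks.
func shouldInternalProxy(proxyRequired, policyProxy, encrypted, noInternalProxy bool) bool {
	return proxyRequired || (policyProxy || encrypted) && !noInternalProxy
}

func main() {
	fmt.Println(shouldInternalProxy(false, false, true, false))  // true: ciphertext must pass through the server
	fmt.Println(shouldInternalProxy(false, false, false, false)) // false: plaintext can be served directly
}
```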
@@ -582,6 +609,7 @@ func (f *entitySource) Url(ctx context.Context, opts ...EntitySourceOption) (*En
 	// 1. Internal proxy is required by driver's definition
 	// 2. Internal proxy is enabled in Policy setting and not disabled by option
 	// 3. It's an empty entity.
+	// 4. The entity is encrypted and internal proxy not disabled by option
 	handlerCapability := f.handler.Capabilities()
 	if f.ShouldInternalProxy() {
 		siteUrl := f.settings.SiteURL(ctx)
@@ -655,6 +683,7 @@ func (f *entitySource) resetRequest() error {

 func (f *entitySource) getRsc(pos int64) (io.ReadCloser, error) {
 	// For inbound files, we can use the handler to open the file directly
+	var rsc io.ReadCloser
 	if f.IsLocal() {
 		file, err := f.handler.Open(f.o.Ctx, f.e.Source())
 		if err != nil {
@@ -670,46 +699,75 @@ func (f *entitySource) getRsc(pos int64) (io.ReadCloser, error) {

 		if f.o.SpeedLimit > 0 {
 			bucket := ratelimit.NewBucketWithRate(float64(f.o.SpeedLimit), f.o.SpeedLimit)
-			return lrs{file, ratelimit.Reader(file, bucket)}, nil
+			rsc = lrs{file, ratelimit.Reader(file, bucket)}
 		} else {
-			return file, nil
+			rsc = file
 		}
-	}
+	} else {
+		var urlStr string
+		now := time.Now()

-	var urlStr string
-	now := time.Now()
-
-	// Check if we have a valid cached URL and expiry
-	if f.cachedUrl != "" && now.Before(f.cachedExpiry.Add(-time.Minute)) {
-		// Use cached URL if it's still valid (with 1 minute buffer before expiry)
-		urlStr = f.cachedUrl
-	} else {
-		// Generate new URL and cache it
-		expire := now.Add(defaultUrlExpire)
-		u, err := f.Url(driver.WithForcePublicEndpoint(f.o.Ctx, false), WithNoInternalProxy(), WithExpire(&expire))
-		if err != nil {
-			return nil, fmt.Errorf("failed to generate download url: %w", err)
+		// Check if we have a valid cached URL and expiry
+		if f.cachedUrl != "" && now.Before(f.cachedExpiry.Add(-time.Minute)) {
+			// Use cached URL if it's still valid (with 1 minute buffer before expiry)
+			urlStr = f.cachedUrl
+		} else {
+			// Generate new URL and cache it
+			expire := now.Add(defaultUrlExpire)
+			u, err := f.Url(driver.WithForcePublicEndpoint(f.o.Ctx, false), WithNoInternalProxy(), WithExpire(&expire))
+			if err != nil {
+				return nil, fmt.Errorf("failed to generate download url: %w", err)
+			}
+
+			// Cache the URL and expiry
+			f.cachedUrl = u.Url
+			f.cachedExpiry = expire
+			urlStr = u.Url
 		}

-		// Cache the URL and expiry
-		f.cachedUrl = u.Url
-		f.cachedExpiry = expire
-		urlStr = u.Url
+		h := http.Header{}
+		h.Set("Range", fmt.Sprintf("bytes=%d-", pos))
+		resp := f.c.Request(http.MethodGet, urlStr, nil,
+			request.WithContext(f.o.Ctx),
+			request.WithLogger(f.l),
+			request.WithHeader(h),
+		).CheckHTTPResponse(http.StatusOK, http.StatusPartialContent)
+		if resp.Err != nil {
+			return nil, fmt.Errorf("failed to request download url: %w", resp.Err)
+		}
+
+		rsc = resp.Response.Body
 	}

-	h := http.Header{}
-	h.Set("Range", fmt.Sprintf("bytes=%d-", pos))
-	resp := f.c.Request(http.MethodGet, urlStr, nil,
-		request.WithContext(f.o.Ctx),
-		request.WithLogger(f.l),
-		request.WithHeader(h),
-	).CheckHTTPResponse(http.StatusOK, http.StatusPartialContent)
-	if resp.Err != nil {
-		return nil, fmt.Errorf("failed to request download url: %w", resp.Err)
-	}
+	var err error
+	rsc, err = f.getDecryptedRsc(rsc, pos)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get decrypted rsc: %w", err)
+	}

-	return resp.Response.Body, nil
+	return rsc, nil
 }

+func (f *entitySource) getDecryptedRsc(rsc io.ReadCloser, pos int64) (io.ReadCloser, error) {
+	props := f.e.Props()
+	if props != nil && props.EncryptMetadata != nil && !f.o.DisableCryptor {
+		cryptor, err := f.encryptorFactory(props.EncryptMetadata.Algorithm)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create decryptor: %w", err)
+		}
+		err = cryptor.LoadMetadata(f.o.Ctx, props.EncryptMetadata)
+		if err != nil {
+			return nil, fmt.Errorf("failed to load metadata: %w", err)
+		}
+
+		if err := cryptor.SetSource(rsc, nil, f.e.Size(), pos); err != nil {
+			return nil, fmt.Errorf("failed to set source: %w", err)
+		}
+
+		return cryptor, nil
+	}

+	return rsc, nil
+}
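`getDecryptedRsc` hands the cryptor a source positioned at `pos`, which only works if the cipher supports random access. The encrypt package's Cryptor is not shown in this diff, but AES-256-CTR, the one algorithm named elsewhere in this commit, does allow it: advance the counter by pos/16 blocks, then discard pos%16 keystream bytes. A self-contained sketch under those assumptions:

```go
package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"fmt"
	"io"
)

// ctrStreamAt returns a CTR keystream synchronized to absolute byte offset
// pos. Illustrative only; the real encrypt.Cryptor is not part of this diff.
func ctrStreamAt(block cipher.Block, iv []byte, pos int64) cipher.Stream {
	bs := int64(block.BlockSize())
	ctr := append([]byte(nil), iv...)
	for n := pos / bs; n > 0; n-- { // big-endian counter increment, once per block
		for i := len(ctr) - 1; i >= 0; i-- {
			if ctr[i]++; ctr[i] != 0 {
				break
			}
		}
	}
	s := cipher.NewCTR(block, ctr)
	if skip := pos % bs; skip > 0 {
		buf := make([]byte, skip)
		s.XORKeyStream(buf, buf) // burn the partial block of keystream before pos
	}
	return s
}

func main() {
	key := bytes.Repeat([]byte{1}, 32) // demo key; real keys come from the key vault
	iv := bytes.Repeat([]byte{2}, 16)
	block, _ := aes.NewCipher(key)

	plain := []byte("hello, encrypted blob storage")
	enc := make([]byte, len(plain))
	cipher.NewCTR(block, iv).XORKeyStream(enc, plain)

	// Decrypt starting mid-stream, as getDecryptedRsc does for range requests.
	pos := int64(7)
	dec := &cipher.StreamReader{S: ctrStreamAt(block, iv, pos), R: bytes.NewReader(enc[pos:])}
	out, _ := io.ReadAll(dec)
	fmt.Printf("%s\n", out) // "encrypted blob storage"
}
```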
 // capExpireTime make sure expire time is not too long or too short (if min or max is set)
@@ -1002,6 +1060,33 @@ func sumRangesSize(ranges []httpRange) (size int64) {
 	return
 }

+// parseContentRangeOffset parses the start offset from a Content-Range header.
+// Content-Range format: "bytes start-end/total" (e.g., "bytes 100-200/1000")
+// Returns 0 if the header is empty, invalid, or cannot be parsed.
+func parseContentRangeOffset(contentRange string) int64 {
+	if contentRange == "" {
+		return 0
+	}
+
+	// Content-Range format: "bytes start-end/total"
+	if !strings.HasPrefix(contentRange, "bytes ") {
+		return 0
+	}
+
+	rangeSpec := strings.TrimPrefix(contentRange, "bytes ")
+	dashPos := strings.Index(rangeSpec, "-")
+	if dashPos <= 0 {
+		return 0
+	}
+
+	start, err := strconv.ParseInt(rangeSpec[:dashPos], 10, 64)
+	if err != nil {
+		return 0
+	}
+
+	return start
+}
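Behavior on a few representative headers, using the same parsing rules (a quick standalone check, not part of the commit):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parse reproduces parseContentRangeOffset's logic for demonstration.
func parse(contentRange string) int64 {
	if !strings.HasPrefix(contentRange, "bytes ") {
		return 0
	}
	spec := strings.TrimPrefix(contentRange, "bytes ")
	dash := strings.Index(spec, "-")
	if dash <= 0 {
		return 0
	}
	start, err := strconv.ParseInt(spec[:dash], 10, 64)
	if err != nil {
		return 0
	}
	return start
}

func main() {
	fmt.Println(parse("bytes 100-200/1000")) // 100
	fmt.Println(parse("bytes */1000"))       // 0: no start offset present
	fmt.Println(parse(""))                   // 0: header absent
}
```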
 // countingWriter counts how many bytes have been written to it.
 type countingWriter int64
@@ -147,7 +147,8 @@ func NewFileManager(dep dependency.Dep, u *ent.User) FileManager {
 		user:     u,
 		settings: dep.SettingProvider(),
 		fs: dbfs.NewDatabaseFS(u, dep.FileClient(), dep.ShareClient(), dep.Logger(), dep.LockSystem(),
-			dep.SettingProvider(), dep.StoragePolicyClient(), dep.HashIDEncoder(), dep.UserClient(), dep.KV(), dep.NavigatorStateKV(), dep.DirectLinkClient()),
+			dep.SettingProvider(), dep.StoragePolicyClient(), dep.HashIDEncoder(), dep.UserClient(), dep.KV(), dep.NavigatorStateKV(),
+			dep.DirectLinkClient(), dep.EncryptorFactory()),
 		kv:     dep.KV(),
 		config: config,
 		auth:   dep.GeneralAuth(),
@@ -222,7 +222,7 @@ func (m *manager) RecycleEntities(ctx context.Context, force bool, entityIDs ...

 	toBeDeletedSrc := lo.Map(lo.Filter(chunk, func(item fs.Entity, index int) bool {
 		// Only delete entities that are not marked as "unlink only"
-		return item.Model().RecycleOptions == nil || !item.Model().RecycleOptions.UnlinkOnly
+		return item.Model().Props == nil || !item.Model().Props.UnlinkOnly
 	}), func(entity fs.Entity, index int) string {
 		return entity.Source()
 	})
@@ -29,7 +29,7 @@ type (
 	// ConfirmUploadSession confirms whether upload session is valid for upload.
 	ConfirmUploadSession(ctx context.Context, session *fs.UploadSession, chunkIndex int) (fs.File, error)
 	// Upload uploads file data to storage
-	Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy) error
+	Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy, session *fs.UploadSession) error
 	// CompleteUpload completes upload session and returns file object
 	CompleteUpload(ctx context.Context, session *fs.UploadSession) (fs.File, error)
 	// CancelUploadSession cancels upload session
@@ -93,7 +93,8 @@ func (m *manager) CreateUploadSession(ctx context.Context, req *fs.UploadRequest
 	uploadSession.ChunkSize = uploadSession.Policy.Settings.ChunkSize
 	// Create upload credential for underlying storage driver
 	credential := &fs.UploadCredential{}
-	if !uploadSession.Policy.Settings.Relay || m.stateless {
+	unrelayed := !uploadSession.Policy.Settings.Relay || m.stateless
+	if unrelayed {
 		credential, err = d.Token(ctx, uploadSession, req)
 		if err != nil {
 			m.OnUploadFailed(ctx, uploadSession)
@@ -103,12 +104,18 @@ func (m *manager) CreateUploadSession(ctx context.Context, req *fs.UploadRequest
 		// For relayed upload, we don't need to create credential
 		uploadSession.ChunkSize = 0
 		credential.ChunkSize = 0
+		credential.EncryptMetadata = nil
+		uploadSession.Props.ClientSideEncrypted = false
 	}
 	credential.SessionID = uploadSession.Props.UploadSessionID
 	credential.Expires = req.Props.ExpireAt.Unix()
 	credential.StoragePolicy = uploadSession.Policy
 	credential.CallbackSecret = uploadSession.CallbackSecret
 	credential.Uri = uploadSession.Props.Uri.String()
+	credential.EncryptMetadata = uploadSession.EncryptMetadata
+	if !unrelayed {
+		credential.EncryptMetadata = nil
+	}

 	// If upload sentinel check is required, queue a check task
 	if d.Capabilities().StaticFeatures.Enabled(int(driver.HandlerCapabilityUploadSentinelRequired)) {
@@ -178,12 +185,34 @@ func (m *manager) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts
 	return m.fs.PrepareUpload(ctx, req, opts...)
 }

-func (m *manager) Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy) error {
+func (m *manager) Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy, session *fs.UploadSession) error {
 	d, err := m.GetStorageDriver(ctx, m.CastStoragePolicyOnSlave(ctx, policy))
 	if err != nil {
 		return err
 	}

+	if session != nil && session.EncryptMetadata != nil && !req.Props.ClientSideEncrypted {
+		cryptor, err := m.dep.EncryptorFactory()(session.EncryptMetadata.Algorithm)
+		if err != nil {
+			return fmt.Errorf("failed to create cryptor: %w", err)
+		}
+
+		err = cryptor.LoadMetadata(ctx, session.EncryptMetadata)
+		if err != nil {
+			return fmt.Errorf("failed to load encrypt metadata: %w", err)
+		}
+
+		if err := cryptor.SetSource(req.File, req.Seeker, req.Props.Size, 0); err != nil {
+			return fmt.Errorf("failed to set source: %w", err)
+		}
+
+		req.File = cryptor
+
+		if req.Seeker != nil {
+			req.Seeker = cryptor
+		}
+	}
+
 	if err := d.Put(ctx, req); err != nil {
 		return serializer.NewError(serializer.CodeIOFailed, "Failed to upload file", err)
 	}
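When the session carries encrypt metadata and the stream was not already encrypted by the client, the manager wraps `req.File` so the storage driver only ever sees ciphertext; the cryptor also stands in as the seeker so retries re-encrypt from the right offset. A minimal sketch of encrypt-on-upload with a stream cipher (CTR encryption and decryption are the same XOR transform; key and IV here are demo values, not the real key-vault material):

```go
package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"fmt"
	"io"
)

func main() {
	key := bytes.Repeat([]byte{1}, 32)
	iv := bytes.Repeat([]byte{2}, 16)
	block, _ := aes.NewCipher(key)

	// Wrap the request body so anything reading it gets ciphertext,
	// analogous to swapping req.File for the cryptor.
	body := io.Reader(bytes.NewReader([]byte("file contents")))
	encrypted := &cipher.StreamReader{S: cipher.NewCTR(block, iv), R: body}

	stored, _ := io.ReadAll(encrypted) // what d.Put would write to the backend
	fmt.Println(bytes.Equal(stored, []byte("file contents"))) // false: ciphertext at rest
}
```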
@@ -301,6 +330,8 @@ func (m *manager) Update(ctx context.Context, req *fs.UploadRequest, opts ...fs.
 	}

 	req.Props.UploadSessionID = uuid.Must(uuid.NewV4()).String()
+	// Server side supported encryption algorithms
+	req.Props.EncryptionSupported = []types.Algorithm{types.AlgorithmAES256CTR}

 	if m.stateless {
 		return m.updateStateless(ctx, req, o)
@@ -312,7 +343,7 @@ func (m *manager) Update(ctx context.Context, req *fs.UploadRequest, opts ...fs.
 		return nil, fmt.Errorf("failed to prepare upload: %w", err)
 	}

-	if err := m.Upload(ctx, req, uploadSession.Policy); err != nil {
+	if err := m.Upload(ctx, req, uploadSession.Policy, uploadSession); err != nil {
 		m.OnUploadFailed(ctx, uploadSession)
 		return nil, fmt.Errorf("failed to upload new entity: %w", err)
 	}
@@ -368,7 +399,7 @@ func (m *manager) updateStateless(ctx context.Context, req *fs.UploadRequest, o
 	}

 	req.Props = res.Req.Props
-	if err := m.Upload(ctx, req, res.Session.Policy); err != nil {
+	if err := m.Upload(ctx, req, res.Session.Policy, res.Session); err != nil {
 		if err := o.Node.OnUploadFailed(ctx, &fs.StatelessOnUploadFailedService{
 			UploadSession: res.Session,
 			UserID:        o.StatelessUserID,
@@ -18,6 +18,7 @@ import (
 	"github.com/cloudreve/Cloudreve/v4/inventory"
 	"github.com/cloudreve/Cloudreve/v4/inventory/types"
 	"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
+	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/encrypt"
 	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
 	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
 	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
@@ -217,11 +218,18 @@ func (m *CreateArchiveTask) listEntitiesAndSendToSlave(ctx context.Context, dep
 	user := inventory.UserFromContext(ctx)
 	fm := manager.NewFileManager(dep, user)
 	storagePolicyClient := dep.StoragePolicyClient()
+	masterKey, _ := dep.MasterEncryptKeyVault().GetMasterKey(ctx)

 	failed, err := fm.CreateArchive(ctx, uris, io.Discard,
 		fs.WithDryRun(func(name string, e fs.Entity) {
+			entityModel, err := decryptEntityKeyIfNeeded(masterKey, e.Model())
+			if err != nil {
+				m.l.Warning("Failed to decrypt entity key for %q: %s", name, err)
+				return
+			}
+
 			payload.Entities = append(payload.Entities, SlaveCreateArchiveEntity{
-				Entity: e.Model(),
+				Entity: entityModel,
 				Path:   name,
 			})
 			if _, ok := payload.Policies[e.PolicyID()]; !ok {
@@ -680,3 +688,18 @@ func (m *SlaveCreateArchiveTask) Progress(ctx context.Context) queue.Progresses

 	return m.progress
 }

+func decryptEntityKeyIfNeeded(masterKey []byte, entity *ent.Entity) (*ent.Entity, error) {
+	if entity.Props == nil || entity.Props.EncryptMetadata == nil || entity.Props.EncryptMetadata.KeyPlainText != nil {
+		return entity, nil
+	}
+
+	decryptedKey, err := encrypt.DecryptWithMasterKey(masterKey, entity.Props.EncryptMetadata.Key)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decrypt entity key: %w", err)
+	}
+
+	entity.Props.EncryptMetadata.KeyPlainText = decryptedKey
+	entity.Props.EncryptMetadata.Key = nil
+	return entity, nil
+}
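`decryptEntityKeyIfNeeded` implies an envelope scheme: each entity owns its own data key, stored only in wrapped form under the site master key, and unwrapped just-in-time when a slave node must read the blob. `encrypt.DecryptWithMasterKey`'s actual format is not shown in this diff; a common way to build such a wrap/unwrap pair is AES-GCM, sketched here under that assumption:

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"io"
)

// wrapKey seals a per-entity data key under the master key (nonce || ciphertext).
func wrapKey(masterKey, dataKey []byte) ([]byte, error) {
	block, err := aes.NewCipher(masterKey)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return nil, err
	}
	return gcm.Seal(nonce, nonce, dataKey, nil), nil
}

// unwrapKey recovers the data key, failing if the blob was tampered with.
func unwrapKey(masterKey, wrapped []byte) ([]byte, error) {
	block, err := aes.NewCipher(masterKey)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	ns := gcm.NonceSize()
	return gcm.Open(nil, wrapped[:ns], wrapped[ns:], nil)
}

func main() {
	master := make([]byte, 32)
	dataKey := make([]byte, 32)
	rand.Read(master)
	rand.Read(dataKey)

	wrapped, _ := wrapKey(master, dataKey)
	plain, _ := unwrapKey(master, wrapped)
	fmt.Println(len(wrapped), string(plain) == string(dataKey)) // 60 true
}
```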
@@ -194,9 +194,15 @@ func (m *ExtractArchiveTask) createSlaveExtractTask(ctx context.Context, dep dep
 		return task.StatusError, fmt.Errorf("failed to get policy: %w", err)
 	}

+	masterKey, _ := dep.MasterEncryptKeyVault().GetMasterKey(ctx)
+	entityModel, err := decryptEntityKeyIfNeeded(masterKey, archiveFile.PrimaryEntity().Model())
+	if err != nil {
+		return task.StatusError, fmt.Errorf("failed to decrypt entity key for archive file %q: %s", archiveFile.DisplayName(), err)
+	}
+
 	payload := &SlaveExtractArchiveTaskState{
 		FileName: archiveFile.DisplayName(),
-		Entity:   archiveFile.PrimaryEntity().Model(),
+		Entity:   entityModel,
 		Policy:   policy,
 		Encoding: m.state.Encoding,
 		Dst:      m.state.Dst,
@@ -100,7 +100,7 @@ func (f *ffprobeExtractor) Extract(ctx context.Context, ext string, source entit
 	}

 	var input string
-	if source.IsLocal() {
+	if source.IsLocal() && !source.Entity().Encrypted() {
 		input = source.LocalPath(ctx)
 	} else {
 		expire := time.Now().Add(UrlExpire)
@@ -2,13 +2,14 @@ package setting

 import (
 	"context"
+	"os"
+	"strings"

 	"github.com/cloudreve/Cloudreve/v4/inventory"
 	"github.com/cloudreve/Cloudreve/v4/pkg/cache"
 	"github.com/cloudreve/Cloudreve/v4/pkg/conf"
 	"github.com/cloudreve/Cloudreve/v4/pkg/logging"
 	"github.com/samber/lo"
-	"os"
-	"strings"
 )

 const (
@@ -2,6 +2,7 @@ package setting

 import (
 	"context"
+	"encoding/base64"
 	"encoding/json"
 	"fmt"
 	"net/url"
@@ -10,7 +11,6 @@ import (
 	"time"

 	"github.com/cloudreve/Cloudreve/v4/inventory/types"
-
 	"github.com/cloudreve/Cloudreve/v4/pkg/auth/requestinfo"
 	"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
 )
@@ -208,6 +208,8 @@ type (
 		CustomHTML(ctx context.Context) *CustomHTML
 		// FFMpegExtraArgs returns the extra arguments of ffmpeg thumb generator.
 		FFMpegExtraArgs(ctx context.Context) string
+		// MasterEncryptKey returns the master encrypt key.
+		MasterEncryptKey(ctx context.Context) []byte
 	}
 	UseFirstSiteUrlCtxKey = struct{}
 )
@@ -235,6 +237,15 @@ type (
 	}
 )

+func (s *settingProvider) MasterEncryptKey(ctx context.Context) []byte {
+	encoded := s.getString(ctx, "encrypt_master_key", "")
+	key, err := base64.StdEncoding.DecodeString(encoded)
+	if err != nil {
+		return nil
+	}
+	return key
+}
+
 func (s *settingProvider) CustomHTML(ctx context.Context) *CustomHTML {
 	return &CustomHTML{
 		HeadlessFooter: s.getString(ctx, "headless_footer_html", ""),
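The provider returns nil when the setting is absent or not valid base64, so key provisioning reduces to storing one base64 string. A one-off generator for such a key (the 32-byte length is an assumption; this diff does not state the required size):

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	// Generate a random master key and print it base64-encoded, matching
	// the decoding the "encrypt_master_key" setting expects.
	key := make([]byte, 32) // assumed length; not specified in the diff
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	fmt.Println(base64.StdEncoding.EncodeToString(key))
}
```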
@@ -9,7 +9,6 @@ import (
 	"strings"
 	"time"

-	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
 	"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
 	"github.com/cloudreve/Cloudreve/v4/pkg/logging"
 	"github.com/cloudreve/Cloudreve/v4/pkg/setting"
@@ -51,10 +50,17 @@ func (f *FfmpegGenerator) Generate(ctx context.Context, es entitysource.EntitySo

 	input := ""
 	expire := time.Now().Add(urlTimeout)
-	if es.IsLocal() {
+	if es.IsLocal() && !es.Entity().Encrypted() {
 		input = es.LocalPath(ctx)
 	} else {
-		src, err := es.Url(driver.WithForcePublicEndpoint(ctx, false), entitysource.WithNoInternalProxy(), entitysource.WithContext(ctx), entitysource.WithExpire(&expire))
+		opts := []entitysource.EntitySourceOption{
+			entitysource.WithContext(ctx),
+			entitysource.WithExpire(&expire),
+		}
+		if !es.Entity().Encrypted() {
+			opts = append(opts, entitysource.WithNoInternalProxy())
+		}
+		src, err := es.Url(ctx, opts...)
 		if err != nil {
 			return &Result{Path: tempOutputPath}, fmt.Errorf("failed to get entity url: %w", err)
 		}
@@ -42,7 +42,7 @@ func (l *LibreOfficeGenerator) Generate(ctx context.Context, es entitysource.Ent
 	)

 	tempInputPath := ""
-	if es.IsLocal() {
+	if es.IsLocal() && !es.Entity().Encrypted() {
 		tempInputPath = es.LocalPath(ctx)
 	} else {
 		// If not local policy files, download to temp folder
@@ -46,7 +46,7 @@ func (v *VipsGenerator) Generate(ctx context.Context, es entitysource.EntitySour
 	usePipe := true
 	if runtime.GOOS == "windows" {
 		// Pipe IO is not working on Windows for VIPS
-		if es.IsLocal() {
+		if es.IsLocal() && !es.Entity().Encrypted() {
 			// escape [ and ] in file name
 			input = fmt.Sprintf("[filename=\"%s\"]", es.LocalPath(ctx))
 			usePipe = false
@@ -347,7 +347,7 @@ func (s *SingleFileService) Url(c *gin.Context) (string, error) {
 	}

 	es := entitysource.NewEntitySource(fs.NewEntity(primaryEntity), driver, policy, dep.GeneralAuth(),
-		dep.SettingProvider(), dep.HashIDEncoder(), dep.RequestClient(), dep.Logger(), dep.ConfigProvider(), dep.MimeDetector(ctx))
+		dep.SettingProvider(), dep.HashIDEncoder(), dep.RequestClient(), dep.Logger(), dep.ConfigProvider(), dep.MimeDetector(ctx), dep.EncryptorFactory())

 	expire := time.Now().Add(time.Hour * 1)
 	url, err := es.Url(ctx, entitysource.WithExpire(&expire), entitysource.WithDisplayName(file.Name))
@@ -547,7 +547,7 @@ func (s *SingleEntityService) Url(c *gin.Context) (string, error) {
 	}

 	es := entitysource.NewEntitySource(fs.NewEntity(entity), driver, policy, dep.GeneralAuth(),
-		dep.SettingProvider(), dep.HashIDEncoder(), dep.RequestClient(), dep.Logger(), dep.ConfigProvider(), dep.MimeDetector(c))
+		dep.SettingProvider(), dep.HashIDEncoder(), dep.RequestClient(), dep.Logger(), dep.ConfigProvider(), dep.MimeDetector(c), dep.EncryptorFactory())

 	expire := time.Now().Add(time.Hour * 1)
 	url, err := es.Url(c, entitysource.WithDownload(true), entitysource.WithExpire(&expire), entitysource.WithDisplayName(path.Base(entity.Source)))
@@ -126,37 +126,49 @@ func BuildTaskResponse(task queue.Task, node *ent.Node, hasher hashid.Encoder) *
 }

 type UploadSessionResponse struct {
-	SessionID      string         `json:"session_id"`
-	UploadID       string         `json:"upload_id"`
-	ChunkSize      int64          `json:"chunk_size"` // Chunk size; 0 means no chunking
-	Expires        int64          `json:"expires"`    // Upload credential expiration time, Unix timestamp
-	UploadURLs     []string       `json:"upload_urls,omitempty"`
-	Credential     string         `json:"credential,omitempty"`
-	AccessKey      string         `json:"ak,omitempty"`
-	KeyTime        string         `json:"keyTime,omitempty"` // Validity period for COS
-	CompleteURL    string         `json:"completeURL,omitempty"`
-	StoragePolicy  *StoragePolicy `json:"storage_policy,omitempty"`
-	Uri            string         `json:"uri"`
-	CallbackSecret string         `json:"callback_secret"`
-	MimeType       string         `json:"mime_type,omitempty"`
-	UploadPolicy   string         `json:"upload_policy,omitempty"`
+	SessionID       string                 `json:"session_id"`
+	UploadID        string                 `json:"upload_id"`
+	ChunkSize       int64                  `json:"chunk_size"` // Chunk size; 0 means no chunking
+	Expires         int64                  `json:"expires"`    // Upload credential expiration time, Unix timestamp
+	UploadURLs      []string               `json:"upload_urls,omitempty"`
+	Credential      string                 `json:"credential,omitempty"`
+	AccessKey       string                 `json:"ak,omitempty"`
+	KeyTime         string                 `json:"keyTime,omitempty"` // Validity period for COS
+	CompleteURL     string                 `json:"completeURL,omitempty"`
+	StoragePolicy   *StoragePolicy         `json:"storage_policy,omitempty"`
+	Uri             string                 `json:"uri"`
+	CallbackSecret  string                 `json:"callback_secret"`
+	MimeType        string                 `json:"mime_type,omitempty"`
+	UploadPolicy    string                 `json:"upload_policy,omitempty"`
+	EncryptMetadata *types.EncryptMetadata `json:"encrypt_metadata,omitempty"`
 }

 func BuildUploadSessionResponse(session *fs.UploadCredential, hasher hashid.Encoder) *UploadSessionResponse {
-	return &UploadSessionResponse{
-		SessionID:      session.SessionID,
-		ChunkSize:      session.ChunkSize,
-		Expires:        session.Expires,
-		UploadURLs:     session.UploadURLs,
-		Credential:     session.Credential,
-		CompleteURL:    session.CompleteURL,
-		Uri:            session.Uri,
-		UploadID:       session.UploadID,
-		StoragePolicy:  BuildStoragePolicy(session.StoragePolicy, hasher),
-		CallbackSecret: session.CallbackSecret,
-		MimeType:       session.MimeType,
-		UploadPolicy:   session.UploadPolicy,
+	res := &UploadSessionResponse{
+		SessionID:       session.SessionID,
+		ChunkSize:       session.ChunkSize,
+		Expires:         session.Expires,
+		UploadURLs:      session.UploadURLs,
+		Credential:      session.Credential,
+		CompleteURL:     session.CompleteURL,
+		Uri:             session.Uri,
+		UploadID:        session.UploadID,
+		StoragePolicy:   BuildStoragePolicy(session.StoragePolicy, hasher),
+		CallbackSecret:  session.CallbackSecret,
+		MimeType:        session.MimeType,
+		UploadPolicy:    session.UploadPolicy,
+		EncryptMetadata: session.EncryptMetadata,
 	}

+	if session.EncryptMetadata != nil {
+		res.EncryptMetadata = &types.EncryptMetadata{
+			Algorithm:    session.EncryptMetadata.Algorithm,
+			KeyPlainText: session.EncryptMetadata.KeyPlainText,
+			IV:           session.EncryptMetadata.IV,
+		}
+	}
+
+	return res
 }
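Note what the client actually receives: when encrypt metadata is present, the sanitizing branch replaces the wholesale copy with one carrying only Algorithm, KeyPlainText and IV. The client thus gets the plaintext per-file key it needs for client-side encryption, while the master-key-wrapped Key ciphertext (seen in decryptEntityKeyIfNeeded above) stays server-side.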
 // WopiFileInfo Response for `CheckFileInfo`
@@ -270,6 +282,7 @@ type StoragePolicy struct {
 	MaxSize          int64  `json:"max_size"`
 	Relay            bool   `json:"relay,omitempty"`
 	ChunkConcurrency int    `json:"chunk_concurrency,omitempty"`
+	Encryption       bool   `json:"encryption,omitempty"`
 }

 type Entity struct {
@@ -469,6 +482,7 @@ func BuildStoragePolicy(sp *ent.StoragePolicy, hasher hashid.Encoder) *StoragePo
 		MaxSize:          sp.MaxSize,
 		Relay:            sp.Settings.Relay,
 		ChunkConcurrency: sp.Settings.ChunkConcurrency,
+		Encryption:       sp.Settings.Encryption,
 	}

 	if sp.Settings.IsFileTypeDenyList {
@@ -3,6 +3,9 @@ package explorer

 import (
 	"context"
+	"fmt"
+	"strconv"
+	"time"

 	"github.com/cloudreve/Cloudreve/v4/application/dependency"
 	"github.com/cloudreve/Cloudreve/v4/inventory"
 	"github.com/cloudreve/Cloudreve/v4/inventory/types"
@@ -13,21 +16,20 @@ import (
 	"github.com/cloudreve/Cloudreve/v4/pkg/request"
 	"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
 	"github.com/gin-gonic/gin"
-	"strconv"
-	"time"
 )
 // CreateUploadSessionService is the service for obtaining an upload credential.
 type (
 	CreateUploadSessionParameterCtx struct{}
 	CreateUploadSessionService      struct {
-		Uri          string            `json:"uri" binding:"required"`
-		Size         int64             `json:"size" binding:"min=0"`
-		LastModified int64             `json:"last_modified"`
-		MimeType     string            `json:"mime_type"`
-		PolicyID     string            `json:"policy_id"`
-		Metadata     map[string]string `json:"metadata" binding:"max=256"`
-		EntityType   string            `json:"entity_type" binding:"eq=|eq=live_photo|eq=version"`
+		Uri                 string            `json:"uri" binding:"required"`
+		Size                int64             `json:"size" binding:"min=0"`
+		LastModified        int64             `json:"last_modified"`
+		MimeType            string            `json:"mime_type"`
+		PolicyID            string            `json:"policy_id"`
+		Metadata            map[string]string `json:"metadata" binding:"max=256"`
+		EntityType          string            `json:"entity_type" binding:"eq=|eq=live_photo|eq=version"`
+		EncryptionSupported []types.Algorithm `json:"encryption_supported"`
 	}
 )
@@ -68,6 +70,8 @@ func (service *CreateUploadSessionService) Create(c context.Context) (*UploadSes
 			Metadata:               service.Metadata,
 			EntityType:             entityType,
 			PreferredStoragePolicy: policyId,
+			EncryptionSupported:    service.EncryptionSupported,
+			ClientSideEncrypted:    len(service.EncryptionSupported) > 0,
 		},
 	}
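On the wire, a client opts in simply by listing the algorithms it can handle; note that merely advertising support flips `ClientSideEncrypted`, so the server then expects the uploaded stream to already be ciphertext. A hypothetical session-creation payload (field names come from the binding tags above; the algorithm identifier is an assumption):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Sketch of a client request body for creating an upload session.
	body := map[string]any{
		"uri":                  "cloudreve://my/file.bin",
		"size":                 1024,
		"encryption_supported": []string{"aes_256_ctr"}, // assumed identifier
	}
	b, _ := json.Marshal(body)
	fmt.Println(string(b))
}
```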
@@ -133,6 +137,7 @@ func (service *UploadService) SlaveUpload(c *gin.Context) error {
 	}

 	uploadSession := uploadSessionRaw.(fs.UploadSession)
+	uploadSession.Props.ClientSideEncrypted = true

 	// Parse chunk index from query
 	service.Index, _ = strconv.Atoi(c.Query("chunk"))
@@ -175,7 +180,7 @@ func processChunkUpload(c *gin.Context, m manager.FileManager, session *fs.Uploa

 	// Perform the upload
 	ctx := context.WithValue(c, cluster.SlaveNodeIDCtx{}, strconv.Itoa(session.Policy.NodeID))
-	err = m.Upload(ctx, req, session.Policy)
+	err = m.Upload(ctx, req, session.Policy, session)
 	if err != nil {
 		return err
 	}