feat: allow configuring maximum archive size
This also splits the zipping function up into two parts, so that we can use the response to notify the user in case the resulting archive turns out to be too big.

Branch: pull/566/head
parent e60d52a136
commit baa8babc59

 src/args.rs | 15 +++++++++++++++
--- a/src/args.rs
+++ b/src/args.rs
@@ -148,6 +148,16 @@ pub fn build_cli() -> Command {
                 .action(ArgAction::SetTrue)
                 .help("Allow download folders as archive file"),
         )
+        .arg(
+            Arg::new("max-archive-size")
+                .env("DUFS_MAX_ARCHIVE_SIZE")
+                .hide_env(true)
+                .long("max-archive-size")
+                .value_parser(value_parser!(u64))
+                .help("Maximum (uncompressed) total size in bytes of files/directories to be archived [default: no maximum]")
+                .value_name("max_archive_size"),
+        )
         .arg(
             Arg::new("follow-symlinks")
                 .env("DUFS_FOLLOW_SYMLINKS")
@@ -289,6 +299,7 @@ pub struct Args {
     pub allow_search: bool,
     pub allow_symlink: bool,
     pub allow_archive: bool,
+    pub max_archive_size: Option<u64>,
     pub follow_symlinks: bool,
     pub render_index: bool,
     pub render_spa: bool,
@@ -394,6 +405,10 @@ impl Args {
             args.render_index = matches.get_flag("render-index");
         }
 
+        if let Some(max_archive_size) = matches.get_one::<u64>("max-archive-size") {
+            args.max_archive_size = Some(*max_archive_size);
+        }
+
         if !args.render_try_index {
             args.render_try_index = matches.get_flag("render-try-index");
         }
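Usage note: with the value parsed into Args.max_archive_size, the cap can be set either on the command line or via the (hidden) environment variable. The 1073741824 (1 GiB) value below is purely illustrative, and archiving itself must still be enabled by the allow-archive flag shown in the first hunk:

    dufs --allow-archive --max-archive-size 1073741824
    DUFS_MAX_ARCHIVE_SIZE=1073741824 dufs --allow-archive

When neither is given, max_archive_size stays None, i.e. no maximum, matching the default stated in the help text.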
--- a/src/server.rs
+++ b/src/server.rs
@@ -8,7 +8,7 @@ use crate::utils::{
 };
 use crate::Args;
 
-use anyhow::{anyhow, Result};
+use anyhow::{anyhow, bail, Result};
 use async_zip::{tokio::write::ZipFileWriter, Compression, ZipDateTime, ZipEntryBuilder};
 use base64::{engine::general_purpose::STANDARD, Engine as _};
 use bytes::Bytes;
@@ -37,6 +37,7 @@ use std::collections::HashMap;
 use std::fs::Metadata;
 use std::io::SeekFrom;
 use std::net::SocketAddr;
+use std::os::unix::fs::MetadataExt;
 use std::path::{Component, Path, PathBuf};
 use std::sync::atomic::{self, AtomicBool};
 use std::sync::Arc;
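Aside: std::os::unix::fs::MetadataExt is imported for the metadata.size() call added further down, which ties this code path to unix targets. std::fs::Metadata::len() returns the same byte count portably, so a cross-platform variant could presumably read the size as:

    let size = entry.path().symlink_metadata()?.len(); // portable equivalent of MetadataExt::size()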
@@ -661,19 +662,36 @@ impl Server {
         let running = self.running.clone();
         let compression = self.args.compress.to_compression();
         let follow_symlinks = self.args.follow_symlinks;
-        tokio::spawn(async move {
-            if let Err(e) = zip_dir(
-                &mut writer,
-                &path,
-                access_paths,
-                &hidden,
-                compression,
-                follow_symlinks,
-                running,
-            )
+        let max_archive_size = self.args.max_archive_size;
+        // We collect the paths first to ensure their size doesn't exceed the configured maximum.
+        let zip_paths = collect_zip_paths(
+            &path,
+            access_paths,
+            &hidden,
+            follow_symlinks,
+            max_archive_size,
+            running,
+        )
+        .await;
+        let zip_paths = match zip_paths {
+            Ok(z) => z,
+            Err(e) => {
+                let msg = format!("Cannot archive directory: {e}");
+                res.headers_mut().clear();
+                status_bad_request(res, &msg);
+                return Ok(())
+            }
+        };
+        tokio::spawn(async move {
+            if let Err(e) = zip_dir(
+                &mut writer,
+                zip_paths,
+                &path,
+                compression,
+            )
             .await
             {
-                error!("Failed to zip {}, {}", path.display(), e);
+                error!("Failed to zip {}, {e}", path.display());
             }
         });
         let reader_stream = ReaderStream::with_capacity(reader, BUF_SIZE);
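On the request path this means the size check now runs before the streaming response is handed off: if collect_zip_paths bails, the prepared headers are cleared and the client gets a plain 400 instead of a truncated archive. Roughly (a hypothetical session, assuming dufs's usual ?zip download query, its default port, and a 1073741824-byte cap):

    $ curl -i 'http://127.0.0.1:5000/big-dir?zip'
    HTTP/1.1 400 Bad Request
    ...
    Cannot archive directory: Sorry, but this would exceed the maximum archive size of 1073741824 bytes.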
@@ -1636,20 +1654,19 @@ fn res_multistatus(res: &mut Response, content: &str) {
     ));
 }
 
-async fn zip_dir<W: AsyncWrite + Unpin>(
-    writer: &mut W,
+async fn collect_zip_paths(
     dir: &Path,
     access_paths: AccessPaths,
     hidden: &[String],
-    compression: Compression,
     follow_symlinks: bool,
+    max_size: Option<u64>,
     running: Arc<AtomicBool>,
-) -> Result<()> {
-    let mut writer = ZipFileWriter::with_tokio(writer);
+) -> Result<Vec<PathBuf>> {
     let hidden = Arc::new(hidden.to_vec());
     let dir_clone = dir.to_path_buf();
-    let zip_paths = tokio::task::spawn_blocking(move || {
+    tokio::task::spawn_blocking(move || {
         let mut paths: Vec<PathBuf> = vec![];
+        let mut total_size = 0u64;
         for dir in access_paths.entry_paths(&dir_clone) {
             let mut it = WalkDir::new(&dir).follow_links(follow_symlinks).into_iter();
             it.next();
@@ -1666,7 +1683,13 @@ async fn zip_dir<W: AsyncWrite + Unpin>(
                 }
                 continue;
             }
-            if entry.path().symlink_metadata().is_err() {
+            if let Ok(metadata) = entry.path().symlink_metadata() {
+                total_size += metadata.size();
+
+                if let Some(max_size) = max_size.filter(|max| total_size > *max) {
+                    bail!("Sorry, but this would exceed the maximum archive size of {max_size} bytes.");
+                };
+            } else {
                 continue;
             }
             if !file_type.is_file() {
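A note on the max_size.filter(...) guard above: Option::filter keeps the wrapped value only when the predicate holds, so the bail! arm fires exactly when a cap is configured and the running total has passed it. A minimal standalone sketch of that logic (exceeds_cap is a hypothetical helper, plain std Rust):

    fn exceeds_cap(total_size: u64, max_size: Option<u64>) -> Option<u64> {
        // Some(max) only if a maximum is configured AND total_size is over it;
        // None both for "no cap configured" and for "still within the cap".
        max_size.filter(|max| total_size > *max)
    }

    fn main() {
        assert_eq!(exceeds_cap(10, None), None); // no maximum configured
        assert_eq!(exceeds_cap(10, Some(20)), None); // within the cap
        assert_eq!(exceeds_cap(30, Some(20)), Some(20)); // over the cap -> bail!
    }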
@@ -1675,9 +1698,18 @@
                 paths.push(entry_path.to_path_buf());
             }
         }
-        paths
+        Ok(paths)
     })
-    .await?;
+    .await?
+}
+
+async fn zip_dir<W: AsyncWrite + Unpin>(
+    writer: &mut W,
+    zip_paths: Vec<PathBuf>,
+    dir: &Path,
+    compression: Compression,
+) -> Result<()> {
+    let mut writer = ZipFileWriter::with_tokio(writer);
     for zip_path in zip_paths.into_iter() {
         let filename = match zip_path.strip_prefix(dir).ok().and_then(|v| v.to_str()) {
             Some(v) => v,
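One subtlety in the split: the bail! happens inside tokio::task::spawn_blocking, so the closure returns a Result, and the trailing .await? only unwraps the JoinError; the closure's own Result then becomes collect_zip_paths's return value. A minimal sketch of that propagation shape, independent of the dufs types (sum_with_cap is hypothetical; assumes the tokio and anyhow crates):

    use anyhow::{bail, Result};

    // The closure runs on a blocking thread and returns Result<u64>;
    // JoinHandle::await yields Result<Result<u64>, JoinError>, so `.await?`
    // strips the JoinError and the inner Result is the tail expression.
    async fn sum_with_cap(values: Vec<u64>, max: Option<u64>) -> Result<u64> {
        tokio::task::spawn_blocking(move || {
            let mut total = 0u64;
            for v in values {
                total += v;
                if let Some(max) = max.filter(|m| total > *m) {
                    bail!("total exceeds the configured maximum of {max}");
                }
            }
            Ok(total)
        })
        .await? // `?` handles the JoinError; the closure's Result is returned as-is
    }

    #[tokio::main]
    async fn main() -> Result<()> {
        assert_eq!(sum_with_cap(vec![1, 2, 3], Some(10)).await?, 6); // under the cap
        assert!(sum_with_cap(vec![5, 6], Some(10)).await.is_err()); // 11 > 10 -> error
        Ok(())
    }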