author      Sven-Hendrik Haase <svenstaro@gmail.com>  2025-03-03 03:54:51 +0000
committer   GitHub <noreply@github.com>  2025-03-03 03:54:51 +0000
commit      c26b74c8a9f47f86b39a687b20d769e3b349fd8d (patch)
tree        1c562da6fbcba9900ce85e300cc9298a0ddf23c8
parent      Merge pull request #1478 from svenstaro/dependabot/cargo/all-dependencies-65d... (diff)
parent      feat: validate temp dir exists through `value_parser` and fixed clippy issues (diff)
Merge pull request #1431 from AlecDivito/upload-progress-bar
feat: Added HTML and Javascript progress bar when uploading files
-rw-r--r--   Cargo.lock                    17
-rw-r--r--   Cargo.toml                     3
-rw-r--r--   README.md                      6
-rw-r--r--   data/style.scss              179
-rw-r--r--   data/themes/archlinux.scss     8
-rw-r--r--   data/themes/monokai.scss       8
-rw-r--r--   data/themes/squirrel.scss      8
-rw-r--r--   data/themes/zenburn.scss       8
-rw-r--r--   src/args.rs                   49
-rw-r--r--   src/config.rs                  8
-rw-r--r--   src/errors.rs                  6
-rw-r--r--   src/file_op.rs               222
-rw-r--r--   src/renderer.rs              327
-rw-r--r--   tests/upload_files.rs         94
14 files changed, 902 insertions, 41 deletions
diff --git a/Cargo.lock b/Cargo.lock
index cadfcd2..0a96e96 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2097,6 +2097,7 @@ dependencies = [
"socket2",
"strum",
"tar",
+ "tempfile",
"thiserror 2.0.11",
"tokio",
"url",
@@ -3230,9 +3231,9 @@ dependencies = [
[[package]]
name = "tempfile"
-version = "3.16.0"
+version = "3.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38c246215d7d24f48ae091a2902398798e05d978b24315d6efbc00ede9a8bb91"
+checksum = "a40f762a77d2afa88c2d919489e390a12bdd261ed568e60cfa7e48d4e20f0d33"
dependencies = [
"cfg-if",
"fastrand",
@@ -3390,10 +3391,22 @@ dependencies = [
"pin-project-lite",
"signal-hook-registry",
"socket2",
+ "tokio-macros",
"windows-sys 0.52.0",
]
[[package]]
+name = "tokio-macros"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.98",
+]
+
+[[package]]
name = "tokio-native-tls"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
diff --git a/Cargo.toml b/Cargo.toml
index fe6a874..40c2250 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -56,8 +56,9 @@ simplelog = "0.12"
socket2 = "0.5"
strum = { version = "0.27", features = ["derive"] }
tar = "0.4"
+tempfile = "3.17.0"
thiserror = "2"
-tokio = { version = "1.42.0", features = ["fs"] }
+tokio = { version = "1.42.0", features = ["fs", "macros"] }
zip = { version = "2", default-features = false }
[features]
diff --git a/README.md b/README.md
index 36b3841..8a8f78d 100644
--- a/README.md
+++ b/README.md
@@ -274,6 +274,12 @@ Options:
[env: MINISERVE_ALLOWED_UPLOAD_DIR=]
+ --web-upload-files-concurrency <WEB_UPLOAD_CONCURRENCY>
+          Configure the number of concurrent uploads when visiting the website. The upload-files option must be enabled for this setting to matter.
+
+ [env: MINISERVE_WEB_UPLOAD_CONCURRENCY=]
+ [default: 0]
+
-U, --mkdir
Enable creating directories
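
For context, here is a minimal sketch of how the new upload-related options fit together on the command line; the served path, staging directory, and port below are hypothetical:

```bash
# Serve ./public, allow uploads, stage uploads in a disk-backed directory,
# and let the web UI run at most 4 uploads concurrently.
miniserve ./public \
    --upload-files \
    --temp-directory /var/tmp/miniserve-uploads \
    --web-upload-files-concurrency 4 \
    --port 8080
```

Note that `--temp-directory` only applies together with `--upload-files` (see the `requires = "allowed_upload_dir"` constraint in `src/args.rs` below) and must point at an existing directory.
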
diff --git a/data/style.scss b/data/style.scss
index ceedc5f..828146a 100644
--- a/data/style.scss
+++ b/data/style.scss
@@ -27,6 +27,157 @@ body {
padding: 1.5rem 5rem;
}
+$upload_container_height: 18rem;
+
+.upload_area {
+ display: block;
+ position: fixed;
+ bottom: 1rem;
+ right: -105%;
+ color: #ffffff;
+ box-shadow: 0 3px 6px -1px rgba(0, 0, 0, 0.12), 0 10px 36px -4px rgba(77, 96, 232, 0.3);
+ background: linear-gradient(135deg, #73a5ff, #5477f5);
+ padding: 0px;
+ margin: 0px;
+ opacity: 1; // Change this
+ transition: all 0.4s cubic-bezier(0.215, 0.61, 0.355, 1);
+ border-radius: 4px;
+ text-decoration: none;
+ min-width: 400px;
+ max-width: 600px;
+ z-index: 2147483647;
+ max-height: $upload_container_height;
+ overflow: hidden;
+
+ &.active {
+ right: 1rem;
+ }
+
+ #upload-toggle {
+ display: none;
+ transition: transform 0.3s ease;
+ cursor: pointer;
+ }
+}
+
+.upload_container {
+ max-height: $upload_container_height;
+ display: flex;
+ flex-direction: column;
+}
+
+.upload_header {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ padding: 1rem;
+ background-color: var(--upload_modal_header_background);
+ color: var(--upload_modal_header_color);
+}
+
+.upload_header svg {
+ width: 24px;
+ height: 24px;
+}
+
+.upload_action {
+ background-color: var(--upload_modal_sub_header_background);
+ color: var(--upload_modal_header_color);
+ padding: 0.25rem 1rem;
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ font-size: 0.75em;
+ font-weight: 500;
+}
+
+.upload_cancel {
+ background: none;
+ border: none;
+ font-weight: 500;
+ cursor: pointer;
+}
+
+.upload_files {
+ padding: 0px;
+ margin: 0px;
+ flex: 1;
+ overflow-y: auto;
+ max-height: inherit;
+}
+
+.upload_file_list {
+ background-color: var(--upload_modal_file_item_background);
+ color: var(--upload_modal_file_item_color);
+ padding: 0px;
+ margin: 0px;
+ list-style: none;
+ display: flex;
+ flex-direction: column;
+ align-items: stretch;
+ overflow-y: scroll;
+}
+
+.upload_file_container {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ padding: 1rem 1rem calc(1rem - 2px) 1rem;
+}
+
+.upload_file_action {
+ display: flex;
+ justify-content: right;
+}
+
+.file_progress_bar {
+ width: 0%;
+ border-top: 2px solid var(--progress_bar_background);
+ transition: width 0.25s ease;
+
+ &.cancelled {
+ border-color: var(--error_color);
+ }
+
+ &.failed {
+ border-color: var(--error_color);
+ }
+
+ &.complete {
+ border-color: var(--success_color);
+ }
+}
+
+.upload_file_text {
+ font-size: 0.80em;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+
+ &.cancelled {
+ text-decoration: line-through;
+ }
+
+ &.failed {
+ text-decoration: line-through;
+ }
+}
+
+.file_cancel_upload {
+ padding-left: 0.25rem;
+ font-size: 1em;
+ cursor: pointer;
+ border: none;
+ background: inherit;
+ color: var(--error_color);
+
+ &.complete {
+ color: var(--success_color);
+ }
+}
+
.title {
word-break: break-all;
}
@@ -87,7 +238,7 @@ a.file,
color: var(--file_link_color);
&:visited {
- color: var(--file_link_color_visited)
+ color: var(--file_link_color_visited);
}
}
@@ -529,6 +680,32 @@ th span.active span {
.back {
right: 1.5rem;
}
+
+ $upload_container_height_mobile: 100vh;
+
+ .upload_area {
+ width: 100%;
+ height: 136px;
+ max-height: $upload_container_height_mobile;
+ max-width: unset;
+ min-width: unset;
+ bottom: 0;
+ transition: height 0.3s ease;
+
+ &.active {
+ right: 0;
+ left: 0;
+ }
+
+ #upload-toggle {
+ display: block;
+ transition: transform 0.3s ease;
+ }
+ }
+
+ .upload_container {
+ max-height: $upload_container_height_mobile;
+ }
}
@media (max-width: 600px) {
diff --git a/data/themes/archlinux.scss b/data/themes/archlinux.scss
index f95b8bb..16020a0 100644
--- a/data/themes/archlinux.scss
+++ b/data/themes/archlinux.scss
@@ -45,6 +45,14 @@ $generate_default: true !default;
--size_text_color: #fefefe;
--error_color: #e44b4b;
--footer_color: #8eabcc;
+ --success_color: #52e28a;
+ --upload_modal_header_background: #5294e2;
+ --upload_modal_header_color: #eeeeee;
+ --upload_modal_sub_header_background: #35547a;
+ --upload_modal_file_item_background: #eeeeee;
+ --upload_modal_file_item_color: #111111;
+ --upload_modal_file_upload_complete_background: #cccccc;
+ --progress_bar_background: #5294e2;
};
@if $generate_default {
diff --git a/data/themes/monokai.scss b/data/themes/monokai.scss
index 4a47794..60c45e7 100644
--- a/data/themes/monokai.scss
+++ b/data/themes/monokai.scss
@@ -45,6 +45,14 @@ $generate_default: true !default;
--size_text_color: #f8f8f2;
--error_color: #d02929;
--footer_color: #56c9df;
+ --success_color: #52e28a;
+ --upload_modal_header_background: #75715e;
+ --upload_modal_header_color: #eeeeee;
+ --upload_modal_sub_header_background: #323129;
+ --upload_modal_file_item_background: #eeeeee;
+ --upload_modal_file_item_color: #111111;
+ --upload_modal_file_upload_complete_background: #cccccc;
+ --progress_bar_background: #5294e2;
};
@if $generate_default {
diff --git a/data/themes/squirrel.scss b/data/themes/squirrel.scss
index 9eb572e..7ec47e6 100644
--- a/data/themes/squirrel.scss
+++ b/data/themes/squirrel.scss
@@ -45,6 +45,14 @@ $generate_default: true !default;
--size_text_color: #ffffff;
--error_color: #d02424;
--footer_color: #898989;
+ --success_color: #52e28a;
+ --upload_modal_header_background: #323232;
+ --upload_modal_header_color: #eeeeee;
+ --upload_modal_sub_header_background: #171616;
+ --upload_modal_file_item_background: #eeeeee;
+ --upload_modal_file_item_color: #111111;
+ --upload_modal_file_upload_complete_background: #cccccc;
+ --progress_bar_background: #5294e2;
};
@if $generate_default {
diff --git a/data/themes/zenburn.scss b/data/themes/zenburn.scss
index 9eb4d11..efb2e83 100644
--- a/data/themes/zenburn.scss
+++ b/data/themes/zenburn.scss
@@ -45,6 +45,14 @@ $generate_default: true !default;
--size_text_color: #efefef;
--error_color: #d06565;
--footer_color: #bfaf9f;
+ --success_color: #52e28a;
+ --upload_modal_header_background: #7f9f7f;
+ --upload_modal_header_color: #eeeeee;
+ --upload_modal_sub_header_background: #404e40;
+ --upload_modal_file_item_background: #eeeeee;
+ --upload_modal_file_item_color: #111111;
+ --upload_modal_file_upload_complete_background: #cccccc;
+ --progress_bar_background: #5294e2;
};
@if $generate_default {
diff --git a/src/args.rs b/src/args.rs
index 922e78b..72ade7b 100644
--- a/src/args.rs
+++ b/src/args.rs
@@ -26,6 +26,21 @@ pub struct CliArgs {
#[arg(value_hint = ValueHint::AnyPath, env = "MINISERVE_PATH")]
pub path: Option<PathBuf>,
+    /// The path where file uploads will be written before being moved to their
+    /// final location. It's wise to make sure that this directory is backed by
+    /// disk rather than memory.
+    ///
+    /// This value is only used **if** file uploading is enabled. If this option is
+    /// not set, the operating system's default temporary directory will be used.
+ #[arg(
+ long = "temp-directory",
+ value_hint = ValueHint::FilePath,
+ requires = "allowed_upload_dir",
+ value_parser(validate_is_dir_and_exists),
+ env = "MINISERVER_TEMP_UPLOAD_DIRECTORY")
+ ]
+ pub temp_upload_directory: Option<PathBuf>,
+
/// The name of a directory index file to serve, like "index.html"
///
/// Normally, when miniserve serves a directory, it creates a listing for that directory.
@@ -166,6 +181,26 @@ pub struct CliArgs {
#[arg(short = 'u', long = "upload-files", value_hint = ValueHint::FilePath, num_args(0..=1), value_delimiter(','), env = "MINISERVE_ALLOWED_UPLOAD_DIR")]
pub allowed_upload_dir: Option<Vec<PathBuf>>,
+    /// Configure the number of concurrent uploads when visiting the website. The
+    /// upload-files option must be enabled for this setting to matter.
+    ///
+    /// For example, a value of 4 means the web browser will upload at most
+    /// 4 files at a time to the server when using the web interface.
+    ///
+    /// When the value is 0, the browser attempts to start all uploads at once.
+    ///
+    /// NOTE: Browsers limit how many active HTTP connections a page can make at
+    /// one time, so even if you set a concurrency limit of 100, the browser may
+    /// only make progress on as many connections as it allows the page to keep open.
+ #[arg(
+ long = "web-upload-files-concurrency",
+ env = "MINISERVE_WEB_UPLOAD_CONCURRENCY",
+ default_value = "0"
+ )]
+ pub web_upload_concurrency: usize,
+
/// Enable creating directories
#[arg(
short = 'U',
@@ -322,6 +357,20 @@ fn parse_interface(src: &str) -> Result<IpAddr, std::net::AddrParseError> {
src.parse::<IpAddr>()
}
+/// Validate that the given path exists and is a directory.
+fn validate_is_dir_and_exists(s: &str) -> Result<PathBuf, String> {
+ let path = PathBuf::from(s);
+ if path.exists() && path.is_dir() {
+ Ok(path)
+ } else {
+ Err(format!(
+ "Upload temporary directory must exist and be a directory. \
+ Validate that path {:?} meets those requirements.",
+ path
+ ))
+ }
+}
+
#[derive(Clone, Debug, thiserror::Error)]
pub enum AuthParseError {
/// Might occur if the HTTP credential string does not respect the expected format
diff --git a/src/config.rs b/src/config.rs
index 953bba1..4404959 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -33,6 +33,9 @@ pub struct MiniserveConfig {
/// Path to be served by miniserve
pub path: std::path::PathBuf,
+ /// Temporary directory that should be used when files are uploaded to the server
+ pub temp_upload_directory: Option<std::path::PathBuf>,
+
/// Port on which miniserve will be listening
pub port: u16,
@@ -101,6 +104,9 @@ pub struct MiniserveConfig {
/// Enable file upload
pub file_upload: bool,
+ /// Max amount of concurrency when uploading multiple files
+ pub web_upload_concurrency: usize,
+
/// List of allowed upload directories
pub allowed_upload_dir: Vec<String>,
@@ -275,6 +281,7 @@ impl MiniserveConfig {
Ok(Self {
verbose: args.verbose,
path: args.path.unwrap_or_else(|| PathBuf::from(".")),
+ temp_upload_directory: args.temp_upload_directory,
port,
interfaces,
auth,
@@ -295,6 +302,7 @@ impl MiniserveConfig {
show_qrcode: args.qrcode,
mkdir_enabled: args.mkdir_enabled,
file_upload: args.allowed_upload_dir.is_some(),
+ web_upload_concurrency: args.web_upload_concurrency,
allowed_upload_dir,
uploadable_media_type,
tar_enabled: args.enable_tar,
diff --git a/src/errors.rs b/src/errors.rs
index 99c15ff..b7d75f2 100644
--- a/src/errors.rs
+++ b/src/errors.rs
@@ -43,6 +43,10 @@ pub enum RuntimeError {
#[error("File already exists, and the overwrite_files option has not been set")]
DuplicateFileError,
+ /// Uploaded hash not correct
+ #[error("File hash that was provided did not match checksum of uploaded file")]
+ UploadHashMismatchError,
+
/// Upload not allowed
#[error("Upload not allowed to this directory")]
UploadForbiddenError,
@@ -86,6 +90,7 @@ impl ResponseError for RuntimeError {
use StatusCode as S;
match self {
E::IoError(_, _) => S::INTERNAL_SERVER_ERROR,
+ E::UploadHashMismatchError => S::BAD_REQUEST,
E::MultipartError(_) => S::BAD_REQUEST,
E::DuplicateFileError => S::CONFLICT,
E::UploadForbiddenError => S::FORBIDDEN,
@@ -132,6 +137,7 @@ where
let res = fut.await?.map_into_boxed_body();
if (res.status().is_client_error() || res.status().is_server_error())
+ && res.request().path() != "/upload"
&& res
.headers()
.get(header::CONTENT_TYPE)
diff --git a/src/file_op.rs b/src/file_op.rs
index 18bdcbe..afd6449 100644
--- a/src/file_op.rs
+++ b/src/file_op.rs
@@ -4,10 +4,12 @@ use std::io::ErrorKind;
use std::path::{Component, Path, PathBuf};
use actix_web::{http::header, web, HttpRequest, HttpResponse};
-use futures::TryFutureExt;
-use futures::TryStreamExt;
+use futures::{StreamExt, TryStreamExt};
+use log::{info, warn};
use serde::Deserialize;
-use tokio::fs::File;
+use sha2::digest::DynDigest;
+use sha2::{Digest, Sha256, Sha512};
+use tempfile::NamedTempFile;
use tokio::io::AsyncWriteExt;
use crate::{
@@ -15,52 +17,183 @@ use crate::{
file_utils::sanitize_path,
};
-/// Saves file data from a multipart form field (`field`) to `file_path`, optionally overwriting
-/// existing file.
+enum FileHash {
+ SHA256(String),
+ SHA512(String),
+}
+
+impl FileHash {
+ pub fn get_hasher(&self) -> Box<dyn DynDigest> {
+ match self {
+ Self::SHA256(_) => Box::new(Sha256::new()),
+ Self::SHA512(_) => Box::new(Sha512::new()),
+ }
+ }
+
+ pub fn get_hash(&self) -> &str {
+ match self {
+ Self::SHA256(string) => string,
+ Self::SHA512(string) => string,
+ }
+ }
+}
+
+/// Saves file data from a multipart form field (`field`) to `file_path`, optionally overwriting
+/// an existing file and comparing the uploaded file's checksum to the user-provided `file_hash`.
///
/// Returns total bytes written to file.
async fn save_file(
- field: actix_multipart::Field,
+ field: &mut actix_multipart::Field,
file_path: PathBuf,
overwrite_files: bool,
+ file_checksum: Option<&FileHash>,
+ temporary_upload_directory: Option<&PathBuf>,
) -> Result<u64, RuntimeError> {
if !overwrite_files && file_path.exists() {
return Err(RuntimeError::DuplicateFileError);
}
- let file = match File::create(&file_path).await {
+ let temp_upload_directory = temporary_upload_directory.cloned();
+ // Tempfile doesn't support async operations, so we'll do it on a background thread.
+ let temp_upload_directory_task = tokio::task::spawn_blocking(move || {
+ // If the user provided a temporary directory path, then use it.
+ if let Some(temp_directory) = temp_upload_directory {
+ NamedTempFile::new_in(temp_directory)
+ } else {
+ NamedTempFile::new()
+ }
+ });
+
+    // Check that the spawned blocking task completed successfully.
+ let named_temp_file_task = match temp_upload_directory_task.await {
+ Ok(named_temp_file) => Ok(named_temp_file),
+ Err(err) => Err(RuntimeError::MultipartError(format!(
+ "Failed to complete spawned task to create named temp file. {err}",
+ ))),
+ }?;
+
+    // Check that the temporary file was created successfully.
+ let named_temp_file = match named_temp_file_task {
Err(err) if err.kind() == ErrorKind::PermissionDenied => Err(
RuntimeError::InsufficientPermissionsError(file_path.display().to_string()),
),
Err(err) => Err(RuntimeError::IoError(
- format!("Failed to create {}", file_path.display()),
+ format!("Failed to create temporary file {}", file_path.display()),
err,
)),
- Ok(v) => Ok(v),
+ Ok(file) => Ok(file),
}?;
- let (_, written_len) = field
- .map_err(|x| RuntimeError::MultipartError(x.to_string()))
- .try_fold((file, 0u64), |(mut file, written_len), bytes| async move {
- file.write_all(bytes.as_ref())
- .map_err(|e| RuntimeError::IoError("Failed to write to file".to_string(), e))
- .await?;
- Ok((file, written_len + bytes.len() as u64))
- })
- .await?;
+    // Persist the temporary file so that we control its lifecycle ourselves. We
+    // need this because we convert it into an async-enabled file and, on a
+    // successful upload, move it to the target directory.
+ let (file, temp_path) = named_temp_file
+ .keep()
+ .map_err(|err| RuntimeError::IoError("Failed to keep temporary file".into(), err.error))?;
+ let mut temp_file = tokio::fs::File::from_std(file);
+
+ let mut written_len = 0;
+ let mut hasher = file_checksum.as_ref().map(|h| h.get_hasher());
+ let mut save_upload_file_error: Option<RuntimeError> = None;
+
+    // This while loop takes a stream (in this case `field`) and awaits
+    // new chunks from the multipart request. It reads the file from the
+    // HTTP connection and writes it to disk until the stream ends or the
+    // multipart request is aborted.
+ while let Some(Ok(bytes)) = field.next().await {
+        // If the hasher exists (i.e. the user has also sent a checksum with the request),
+ // then we want to update the hasher with the new bytes uploaded.
+ if let Some(hasher) = hasher.as_mut() {
+ hasher.update(&bytes)
+ }
+ // Write the bytes from the stream into our temporary file.
+ if let Err(e) = temp_file.write_all(&bytes).await {
+ // Failed to write to file. Drop it and return the error
+ save_upload_file_error =
+ Some(RuntimeError::IoError("Failed to write to file".into(), e));
+ break;
+ }
+ // record the bytes written to the file.
+ written_len += bytes.len() as u64;
+ }
+
+ if save_upload_file_error.is_none() {
+ // Flush the changes to disk so that we are sure they are there.
+ if let Err(e) = temp_file.flush().await {
+ save_upload_file_error = Some(RuntimeError::IoError(
+ "Failed to flush all the file writes to disk".into(),
+ e,
+ ));
+ }
+ }
+
+    // Drop the file explicitly here because if there is an error when writing to the
+    // temp file, we might not be able to remove it, as per the comment in `tokio::fs::remove_file`:
+ // > Note that there is no guarantee that the file is immediately deleted
+ // > (e.g. depending on platform, other open file descriptors may prevent immediate removal).
+ drop(temp_file);
+
+ // If there was an error during uploading.
+ if let Some(e) = save_upload_file_error {
+ // If there was an error when writing the file to disk, remove it and return
+ // the error that was encountered.
+ let _ = tokio::fs::remove_file(temp_path).await;
+ return Err(e);
+ }
+
+    // There doesn't seem to be a way to get notified when a request is cancelled
+    // by the user in actix. References:
+    // - https://github.com/actix/actix-web/issues/1313
+    // - https://github.com/actix/actix-web/discussions/3011
+    // Therefore, we rely on the hash of the file that the web UI uploads to
+    // determine whether the file was completely uploaded or not.
+ if let Some(hasher) = hasher {
+ if let Some(expected_hash) = file_checksum.as_ref().map(|f| f.get_hash()) {
+ let actual_hash = hex::encode(hasher.finalize());
+ if actual_hash != expected_hash {
+ warn!("The expected file hash {expected_hash} did not match the calculated hash of {actual_hash}. This can be caused if a file upload was aborted.");
+ let _ = tokio::fs::remove_file(&temp_path).await;
+ return Err(RuntimeError::UploadHashMismatchError);
+ }
+ }
+ }
+
+ info!("File upload successful to {temp_path:?}. Moving to {file_path:?}",);
+ if let Err(e) = tokio::fs::rename(&temp_path, &file_path).await {
+ let _ = tokio::fs::remove_file(&temp_path).await;
+ return Err(RuntimeError::IoError(
+ format!("Failed to move temporary file {temp_path:?} to {file_path:?}",),
+ e,
+ ));
+ }
Ok(written_len)
}
-/// Handles a single field in a multipart form
-async fn handle_multipart(
- mut field: actix_multipart::Field,
- path: PathBuf,
+struct HandleMultipartOpts<'a> {
overwrite_files: bool,
allow_mkdir: bool,
allow_hidden_paths: bool,
allow_symlinks: bool,
+ file_hash: Option<&'a FileHash>,
+ upload_directory: Option<&'a PathBuf>,
+}
+
+/// Handles a single field in a multipart form
+async fn handle_multipart(
+ mut field: actix_multipart::Field,
+ path: PathBuf,
+ opts: HandleMultipartOpts<'_>,
) -> Result<u64, RuntimeError> {
+ let HandleMultipartOpts {
+ overwrite_files,
+ allow_mkdir,
+ allow_hidden_paths,
+ allow_symlinks,
+ file_hash,
+ upload_directory,
+ } = opts;
let field_name = field.name().expect("No name field found").to_string();
match tokio::fs::metadata(&path).await {
@@ -168,7 +301,14 @@ async fn handle_multipart(
}
}
- save_file(field, path.join(filename_path), overwrite_files).await
+ save_file(
+ &mut field,
+ path.join(filename_path),
+ overwrite_files,
+ file_hash,
+ upload_directory,
+ )
+ .await
}
/// Query parameters used by upload and rm APIs
@@ -218,16 +358,44 @@ pub async fn upload_file(
)),
}?;
+ let upload_directory = conf.temp_upload_directory.as_ref();
+
+ let file_hash = if let (Some(hash), Some(hash_function)) = (
+ req.headers()
+ .get("X-File-Hash")
+ .and_then(|h| h.to_str().ok()),
+ req.headers()
+ .get("X-File-Hash-Function")
+ .and_then(|h| h.to_str().ok()),
+ ) {
+ match hash_function.to_ascii_uppercase().as_str() {
+ "SHA256" => Some(FileHash::SHA256(hash.to_string())),
+ "SHA512" => Some(FileHash::SHA512(hash.to_string())),
+ sha => {
+ return Err(RuntimeError::InvalidHttpRequestError(format!(
+ "Invalid header value found for 'X-File-Hash-Function'. Supported values are SHA256 or SHA512. Found {sha}.",
+ )))
+ }
+ }
+ } else {
+ None
+ };
+
+ let hash_ref = file_hash.as_ref();
actix_multipart::Multipart::new(req.headers(), payload)
.map_err(|x| RuntimeError::MultipartError(x.to_string()))
.and_then(|field| {
handle_multipart(
field,
non_canonicalized_target_dir.clone(),
- conf.overwrite_files,
- conf.mkdir_enabled,
- conf.show_hidden,
- !conf.no_symlinks,
+ HandleMultipartOpts {
+ overwrite_files: conf.overwrite_files,
+ allow_mkdir: conf.mkdir_enabled,
+ allow_hidden_paths: conf.show_hidden,
+ allow_symlinks: !conf.no_symlinks,
+ file_hash: hash_ref,
+ upload_directory,
+ },
)
})
.try_collect::<Vec<u64>>()
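
As a rough sketch of how a client other than the web UI could exercise the new checksum headers, something like the following should work; the multipart field name and target path mirror the tests further down and are assumptions rather than a documented API:

```bash
# Assumed usage: upload file.txt to the served root and let the server verify a SHA-256 checksum.
# The checksum is only verified when both X-File-Hash and X-File-Hash-Function are present.
HASH=$(sha256sum file.txt | cut -d' ' -f1)
curl -F 'file_to_upload=@file.txt' \
     -H "X-File-Hash: $HASH" \
     -H 'X-File-Hash-Function: SHA256' \
     'http://localhost:8080/upload?path=/'
```

If the computed checksum of the received bytes does not match `X-File-Hash`, the server removes the temporary file and responds with `400 Bad Request` (`UploadHashMismatchError`).
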
diff --git a/src/renderer.rs b/src/renderer.rs
index ca49413..fae3751 100644
--- a/src/renderer.rs
+++ b/src/renderer.rs
@@ -52,7 +52,7 @@ pub fn page(
html! {
(DOCTYPE)
html {
- (page_header(&title_path, conf.file_upload, &conf.favicon_route, &conf.css_route))
+ (page_header(&title_path, conf.file_upload, conf.web_upload_concurrency, &conf.favicon_route, &conf.css_route))
body #drop-container
{
@@ -174,6 +174,40 @@ pub fn page(
}
}
}
+ div.upload_area id="upload_area" {
+ template id="upload_file_item" {
+ li.upload_file_item {
+ div.upload_file_container {
+ div.upload_file_text {
+ span.file_upload_percent { "" }
+ {" - "}
+ span.file_size { "" }
+ {" - "}
+ span.file_name { "" }
+ }
+ button.file_cancel_upload { "✖" }
+ }
+ div.file_progress_bar {}
+ }
+ }
+ div.upload_container {
+ div.upload_header {
+ h4 style="margin:0px" id="upload_title" {}
+ svg id="upload-toggle" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" class="size-6" {
+ path stroke-linecap="round" stroke-linejoin="round" d="m4.5 15.75 7.5-7.5 7.5 7.5" {}
+ }
+ }
+ div.upload_action {
+ p id="upload_action_text" { "Starting upload..." }
+ button.upload_cancel id="upload_cancel" { "CANCEL" }
+ }
+ div.upload_files {
+ ul.upload_file_list id="upload_file_list" {
+
+ }
+ }
+ }
+ }
}
}
}
@@ -575,7 +609,13 @@ fn chevron_down() -> Markup {
}
/// Partial: page header
-fn page_header(title: &str, file_upload: bool, favicon_route: &str, css_route: &str) -> Markup {
+fn page_header(
+ title: &str,
+ file_upload: bool,
+ web_file_concurrency: usize,
+ favicon_route: &str,
+ css_route: &str,
+) -> Markup {
html! {
head {
meta charset="utf-8";
@@ -616,9 +656,26 @@ fn page_header(title: &str, file_upload: bool, favicon_route: &str, css_route: &
"#))
@if file_upload {
- (PreEscaped(r#"
- <script>
+ script {
+ (format!("const CONCURRENCY = {web_file_concurrency};"))
+ (PreEscaped(r#"
window.onload = function() {
+ // Constants
+ const UPLOADING = 'uploading', PENDING = 'pending', COMPLETE = 'complete', CANCELLED = 'cancelled', FAILED = 'failed'
+ const UPLOAD_ITEM_ORDER = { UPLOADING: 0, PENDING: 1, COMPLETE: 2, CANCELLED: 3, FAILED: 4 }
+ let CANCEL_UPLOAD = false;
+
+ // File Upload dom elements. Used for interacting with the
+ // upload container.
+ const form = document.querySelector('#file_submit');
+ const uploadArea = document.querySelector('#upload_area');
+ const uploadTitle = document.querySelector('#upload_title');
+ const uploadActionText = document.querySelector('#upload_action_text');
+ const uploadCancelButton = document.querySelector('#upload_cancel');
+ const uploadList = document.querySelector('#upload_file_list');
+ const fileUploadItemTemplate = document.querySelector('#upload_file_item');
+ const uploadWidgetToggle = document.querySelector('#upload-toggle');
+
const dropContainer = document.querySelector('#drop-container');
const dragForm = document.querySelector('.drag-form');
const fileInput = document.querySelector('#file-input');
@@ -647,12 +704,266 @@ fn page_header(title: &str, file_upload: bool, favicon_route: &str, css_route: &
dropContainer.ondrop = function(e) {
e.preventDefault();
fileInput.files = e.dataTransfer.files;
- file_submit.submit();
+ form.requestSubmit();
dragForm.style.display = 'none';
};
+
+ // Event listener for toggling the upload widget display on mobile.
+ uploadWidgetToggle.addEventListener('click', function (e) {
+ e.preventDefault();
+ if (uploadArea.style.height === "100vh") {
+ uploadArea.style = ""
+ document.body.style = ""
+ uploadWidgetToggle.style = ""
+ } else {
+ uploadArea.style.height = "100vh"
+ document.body.style = "overflow: hidden"
+ uploadWidgetToggle.style = "transform: rotate(180deg)"
+ }
+ })
+
+ // Cancel all active and pending uploads
+ uploadCancelButton.addEventListener('click', function (e) {
+ e.preventDefault();
+ CANCEL_UPLOAD = true;
+ })
+
+ form.addEventListener('submit', function (e) {
+ e.preventDefault()
+ uploadFiles()
+ })
+
+            // When uploads start, finish or are cancelled, the UI needs to reactively show those
+            // state updates. This function updates the text on the upload widget to accurately
+            // reflect the state of all uploads.
+ function updateUploadTextAndList() {
+                // All state is kept as `data-*` attributes on the HTML nodes.
+ const queryLength = (state) => document.querySelectorAll(`[data-state='${state}']`).length;
+ const total = document.querySelectorAll("[data-state]").length;
+ const uploads = queryLength(UPLOADING);
+ const pending = queryLength(PENDING);
+ const completed = queryLength(COMPLETE);
+ const cancelled = queryLength(CANCELLED);
+ const failed = queryLength(FAILED);
+ const allCompleted = completed + cancelled + failed;
+
+ // Update header text based on remaining uploads
+ let headerText = `${total - allCompleted} uploads remaining...`;
+ if (total === allCompleted) {
+ headerText = `Complete! Reloading Page!`
+ }
+
+ // Build a summary of statuses for sub header
+ const statuses = []
+ if (uploads > 0) { statuses.push(`Uploading ${uploads}`) }
+ if (pending > 0) { statuses.push(`Pending ${pending}`) }
+ if (completed > 0) { statuses.push(`Complete ${completed}`) }
+ if (cancelled > 0) { statuses.push(`Cancelled ${cancelled}`) }
+ if (failed > 0) { statuses.push(`Failed ${failed}`) }
+
+ uploadTitle.textContent = headerText
+ uploadActionText.textContent = statuses.join(', ')
+ }
+            // Initiates the file upload process by disabling the file input (so no more files
+            // can be added) and creating an async callback for each file that needs to be uploaded.
+            // Given the concurrency set by the server's arguments, it will try to process
+            // that many uploads at once.
+ // that many uploads at once
+ function uploadFiles() {
+ fileInput.disabled = true;
+
+ // Map all the files into async callbacks (uploadFile is a function that returns a function)
+ const callbacks = Array.from(fileInput.files).map(uploadFile);
+
+                // Determine the effective concurrency (0 means start all uploads at once)
+ const concurrency = CONCURRENCY === 0 ? callbacks.length : CONCURRENCY;
+
+ // Worker function that continuously pulls tasks from the shared queue.
+ async function worker() {
+ while (callbacks.length > 0) {
+ // Remove a task from the front of the queue.
+ const task = callbacks.shift();
+ if (task) {
+ await task();
+ updateUploadTextAndList();
+ }
+ }
+ }
+
+                // Spin up `concurrency` workers that pull tasks from the shared queue.
+ const workers = Array.from({ length: concurrency }).map(worker);
+
+ // Wait for all the workers to complete
+ Promise.allSettled(workers)
+ .finally(() => {
+ updateUploadTextAndList();
+ form.reset();
+ setTimeout(() => { uploadArea.classList.remove('active'); }, 1000)
+ setTimeout(() => { window.location.reload(); }, 1500)
+ })
+
+ updateUploadTextAndList();
+ uploadArea.classList.add('active')
+ uploadList.scrollTo(0, 0)
+ }
+
+ function formatBytes(bytes, decimals) {
+ if (bytes == 0) return '0 Bytes';
+ var k = 1024,
+ dm = decimals || 2,
+ sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'],
+ i = Math.floor(Math.log(bytes) / Math.log(k));
+ return parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i];
+ }
+
+ document.querySelector('input[type="file"]').addEventListener('change', async (e) => {
+ const file = e.target.files[0];
+                    const hash = await get256FileHash(file);
+ });
+
+ async function get256FileHash(file) {
+ const arrayBuffer = await file.arrayBuffer();
+ const hashBuffer = await crypto.subtle.digest('SHA-256', arrayBuffer);
+ const hashArray = Array.from(new Uint8Array(hashBuffer));
+ return hashArray.map(b => b.toString(16).padStart(2, '0')).join('');
+ }
+
+            // Upload a file. This function creates an upload item in the upload
+            // widget from an HTML template. It then returns a callback that, when
+            // invoked, uploads the file to the server and controls the styles and
+            // interactions on the HTML list item.
+ function uploadFile(file) {
+ const fileUploadItem = fileUploadItemTemplate.content.cloneNode(true)
+ const itemContainer = fileUploadItem.querySelector(".upload_file_item")
+ const itemText = fileUploadItem.querySelector(".upload_file_text")
+ const size = fileUploadItem.querySelector(".file_size")
+ const name = fileUploadItem.querySelector(".file_name")
+ const percentText = fileUploadItem.querySelector(".file_upload_percent")
+ const bar = fileUploadItem.querySelector(".file_progress_bar")
+ const cancel = fileUploadItem.querySelector(".file_cancel_upload")
+ let preCancel = false;
+
+ itemContainer.dataset.state = PENDING
+ name.textContent = file.name
+ size.textContent = formatBytes(file.size)
+ percentText.textContent = "0%"
+
+ uploadList.append(fileUploadItem)
+
+ // Cancel an upload before it even started.
+ function preCancelUpload() {
+ preCancel = true;
+ itemText.classList.add(CANCELLED);
+ bar.classList.add(CANCELLED);
+ itemContainer.dataset.state = CANCELLED;
+ itemContainer.style.background = 'var(--upload_modal_file_upload_complete_background)';
+ cancel.disabled = true;
+ cancel.removeEventListener("click", preCancelUpload);
+ uploadCancelButton.removeEventListener("click", preCancelUpload);
+ updateUploadTextAndList();
+ }
+
+ uploadCancelButton.addEventListener("click", preCancelUpload)
+ cancel.addEventListener("click", preCancelUpload)
+
+                // A callback function is returned so that the upload doesn't start until
+                // we want it to. This gives us control over the desired concurrency.
+ return () => {
+ if (preCancel) {
+ return Promise.resolve()
+ }
+
+ // Upload the single file in a multipart request.
+ return new Promise(async (resolve, reject) => {
+ const fileHash = await get256FileHash(file);
+ const xhr = new XMLHttpRequest();
+ const formData = new FormData();
+ formData.append('file', file);
+
+ function onReadyStateChange(e) {
+ if (e.target.readyState == 4) {
+ if (e.target.status == 200) {
+ completeSuccess()
+ } else {
+ failedUpload(e.target.status)
+ }
+ }
+ }
+
+ function onError(e) {
+ failedUpload()
+ }
+
+ function onAbort(e) {
+ cancelUpload()
+ }
+
+ function onProgress (e) {
+ update(Math.round((e.loaded / e.total) * 100));
+ }
+
+ function update(uploadPercent) {
+ let wholeNumber = Math.floor(uploadPercent)
+ percentText.textContent = `${wholeNumber}%`
+ bar.style.width = `${wholeNumber}%`
+ }
+
+ function completeSuccess() {
+ cancel.textContent = '✔';
+ cancel.classList.add(COMPLETE);
+ bar.classList.add(COMPLETE);
+ cleanUp(COMPLETE)
+ }
+
+ function failedUpload(statusCode) {
+ cancel.textContent = `${statusCode} ⚠`;
+ itemText.classList.add(FAILED);
+ bar.classList.add(FAILED);
+ cleanUp(FAILED);
+ }
+
+ function cancelUpload() {
+ xhr.abort()
+ itemText.classList.add(CANCELLED);
+ bar.classList.add(CANCELLED);
+ cleanUp(CANCELLED);
+ }
+
+ function cleanUp(state) {
+ itemContainer.dataset.state = state;
+ itemContainer.style.background = 'var(--upload_modal_file_upload_complete_background)';
+ cancel.disabled = true;
+ cancel.removeEventListener("click", cancelUpload)
+ uploadCancelButton.removeEventListener("click", cancelUpload)
+ xhr.removeEventListener('readystatechange', onReadyStateChange);
+ xhr.removeEventListener("error", onError);
+ xhr.removeEventListener("abort", onAbort);
+ xhr.upload.removeEventListener('progress', onProgress);
+ resolve()
+ }
+
+ uploadCancelButton.addEventListener("click", cancelUpload)
+ cancel.addEventListener("click", cancelUpload)
+
+ if (CANCEL_UPLOAD) {
+ cancelUpload()
+ } else {
+ itemContainer.dataset.state = UPLOADING
+ xhr.addEventListener('readystatechange', onReadyStateChange);
+ xhr.addEventListener("error", onError);
+ xhr.addEventListener("abort", onAbort);
+ xhr.upload.addEventListener('progress', onProgress);
+ xhr.open('post', form.getAttribute("action"), true);
+ xhr.setRequestHeader('X-File-Hash', fileHash);
+ xhr.setRequestHeader('X-File-Hash-Function', 'SHA256');
+ xhr.send(formData);
+ }
+ })
+ }
+ }
}
- </script>
- "#))
+ "#))
+ }
}
}
}
@@ -681,7 +992,7 @@ pub fn render_error(
html! {
(DOCTYPE)
html {
- (page_header(&error_code.to_string(), false, &conf.favicon_route, &conf.css_route))
+ (page_header(&error_code.to_string(), false, conf.web_upload_concurrency, &conf.favicon_route, &conf.css_route))
body
{
diff --git a/tests/upload_files.rs b/tests/upload_files.rs
index 5fdf2cf..15730bb 100644
--- a/tests/upload_files.rs
+++ b/tests/upload_files.rs
@@ -3,6 +3,7 @@ use std::path::Path;
use assert_fs::fixture::TempDir;
use reqwest::blocking::{multipart, Client};
+use reqwest::header::HeaderMap;
use rstest::rstest;
use select::document::Document;
use select::predicate::{Attr, Text};
@@ -11,8 +12,29 @@ mod fixtures;
use crate::fixtures::{server, tmpdir, Error, TestServer};
+// Generate the hashes using the following
+// ```bash
+// $ sha256 -s 'this should be uploaded'
+// $ sha512 -s 'this should be uploaded'
+// ```
#[rstest]
-fn uploading_files_works(#[with(&["-u"])] server: TestServer) -> Result<(), Error> {
+#[case::no_hash(None, None)]
+#[case::only_hash(None, Some("test"))]
+#[case::partial_sha256_hash(Some("SHA256"), None)]
+#[case::partial_sha512_hash(Some("SHA512"), None)]
+#[case::sha256_hash(
+ Some("SHA256"),
+ Some("e37b14e22e7b3f50dadaf821c189af80f79b1f39fd5a8b3b4f536103735d4620")
+)]
+#[case::sha512_hash(
+ Some("SHA512"),
+ Some("03bcfc52c53904e34e06b95e8c3ee1275c66960c441417892e977d52687e28afae85b6039509060ee07da739e4e7fc3137acd142162c1456f723604f8365e154")
+)]
+fn uploading_files_works(
+ #[with(&["-u"])] server: TestServer,
+ #[case] sha_func: Option<&str>,
+ #[case] sha: Option<&str>,
+) -> Result<(), Error> {
let test_file_name = "uploaded test file.txt";
// Before uploading, check whether the uploaded file does not yet exist.
@@ -33,7 +55,16 @@ fn uploading_files_works(#[with(&["-u"])] server: TestServer) -> Result<(), Erro
.mime_str("text/plain")?;
let form = form.part("file_to_upload", part);
- let client = Client::new();
+ let mut headers = HeaderMap::new();
+ if let Some(sha_func) = sha_func.as_ref() {
+ headers.insert("X-File-Hash-Function", sha_func.parse()?);
+ }
+ if let Some(sha) = sha.as_ref() {
+ headers.insert("X-File-Hash", sha.parse()?);
+ }
+
+ let client = Client::builder().default_headers(headers).build()?;
+
client
.post(server.url().join(upload_action)?)
.multipart(form)
@@ -84,6 +115,65 @@ fn uploading_files_is_prevented(server: TestServer) -> Result<(), Error> {
Ok(())
}
+// Generated hashes with the following
+// ```bash
+// echo "invalid" | base64 | sha256
+// echo "invalid" | base64 | sha512
+// ```
+#[rstest]
+#[case::sha256_hash(
+ Some("SHA256"),
+ Some("f4ddf641a44e8fe8248cc086532cafaa8a914a21a937e40be67926ea074b955a")
+)]
+#[case::sha512_hash(
+ Some("SHA512"),
+ Some("d3fe39ab560dd7ba91e6e2f8c948066d696f2afcfc90bf9df32946512f6934079807f301235b88b72bf746b6a88bf111bc5abe5c711514ed0731d286985297ba")
+)]
+#[case::sha128_hash(Some("SHA128"), Some("invalid"))]
+fn uploading_files_with_invalid_sha_func_is_prevented(
+ #[with(&["-u"])] server: TestServer,
+ #[case] sha_func: Option<&str>,
+ #[case] sha: Option<&str>,
+) -> Result<(), Error> {
+ let test_file_name = "uploaded test file.txt";
+
+ // Before uploading, check whether the uploaded file does not yet exist.
+ let body = reqwest::blocking::get(server.url())?.error_for_status()?;
+ let parsed = Document::from_read(body)?;
+ assert!(parsed.find(Text).all(|x| x.text() != test_file_name));
+
+ // Perform the actual upload.
+ let form = multipart::Form::new();
+ let part = multipart::Part::text("this should be uploaded")
+ .file_name(test_file_name)
+ .mime_str("text/plain")?;
+ let form = form.part("file_to_upload", part);
+
+ let mut headers = HeaderMap::new();
+ if let Some(sha_func) = sha_func.as_ref() {
+ headers.insert("X-File-Hash-Function", sha_func.parse()?);
+ }
+ if let Some(sha) = sha.as_ref() {
+ headers.insert("X-File-Hash", sha.parse()?);
+ }
+
+ let client = Client::builder().default_headers(headers).build()?;
+
+ assert!(client
+ .post(server.url().join("/upload?path=/")?)
+ .multipart(form)
+ .send()?
+ .error_for_status()
+ .is_err());
+
+ // After uploading, check whether the uploaded file is NOT getting listed.
+ let body = reqwest::blocking::get(server.url())?;
+ let parsed = Document::from_read(body)?;
+ assert!(!parsed.find(Text).any(|x| x.text() == test_file_name));
+
+ Ok(())
+}
+
/// This test runs the server with --allowed-upload-dir argument and
/// checks that file upload to a different directory is actually prevented.
#[rstest]