From be81eaefa526fa80e04166e86978e3a95263b4e3 Mon Sep 17 00:00:00 2001 From: Alec Di Vito Date: Thu, 6 Jun 2024 18:42:20 -0400 Subject: feat: Added HTML and Javascript progress bar when uploading files --- src/args.rs | 8 ++ src/config.rs | 4 + src/errors.rs | 1 + src/file_op.rs | 18 +++-- src/renderer.rs | 243 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 5 files changed, 261 insertions(+), 13 deletions(-) (limited to 'src') diff --git a/src/args.rs b/src/args.rs index 95c8bff..a58504b 100644 --- a/src/args.rs +++ b/src/args.rs @@ -165,6 +165,14 @@ pub struct CliArgs { /// When specified via environment variable, a path always neesd to the specified. #[arg(short = 'u', long = "upload-files", value_hint = ValueHint::FilePath, num_args(0..=1), value_delimiter(','), env = "MINISERVE_ALLOWED_UPLOAD_DIR")] pub allowed_upload_dir: Option>, + + /// Configure amount of concurrent uploads when visiting the website. Must have + /// upload-files option enabled for this setting to matter. + /// + /// For example, a value of 4 would mean that the web browser will only upload + /// 4 files at a time to the web server when using the web browser interface. 
+ #[arg(long = "web-upload-files-concurrency", env = "MINISERVE_WEB_UPLOAD_CONCURRENCY", default_value = "0")] + pub web_upload_concurrency: usize, /// Enable creating directories #[arg( diff --git a/src/config.rs b/src/config.rs index 3643502..6e8b89e 100644 --- a/src/config.rs +++ b/src/config.rs @@ -101,6 +101,9 @@ pub struct MiniserveConfig { /// Enable file upload pub file_upload: bool, + /// Max amount of concurrency when uploading multiple files + pub web_upload_concurrency: usize, + /// List of allowed upload directories pub allowed_upload_dir: Vec, @@ -301,6 +304,7 @@ impl MiniserveConfig { show_qrcode: args.qrcode, mkdir_enabled: args.mkdir_enabled, file_upload: args.allowed_upload_dir.is_some(), + web_upload_concurrency: args.web_upload_concurrency, allowed_upload_dir, uploadable_media_type, tar_enabled: args.enable_tar, diff --git a/src/errors.rs b/src/errors.rs index 21f8f12..600834b 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -129,6 +129,7 @@ where let res = fut.await?.map_into_boxed_body(); if (res.status().is_client_error() || res.status().is_server_error()) + && res.request().path() != "/upload" && res .headers() .get(header::CONTENT_TYPE) diff --git a/src/file_op.rs b/src/file_op.rs index e22e3e9..9f5902c 100644 --- a/src/file_op.rs +++ b/src/file_op.rs @@ -4,7 +4,7 @@ use std::io::ErrorKind; use std::path::{Component, Path, PathBuf}; use actix_web::{http::header, web, HttpRequest, HttpResponse}; -use futures::TryFutureExt; +use futures::{StreamExt, TryFutureExt}; use futures::TryStreamExt; use serde::Deserialize; use tokio::fs::File; @@ -20,7 +20,7 @@ use crate::{ /// /// Returns total bytes written to file. 
async fn save_file( - field: actix_multipart::Field, + field: &mut actix_multipart::Field, file_path: PathBuf, overwrite_files: bool, ) -> Result { @@ -33,8 +33,8 @@ async fn save_file( RuntimeError::InsufficientPermissionsError(file_path.display().to_string()), ), Err(err) => Err(RuntimeError::IoError( - format!("Failed to create {}", file_path.display()), - err, + format!("Failed to create {}", file_path.display()), + err, )), Ok(v) => Ok(v), }?; @@ -164,7 +164,15 @@ async fn handle_multipart( } } - save_file(field, path.join(filename_path), overwrite_files).await + match save_file(&mut field, path.join(filename_path), overwrite_files).await { + Ok(bytes) => Ok(bytes), + Err(err) => { + // Required for file upload. If entire stream is not consumed, javascript + // XML HTTP Request will never complete. + while field.next().await.is_some() {} + Err(err) + }, + } } /// Query parameters used by upload and rm APIs diff --git a/src/renderer.rs b/src/renderer.rs index 3935d98..6c1f393 100644 --- a/src/renderer.rs +++ b/src/renderer.rs @@ -53,7 +53,7 @@ pub fn page( html! { (DOCTYPE) html { - (page_header(&title_path, conf.file_upload, &conf.favicon_route, &conf.css_route)) + (page_header(&title_path, conf.file_upload, conf.web_upload_concurrency, &conf.favicon_route, &conf.css_route)) body #drop-container { @@ -175,6 +175,37 @@ pub fn page( } } } + div.upload_area id="upload_area" { + template id="upload_file_item" { + li.upload_file_item { + div.upload_file_container { + div.upload_file_text { + span.file_upload_percent { "" } + {" - "} + span.file_size { "" } + {" - "} + span.file_name { "" } + } + button.file_cancel_upload { "✖" } + } + div.file_progress_bar {} + } + } + div.upload_container { + div.upload_header { + h4 style="margin:0px" id="upload_title" {} + } + div.upload_action { + p id="upload_action_text" { "Starting upload..." 
} + button.upload_cancel id="upload_cancel" { "CANCEL" } + } + div.upload_files { + ul.upload_file_list id="upload_file_list" { + + } + } + } + } } } } @@ -571,7 +602,7 @@ fn chevron_down() -> Markup { } /// Partial: page header -fn page_header(title: &str, file_upload: bool, favicon_route: &str, css_route: &str) -> Markup { +fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favicon_route: &str, css_route: &str) -> Markup { html! { head { meta charset="utf-8"; @@ -612,9 +643,23 @@ fn page_header(title: &str, file_upload: bool, favicon_route: &str, css_route: & "#)) @if file_upload { - (PreEscaped(r#" - - "#)) + "#)) + } } } } @@ -677,7 +904,7 @@ pub fn render_error( html! { (DOCTYPE) html { - (page_header(&error_code.to_string(), false, &conf.favicon_route, &conf.css_route)) + (page_header(&error_code.to_string(), false, conf.web_upload_concurrency, &conf.favicon_route, &conf.css_route)) body { -- cgit v1.2.3 From 1d8b6d8f1f45047e2908506c490d175a8c0a65aa Mon Sep 17 00:00:00 2001 From: Alec Di Vito Date: Thu, 6 Jun 2024 19:08:47 -0400 Subject: chore: clean up --- src/args.rs | 2 +- src/renderer.rs | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/args.rs b/src/args.rs index a58504b..70ad208 100644 --- a/src/args.rs +++ b/src/args.rs @@ -162,7 +162,7 @@ pub struct CliArgs { /// The provided path is not a physical file system path. Instead, it's relative to the serve /// dir. For instance, if the serve dir is '/home/hello', set this to '/upload' to allow /// uploading to '/home/hello/upload'. - /// When specified via environment variable, a path always neesd to the specified. + /// When specified via environment variable, a path always needs to the specified. 
#[arg(short = 'u', long = "upload-files", value_hint = ValueHint::FilePath, num_args(0..=1), value_delimiter(','), env = "MINISERVE_ALLOWED_UPLOAD_DIR")] pub allowed_upload_dir: Option>, diff --git a/src/renderer.rs b/src/renderer.rs index 6c1f393..9af601c 100644 --- a/src/renderer.rs +++ b/src/renderer.rs @@ -702,8 +702,8 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi uploadFiles() }) - const queryLength = (state) => document.querySelectorAll(`[data-state='${state}']`).length; function updateUploadText() { + const queryLength = (state) => document.querySelectorAll(`[data-state='${state}']`).length; const total = document.querySelectorAll("[data-state]").length; const uploads = queryLength(UPLOADING); const pending = queryLength(PENDING); @@ -731,7 +731,7 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi // Update list of uploads Array.from(uploadList.querySelectorAll('li')) - .sort(({dataset: { state: a }}, {dataset: { state: b}}) => UPLOAD_ITEM_ORDER[a] >= UPLOAD_ITEM_ORDER[b]) + .sort(({ dataset: { state: a }}, {dataset: { state: b }}) => UPLOAD_ITEM_ORDER[a] >= UPLOAD_ITEM_ORDER[b]) .forEach((item) => item.parentNode.appendChild(item)) } @@ -748,7 +748,7 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi const iterator = callbacks.entries(); const concurrency = CONCURRENCY === 0 ? 
callbacks.length : CONCURRENCY; const workers = Array(concurrency).fill(iterator).map(doWork) - Promise.allSettled(workers).then(console.log.bind(null, 'done')) + Promise.allSettled(workers) .finally(() => { updateUploadText(); form.reset(); @@ -794,7 +794,6 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi formData.append('file', file); function onReadyStateChange(e) { - console.log('readystatechange', e) if (e.target.readyState == 4) { if (e.target.status == 200) { completeSuccess() -- cgit v1.2.3 From 413a63a60307bdf60229670b0f858963604d62a3 Mon Sep 17 00:00:00 2001 From: Alec Di Vito Date: Sun, 16 Feb 2025 23:35:26 -0500 Subject: feat: implement temporary file uploads and tweak mobile design --- src/args.rs | 10 ++-- src/errors.rs | 5 ++ src/file_op.rs | 140 +++++++++++++++++++++++++++++++++++++++++++++----------- src/renderer.rs | 83 +++++++++++++++++++++++++++------ 4 files changed, 195 insertions(+), 43 deletions(-) (limited to 'src') diff --git a/src/args.rs b/src/args.rs index f117b1c..17f6c84 100644 --- a/src/args.rs +++ b/src/args.rs @@ -165,13 +165,17 @@ pub struct CliArgs { /// When specified via environment variable, a path always needs to be specified. #[arg(short = 'u', long = "upload-files", value_hint = ValueHint::FilePath, num_args(0..=1), value_delimiter(','), env = "MINISERVE_ALLOWED_UPLOAD_DIR")] pub allowed_upload_dir: Option>, - + /// Configure amount of concurrent uploads when visiting the website. Must have /// upload-files option enabled for this setting to matter. - /// + /// /// For example, a value of 4 would mean that the web browser will only upload /// 4 files at a time to the web server when using the web browser interface. 
- #[arg(long = "web-upload-files-concurrency", env = "MINISERVE_WEB_UPLOAD_CONCURRENCY", default_value = "0")] + #[arg( + long = "web-upload-files-concurrency", + env = "MINISERVE_WEB_UPLOAD_CONCURRENCY", + default_value = "0" + )] pub web_upload_concurrency: usize, /// Enable creating directories diff --git a/src/errors.rs b/src/errors.rs index f0e22ab..24997fc 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -43,6 +43,10 @@ pub enum RuntimeError { #[error("File already exists, and the overwrite_files option has not been set")] DuplicateFileError, + /// Uploaded hash not correct + #[error("File hash that was provided did not match end result of uploaded file")] + UploadHashMismatchError, + /// Upload not allowed #[error("Upload not allowed to this directory")] UploadForbiddenError, @@ -86,6 +90,7 @@ impl ResponseError for RuntimeError { use StatusCode as S; match self { E::IoError(_, _) => S::INTERNAL_SERVER_ERROR, + E::UploadHashMismatchError => S::BAD_REQUEST, E::MultipartError(_) => S::BAD_REQUEST, E::DuplicateFileError => S::CONFLICT, E::UploadForbiddenError => S::FORBIDDEN, diff --git a/src/file_op.rs b/src/file_op.rs index 76a7234..367517a 100644 --- a/src/file_op.rs +++ b/src/file_op.rs @@ -4,10 +4,10 @@ use std::io::ErrorKind; use std::path::{Component, Path, PathBuf}; use actix_web::{http::header, web, HttpRequest, HttpResponse}; -use futures::{StreamExt, TryFutureExt}; -use futures::TryStreamExt; +use futures::{StreamExt, TryStreamExt}; use serde::Deserialize; -use tokio::fs::File; +use sha2::{Digest, Sha256}; +use tempfile::NamedTempFile; use tokio::io::AsyncWriteExt; use crate::{ @@ -15,6 +15,18 @@ use crate::{ file_utils::sanitize_path, }; +enum FileHash { + SHA256(String), +} + +impl FileHash { + pub fn get_hasher(&self) -> impl Digest { + match self { + Self::SHA256(_) => Sha256::new(), + } + } +} + /// Saves file data from a multipart form field (`field`) to `file_path`, optionally overwriting /// existing file. 
/// @@ -23,31 +35,84 @@ async fn save_file( field: &mut actix_multipart::Field, file_path: PathBuf, overwrite_files: bool, + file_hash: Option<&FileHash>, ) -> Result { if !overwrite_files && file_path.exists() { return Err(RuntimeError::DuplicateFileError); } - let file = match File::create(&file_path).await { - Err(err) if err.kind() == ErrorKind::PermissionDenied => Err( + let named_temp_file = match tokio::task::spawn_blocking(|| NamedTempFile::new()).await { + Err(err) => Err(RuntimeError::MultipartError(format!( + "Failed to complete spawned task to create named temp file. {}", + err + ))), + Ok(Err(err)) if err.kind() == ErrorKind::PermissionDenied => Err( RuntimeError::InsufficientPermissionsError(file_path.display().to_string()), ), - Err(err) => Err(RuntimeError::IoError( - format!("Failed to create {}", file_path.display()), - err, + Ok(Err(err)) => Err(RuntimeError::IoError( + format!("Failed to create temporary file {}", file_path.display()), + err, )), - Ok(v) => Ok(v), + Ok(Ok(file)) => Ok(file), }?; - let (_, written_len) = field - .map_err(|x| RuntimeError::MultipartError(x.to_string())) - .try_fold((file, 0u64), |(mut file, written_len), bytes| async move { - file.write_all(bytes.as_ref()) - .map_err(|e| RuntimeError::IoError("Failed to write to file".to_string(), e)) - .await?; - Ok((file, written_len + bytes.len() as u64)) - }) - .await?; + let (file, temp_path) = named_temp_file.keep().map_err(|err| { + RuntimeError::IoError("Failed to keep temporary file".into(), err.error.into()) + })?; + let mut temp_file = tokio::fs::File::from_std(file); + + let mut written_len = 0; + let mut hasher = file_hash.as_ref().map(|h| h.get_hasher()); + let mut error: Option = None; + + while let Some(Ok(bytes)) = field.next().await { + if let Some(hasher) = hasher.as_mut() { + hasher.update(&bytes) + } + if let Err(e) = temp_file.write_all(&bytes).await { + error = Some(RuntimeError::IoError( + "Failed to write to file".to_string(), + e, + )); + break; + } + 
written_len += bytes.len() as u64; + } + + drop(temp_file); + + if let Some(e) = error { + let _ = tokio::fs::remove_file(temp_path).await; + return Err(e); + } + + // There isn't a way to get notified when a request is cancelled + // by the user in actix it seems. References: + // - https://github.com/actix/actix-web/issues/1313 + // - https://github.com/actix/actix-web/discussions/3011 + // Therefore, we are relying on the fact that the web UI + // uploads a hash of the file. + if let Some(hasher) = hasher { + if let Some(FileHash::SHA256(expected_hash)) = file_hash { + let actual_hash = hex::encode(hasher.finalize()); + if &actual_hash != expected_hash { + let _ = tokio::fs::remove_file(&temp_path).await; + return Err(RuntimeError::UploadHashMismatchError); + } + } + } + + if let Err(e) = tokio::fs::rename(&temp_path, &file_path).await { + let _ = tokio::fs::remove_file(&temp_path).await; + return Err(RuntimeError::IoError( + format!( + "Failed to move temporary file {} to {}", + temp_path.display(), + file_path.display() + ), + e, + )); + } Ok(written_len) } @@ -60,6 +125,7 @@ async fn handle_multipart( allow_mkdir: bool, allow_hidden_paths: bool, allow_symlinks: bool, + file_hash: Option<&FileHash>, ) -> Result { let field_name = field.name().expect("No name field found").to_string(); @@ -168,15 +234,13 @@ async fn handle_multipart( } } - match save_file(&mut field, path.join(filename_path), overwrite_files).await { - Ok(bytes) => Ok(bytes), - Err(err) => { - // Required for file upload. If entire stream is not consumed, javascript - // XML HTTP Request will never complete. 
- while field.next().await.is_some() {} - Err(err) - }, - } + save_file( + &mut field, + path.join(filename_path), + overwrite_files, + file_hash, + ) + .await } /// Query parameters used by upload and rm APIs @@ -226,6 +290,27 @@ pub async fn upload_file( )), }?; + let mut file_hash: Option = None; + if let Some(hash) = req + .headers() + .get("X-File-Hash") + .and_then(|h| h.to_str().ok()) + { + if let Some(hash_funciton) = req + .headers() + .get("X-File-Hash-Function") + .and_then(|h| h.to_str().ok()) + { + match hash_funciton.to_ascii_uppercase().as_str() { + "SHA256" => { + file_hash = Some(FileHash::SHA256(hash.to_string())); + } + _ => {} + } + } + } + + let hash_ref = file_hash.as_ref(); actix_multipart::Multipart::new(req.headers(), payload) .map_err(|x| RuntimeError::MultipartError(x.to_string())) .and_then(|field| { @@ -236,6 +321,7 @@ pub async fn upload_file( conf.mkdir_enabled, conf.show_hidden, !conf.no_symlinks, + hash_ref, ) }) .try_collect::>() diff --git a/src/renderer.rs b/src/renderer.rs index 035309d..8a87228 100644 --- a/src/renderer.rs +++ b/src/renderer.rs @@ -193,6 +193,9 @@ pub fn page( div.upload_container { div.upload_header { h4 style="margin:0px" id="upload_title" {} + svg id="upload-toggle" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" class="size-6" { + path stroke-linecap="round" stroke-linejoin="round" d="m4.5 15.75 7.5-7.5 7.5 7.5" {} + } } div.upload_action { p id="upload_action_text" { "Starting upload..." 
} @@ -663,6 +666,7 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi const uploadCancelButton = document.querySelector('#upload_cancel'); const uploadList = document.querySelector('#upload_file_list'); const fileUploadItemTemplate = document.querySelector('#upload_file_item'); + const uploadWidgetToggle = document.querySelector('#upload-toggle'); const dropContainer = document.querySelector('#drop-container'); const dragForm = document.querySelector('.drag-form'); @@ -696,6 +700,19 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi dragForm.style.display = 'none'; }; + uploadWidgetToggle.addEventListener('click', function (e) { + e.preventDefault(); + if (uploadArea.style.height === "100vh") { + uploadArea.style = "" + document.body.style = "" + uploadWidgetToggle.style = "" + } else { + uploadArea.style.height = "100vh" + document.body.style = "overflow: hidden" + uploadWidgetToggle.style = "transform: rotate(180deg)" + } + }) + uploadCancelButton.addEventListener('click', function (e) { e.preventDefault(); CANCEL_UPLOAD = true; @@ -706,7 +723,7 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi uploadFiles() }) - function updateUploadText() { + function updateUploadTextAndList() { const queryLength = (state) => document.querySelectorAll(`[data-state='${state}']`).length; const total = document.querySelectorAll("[data-state]").length; const uploads = queryLength(UPLOADING); @@ -733,16 +750,19 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi uploadTitle.textContent = headerText uploadActionText.textContent = statuses.join(', ') - // Update list of uploads - Array.from(uploadList.querySelectorAll('li')) - .sort(({ dataset: { state: a }}, {dataset: { state: b }}) => UPLOAD_ITEM_ORDER[a] >= UPLOAD_ITEM_ORDER[b]) - .forEach((item) => item.parentNode.appendChild(item)) + const items = Array.from(uploadList.querySelectorAll('li')); + 
items.sort((a, b) => UPLOAD_ITEM_ORDER[a.dataset.state] - UPLOAD_ITEM_ORDER[b.dataset.state]); + items.forEach((item, index) => { + if (uploadList.children[index] !== item) { + uploadList.insertBefore(item, uploadList.children[index]); + } + }); } async function doWork(iterator, i) { for (let [index, item] of iterator) { await item(); - updateUploadText(); + updateUploadTextAndList(); } } @@ -754,17 +774,17 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi const workers = Array(concurrency).fill(iterator).map(doWork) Promise.allSettled(workers) .finally(() => { - updateUploadText(); + updateUploadTextAndList(); form.reset(); setTimeout(() => { uploadArea.classList.remove('active'); }, 1000) setTimeout(() => { window.location.reload(); }, 1500) }) - updateUploadText(); + updateUploadTextAndList(); uploadArea.classList.add('active') uploadList.scrollTo(0, 0) } - + function formatBytes(bytes, decimals) { if (bytes == 0) return '0 Bytes'; var k = 1024, @@ -774,6 +794,19 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi return parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i]; } + document.querySelector('input[type="file"]').addEventListener('change', async (e) => { + const file = e.target.files[0]; + const hash = await hashFile(file); + console.log('File hash:', hash); + }); + + async function get256FileHash(file) { + const arrayBuffer = await file.arrayBuffer(); + const hashBuffer = await crypto.subtle.digest('SHA-256', arrayBuffer); + const hashArray = Array.from(new Uint8Array(hashBuffer)); + return hashArray.map(b => b.toString(16).padStart(2, '0')).join(''); + } + function uploadFile(file) { const fileUploadItem = fileUploadItemTemplate.content.cloneNode(true) const itemContainer = fileUploadItem.querySelector(".upload_file_item") @@ -783,16 +816,38 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi const percentText = 
fileUploadItem.querySelector(".file_upload_percent") const bar = fileUploadItem.querySelector(".file_progress_bar") const cancel = fileUploadItem.querySelector(".file_cancel_upload") + let preCancel = false; - itemContainer.dataset.state = 'pending' + itemContainer.dataset.state = PENDING name.textContent = file.name size.textContent = formatBytes(file.size) percentText.textContent = "0%" - + uploadList.append(fileUploadItem) + function preCancelUpload() { + console.log('cancelled') + preCancel = true; + itemText.classList.add(CANCELLED); + bar.classList.add(CANCELLED); + itemContainer.dataset.state = CANCELLED; + itemContainer.style.background = 'var(--upload_modal_file_upload_complete_background)'; + cancel.disabled = true; + cancel.removeEventListener("click", preCancelUpload); + uploadCancelButton.removeEventListener("click", preCancelUpload); + updateUploadTextAndList(); + } + + uploadCancelButton.addEventListener("click", preCancelUpload) + cancel.addEventListener("click", preCancelUpload) + return async () => { - return new Promise((resolve, reject) => { + if (preCancel) { + return Promise.resolve() + } + + return new Promise(async (resolve, reject) => { + const fileHash = await get256FileHash(file); const xhr = new XMLHttpRequest(); const formData = new FormData(); formData.append('file', file); @@ -865,12 +920,14 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi if (CANCEL_UPLOAD) { cancelUpload() } else { - itemContainer.dataset.state = 'uploading' + itemContainer.dataset.state = UPLOADING xhr.addEventListener('readystatechange', onReadyStateChange); xhr.addEventListener("error", onError); xhr.addEventListener("abort", onAbort); xhr.upload.addEventListener('progress', onProgress); xhr.open('post', form.getAttribute("action"), true); + xhr.setRequestHeader('X-File-Hash', fileHash); + xhr.setRequestHeader('X-File-Hash-Function', 'SHA256'); xhr.send(formData); } }) -- cgit v1.2.3 From 577044ddbd70f5f128512c1a021329fb4c7e7eb3 
Mon Sep 17 00:00:00 2001 From: Alec Di Vito Date: Sat, 22 Feb 2025 13:44:16 -0500 Subject: feat: address comments; add in new argument (`temp-directory`); add comments to upload code; add tests --- src/args.rs | 22 +++++++++ src/config.rs | 10 ++++ src/errors.rs | 2 +- src/file_op.rs | 146 +++++++++++++++++++++++++++++++++++++++----------------- src/renderer.rs | 76 +++++++++++++++++++---------- 5 files changed, 188 insertions(+), 68 deletions(-) (limited to 'src') diff --git a/src/args.rs b/src/args.rs index 17f6c84..e9243f5 100644 --- a/src/args.rs +++ b/src/args.rs @@ -26,6 +26,20 @@ pub struct CliArgs { #[arg(value_hint = ValueHint::AnyPath, env = "MINISERVE_PATH")] pub path: Option, + /// The path to where file uploads will be written to before being moved to their + /// correct location. It's wise to make sure that this directory will be written to + /// disk and not into memory. + /// + /// This value will only be used **IF** file uploading is enabled. If this option is + /// not set, the operating system default temporary directory will be used. + #[arg( + long = "temp-directory", + value_hint = ValueHint::FilePath, + requires = "allowed_upload_dir", + env = "MINISERVER_TEMP_UPLOAD_DIRECTORY") + ] + pub temp_upload_directory: Option, + /// The name of a directory index file to serve, like "index.html" /// /// Normally, when miniserve serves a directory, it creates a listing for that directory. @@ -171,6 +185,14 @@ pub struct CliArgs { /// /// For example, a value of 4 would mean that the web browser will only upload /// 4 files at a time to the web server when using the web browser interface. + /// + /// When the value is kept at 0, it attempts to resolve all the uploads at once + /// in the web browser. 
+ /// + /// NOTE: Web pages have a limit of how many active HTTP connections that they + /// can make at one time, so even though you might set a concurrency limit of + /// 100, the browser might only make progress on the max amount of connections + /// it allows the web page to have open. #[arg( long = "web-upload-files-concurrency", env = "MINISERVE_WEB_UPLOAD_CONCURRENCY", diff --git a/src/config.rs b/src/config.rs index 449d2ae..6a048c2 100644 --- a/src/config.rs +++ b/src/config.rs @@ -33,6 +33,9 @@ pub struct MiniserveConfig { /// Path to be served by miniserve pub path: std::path::PathBuf, + /// Temporary directory that should be used when files are uploaded to the server + pub temp_upload_directory: Option, + /// Port on which miniserve will be listening pub port: u16, @@ -275,9 +278,16 @@ impl MiniserveConfig { .transpose()? .unwrap_or_default(); + let temp_upload_directory = args.temp_upload_directory.as_ref().take().map(|v| if v.exists() && v.is_dir() { + Ok(v.clone()) + } else { + Err(anyhow!("Upload temporary directory must exist and be a directory. 
Validate that path {v:?} meets those requirements")) + }).transpose()?; + Ok(Self { verbose: args.verbose, path: args.path.unwrap_or_else(|| PathBuf::from(".")), + temp_upload_directory, port, interfaces, auth, diff --git a/src/errors.rs b/src/errors.rs index 24997fc..b7d75f2 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -44,7 +44,7 @@ pub enum RuntimeError { DuplicateFileError, /// Uploaded hash not correct - #[error("File hash that was provided did not match end result of uploaded file")] + #[error("File hash that was provided did not match checksum of uploaded file")] UploadHashMismatchError, /// Upload not allowed diff --git a/src/file_op.rs b/src/file_op.rs index 367517a..4e05e6c 100644 --- a/src/file_op.rs +++ b/src/file_op.rs @@ -5,8 +5,10 @@ use std::path::{Component, Path, PathBuf}; use actix_web::{http::header, web, HttpRequest, HttpResponse}; use futures::{StreamExt, TryStreamExt}; +use log::{info, warn}; use serde::Deserialize; -use sha2::{Digest, Sha256}; +use sha2::digest::DynDigest; +use sha2::{Digest, Sha256, Sha512}; use tempfile::NamedTempFile; use tokio::io::AsyncWriteExt; @@ -17,71 +19,125 @@ use crate::{ enum FileHash { SHA256(String), + SHA512(String), } impl FileHash { - pub fn get_hasher(&self) -> impl Digest { + pub fn get_hasher(&self) -> Box { match self { - Self::SHA256(_) => Sha256::new(), + Self::SHA256(_) => Box::new(Sha256::new()), + Self::SHA512(_) => Box::new(Sha512::new()), + } + } + + pub fn get_hash(&self) -> &str { + match self { + Self::SHA256(string) => &string, + Self::SHA512(string) => &string, } } } -/// Saves file data from a multipart form field (`field`) to `file_path`, optionally overwriting -/// existing file. +/// Saves file data from a multipart form field (`field`) to `file_path`. Optionally overwriting +/// existing file and comparing the uploaded file checksum to the user provided `file_hash`. /// /// Returns total bytes written to file. 
async fn save_file(
     field: &mut actix_multipart::Field,
     file_path: PathBuf,
     overwrite_files: bool,
-    file_hash: Option<&FileHash>,
+    file_checksum: Option<&FileHash>,
+    temporary_upload_directory: Option<&PathBuf>,
 ) -> Result {
     if !overwrite_files && file_path.exists() {
         return Err(RuntimeError::DuplicateFileError);
     }
 
-    let named_temp_file = match tokio::task::spawn_blocking(|| NamedTempFile::new()).await {
+    let temp_upload_directory = temporary_upload_directory.cloned();
+    // Tempfile doesn't support async operations, so we'll do it on a background thread.
+    let temp_upload_directory_task = tokio::task::spawn_blocking(move || {
+        // If the user provided a temporary directory path, then use it.
+        if let Some(temp_directory) = temp_upload_directory {
+            NamedTempFile::new_in(temp_directory)
+        } else {
+            NamedTempFile::new()
+        }
+    });
+
+    // Validate that the temporary task completed successfully.
+    let named_temp_file_task = match temp_upload_directory_task.await {
+        Ok(named_temp_file) => Ok(named_temp_file),
         Err(err) => Err(RuntimeError::MultipartError(format!(
-            "Failed to complete spawned task to create named temp file. {}",
-            err
+            "Failed to complete spawned task to create named temp file. {err}",
         ))),
-        Ok(Err(err)) if err.kind() == ErrorKind::PermissionDenied => Err(
+    }?;
+
+    // Validate that the temporary file was created successfully.
+    let named_temp_file = match named_temp_file_task {
+        Err(err) if err.kind() == ErrorKind::PermissionDenied => Err(
             RuntimeError::InsufficientPermissionsError(file_path.display().to_string()),
         ),
-        Ok(Err(err)) => Err(RuntimeError::IoError(
+        Err(err) => Err(RuntimeError::IoError(
             format!("Failed to create temporary file {}", file_path.display()),
             err,
         )),
-        Ok(Ok(file)) => Ok(file),
+        Ok(file) => Ok(file),
     }?;
 
+    // Convert the temporary file into a non-temporary file. This allows us
+    // to control the lifecycle of the file.
This is useful for us because
+    // we need to convert the temporary file into an async enabled file and
+    // on successful upload, we want to move it to the target directory.
     let (file, temp_path) = named_temp_file.keep().map_err(|err| {
         RuntimeError::IoError("Failed to keep temporary file".into(), err.error.into())
     })?;
     let mut temp_file = tokio::fs::File::from_std(file);
 
     let mut written_len = 0;
-    let mut hasher = file_hash.as_ref().map(|h| h.get_hasher());
-    let mut error: Option = None;
+    let mut hasher = file_checksum.as_ref().map(|h| h.get_hasher());
+    let mut save_upload_file_error: Option = None;
 
+    // This while loop takes a stream (in this case `field`) and awaits
+    // new chunks from the HTTP multipart stream. The while loop reads
+    // the file from the HTTP connection and writes it to disk or until
+    // the stream from the multipart request is aborted.
     while let Some(Ok(bytes)) = field.next().await {
+        // If the hasher exists (if the user has also sent a checksum with the request)
+        // then we want to update the hasher with the new bytes uploaded.
         if let Some(hasher) = hasher.as_mut() {
             hasher.update(&bytes)
         }
+        // Write the bytes from the stream into our temporary file.
         if let Err(e) = temp_file.write_all(&bytes).await {
-            error = Some(RuntimeError::IoError(
-                "Failed to write to file".to_string(),
-                e,
-            ));
+            // Failed to write to file. Drop it and return the error
+            save_upload_file_error =
+                Some(RuntimeError::IoError("Failed to write to file".into(), e));
             break;
         }
+        // record the bytes written to the file.
         written_len += bytes.len() as u64;
     }
 
+    if save_upload_file_error.is_none() {
+        // Flush the changes to disk so that we are sure they are there. 
+        if let Err(e) = temp_file.flush().await {
+            save_upload_file_error = Some(RuntimeError::IoError(
+                "Failed to flush all the file writes to disk".into(),
+                e,
+            ));
+        }
+    }
+
+    // Drop the file explicitly here because IF there is an error when writing to the
+    // temp file, we won't be able to remove as per the comment in `tokio::fs::remove_file`
+    // > Note that there is no guarantee that the file is immediately deleted
+    // > (e.g. depending on platform, other open file descriptors may prevent immediate removal).
     drop(temp_file);
 
-    if let Some(e) = error {
+    // If there was an error during uploading.
+    if let Some(e) = save_upload_file_error {
+        // If there was an error when writing the file to disk, remove it and return
+        // the error that was encountered.
         let _ = tokio::fs::remove_file(temp_path).await;
         return Err(e);
     }
@@ -90,26 +146,24 @@ async fn save_file(
     // by the user in actix it seems. References:
     // - https://github.com/actix/actix-web/issues/1313
     // - https://github.com/actix/actix-web/discussions/3011
-    // Therefore, we are relying on the fact that the web UI
-    // uploads a hash of the file.
+    // Therefore, we are relying on the fact that the web UI uploads a
+    // hash of the file to determine if it was completely uploaded or not.
     if let Some(hasher) = hasher {
-        if let Some(FileHash::SHA256(expected_hash)) = file_hash {
+        if let Some(expected_hash) = file_checksum.as_ref().map(|f| f.get_hash()) {
             let actual_hash = hex::encode(hasher.finalize());
             if &actual_hash != expected_hash {
+                warn!("The expected file hash {expected_hash} did not match the calculated hash of {actual_hash}. This can be caused if a file upload was aborted.");
                 let _ = tokio::fs::remove_file(&temp_path).await;
                 return Err(RuntimeError::UploadHashMismatchError);
             }
         }
     }
 
+    info!("File upload successful to {temp_path:?}. 
Moving to {file_path:?}",); if let Err(e) = tokio::fs::rename(&temp_path, &file_path).await { let _ = tokio::fs::remove_file(&temp_path).await; return Err(RuntimeError::IoError( - format!( - "Failed to move temporary file {} to {}", - temp_path.display(), - file_path.display() - ), + format!("Failed to move temporary file {temp_path:?} to {file_path:?}",), e, )); } @@ -126,6 +180,7 @@ async fn handle_multipart( allow_hidden_paths: bool, allow_symlinks: bool, file_hash: Option<&FileHash>, + upload_directory: Option<&PathBuf>, ) -> Result { let field_name = field.name().expect("No name field found").to_string(); @@ -239,6 +294,7 @@ async fn handle_multipart( path.join(filename_path), overwrite_files, file_hash, + upload_directory, ) .await } @@ -290,25 +346,28 @@ pub async fn upload_file( )), }?; - let mut file_hash: Option = None; - if let Some(hash) = req - .headers() - .get("X-File-Hash") - .and_then(|h| h.to_str().ok()) - { - if let Some(hash_funciton) = req - .headers() + let upload_directory = conf.temp_upload_directory.as_ref(); + + let file_hash = if let (Some(hash), Some(hash_function)) = ( + req.headers() + .get("X-File-Hash") + .and_then(|h| h.to_str().ok()), + req.headers() .get("X-File-Hash-Function") - .and_then(|h| h.to_str().ok()) - { - match hash_funciton.to_ascii_uppercase().as_str() { - "SHA256" => { - file_hash = Some(FileHash::SHA256(hash.to_string())); - } - _ => {} + .and_then(|h| h.to_str().ok()), + ) { + match hash_function.to_ascii_uppercase().as_str() { + "SHA256" => Some(FileHash::SHA256(hash.to_string())), + "SHA512" => Some(FileHash::SHA512(hash.to_string())), + sha => { + return Err(RuntimeError::InvalidHttpRequestError(format!( + "Invalid header value found for 'X-File-Hash-Function'. Supported values are SHA256 or SHA512. 
Found {sha}.", + ))) } } - } + } else { + None + }; let hash_ref = file_hash.as_ref(); actix_multipart::Multipart::new(req.headers(), payload) @@ -322,6 +381,7 @@ pub async fn upload_file( conf.show_hidden, !conf.no_symlinks, hash_ref, + upload_directory, ) }) .try_collect::>() diff --git a/src/renderer.rs b/src/renderer.rs index 8a87228..fae3751 100644 --- a/src/renderer.rs +++ b/src/renderer.rs @@ -609,7 +609,13 @@ fn chevron_down() -> Markup { } /// Partial: page header -fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favicon_route: &str, css_route: &str) -> Markup { +fn page_header( + title: &str, + file_upload: bool, + web_file_concurrency: usize, + favicon_route: &str, + css_route: &str, +) -> Markup { html! { head { meta charset="utf-8"; @@ -658,7 +664,9 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi const UPLOADING = 'uploading', PENDING = 'pending', COMPLETE = 'complete', CANCELLED = 'cancelled', FAILED = 'failed' const UPLOAD_ITEM_ORDER = { UPLOADING: 0, PENDING: 1, COMPLETE: 2, CANCELLED: 3, FAILED: 4 } let CANCEL_UPLOAD = false; - // File Upload + + // File Upload dom elements. Used for interacting with the + // upload container. const form = document.querySelector('#file_submit'); const uploadArea = document.querySelector('#upload_area'); const uploadTitle = document.querySelector('#upload_title'); @@ -700,6 +708,7 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi dragForm.style.display = 'none'; }; + // Event listener for toggling the upload widget display on mobile. 
uploadWidgetToggle.addEventListener('click', function (e) {
                     e.preventDefault();
                     if (uploadArea.style.height === "100vh") {
@@ -713,6 +722,7 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
                     }
                 })
 
+                // Cancel all active and pending uploads
                 uploadCancelButton.addEventListener('click', function (e) {
                     e.preventDefault();
                     CANCEL_UPLOAD = true;
@@ -723,7 +733,11 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
                     uploadFiles()
                 })
 
+                // When uploads start, finish or are cancelled, the UI needs to reactively show those
+                // updates of the state. This function updates the text on the upload widget to accurately
+                // show the state of all uploads.
                 function updateUploadTextAndList() {
+                    // All state is kept as `data-*` attributes on the HTML nodes.
                     const queryLength = (state) => document.querySelectorAll(`[data-state='${state}']`).length;
                     const total = document.querySelectorAll("[data-state]").length;
                     const uploads = queryLength(UPLOADING);
@@ -733,13 +747,13 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
                     const failed = queryLength(FAILED);
                     const allCompleted = completed + cancelled + failed;
 
-                    // Update header
+                    // Update header text based on remaining uploads
                     let headerText = `${total - allCompleted} uploads remaining...`;
                     if (total === allCompleted) {
                         headerText = `Complete! 
Reloading Page!` } - // Update sub header + // Build a summary of statuses for sub header const statuses = [] if (uploads > 0) { statuses.push(`Uploading ${uploads}`) } if (pending > 0) { statuses.push(`Pending ${pending}`) } @@ -749,29 +763,37 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi uploadTitle.textContent = headerText uploadActionText.textContent = statuses.join(', ') - - const items = Array.from(uploadList.querySelectorAll('li')); - items.sort((a, b) => UPLOAD_ITEM_ORDER[a.dataset.state] - UPLOAD_ITEM_ORDER[b.dataset.state]); - items.forEach((item, index) => { - if (uploadList.children[index] !== item) { - uploadList.insertBefore(item, uploadList.children[index]); - } - }); - } - - async function doWork(iterator, i) { - for (let [index, item] of iterator) { - await item(); - updateUploadTextAndList(); - } } + // Initiates the file upload process by disabling the ability for more files to be + // uploaded and creating async callbacks for each file that needs to be uploaded. + // Given the concurrency set by the server input arguments, it will try to process + // that many uploads at once function uploadFiles() { fileInput.disabled = true; + + // Map all the files into async callbacks (uploadFile is a function that returns a function) const callbacks = Array.from(fileInput.files).map(uploadFile); - const iterator = callbacks.entries(); + + // Get a list of all the callbacks const concurrency = CONCURRENCY === 0 ? callbacks.length : CONCURRENCY; - const workers = Array(concurrency).fill(iterator).map(doWork) + + // Worker function that continuously pulls tasks from the shared queue. + async function worker() { + while (callbacks.length > 0) { + // Remove a task from the front of the queue. + const task = callbacks.shift(); + if (task) { + await task(); + updateUploadTextAndList(); + } + } + } + + // Create a work stealing shared queue, split up between `concurrency` amount of workers. 
+                    const workers = Array.from({ length: concurrency }).map(worker);
+
+                    // Wait for all the workers to complete
                     Promise.allSettled(workers)
                         .finally(() => {
                             updateUploadTextAndList();
@@ -797,7 +819,6 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
                 document.querySelector('input[type="file"]').addEventListener('change', async (e) => {
                     const file = e.target.files[0];
                     const hash = await hashFile(file);
-                    console.log('File hash:', hash);
                 });
 
                 async function get256FileHash(file) {
@@ -807,6 +828,10 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
                     return hashArray.map(b => b.toString(16).padStart(2, '0')).join('');
                 }
 
+                // Upload a file. This function will create an upload item in the upload
+                // widget from an HTML template. It then returns a promise which will
+                // be used to upload the file to the server and control the styles and
+                // interactions on the HTML list item.
                 function uploadFile(file) {
                     const fileUploadItem = fileUploadItemTemplate.content.cloneNode(true)
                     const itemContainer = fileUploadItem.querySelector(".upload_file_item")
@@ -825,8 +850,8 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
 
                     uploadList.append(fileUploadItem)
 
+                    // Cancel an upload before it even started.
                     function preCancelUpload() {
-                        console.log('cancelled')
                         preCancel = true;
                         itemText.classList.add(CANCELLED);
                         bar.classList.add(CANCELLED);
@@ -841,11 +866,14 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
                     uploadCancelButton.addEventListener("click", preCancelUpload)
                     cancel.addEventListener("click", preCancelUpload)
 
-                    return async () => {
+                    // A callback function is returned so that the upload doesn't start until
+                    // we want it to. This is so that we have control over our desired concurrency.
+                    return () => {
                         if (preCancel) {
                             return Promise.resolve()
                         }
 
+                        // Upload the single file in a multipart request. 
return new Promise(async (resolve, reject) => { const fileHash = await get256FileHash(file); const xhr = new XMLHttpRequest(); -- cgit v1.2.3 From 33c79837f1e113a1ce83a461413acf474e973c63 Mon Sep 17 00:00:00 2001 From: Alec Di Vito Date: Sun, 2 Mar 2025 14:46:10 -0500 Subject: feat: validate temp dir exists through `value_parser` and fixed clippy issues --- src/args.rs | 15 +++++++++++++++ src/config.rs | 8 +------- src/file_op.rs | 50 ++++++++++++++++++++++++++++++++------------------ 3 files changed, 48 insertions(+), 25 deletions(-) (limited to 'src') diff --git a/src/args.rs b/src/args.rs index e9243f5..72ade7b 100644 --- a/src/args.rs +++ b/src/args.rs @@ -36,6 +36,7 @@ pub struct CliArgs { long = "temp-directory", value_hint = ValueHint::FilePath, requires = "allowed_upload_dir", + value_parser(validate_is_dir_and_exists), env = "MINISERVER_TEMP_UPLOAD_DIRECTORY") ] pub temp_upload_directory: Option, @@ -356,6 +357,20 @@ fn parse_interface(src: &str) -> Result { src.parse::() } +/// Validate that a path passed in is a directory and it exists. +fn validate_is_dir_and_exists(s: &str) -> Result { + let path = PathBuf::from(s); + if path.exists() && path.is_dir() { + Ok(path) + } else { + Err(format!( + "Upload temporary directory must exist and be a directory. \ + Validate that path {:?} meets those requirements.", + path + )) + } +} + #[derive(Clone, Debug, thiserror::Error)] pub enum AuthParseError { /// Might occur if the HTTP credential string does not respect the expected format diff --git a/src/config.rs b/src/config.rs index 6a048c2..4404959 100644 --- a/src/config.rs +++ b/src/config.rs @@ -278,16 +278,10 @@ impl MiniserveConfig { .transpose()? .unwrap_or_default(); - let temp_upload_directory = args.temp_upload_directory.as_ref().take().map(|v| if v.exists() && v.is_dir() { - Ok(v.clone()) - } else { - Err(anyhow!("Upload temporary directory must exist and be a directory. 
Validate that path {v:?} meets those requirements")) - }).transpose()?; - Ok(Self { verbose: args.verbose, path: args.path.unwrap_or_else(|| PathBuf::from(".")), - temp_upload_directory, + temp_upload_directory: args.temp_upload_directory, port, interfaces, auth, diff --git a/src/file_op.rs b/src/file_op.rs index 4e05e6c..afd6449 100644 --- a/src/file_op.rs +++ b/src/file_op.rs @@ -32,8 +32,8 @@ impl FileHash { pub fn get_hash(&self) -> &str { match self { - Self::SHA256(string) => &string, - Self::SHA512(string) => &string, + Self::SHA256(string) => string, + Self::SHA512(string) => string, } } } @@ -88,9 +88,9 @@ async fn save_file( // to control the lifecycle of the file. This is useful for us because // we need to convert the temporary file into an async enabled file and // on successful upload, we want to move it to the target directory. - let (file, temp_path) = named_temp_file.keep().map_err(|err| { - RuntimeError::IoError("Failed to keep temporary file".into(), err.error.into()) - })?; + let (file, temp_path) = named_temp_file + .keep() + .map_err(|err| RuntimeError::IoError("Failed to keep temporary file".into(), err.error))?; let mut temp_file = tokio::fs::File::from_std(file); let mut written_len = 0; @@ -151,7 +151,7 @@ async fn save_file( if let Some(hasher) = hasher { if let Some(expected_hash) = file_checksum.as_ref().map(|f| f.get_hash()) { let actual_hash = hex::encode(hasher.finalize()); - if &actual_hash != expected_hash { + if actual_hash != expected_hash { warn!("The expected file hash {expected_hash} did not match the calculated hash of {actual_hash}. 
This can be caused if a file upload was aborted."); let _ = tokio::fs::remove_file(&temp_path).await; return Err(RuntimeError::UploadHashMismatchError); @@ -171,17 +171,29 @@ async fn save_file( Ok(written_len) } -/// Handles a single field in a multipart form -async fn handle_multipart( - mut field: actix_multipart::Field, - path: PathBuf, +struct HandleMultipartOpts<'a> { overwrite_files: bool, allow_mkdir: bool, allow_hidden_paths: bool, allow_symlinks: bool, - file_hash: Option<&FileHash>, - upload_directory: Option<&PathBuf>, + file_hash: Option<&'a FileHash>, + upload_directory: Option<&'a PathBuf>, +} + +/// Handles a single field in a multipart form +async fn handle_multipart( + mut field: actix_multipart::Field, + path: PathBuf, + opts: HandleMultipartOpts<'_>, ) -> Result { + let HandleMultipartOpts { + overwrite_files, + allow_mkdir, + allow_hidden_paths, + allow_symlinks, + file_hash, + upload_directory, + } = opts; let field_name = field.name().expect("No name field found").to_string(); match tokio::fs::metadata(&path).await { @@ -376,12 +388,14 @@ pub async fn upload_file( handle_multipart( field, non_canonicalized_target_dir.clone(), - conf.overwrite_files, - conf.mkdir_enabled, - conf.show_hidden, - !conf.no_symlinks, - hash_ref, - upload_directory, + HandleMultipartOpts { + overwrite_files: conf.overwrite_files, + allow_mkdir: conf.mkdir_enabled, + allow_hidden_paths: conf.show_hidden, + allow_symlinks: !conf.no_symlinks, + file_hash: hash_ref, + upload_directory, + }, ) }) .try_collect::>() -- cgit v1.2.3