aboutsummaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/args.rs22
-rw-r--r--src/config.rs10
-rw-r--r--src/errors.rs2
-rw-r--r--src/file_op.rs146
-rw-r--r--src/renderer.rs76
5 files changed, 188 insertions, 68 deletions
diff --git a/src/args.rs b/src/args.rs
index 17f6c84..e9243f5 100644
--- a/src/args.rs
+++ b/src/args.rs
@@ -26,6 +26,20 @@ pub struct CliArgs {
#[arg(value_hint = ValueHint::AnyPath, env = "MINISERVE_PATH")]
pub path: Option<PathBuf>,
+ /// The path to where file uploads will be written to before being moved to their
+ /// correct location. It's wise to make sure that this directory is actually
+ /// backed by disk storage and not by memory (e.g. a tmpfs).
+ ///
+ /// This value will only be used **IF** file uploading is enabled. If this option is
+ /// not set, the operating system default temporary directory will be used.
+ #[arg(
+ long = "temp-directory",
+ value_hint = ValueHint::FilePath,
+ requires = "allowed_upload_dir",
+ env = "MINISERVER_TEMP_UPLOAD_DIRECTORY")
+ ]
+ pub temp_upload_directory: Option<PathBuf>,
+
/// The name of a directory index file to serve, like "index.html"
///
/// Normally, when miniserve serves a directory, it creates a listing for that directory.
@@ -171,6 +185,14 @@ pub struct CliArgs {
///
/// For example, a value of 4 would mean that the web browser will only upload
/// 4 files at a time to the web server when using the web browser interface.
+ ///
+ /// When the value is kept at 0, it attempts to resolve all the uploads at once
+ /// in the web browser.
+ ///
+ /// NOTE: Web pages have a limit of how many active HTTP connections that they
+ /// can make at one time, so even though you might set a concurrency limit of
+ /// 100, the browser might only make progress on the max amount of connections
+ /// it allows the web page to have open.
#[arg(
long = "web-upload-files-concurrency",
env = "MINISERVE_WEB_UPLOAD_CONCURRENCY",
diff --git a/src/config.rs b/src/config.rs
index 449d2ae..6a048c2 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -33,6 +33,9 @@ pub struct MiniserveConfig {
/// Path to be served by miniserve
pub path: std::path::PathBuf,
+ /// Temporary directory that should be used when files are uploaded to the server
+ pub temp_upload_directory: Option<std::path::PathBuf>,
+
/// Port on which miniserve will be listening
pub port: u16,
@@ -275,9 +278,16 @@ impl MiniserveConfig {
.transpose()?
.unwrap_or_default();
+ let temp_upload_directory = args.temp_upload_directory.as_ref().take().map(|v| if v.exists() && v.is_dir() {
+ Ok(v.clone())
+ } else {
+ Err(anyhow!("Upload temporary directory must exist and be a directory. Validate that path {v:?} meets those requirements"))
+ }).transpose()?;
+
Ok(Self {
verbose: args.verbose,
path: args.path.unwrap_or_else(|| PathBuf::from(".")),
+ temp_upload_directory,
port,
interfaces,
auth,
diff --git a/src/errors.rs b/src/errors.rs
index 24997fc..b7d75f2 100644
--- a/src/errors.rs
+++ b/src/errors.rs
@@ -44,7 +44,7 @@ pub enum RuntimeError {
DuplicateFileError,
/// Uploaded hash not correct
- #[error("File hash that was provided did not match end result of uploaded file")]
+ #[error("File hash that was provided did not match checksum of uploaded file")]
UploadHashMismatchError,
/// Upload not allowed
diff --git a/src/file_op.rs b/src/file_op.rs
index 367517a..4e05e6c 100644
--- a/src/file_op.rs
+++ b/src/file_op.rs
@@ -5,8 +5,10 @@ use std::path::{Component, Path, PathBuf};
use actix_web::{http::header, web, HttpRequest, HttpResponse};
use futures::{StreamExt, TryStreamExt};
+use log::{info, warn};
use serde::Deserialize;
-use sha2::{Digest, Sha256};
+use sha2::digest::DynDigest;
+use sha2::{Digest, Sha256, Sha512};
use tempfile::NamedTempFile;
use tokio::io::AsyncWriteExt;
@@ -17,71 +19,125 @@ use crate::{
enum FileHash {
SHA256(String),
+ SHA512(String),
}
impl FileHash {
- pub fn get_hasher(&self) -> impl Digest {
+ pub fn get_hasher(&self) -> Box<dyn DynDigest> {
match self {
- Self::SHA256(_) => Sha256::new(),
+ Self::SHA256(_) => Box::new(Sha256::new()),
+ Self::SHA512(_) => Box::new(Sha512::new()),
+ }
+ }
+
+ pub fn get_hash(&self) -> &str {
+ match self {
+ Self::SHA256(string) => &string,
+ Self::SHA512(string) => &string,
}
}
}
-/// Saves file data from a multipart form field (`field`) to `file_path`, optionally overwriting
-/// existing file.
+/// Saves file data from a multipart form field (`field`) to `file_path`. Optionally overwriting
+/// existing file and comparing the uploaded file checksum to the user provided `file_hash`.
///
/// Returns total bytes written to file.
async fn save_file(
field: &mut actix_multipart::Field,
file_path: PathBuf,
overwrite_files: bool,
- file_hash: Option<&FileHash>,
+ file_checksum: Option<&FileHash>,
+ temporary_upload_directory: Option<&PathBuf>,
) -> Result<u64, RuntimeError> {
if !overwrite_files && file_path.exists() {
return Err(RuntimeError::DuplicateFileError);
}
- let named_temp_file = match tokio::task::spawn_blocking(|| NamedTempFile::new()).await {
+ let temp_upload_directory = temporary_upload_directory.cloned();
+ // Tempfile doesn't support async operations, so we'll do it on a background thread.
+ let temp_upload_directory_task = tokio::task::spawn_blocking(move || {
+ // If the user provided a temporary directory path, then use it.
+ if let Some(temp_directory) = temp_upload_directory {
+ NamedTempFile::new_in(temp_directory)
+ } else {
+ NamedTempFile::new()
+ }
+ });
+
+ // Validate that the temporary task completed successfully.
+ let named_temp_file_task = match temp_upload_directory_task.await {
+ Ok(named_temp_file) => Ok(named_temp_file),
Err(err) => Err(RuntimeError::MultipartError(format!(
- "Failed to complete spawned task to create named temp file. {}",
- err
+ "Failed to complete spawned task to create named temp file. {err}",
))),
- Ok(Err(err)) if err.kind() == ErrorKind::PermissionDenied => Err(
+ }?;
+
+ // Validate that the temporary file was created successfully.
+ let named_temp_file = match named_temp_file_task {
+ Err(err) if err.kind() == ErrorKind::PermissionDenied => Err(
RuntimeError::InsufficientPermissionsError(file_path.display().to_string()),
),
- Ok(Err(err)) => Err(RuntimeError::IoError(
+ Err(err) => Err(RuntimeError::IoError(
format!("Failed to create temporary file {}", file_path.display()),
err,
)),
- Ok(Ok(file)) => Ok(file),
+ Ok(file) => Ok(file),
}?;
+ // Convert the temporary file into a non-temporary file. This allows us
+ // to control the lifecycle of the file. This is useful for us because
+ // we need to convert the temporary file into an async enabled file and
+ // on successful upload, we want to move it to the target directory.
let (file, temp_path) = named_temp_file.keep().map_err(|err| {
RuntimeError::IoError("Failed to keep temporary file".into(), err.error.into())
})?;
let mut temp_file = tokio::fs::File::from_std(file);
let mut written_len = 0;
- let mut hasher = file_hash.as_ref().map(|h| h.get_hasher());
- let mut error: Option<RuntimeError> = None;
+ let mut hasher = file_checksum.as_ref().map(|h| h.get_hasher());
+ let mut save_upload_file_error: Option<RuntimeError> = None;
+ // This while loop takes a stream (in this case `field`) and awaits
+ // new chunks from the multipart request. It reads the file data from
+ // the HTTP connection and writes it to disk until the stream ends
+ // or the multipart request is aborted.
while let Some(Ok(bytes)) = field.next().await {
+ // If the hasher exists (i.e. the user also sent a checksum with the request)
+ // then we want to update the hasher with the newly uploaded bytes.
if let Some(hasher) = hasher.as_mut() {
hasher.update(&bytes)
}
+ // Write the bytes from the stream into our temporary file.
if let Err(e) = temp_file.write_all(&bytes).await {
- error = Some(RuntimeError::IoError(
- "Failed to write to file".to_string(),
- e,
- ));
+ // Failed to write to file. Drop it and return the error
+ save_upload_file_error =
+ Some(RuntimeError::IoError("Failed to write to file".into(), e));
break;
}
+ // record the bytes written to the file.
written_len += bytes.len() as u64;
}
+ if save_upload_file_error.is_none() {
+ // Flush the changes to disk so that we are sure they are there.
+ if let Err(e) = temp_file.flush().await {
+ save_upload_file_error = Some(RuntimeError::IoError(
+ "Failed to flush all the file writes to disk".into(),
+ e,
+ ));
+ }
+ }
+
+ // Drop the file explicitly here because IF there is an error when writing to the
+ // temp file, we won't be able to remove it, as per the comment in `tokio::fs::remove_file`:
+ // > Note that there is no guarantee that the file is immediately deleted
+ // > (e.g. depending on platform, other open file descriptors may prevent immediate removal).
drop(temp_file);
- if let Some(e) = error {
+ // If there was an error during uploading.
+ if let Some(e) = save_upload_file_error {
+ // If there was an error when writing the file to disk, remove it and return
+ // the error that was encountered.
let _ = tokio::fs::remove_file(temp_path).await;
return Err(e);
}
@@ -90,26 +146,24 @@ async fn save_file(
// by the user in actix it seems. References:
// - https://github.com/actix/actix-web/issues/1313
// - https://github.com/actix/actix-web/discussions/3011
- // Therefore, we are relying on the fact that the web UI
- // uploads a hash of the file.
+ // Therefore, we are relying on the fact that the web UI uploads a
+ // hash of the file to determine whether it was completely uploaded or not.
if let Some(hasher) = hasher {
- if let Some(FileHash::SHA256(expected_hash)) = file_hash {
+ if let Some(expected_hash) = file_checksum.as_ref().map(|f| f.get_hash()) {
let actual_hash = hex::encode(hasher.finalize());
if &actual_hash != expected_hash {
+ warn!("The expected file hash {expected_hash} did not match the calculated hash of {actual_hash}. This can be caused if a file upload was aborted.");
let _ = tokio::fs::remove_file(&temp_path).await;
return Err(RuntimeError::UploadHashMismatchError);
}
}
}
+ info!("File upload successful to {temp_path:?}. Moving to {file_path:?}",);
if let Err(e) = tokio::fs::rename(&temp_path, &file_path).await {
let _ = tokio::fs::remove_file(&temp_path).await;
return Err(RuntimeError::IoError(
- format!(
- "Failed to move temporary file {} to {}",
- temp_path.display(),
- file_path.display()
- ),
+ format!("Failed to move temporary file {temp_path:?} to {file_path:?}",),
e,
));
}
@@ -126,6 +180,7 @@ async fn handle_multipart(
allow_hidden_paths: bool,
allow_symlinks: bool,
file_hash: Option<&FileHash>,
+ upload_directory: Option<&PathBuf>,
) -> Result<u64, RuntimeError> {
let field_name = field.name().expect("No name field found").to_string();
@@ -239,6 +294,7 @@ async fn handle_multipart(
path.join(filename_path),
overwrite_files,
file_hash,
+ upload_directory,
)
.await
}
@@ -290,25 +346,28 @@ pub async fn upload_file(
)),
}?;
- let mut file_hash: Option<FileHash> = None;
- if let Some(hash) = req
- .headers()
- .get("X-File-Hash")
- .and_then(|h| h.to_str().ok())
- {
- if let Some(hash_funciton) = req
- .headers()
+ let upload_directory = conf.temp_upload_directory.as_ref();
+
+ let file_hash = if let (Some(hash), Some(hash_function)) = (
+ req.headers()
+ .get("X-File-Hash")
+ .and_then(|h| h.to_str().ok()),
+ req.headers()
.get("X-File-Hash-Function")
- .and_then(|h| h.to_str().ok())
- {
- match hash_funciton.to_ascii_uppercase().as_str() {
- "SHA256" => {
- file_hash = Some(FileHash::SHA256(hash.to_string()));
- }
- _ => {}
+ .and_then(|h| h.to_str().ok()),
+ ) {
+ match hash_function.to_ascii_uppercase().as_str() {
+ "SHA256" => Some(FileHash::SHA256(hash.to_string())),
+ "SHA512" => Some(FileHash::SHA512(hash.to_string())),
+ sha => {
+ return Err(RuntimeError::InvalidHttpRequestError(format!(
+ "Invalid header value found for 'X-File-Hash-Function'. Supported values are SHA256 or SHA512. Found {sha}.",
+ )))
}
}
- }
+ } else {
+ None
+ };
let hash_ref = file_hash.as_ref();
actix_multipart::Multipart::new(req.headers(), payload)
@@ -322,6 +381,7 @@ pub async fn upload_file(
conf.show_hidden,
!conf.no_symlinks,
hash_ref,
+ upload_directory,
)
})
.try_collect::<Vec<u64>>()
diff --git a/src/renderer.rs b/src/renderer.rs
index 8a87228..fae3751 100644
--- a/src/renderer.rs
+++ b/src/renderer.rs
@@ -609,7 +609,13 @@ fn chevron_down() -> Markup {
}
/// Partial: page header
-fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favicon_route: &str, css_route: &str) -> Markup {
+fn page_header(
+ title: &str,
+ file_upload: bool,
+ web_file_concurrency: usize,
+ favicon_route: &str,
+ css_route: &str,
+) -> Markup {
html! {
head {
meta charset="utf-8";
@@ -658,7 +664,9 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
const UPLOADING = 'uploading', PENDING = 'pending', COMPLETE = 'complete', CANCELLED = 'cancelled', FAILED = 'failed'
const UPLOAD_ITEM_ORDER = { UPLOADING: 0, PENDING: 1, COMPLETE: 2, CANCELLED: 3, FAILED: 4 }
let CANCEL_UPLOAD = false;
- // File Upload
+
+ // File Upload dom elements. Used for interacting with the
+ // upload container.
const form = document.querySelector('#file_submit');
const uploadArea = document.querySelector('#upload_area');
const uploadTitle = document.querySelector('#upload_title');
@@ -700,6 +708,7 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
dragForm.style.display = 'none';
};
+ // Event listener for toggling the upload widget display on mobile.
uploadWidgetToggle.addEventListener('click', function (e) {
e.preventDefault();
if (uploadArea.style.height === "100vh") {
@@ -713,6 +722,7 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
}
})
+ // Cancel all active and pending uploads
uploadCancelButton.addEventListener('click', function (e) {
e.preventDefault();
CANCEL_UPLOAD = true;
@@ -723,7 +733,11 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
uploadFiles()
})
+ // When uploads start, finish or are cancelled, the UI needs to reactively show
+ // those state updates. This function updates the text on the upload widget to
+ // accurately reflect the state of all uploads.
function updateUploadTextAndList() {
+ // All state is kept as `data-*` attributes on the HTML nodes.
const queryLength = (state) => document.querySelectorAll(`[data-state='${state}']`).length;
const total = document.querySelectorAll("[data-state]").length;
const uploads = queryLength(UPLOADING);
@@ -733,13 +747,13 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
const failed = queryLength(FAILED);
const allCompleted = completed + cancelled + failed;
- // Update header
+ // Update header text based on remaining uploads
let headerText = `${total - allCompleted} uploads remaining...`;
if (total === allCompleted) {
headerText = `Complete! Reloading Page!`
}
- // Update sub header
+ // Build a summary of statuses for sub header
const statuses = []
if (uploads > 0) { statuses.push(`Uploading ${uploads}`) }
if (pending > 0) { statuses.push(`Pending ${pending}`) }
@@ -749,29 +763,37 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
uploadTitle.textContent = headerText
uploadActionText.textContent = statuses.join(', ')
-
- const items = Array.from(uploadList.querySelectorAll('li'));
- items.sort((a, b) => UPLOAD_ITEM_ORDER[a.dataset.state] - UPLOAD_ITEM_ORDER[b.dataset.state]);
- items.forEach((item, index) => {
- if (uploadList.children[index] !== item) {
- uploadList.insertBefore(item, uploadList.children[index]);
- }
- });
- }
-
- async function doWork(iterator, i) {
- for (let [index, item] of iterator) {
- await item();
- updateUploadTextAndList();
- }
}
+ // Initiates the file upload process by disabling the ability for more files to be
+ // uploaded and creating async callbacks for each file that needs to be uploaded.
+ // Given the concurrency set by the server input arguments, it will try to process
+ // that many uploads at once
function uploadFiles() {
fileInput.disabled = true;
+
+ // Map all the files into async callbacks (uploadFile is a function that returns a function)
const callbacks = Array.from(fileInput.files).map(uploadFile);
- const iterator = callbacks.entries();
+
+ // Determine how many uploads may be in flight at once.
const concurrency = CONCURRENCY === 0 ? callbacks.length : CONCURRENCY;
- const workers = Array(concurrency).fill(iterator).map(doWork)
+
+ // Worker function that continuously pulls tasks from the shared queue.
+ async function worker() {
+ while (callbacks.length > 0) {
+ // Remove a task from the front of the queue.
+ const task = callbacks.shift();
+ if (task) {
+ await task();
+ updateUploadTextAndList();
+ }
+ }
+ }
+
+ // Create a shared task queue, drained by `concurrency` workers.
+ const workers = Array.from({ length: concurrency }).map(worker);
+
+ // Wait for all the workers to complete
Promise.allSettled(workers)
.finally(() => {
updateUploadTextAndList();
@@ -797,7 +819,6 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
document.querySelector('input[type="file"]').addEventListener('change', async (e) => {
const file = e.target.files[0];
const hash = await hashFile(file);
- console.log('File hash:', hash);
});
async function get256FileHash(file) {
@@ -807,6 +828,10 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
return hashArray.map(b => b.toString(16).padStart(2, '0')).join('');
}
+ // Upload a file. This function will create a upload item in the upload
+ // widget from an HTML template. It then returns a promise which will
+ // be used to upload the file to the server and control the styles and
+ // interactions on the HTML list item.
function uploadFile(file) {
const fileUploadItem = fileUploadItemTemplate.content.cloneNode(true)
const itemContainer = fileUploadItem.querySelector(".upload_file_item")
@@ -825,8 +850,8 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
uploadList.append(fileUploadItem)
+ // Cancel an upload before it even started.
function preCancelUpload() {
- console.log('cancelled')
preCancel = true;
itemText.classList.add(CANCELLED);
bar.classList.add(CANCELLED);
@@ -841,11 +866,14 @@ fn page_header(title: &str, file_upload: bool, web_file_concurrency: usize, favi
uploadCancelButton.addEventListener("click", preCancelUpload)
cancel.addEventListener("click", preCancelUpload)
- return async () => {
+ // A callback function is returned so that the upload doesn't start until
+ // we want it to. This gives us control over our desired concurrency.
+ return () => {
if (preCancel) {
return Promise.resolve()
}
+ // Upload the single file in a multipart request.
return new Promise(async (resolve, reject) => {
const fileHash = await get256FileHash(file);
const xhr = new XMLHttpRequest();