Diffstat
-rw-r--r--   src/args.rs     |  49
-rw-r--r--   src/config.rs   |   8
-rw-r--r--   src/errors.rs   |   6
-rw-r--r--   src/file_op.rs  | 222
-rw-r--r--   src/renderer.rs | 327
5 files changed, 577 insertions, 35 deletions
diff --git a/src/args.rs b/src/args.rs
index 922e78b..72ade7b 100644
--- a/src/args.rs
+++ b/src/args.rs
@@ -26,6 +26,21 @@ pub struct CliArgs {
     #[arg(value_hint = ValueHint::AnyPath, env = "MINISERVE_PATH")]
     pub path: Option<PathBuf>,
 
+    /// The path where file uploads will be written before being moved to their
+    /// correct location. It's wise to make sure that this directory is backed by
+    /// disk and not by memory.
+    ///
+    /// This value will only be used **IF** file uploading is enabled. If this option is
+    /// not set, the operating system's default temporary directory will be used.
+    #[arg(
+        long = "temp-directory",
+        value_hint = ValueHint::FilePath,
+        requires = "allowed_upload_dir",
+        value_parser(validate_is_dir_and_exists),
+        env = "MINISERVE_TEMP_UPLOAD_DIRECTORY")
+    ]
+    pub temp_upload_directory: Option<PathBuf>,
+
     /// The name of a directory index file to serve, like "index.html"
     ///
     /// Normally, when miniserve serves a directory, it creates a listing for that directory.
@@ -166,6 +181,26 @@ pub struct CliArgs {
     #[arg(short = 'u', long = "upload-files", value_hint = ValueHint::FilePath, num_args(0..=1), value_delimiter(','), env = "MINISERVE_ALLOWED_UPLOAD_DIR")]
     pub allowed_upload_dir: Option<Vec<PathBuf>>,
 
+    /// Configure the number of concurrent uploads from the web interface. The
+    /// upload-files option must be enabled for this setting to matter.
+    ///
+    /// For example, a value of 4 means that the web browser will only upload
+    /// 4 files at a time to the web server when using the web interface.
+    ///
+    /// When the value is kept at 0, the browser attempts to start all the
+    /// uploads at once.
+    ///
+    /// NOTE: Web pages have a limit on how many active HTTP connections they
+    /// can make at one time, so even if you set a concurrency limit of
+    /// 100, the browser might only make progress on as many connections as
+    /// it allows the web page to have open.
+    #[arg(
+        long = "web-upload-files-concurrency",
+        env = "MINISERVE_WEB_UPLOAD_CONCURRENCY",
+        default_value = "0"
+    )]
+    pub web_upload_concurrency: usize,
+
     /// Enable creating directories
     #[arg(
         short = 'U',
@@ -322,6 +357,20 @@ fn parse_interface(src: &str) -> Result<IpAddr, std::net::AddrParseError> {
     src.parse::<IpAddr>()
 }
 
+/// Validate that the path passed in is a directory and that it exists.
+fn validate_is_dir_and_exists(s: &str) -> Result<PathBuf, String> {
+    let path = PathBuf::from(s);
+    if path.exists() && path.is_dir() {
+        Ok(path)
+    } else {
+        Err(format!(
+            "Upload temporary directory must exist and be a directory. \
+             Validate that path {:?} meets those requirements.",
+            path
+        ))
+    }
+}
+
 #[derive(Clone, Debug, thiserror::Error)]
 pub enum AuthParseError {
     /// Might occur if the HTTP credential string does not respect the expected format
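Note: the new `validate_is_dir_and_exists` parser is easy to exercise on its own. A minimal test sketch (not part of this commit), assuming the `tempfile` crate that this change already pulls in:

```rust
#[cfg(test)]
mod temp_dir_validation_tests {
    use super::*;

    #[test]
    fn accepts_an_existing_directory() {
        // tempfile::tempdir() creates a real directory on disk,
        // so the validator should accept it.
        let dir = tempfile::tempdir().unwrap();
        assert!(validate_is_dir_and_exists(dir.path().to_str().unwrap()).is_ok());
    }

    #[test]
    fn rejects_a_missing_path() {
        // A path that does not exist must be rejected with the error
        // message built in the `else` branch of the validator.
        assert!(validate_is_dir_and_exists("/path/that/does/not/exist").is_err());
    }
}
```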
diff --git a/src/config.rs b/src/config.rs
index 953bba1..4404959 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -33,6 +33,9 @@ pub struct MiniserveConfig {
     /// Path to be served by miniserve
     pub path: std::path::PathBuf,
 
+    /// Temporary directory that should be used when files are uploaded to the server
+    pub temp_upload_directory: Option<std::path::PathBuf>,
+
     /// Port on which miniserve will be listening
     pub port: u16,
 
@@ -101,6 +104,9 @@ pub struct MiniserveConfig {
     /// Enable file upload
     pub file_upload: bool,
 
+    /// Max amount of concurrency when uploading multiple files
+    pub web_upload_concurrency: usize,
+
     /// List of allowed upload directories
     pub allowed_upload_dir: Vec<String>,
 
@@ -275,6 +281,7 @@ impl MiniserveConfig {
         Ok(Self {
             verbose: args.verbose,
             path: args.path.unwrap_or_else(|| PathBuf::from(".")),
+            temp_upload_directory: args.temp_upload_directory,
             port,
             interfaces,
             auth,
@@ -295,6 +302,7 @@ impl MiniserveConfig {
             show_qrcode: args.qrcode,
             mkdir_enabled: args.mkdir_enabled,
             file_upload: args.allowed_upload_dir.is_some(),
+            web_upload_concurrency: args.web_upload_concurrency,
             allowed_upload_dir,
             uploadable_media_type,
             tar_enabled: args.enable_tar,
diff --git a/src/errors.rs b/src/errors.rs
index 99c15ff..b7d75f2 100644
--- a/src/errors.rs
+++ b/src/errors.rs
@@ -43,6 +43,10 @@ pub enum RuntimeError {
     #[error("File already exists, and the overwrite_files option has not been set")]
     DuplicateFileError,
 
+    /// Uploaded hash not correct
+    #[error("File hash that was provided did not match checksum of uploaded file")]
+    UploadHashMismatchError,
+
     /// Upload not allowed
     #[error("Upload not allowed to this directory")]
     UploadForbiddenError,
@@ -86,6 +90,7 @@ impl ResponseError for RuntimeError {
         use StatusCode as S;
         match self {
             E::IoError(_, _) => S::INTERNAL_SERVER_ERROR,
+            E::UploadHashMismatchError => S::BAD_REQUEST,
             E::MultipartError(_) => S::BAD_REQUEST,
             E::DuplicateFileError => S::CONFLICT,
             E::UploadForbiddenError => S::FORBIDDEN,
@@ -132,6 +137,7 @@ where
         let res = fut.await?.map_into_boxed_body();
 
         if (res.status().is_client_error() || res.status().is_server_error())
+            && res.request().path() != "/upload"
             && res
                 .headers()
                 .get(header::CONTENT_TYPE)
diff --git a/src/file_op.rs b/src/file_op.rs
index 18bdcbe..afd6449 100644
--- a/src/file_op.rs
+++ b/src/file_op.rs
@@ -4,10 +4,12 @@ use std::io::ErrorKind;
 use std::path::{Component, Path, PathBuf};
 
 use actix_web::{http::header, web, HttpRequest, HttpResponse};
-use futures::TryFutureExt;
-use futures::TryStreamExt;
+use futures::{StreamExt, TryStreamExt};
+use log::{info, warn};
 use serde::Deserialize;
-use tokio::fs::File;
+use sha2::digest::DynDigest;
+use sha2::{Digest, Sha256, Sha512};
+use tempfile::NamedTempFile;
 use tokio::io::AsyncWriteExt;
 
 use crate::{
@@ -15,52 +17,183 @@ use crate::{
     file_utils::sanitize_path,
 };
 
-/// Saves file data from a multipart form field (`field`) to `file_path`, optionally overwriting
-/// existing file.
+enum FileHash {
+    SHA256(String),
+    SHA512(String),
+}
+
+impl FileHash {
+    pub fn get_hasher(&self) -> Box<dyn DynDigest> {
+        match self {
+            Self::SHA256(_) => Box::new(Sha256::new()),
+            Self::SHA512(_) => Box::new(Sha512::new()),
+        }
+    }
+
+    pub fn get_hash(&self) -> &str {
+        match self {
+            Self::SHA256(string) => string,
+            Self::SHA512(string) => string,
+        }
+    }
+}
+
+/// Saves file data from a multipart form field (`field`) to `file_path`, optionally overwriting
+/// the existing file and comparing the uploaded file's checksum to the user-provided `file_checksum`.
 ///
 /// Returns total bytes written to file.
 async fn save_file(
-    field: actix_multipart::Field,
+    field: &mut actix_multipart::Field,
     file_path: PathBuf,
     overwrite_files: bool,
+    file_checksum: Option<&FileHash>,
+    temporary_upload_directory: Option<&PathBuf>,
 ) -> Result<u64, RuntimeError> {
     if !overwrite_files && file_path.exists() {
         return Err(RuntimeError::DuplicateFileError);
     }
 
-    let file = match File::create(&file_path).await {
+    let temp_upload_directory = temporary_upload_directory.cloned();
+    // Tempfile doesn't support async operations, so we'll do it on a background thread.
+    let temp_upload_directory_task = tokio::task::spawn_blocking(move || {
+        // If the user provided a temporary directory path, then use it.
+        if let Some(temp_directory) = temp_upload_directory {
+            NamedTempFile::new_in(temp_directory)
+        } else {
+            NamedTempFile::new()
+        }
+    });
+
+    // Validate that the spawned task completed successfully.
+    let named_temp_file_task = match temp_upload_directory_task.await {
+        Ok(named_temp_file) => Ok(named_temp_file),
+        Err(err) => Err(RuntimeError::MultipartError(format!(
+            "Failed to complete spawned task to create named temp file. {err}",
+        ))),
+    }?;
+
+    // Validate that the temporary file was created successfully.
+    let named_temp_file = match named_temp_file_task {
         Err(err) if err.kind() == ErrorKind::PermissionDenied => Err(
             RuntimeError::InsufficientPermissionsError(file_path.display().to_string()),
         ),
         Err(err) => Err(RuntimeError::IoError(
-            format!("Failed to create {}", file_path.display()),
+            format!("Failed to create temporary file {}", file_path.display()),
             err,
         )),
-        Ok(v) => Ok(v),
+        Ok(file) => Ok(file),
     }?;
 
-    let (_, written_len) = field
-        .map_err(|x| RuntimeError::MultipartError(x.to_string()))
-        .try_fold((file, 0u64), |(mut file, written_len), bytes| async move {
-            file.write_all(bytes.as_ref())
-                .map_err(|e| RuntimeError::IoError("Failed to write to file".to_string(), e))
-                .await?;
-            Ok((file, written_len + bytes.len() as u64))
-        })
-        .await?;
+    // Convert the temporary file into a non-temporary file. This allows us
+    // to control the lifecycle of the file. This is useful for us because
+    // we need to convert the temporary file into an async enabled file and
+    // on successful upload, we want to move it to the target directory.
+    let (file, temp_path) = named_temp_file
+        .keep()
+        .map_err(|err| RuntimeError::IoError("Failed to keep temporary file".into(), err.error))?;
+    let mut temp_file = tokio::fs::File::from_std(file);
+
+    let mut written_len = 0;
+    let mut hasher = file_checksum.as_ref().map(|h| h.get_hasher());
+    let mut save_upload_file_error: Option<RuntimeError> = None;
+
+    // This while loop takes a stream (in this case `field`) and awaits
+    // new chunks from the HTTP connection. It keeps reading the file
+    // from the connection and writing it to disk until the stream from
+    // the multipart request ends or is aborted.
+    while let Some(Ok(bytes)) = field.next().await {
+        // If the hasher exists (i.e. the user has also sent a checksum with the request),
+        // then we want to update the hasher with the newly uploaded bytes.
+        if let Some(hasher) = hasher.as_mut() {
+            hasher.update(&bytes)
+        }
+        // Write the bytes from the stream into our temporary file.
+        if let Err(e) = temp_file.write_all(&bytes).await {
+            // Failed to write to file. Drop it and return the error
+            save_upload_file_error =
+                Some(RuntimeError::IoError("Failed to write to file".into(), e));
+            break;
+        }
+        // Record the bytes written to the file.
+        written_len += bytes.len() as u64;
+    }
+
+    if save_upload_file_error.is_none() {
+        // Flush the changes to disk so that we are sure they are there.
+        if let Err(e) = temp_file.flush().await {
+            save_upload_file_error = Some(RuntimeError::IoError(
+                "Failed to flush all the file writes to disk".into(),
+                e,
+            ));
+        }
+    }
+
+    // Drop the file explicitly here because IF there is an error when writing to the
+    // temp file, we won't be able to remove it, as per the comment in `tokio::fs::remove_file`:
+    // > Note that there is no guarantee that the file is immediately deleted
+    // > (e.g. depending on platform, other open file descriptors may prevent immediate removal).
+    drop(temp_file);
+
+    // If there was an error during uploading.
+    if let Some(e) = save_upload_file_error {
+        // If there was an error when writing the file to disk, remove it and return
+        // the error that was encountered.
+        let _ = tokio::fs::remove_file(temp_path).await;
+        return Err(e);
+    }
+
+    // There isn't a way to get notified when a request is cancelled
+    // by the user in actix it seems. References:
+    // - https://github.com/actix/actix-web/issues/1313
+    // - https://github.com/actix/actix-web/discussions/3011
+    // Therefore, we are relying on the fact that the web UI uploads a
+    // hash of the file to determine whether it was completely uploaded or not.
+    if let Some(hasher) = hasher {
+        if let Some(expected_hash) = file_checksum.as_ref().map(|f| f.get_hash()) {
+            let actual_hash = hex::encode(hasher.finalize());
+            if actual_hash != expected_hash {
+                warn!("The expected file hash {expected_hash} did not match the calculated hash of {actual_hash}. This can be caused if a file upload was aborted.");
+                let _ = tokio::fs::remove_file(&temp_path).await;
+                return Err(RuntimeError::UploadHashMismatchError);
+            }
+        }
+    }
+
+    info!("File upload successful to {temp_path:?}. Moving to {file_path:?}",);
+    if let Err(e) = tokio::fs::rename(&temp_path, &file_path).await {
+        let _ = tokio::fs::remove_file(&temp_path).await;
+        return Err(RuntimeError::IoError(
+            format!("Failed to move temporary file {temp_path:?} to {file_path:?}",),
+            e,
+        ));
+    }
 
     Ok(written_len)
 }
 
-/// Handles a single field in a multipart form
-async fn handle_multipart(
-    mut field: actix_multipart::Field,
-    path: PathBuf,
+struct HandleMultipartOpts<'a> {
     overwrite_files: bool,
     allow_mkdir: bool,
     allow_hidden_paths: bool,
     allow_symlinks: bool,
+    file_hash: Option<&'a FileHash>,
+    upload_directory: Option<&'a PathBuf>,
+}
+
+/// Handles a single field in a multipart form
+async fn handle_multipart(
+    mut field: actix_multipart::Field,
+    path: PathBuf,
+    opts: HandleMultipartOpts<'_>,
 ) -> Result<u64, RuntimeError> {
+    let HandleMultipartOpts {
+        overwrite_files,
+        allow_mkdir,
+        allow_hidden_paths,
+        allow_symlinks,
+        file_hash,
+        upload_directory,
+    } = opts;
     let field_name = field.name().expect("No name field found").to_string();
 
     match tokio::fs::metadata(&path).await {
@@ -168,7 +301,14 @@ async fn handle_multipart(
         }
     }
 
-    save_file(field, path.join(filename_path), overwrite_files).await
+    save_file(
+        &mut field,
+        path.join(filename_path),
+        overwrite_files,
+        file_hash,
+        upload_directory,
+    )
+    .await
 }
 
 /// Query parameters used by upload and rm APIs
@@ -218,16 +358,44 @@ pub async fn upload_file(
         )),
     }?;
 
+    let upload_directory = conf.temp_upload_directory.as_ref();
+
+    let file_hash = if let (Some(hash), Some(hash_function)) = (
+        req.headers()
+            .get("X-File-Hash")
+            .and_then(|h| h.to_str().ok()),
+        req.headers()
+            .get("X-File-Hash-Function")
+            .and_then(|h| h.to_str().ok()),
+    ) {
+        match hash_function.to_ascii_uppercase().as_str() {
+            "SHA256" => Some(FileHash::SHA256(hash.to_string())),
+            "SHA512" => Some(FileHash::SHA512(hash.to_string())),
+            sha => {
+                return Err(RuntimeError::InvalidHttpRequestError(format!(
+                    "Invalid header value found for 'X-File-Hash-Function'. Supported values are SHA256 or SHA512. Found {sha}.",
+                )))
+            }
+        }
+    } else {
+        None
+    };
+
+    let hash_ref = file_hash.as_ref();
     actix_multipart::Multipart::new(req.headers(), payload)
         .map_err(|x| RuntimeError::MultipartError(x.to_string()))
         .and_then(|field| {
             handle_multipart(
                 field,
                 non_canonicalized_target_dir.clone(),
-                conf.overwrite_files,
-                conf.mkdir_enabled,
-                conf.show_hidden,
-                !conf.no_symlinks,
+                HandleMultipartOpts {
+                    overwrite_files: conf.overwrite_files,
+                    allow_mkdir: conf.mkdir_enabled,
+                    allow_hidden_paths: conf.show_hidden,
+                    allow_symlinks: !conf.no_symlinks,
+                    file_hash: hash_ref,
+                    upload_directory,
+                },
             )
         })
        .try_collect::<Vec<u64>>()
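Note: the server-side check above compares the `X-File-Hash` header against a hex-encoded digest of the uploaded bytes. A minimal sketch of how a client could derive that value for the SHA256 variant, using the same `sha2` and `hex` crates the handler uses (sending the multipart request itself is omitted; the file name is hypothetical):

```rust
use sha2::{Digest, Sha256};

/// Hex-encoded SHA-256 digest, i.e. the value expected in `X-File-Hash`
/// when `X-File-Hash-Function: SHA256` is sent alongside the upload.
fn file_hash_header_value(contents: &[u8]) -> String {
    let mut hasher = Sha256::new();
    hasher.update(contents);
    hex::encode(hasher.finalize())
}

fn main() {
    // Hypothetical upload payload read from disk.
    let body = std::fs::read("upload.bin").expect("read upload payload");
    println!("X-File-Hash: {}", file_hash_header_value(&body));
    println!("X-File-Hash-Function: SHA256");
}
```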
diff --git a/src/renderer.rs b/src/renderer.rs
index ca49413..fae3751 100644
--- a/src/renderer.rs
+++ b/src/renderer.rs
@@ -52,7 +52,7 @@ pub fn page(
     html! {
         (DOCTYPE)
         html {
-            (page_header(&title_path, conf.file_upload, &conf.favicon_route, &conf.css_route))
+            (page_header(&title_path, conf.file_upload, conf.web_upload_concurrency, &conf.favicon_route, &conf.css_route))
 
             body #drop-container {
@@ -174,6 +174,40 @@ pub fn page(
                         }
                     }
                 }
+                div.upload_area id="upload_area" {
+                    template id="upload_file_item" {
+                        li.upload_file_item {
+                            div.upload_file_container {
+                                div.upload_file_text {
+                                    span.file_upload_percent { "" }
+                                    {" - "}
+                                    span.file_size { "" }
+                                    {" - "}
+                                    span.file_name { "" }
+                                }
+                                button.file_cancel_upload { "✖" }
+                            }
+                            div.file_progress_bar {}
+                        }
+                    }
+                    div.upload_container {
+                        div.upload_header {
+                            h4 style="margin:0px" id="upload_title" {}
+                            svg id="upload-toggle" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" class="size-6" {
+                                path stroke-linecap="round" stroke-linejoin="round" d="m4.5 15.75 7.5-7.5 7.5 7.5" {}
+                            }
+                        }
+                        div.upload_action {
+                            p id="upload_action_text" { "Starting upload..." }
+                            button.upload_cancel id="upload_cancel" { "CANCEL" }
+                        }
+                        div.upload_files {
+                            ul.upload_file_list id="upload_file_list" {
+
+                            }
+                        }
+                    }
+                }
             }
         }
     }
@@ -575,7 +609,13 @@ fn chevron_down() -> Markup {
 }
 
 /// Partial: page header
-fn page_header(title: &str, file_upload: bool, favicon_route: &str, css_route: &str) -> Markup {
+fn page_header(
+    title: &str,
+    file_upload: bool,
+    web_file_concurrency: usize,
+    favicon_route: &str,
+    css_route: &str,
+) -> Markup {
     html! {
         head {
             meta charset="utf-8";
@@ -616,9 +656,26 @@ fn page_header(title: &str, file_upload: bool, favicon_route: &str, css_route: &
             "#))
 
             @if file_upload {
-                (PreEscaped(r#"
-                <script>
+                script {
+                    (format!("const CONCURRENCY = {web_file_concurrency};"))
+                    (PreEscaped(r#"
                     window.onload = function() {
+                        // Constants
+                        const UPLOADING = 'uploading', PENDING = 'pending', COMPLETE = 'complete', CANCELLED = 'cancelled', FAILED = 'failed'
+                        const UPLOAD_ITEM_ORDER = { UPLOADING: 0, PENDING: 1, COMPLETE: 2, CANCELLED: 3, FAILED: 4 }
+                        let CANCEL_UPLOAD = false;
+
+                        // File Upload dom elements. Used for interacting with the
+                        // upload container.
+                        const form = document.querySelector('#file_submit');
+                        const uploadArea = document.querySelector('#upload_area');
+                        const uploadTitle = document.querySelector('#upload_title');
+                        const uploadActionText = document.querySelector('#upload_action_text');
+                        const uploadCancelButton = document.querySelector('#upload_cancel');
+                        const uploadList = document.querySelector('#upload_file_list');
+                        const fileUploadItemTemplate = document.querySelector('#upload_file_item');
+                        const uploadWidgetToggle = document.querySelector('#upload-toggle');
+
                         const dropContainer = document.querySelector('#drop-container');
                         const dragForm = document.querySelector('.drag-form');
                         const fileInput = document.querySelector('#file-input');
@@ -647,12 +704,266 @@ fn page_header(title: &str, file_upload: bool, favicon_route: &str, css_route: &
                         dropContainer.ondrop = function(e) {
                             e.preventDefault();
                             fileInput.files = e.dataTransfer.files;
-                            file_submit.submit();
+                            form.requestSubmit();
                             dragForm.style.display = 'none';
                         };
+
+                        // Event listener for toggling the upload widget display on mobile.
+                        uploadWidgetToggle.addEventListener('click', function (e) {
+                            e.preventDefault();
+                            if (uploadArea.style.height === "100vh") {
+                                uploadArea.style = ""
+                                document.body.style = ""
+                                uploadWidgetToggle.style = ""
+                            } else {
+                                uploadArea.style.height = "100vh"
+                                document.body.style = "overflow: hidden"
+                                uploadWidgetToggle.style = "transform: rotate(180deg)"
+                            }
+                        })
+
+                        // Cancel all active and pending uploads
+                        uploadCancelButton.addEventListener('click', function (e) {
+                            e.preventDefault();
+                            CANCEL_UPLOAD = true;
+                        })
+
+                        form.addEventListener('submit', function (e) {
+                            e.preventDefault()
+                            uploadFiles()
+                        })
+
+                        // When uploads start, finish or are cancelled, the UI needs to reactively show
+                        // those state updates. This function updates the text on the upload widget to
+                        // accurately show the state of all uploads.
+                        function updateUploadTextAndList() {
+                            // All state is kept as `data-*` attributes on the HTML nodes.
+                            const queryLength = (state) => document.querySelectorAll(`[data-state='${state}']`).length;
+                            const total = document.querySelectorAll("[data-state]").length;
+                            const uploads = queryLength(UPLOADING);
+                            const pending = queryLength(PENDING);
+                            const completed = queryLength(COMPLETE);
+                            const cancelled = queryLength(CANCELLED);
+                            const failed = queryLength(FAILED);
+                            const allCompleted = completed + cancelled + failed;
+
+                            // Update header text based on remaining uploads
+                            let headerText = `${total - allCompleted} uploads remaining...`;
+                            if (total === allCompleted) {
+                                headerText = `Complete! Reloading Page!`
+                            }
+
+                            // Build a summary of statuses for the sub header
+                            const statuses = []
+                            if (uploads > 0) { statuses.push(`Uploading ${uploads}`) }
+                            if (pending > 0) { statuses.push(`Pending ${pending}`) }
+                            if (completed > 0) { statuses.push(`Complete ${completed}`) }
+                            if (cancelled > 0) { statuses.push(`Cancelled ${cancelled}`) }
+                            if (failed > 0) { statuses.push(`Failed ${failed}`) }
+
+                            uploadTitle.textContent = headerText
+                            uploadActionText.textContent = statuses.join(', ')
+                        }
+
+                        // Initiates the file upload process by disabling the ability for more files to be
+                        // uploaded and creating async callbacks for each file that needs to be uploaded.
+                        // Given the concurrency set by the server input arguments, it will try to process
+                        // that many uploads at once.
+                        function uploadFiles() {
+                            fileInput.disabled = true;
+
+                            // Map all the files into async callbacks (uploadFile is a function that returns a function)
+                            const callbacks = Array.from(fileInput.files).map(uploadFile);
+
+                            // Determine the effective concurrency (0 means upload everything at once)
+                            const concurrency = CONCURRENCY === 0 ? callbacks.length : CONCURRENCY;
+
+                            // Worker function that continuously pulls tasks from the shared queue.
+                            async function worker() {
+                                while (callbacks.length > 0) {
+                                    // Remove a task from the front of the queue.
+                                    const task = callbacks.shift();
+                                    if (task) {
+                                        await task();
+                                        updateUploadTextAndList();
+                                    }
+                                }
+                            }
+
+                            // Start `concurrency` workers that consume the shared queue of upload tasks.
+                            const workers = Array.from({ length: concurrency }).map(worker);
+
+                            // Wait for all the workers to complete
+                            Promise.allSettled(workers)
+                                .finally(() => {
+                                    updateUploadTextAndList();
+                                    form.reset();
+                                    setTimeout(() => { uploadArea.classList.remove('active'); }, 1000)
+                                    setTimeout(() => { window.location.reload(); }, 1500)
+                                })
+
+                            updateUploadTextAndList();
+                            uploadArea.classList.add('active')
+                            uploadList.scrollTo(0, 0)
+                        }
+
+                        function formatBytes(bytes, decimals) {
+                            if (bytes == 0) return '0 Bytes';
+                            var k = 1024,
+                                dm = decimals || 2,
+                                sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'],
+                                i = Math.floor(Math.log(bytes) / Math.log(k));
+                            return parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i];
+                        }
+
+                        document.querySelector('input[type="file"]').addEventListener('change', async (e) => {
+                            const file = e.target.files[0];
+                            const hash = await get256FileHash(file);
+                        });
+
+                        async function get256FileHash(file) {
+                            const arrayBuffer = await file.arrayBuffer();
+                            const hashBuffer = await crypto.subtle.digest('SHA-256', arrayBuffer);
+                            const hashArray = Array.from(new Uint8Array(hashBuffer));
+                            return hashArray.map(b => b.toString(16).padStart(2, '0')).join('');
+                        }
+
+                        // Upload a file. This function will create an upload item in the upload
+                        // widget from an HTML template. It then returns a promise which will
+                        // be used to upload the file to the server and control the styles and
+                        // interactions on the HTML list item.
+                        function uploadFile(file) {
+                            const fileUploadItem = fileUploadItemTemplate.content.cloneNode(true)
+                            const itemContainer = fileUploadItem.querySelector(".upload_file_item")
+                            const itemText = fileUploadItem.querySelector(".upload_file_text")
+                            const size = fileUploadItem.querySelector(".file_size")
+                            const name = fileUploadItem.querySelector(".file_name")
+                            const percentText = fileUploadItem.querySelector(".file_upload_percent")
+                            const bar = fileUploadItem.querySelector(".file_progress_bar")
+                            const cancel = fileUploadItem.querySelector(".file_cancel_upload")
+                            let preCancel = false;
+
+                            itemContainer.dataset.state = PENDING
+                            name.textContent = file.name
+                            size.textContent = formatBytes(file.size)
+                            percentText.textContent = "0%"
+
+                            uploadList.append(fileUploadItem)
+
+                            // Cancel an upload before it has even started.
+                            function preCancelUpload() {
+                                preCancel = true;
+                                itemText.classList.add(CANCELLED);
+                                bar.classList.add(CANCELLED);
+                                itemContainer.dataset.state = CANCELLED;
+                                itemContainer.style.background = 'var(--upload_modal_file_upload_complete_background)';
+                                cancel.disabled = true;
+                                cancel.removeEventListener("click", preCancelUpload);
+                                uploadCancelButton.removeEventListener("click", preCancelUpload);
+                                updateUploadTextAndList();
+                            }
+
+                            uploadCancelButton.addEventListener("click", preCancelUpload)
+                            cancel.addEventListener("click", preCancelUpload)
+
+                            // A callback function is returned so that the upload doesn't start until
+                            // we want it to. This is so that we have control over our desired concurrency.
+                            return () => {
+                                if (preCancel) {
+                                    return Promise.resolve()
+                                }
+
+                                // Upload the single file in a multipart request.
+                                return new Promise(async (resolve, reject) => {
+                                    const fileHash = await get256FileHash(file);
+                                    const xhr = new XMLHttpRequest();
+                                    const formData = new FormData();
+                                    formData.append('file', file);
+
+                                    function onReadyStateChange(e) {
+                                        if (e.target.readyState == 4) {
+                                            if (e.target.status == 200) {
+                                                completeSuccess()
+                                            } else {
+                                                failedUpload(e.target.status)
+                                            }
+                                        }
+                                    }
+
+                                    function onError(e) {
+                                        failedUpload()
+                                    }
+
+                                    function onAbort(e) {
+                                        cancelUpload()
+                                    }
+
+                                    function onProgress (e) {
+                                        update(Math.round((e.loaded / e.total) * 100));
+                                    }
+
+                                    function update(uploadPercent) {
+                                        let wholeNumber = Math.floor(uploadPercent)
+                                        percentText.textContent = `${wholeNumber}%`
+                                        bar.style.width = `${wholeNumber}%`
+                                    }
+
+                                    function completeSuccess() {
+                                        cancel.textContent = '✔';
+                                        cancel.classList.add(COMPLETE);
+                                        bar.classList.add(COMPLETE);
+                                        cleanUp(COMPLETE)
+                                    }
+
+                                    function failedUpload(statusCode) {
+                                        cancel.textContent = `${statusCode} ⚠`;
+                                        itemText.classList.add(FAILED);
+                                        bar.classList.add(FAILED);
+                                        cleanUp(FAILED);
+                                    }
+
+                                    function cancelUpload() {
+                                        xhr.abort()
+                                        itemText.classList.add(CANCELLED);
+                                        bar.classList.add(CANCELLED);
+                                        cleanUp(CANCELLED);
+                                    }
+
+                                    function cleanUp(state) {
+                                        itemContainer.dataset.state = state;
+                                        itemContainer.style.background = 'var(--upload_modal_file_upload_complete_background)';
+                                        cancel.disabled = true;
+                                        cancel.removeEventListener("click", cancelUpload)
+                                        uploadCancelButton.removeEventListener("click", cancelUpload)
+                                        xhr.removeEventListener('readystatechange', onReadyStateChange);
+                                        xhr.removeEventListener("error", onError);
+                                        xhr.removeEventListener("abort", onAbort);
+                                        xhr.upload.removeEventListener('progress', onProgress);
+                                        resolve()
+                                    }
+
+                                    uploadCancelButton.addEventListener("click", cancelUpload)
+                                    cancel.addEventListener("click", cancelUpload)
+
+                                    if (CANCEL_UPLOAD) {
+                                        cancelUpload()
+                                    } else {
+                                        itemContainer.dataset.state = UPLOADING
+                                        xhr.addEventListener('readystatechange', onReadyStateChange);
+                                        xhr.addEventListener("error", onError);
+                                        xhr.addEventListener("abort", onAbort);
+                                        xhr.upload.addEventListener('progress', onProgress);
+                                        xhr.open('post', form.getAttribute("action"), true);
+                                        xhr.setRequestHeader('X-File-Hash', fileHash);
+                                        xhr.setRequestHeader('X-File-Hash-Function', 'SHA256');
+                                        xhr.send(formData);
+                                    }
+                                })
+                            }
+                        }
                     }
-                </script>
-                "#))
+                    "#))
+                }
             }
         }
     }
@@ -681,7 +992,7 @@ pub fn render_error(
     html! {
         (DOCTYPE)
         html {
-            (page_header(&error_code.to_string(), false, &conf.favicon_route, &conf.css_route))
+            (page_header(&error_code.to_string(), false, conf.web_upload_concurrency, &conf.favicon_route, &conf.css_route))
 
             body {
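Note: the `worker()` loop in the JavaScript above is a small bounded worker pool: `CONCURRENCY` workers pull upload tasks from a shared queue until it is empty. For reference, the same idea can be expressed on the Rust side with the `futures` crate that the server code already uses (illustrative only, not part of this change):

```rust
use futures::{stream, StreamExt};

/// Run `tasks`, allowing at most `limit` of them to be in flight at once.
/// A `limit` of 0 mirrors the web UI's "start everything at once" behaviour.
async fn run_with_limit<F>(tasks: Vec<F>, limit: usize)
where
    F: std::future::Future<Output = ()>,
{
    let limit = if limit == 0 { tasks.len().max(1) } else { limit };
    stream::iter(tasks)
        // Poll at most `limit` futures concurrently, like the JS worker pool.
        .buffer_unordered(limit)
        .collect::<Vec<()>>()
        .await;
}
```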