fix broken uploads (fix #540), simplify cfg, fix format

This commit is contained in:
jb-alvarado 2024-02-21 14:43:39 +01:00
parent 02b4e9d964
commit 14abd7d5be
9 changed files with 73 additions and 43 deletions

9
Cargo.lock generated
View File

@ -1231,7 +1231,7 @@ checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5"
[[package]]
name = "ffplayout"
-version = "0.20.4"
+version = "0.20.5"
dependencies = [
"chrono",
"clap",
@ -1253,7 +1253,7 @@ dependencies = [
[[package]]
name = "ffplayout-api"
-version = "0.20.4"
+version = "0.20.5"
dependencies = [
"actix-files",
"actix-multipart",
@ -1292,7 +1292,7 @@ dependencies = [
[[package]]
name = "ffplayout-lib"
-version = "0.20.4"
+version = "0.20.5"
dependencies = [
"chrono",
"crossbeam-channel",
@ -1302,6 +1302,7 @@ dependencies = [
"lettre",
"lexical-sort",
"log",
+"num-traits",
"rand",
"regex",
"reqwest",
@ -3461,7 +3462,7 @@ dependencies = [
[[package]]
name = "tests"
-version = "0.20.4"
+version = "0.20.5"
dependencies = [
"chrono",
"crossbeam-channel",

View File

@ -4,7 +4,7 @@ default-members = ["ffplayout-api", "ffplayout-engine", "tests"]
resolver = "2"
[workspace.package]
-version = "0.20.4"
+version = "0.20.5"
license = "GPL-3.0"
repository = "https://github.com/ffplayout/ffplayout"
authors = ["Jonathan Baecker <jonbae77@gmail.com>"]

View File

@ -994,10 +994,18 @@ pub async fn remove(
async fn save_file(
pool: web::Data<Pool<Sqlite>>,
id: web::Path<i32>,
+req: HttpRequest,
payload: Multipart,
obj: web::Query<FileObj>,
) -> Result<HttpResponse, ServiceError> {
-upload(&pool.into_inner(), *id, payload, &obj.path, false).await
+let size: u64 = req
+.headers()
+.get("content-length")
+.and_then(|cl| cl.to_str().ok())
+.and_then(|cls| cls.parse().ok())
+.unwrap_or(0);
+upload(&pool.into_inner(), *id, size, payload, &obj.path, false).await
}
/// **Get File**
@ -1070,6 +1078,7 @@ async fn get_public(public: web::Path<String>) -> Result<actix_files::NamedFile,
async fn import_playlist(
pool: web::Data<Pool<Sqlite>>,
id: web::Path<i32>,
+req: HttpRequest,
payload: Multipart,
obj: web::Query<ImportObj>,
) -> Result<HttpResponse, ServiceError> {
@ -1077,8 +1086,14 @@ async fn import_playlist(
let path = env::temp_dir().join(file);
let (config, _) = playout_config(&pool.clone().into_inner(), &id).await?;
let channel = handles::select_channel(&pool.clone().into_inner(), &id).await?;
+let size: u64 = req
+.headers()
+.get("content-length")
+.and_then(|cl| cl.to_str().ok())
+.and_then(|cls| cls.parse().ok())
+.unwrap_or(0);
-upload(&pool.into_inner(), *id, payload, &path, true).await?;
+upload(&pool.into_inner(), *id, size, payload, &path, true).await?;
import_file(&config, &obj.date, Some(channel.name), &path)?;
fs::remove_file(path)?;

View File

@ -11,8 +11,7 @@ use actix_web::{
use actix_web_grants::authorities::AttachAuthorities;
use actix_web_httpauth::{extractors::bearer::BearerAuth, middleware::HttpAuthentication};
-#[cfg(not(debug_assertions))]
-#[cfg(feature = "embed_frontend")]
+#[cfg(all(not(debug_assertions), feature = "embed_frontend"))]
use actix_web_static_files::ResourceFiles;
use clap::Parser;
@ -34,8 +33,7 @@ use utils::public_path;
use ffplayout_lib::utils::{init_logging, PlayoutConfig};
-#[cfg(not(debug_assertions))]
-#[cfg(feature = "embed_frontend")]
+#[cfg(all(not(debug_assertions), feature = "embed_frontend"))]
include!(concat!(env!("OUT_DIR"), "/generated.rs"));
lazy_static! {
@ -172,8 +170,7 @@ async fn main() -> std::io::Result<()> {
web_app = web_app.service(get_public);
}
-#[cfg(not(debug_assertions))]
-#[cfg(feature = "embed_frontend")]
+#[cfg(all(not(debug_assertions), feature = "embed_frontend"))]
{
// in release mode embed frontend
let generated = generate();
@ -184,10 +181,7 @@ async fn main() -> std::io::Result<()> {
#[cfg(any(debug_assertions, not(feature = "embed_frontend")))]
{
// in debug mode get frontend from path
-web_app = web_app.service(
-Files::new("/", public_path())
-.index_file("index.html"),
-);
+web_app = web_app.service(Files::new("/", public_path()).index_file("index.html"));
}
web_app

View File

@ -308,6 +308,7 @@ async fn valid_path(conn: &Pool<Sqlite>, id: i32, path: &str) -> Result<PathBuf,
pub async fn upload(
conn: &Pool<Sqlite>,
id: i32,
+_size: u64,
mut payload: Multipart,
path: &Path,
abs_path: bool,
@ -324,23 +325,47 @@ pub async fn upload(
.get_filename()
.map_or_else(|| rand_string.to_string(), sanitize_filename::sanitize);
-let filepath;
-if abs_path {
-filepath = path.to_path_buf();
+let filepath = if abs_path {
+path.to_path_buf()
 } else {
-let target_path = valid_path(conn, id, &path.to_string_lossy()).await?;
-filepath = target_path.join(filename);
-}
+valid_path(conn, id, &path.to_string_lossy())
+.await?
+.join(filename)
+};
+let filepath_clone = filepath.clone();
+let _file_size = match filepath.metadata() {
+Ok(metadata) => metadata.len(),
+Err(_) => 0,
+};
+// INFO: File exist check should be enough because file size and content length are different.
+// The error catching in the loop should normally prevent unfinished files from existing on disk.
+// If this is not enough, a second check can be implemented: is_close(file_size as i64, size as i64, 1000)
 if filepath.is_file() {
-return Err(ServiceError::BadRequest("Target already exists!".into()));
+return Err(ServiceError::Conflict("Target already exists!".into()));
 }
-let mut f = web::block(|| std::fs::File::create(filepath)).await??;
+let mut f = web::block(|| std::fs::File::create(filepath_clone)).await??;
-while let Some(chunk) = field.try_next().await? {
-f = web::block(move || f.write_all(&chunk).map(|_| f)).await??;
+loop {
+match field.try_next().await {
+Ok(Some(chunk)) => {
+f = web::block(move || f.write_all(&chunk).map(|_| f)).await??;
+}
+Ok(None) => break,
+Err(e) => {
+if e.to_string().contains("stream is incomplete") {
+info!("Delete non finished file: {filepath:?}");
+tokio::fs::remove_file(filepath).await?
+}
+return Err(e.into());
+}
+}
+}
}

View File

@ -140,9 +140,9 @@ pub fn db_path() -> Result<&'static str, Box<dyn std::error::Error>> {
return Ok(Box::leak(
absolute_path.to_string_lossy().to_string().into_boxed_str(),
));
-} else {
-error!("Given database path is not writable!");
 }
+error!("Given database path is not writable!");
 }
}
@ -169,12 +169,10 @@ pub fn public_path() -> PathBuf {
return path;
}
-#[cfg(debug_assertions)]
-{
-let path = PathBuf::from("./ffplayout-frontend/.output/public/");
-if path.is_dir() {
-return path;
-}
+let path = PathBuf::from("./ffplayout-frontend/.output/public/");
+if cfg!(debug_assertions) && path.is_dir() {
+return path;
+}
PathBuf::from("./public/")

@ -1 +1 @@
-Subproject commit 28bcfcd1a399a9dd5363541f5f01203c7d968e18
+Subproject commit 8eb543ddae035e448ea0886ec1d44256de2e6f07

View File

@ -17,6 +17,7 @@ file-rotate = "0.7"
lettre = { version = "0.11", features = ["builder", "rustls-tls", "smtp-transport"], default-features = false }
lexical-sort = "0.3"
log = "0.4"
+num-traits = "0.2"
rand = "0.8"
regex = "1"
reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls-tls"] }

View File

@ -435,12 +435,8 @@ pub fn file_extension(filename: &Path) -> Option<&str> {
/// Test if given numbers are close to each other,
/// with a third number for setting the maximum range.
-pub fn is_close(a: f64, b: f64, to: f64) -> bool {
-if (a - b).abs() < to {
-return true;
-}
-false
+pub fn is_close<T: num_traits::Signed + std::cmp::PartialOrd>(a: T, b: T, to: T) -> bool {
+(a - b).abs() < to
}
/// add duration from all media clips