
I am still alive in case you were wondering

Honbra 2024-04-14 22:02:46 +02:00
parent 1e0c7f5041
commit 4de6254f08
Signed by: honbra
GPG key ID: B61CC9ADABE2D952
18 changed files with 628 additions and 345 deletions

.gitignore (vendored)

@@ -3,3 +3,4 @@
 /config.toml
 /temp
 /files
+*.env

.sqlx/query-9019613c29507ab3aacc861edc4acd1ec5b4a60f4cae5599557c9b54b19960ea.json (new file)

@@ -0,0 +1,15 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "INSERT INTO file (hash, mime) VALUES ($1, $2) ON CONFLICT DO NOTHING",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Bytea",
+        "Text"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "9019613c29507ab3aacc861edc4acd1ec5b4a60f4cae5599557c9b54b19960ea"
+}

.sqlx/query-d2a03886009405f5abe777c6f3b387df796d340a2119ede3b74bdeccf42c4f51.json (new file)

@@ -0,0 +1,28 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "SELECT file_hash, mime FROM file_key JOIN file ON file_hash = hash WHERE id = $1",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "file_hash",
+        "type_info": "Bytea"
+      },
+      {
+        "ordinal": 1,
+        "name": "mime",
+        "type_info": "Text"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Uuid"
+      ]
+    },
+    "nullable": [
+      false,
+      true
+    ]
+  },
+  "hash": "d2a03886009405f5abe777c6f3b387df796d340a2119ede3b74bdeccf42c4f51"
+}

.sqlx/query-e3ba3d043ee6f16689304d82ec02a1444fddb6e43323769ccd0d42ea5d9570c0.json (new file)

@@ -0,0 +1,15 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "INSERT INTO file_key (id, file_hash) VALUES ($1, $2)",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Uuid",
+        "Bytea"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "e3ba3d043ee6f16689304d82ec02a1444fddb6e43323769ccd0d42ea5d9570c0"
+}
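
The three JSON files above are sqlx offline-mode query metadata, written by `cargo sqlx prepare` (sqlx-cli is in the flake's dev shell); they let the `query!` invocations below type-check against PostgreSQL at build time without a live database. A minimal sketch of the call behind the first entry, assuming a `PgPool` and a SHA-256 digest passed as `&[u8]` (the function name is hypothetical):

use sqlx::{query, PgPool};

// Insert a file row keyed by its content hash; ON CONFLICT DO NOTHING makes
// the content-addressed insert idempotent. The compile-time metadata for
// this exact statement is cached in the first .sqlx entry above.
async fn record_file(db: &PgPool, hash: &[u8], mime: &str) -> sqlx::Result<()> {
    query!(
        "INSERT INTO file (hash, mime) VALUES ($1, $2) ON CONFLICT DO NOTHING",
        hash,
        mime,
    )
    .execute(db)
    .await?;
    Ok(())
}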

Cargo.lock (generated): 614-line diff suppressed because it is too large.

Cargo.toml

@@ -4,22 +4,26 @@ version = "0.1.0"
 edition = "2021"
 
 [dependencies]
-axum = { version = "0.6.20", default-features = false, features = ["http1", "json", "macros", "matched-path", "tokio", "tower-log", "tracing"] }
-eyre = "0.6.8"
-figment = { version = "0.10.11", features = ["env", "toml"] }
+axum = { version = "0.7.5", default-features = false, features = ["http1", "json", "macros", "matched-path", "tokio", "tower-log", "tracing"] }
+axum-extra = { version = "0.9.3", features = ["async-read-body"] }
+bytes = "1.6.0"
+eyre = "0.6.12"
+figment = { version = "0.10.15", features = ["env", "toml"] }
 futures-util = { version = "0.3.30", default-features = false }
 hex = "0.4.3"
-http = "0.2.9"
-serde = { version = "1.0.189", features = ["derive"] }
+http = "1.1.0"
+http-body-util = "0.1.1"
+mime = "0.3.17"
+serde = { version = "1.0.197", features = ["derive"] }
 sha2 = "0.10.8"
-sqlx = { version = "0.7.3", features = ["runtime-tokio", "postgres", "uuid"] }
-thiserror = "1.0.51"
-tokio = { version = "1.33.0", features = ["rt-multi-thread", "macros", "fs", "io-std"] }
+sqlx = { version = "0.7.4", features = ["runtime-tokio", "postgres", "uuid"] }
+thiserror = "1.0.58"
+tokio = { version = "1.37.0", features = ["rt-multi-thread", "macros", "fs", "io-std"] }
 tokio-util = { version = "0.7.10", features = ["io"] }
-tower-http = { version = "0.4.4", features = ["trace"] }
-tracing = "0.1.37"
-tracing-subscriber = "0.3.17"
-ulid = { version = "1.1.0", features = ["uuid", "serde"] }
+tower-http = { version = "0.5.2", features = ["trace", "fs"] }
+tracing = "0.1.40"
+tracing-subscriber = "0.3.18"
+ulid = { version = "1.1.2", features = ["uuid", "serde"] }
 url = { version = "2.5.0", features = ["serde"] }
 uuid = "1.7.0"

flake.lock

@@ -8,11 +8,11 @@
       "rust-analyzer-src": "rust-analyzer-src"
     },
     "locked": {
-      "lastModified": 1706595721,
-      "narHash": "sha256-nf5/lPawM20WO1gHAlsUwDEJ4v+InC3BYYV1EBsBJZk=",
+      "lastModified": 1712384501,
+      "narHash": "sha256-AZmYmEnc1ZkSlxUJVUtGh9VFAqWPr+xtNIiBqD2eKfc=",
       "owner": "nix-community",
       "repo": "fenix",
-      "rev": "46a368edf5f1cc16573157797e5acead834d5b2c",
+      "rev": "99c6241db5ca5363c05c8f4acbdf3a4e8fc42844",
       "type": "github"
     },
     "original": {
@@ -26,11 +26,11 @@
       "systems": "systems"
     },
     "locked": {
-      "lastModified": 1705309234,
-      "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=",
+      "lastModified": 1710146030,
+      "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
       "owner": "numtide",
       "repo": "flake-utils",
-      "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26",
+      "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
       "type": "github"
     },
     "original": {
@@ -41,11 +41,11 @@
   },
   "nixpkgs": {
     "locked": {
-      "lastModified": 1706371002,
-      "narHash": "sha256-dwuorKimqSYgyu8Cw6ncKhyQjUDOyuXoxDTVmAXq88s=",
+      "lastModified": 1712439257,
+      "narHash": "sha256-aSpiNepFOMk9932HOax0XwNxbA38GOUVOiXfUVPOrck=",
       "owner": "NixOS",
       "repo": "nixpkgs",
-      "rev": "c002c6aa977ad22c60398daaa9be52f2203d0006",
+      "rev": "ff0dbd94265ac470dda06a657d5fe49de93b4599",
       "type": "github"
     },
     "original": {
@@ -65,11 +65,11 @@
   "rust-analyzer-src": {
     "flake": false,
     "locked": {
-      "lastModified": 1706540258,
-      "narHash": "sha256-6RTztJE21l0hfWHp0qMWBurWnoFmUxYEDCvaGTnQYcA=",
+      "lastModified": 1712156296,
+      "narHash": "sha256-St7ZQrkrr5lmQX9wC1ZJAFxL8W7alswnyZk9d1se3Us=",
       "owner": "rust-lang",
       "repo": "rust-analyzer",
-      "rev": "d13951f25c4cb880bff09a3c02a796ecc51f69ac",
+      "rev": "8e581ac348e223488622f4d3003cb2bd412bf27e",
       "type": "github"
     },
     "original": {

flake.nix

@@ -45,10 +45,6 @@
             sqlfluff
             sqlx-cli
           ];
-          # LD_LIBRARY_PATH = "${lib.makeLibraryPath buildInputs}";
-          # ssh -NL /home/honbra/.s.PGSQL.5432:/var/run/postgresql/.s.PGSQL.5432 <user>@<host>
-          # good luck setting up /home/honbra on your machine
-          DATABASE_URL = "postgresql:///ncpn?host=/home/honbra&user=honbra";
         };
       }
     );

migrations/….down.sql (new file)

@@ -0,0 +1,2 @@
+DROP TABLE IF EXISTS file_key;
+DROP TABLE IF EXISTS file;

migrations/….up.sql (new file)

@@ -0,0 +1,10 @@
+CREATE TABLE IF NOT EXISTS file (
+    hash BYTEA PRIMARY KEY,
+    mime TEXT
+);
+
+CREATE TABLE IF NOT EXISTS file_key (
+    id UUID PRIMARY KEY,
+    file_hash BYTEA REFERENCES file (hash) NOT NULL,
+    expires_at TIMESTAMP
+);
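
The pair above follows sqlx's reversible migration layout: the up file creates the tables, the down file drops them in reverse dependency order (file_key first, because it references file). One way to apply pending migrations at startup, a sketch only, since the project may instead run `sqlx migrate run` from the dev shell's sqlx-cli:

use sqlx::PgPool;

// Embeds ./migrations into the binary at compile time and applies any
// migrations that have not run yet.
async fn migrate(db: &PgPool) -> Result<(), sqlx::migrate::MigrateError> {
    sqlx::migrate!("./migrations").run(db).await
}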

src/app/api/files.rs

@@ -1,14 +1,10 @@
-use std::path::PathBuf;
+use std::{path::PathBuf, sync::Arc};
 
-use axum::{
-    extract::{BodyStream, State},
-    routing::post,
-    Json, Router,
-};
+use axum::{body::Body, extract::State, routing::post, Json, Router};
 use futures_util::TryStreamExt;
 use serde::Serialize;
 use sha2::{Digest, Sha256};
-use sqlx::PgPool;
+use sqlx::{query, PgPool};
 use tokio::{
     fs::{self, File},
     io,
@@ -16,23 +12,32 @@ use tokio::{
 use tokio_util::io::StreamReader;
 use tracing::{error, field, info, instrument};
 use ulid::Ulid;
+use uuid::Uuid;
 
-use crate::error::AppError;
+use crate::{config::Config, error::AppError};
 
-pub fn router(db: PgPool) -> Router {
-    Router::new().route("/", post(upload_file)).with_state(db)
+#[derive(Clone)]
+struct SharedState {
+    db: PgPool,
+    config: Arc<Config>,
+}
+
+pub fn router(db: PgPool, config: Arc<Config>) -> Router {
+    Router::new()
+        .route("/", post(upload_file))
+        .with_state(SharedState { db, config })
 }
 
 #[derive(Debug, Serialize)]
 struct UploadedFile {
-    id: Ulid,
+    key: Ulid,
     hash: String,
 }
 
-#[instrument(skip(_db, body))]
+#[instrument(skip(db, body))]
 async fn upload_file(
-    State(_db): State<PgPool>,
-    body: BodyStream,
+    State(SharedState { db, config }): State<SharedState>,
+    body: Body,
 ) -> Result<Json<UploadedFile>, AppError> {
     let id_temp = Ulid::new();
     let file_path_temp = PathBuf::from("temp").join(id_temp.to_string());
@@ -42,6 +47,7 @@ async fn upload_file(
     let mut file_temp = File::create(&file_path_temp).await?;
 
     let better_body = body
+        .into_data_stream()
        .inspect_ok(|b| hasher.update(b))
        .map_err(|err| io::Error::new(io::ErrorKind::Other, err));
     let mut reader = StreamReader::new(better_body);
@@ -85,8 +91,27 @@ async fn upload_file(
         return Err(err.into());
     }
 
-    Ok(Json(UploadedFile {
-        id: id_temp,
-        hash: hash_hex,
-    }))
+    let key = Ulid::new();
+    query!(
+        "INSERT INTO file (hash, mime) VALUES ($1, $2) ON CONFLICT DO NOTHING",
+        &hash[..],
+        "video/mp4", // I was testing with a video lol
+    )
+    .execute(&db)
+    .await?;
+
+    let result = query!(
+        "INSERT INTO file_key (id, file_hash) VALUES ($1, $2)",
+        Uuid::from(key),
+        &hash[..],
+    )
+    .execute(&db)
+    .await?;
+    match result.rows_affected() {
+        1 => Ok(Json(UploadedFile {
+            key,
+            hash: hash_hex,
+        })),
+        rows => Err(AppError::ImpossibleAffectedRows(rows)),
+    }
 }
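
The upload handler streams the request body into a temp file while hashing it, since the content-addressed name is only known once the copy finishes; the `inspect_ok` adapter feeds every chunk to the hasher as it passes through. A self-contained sketch of that hashing tee (names hypothetical):

use futures_util::TryStreamExt;
use sha2::{Digest, Sha256};
use tokio::{fs::File, io};
use tokio_util::io::StreamReader;

// Stream an axum body to `dest`, feeding every chunk through SHA-256 on the
// way, and return the finished 32-byte digest.
async fn hash_to_file(body: axum::body::Body, dest: &std::path::Path) -> io::Result<[u8; 32]> {
    let mut hasher = Sha256::new();
    let stream = body
        .into_data_stream()
        .inspect_ok(|chunk| hasher.update(chunk))
        .map_err(|err| io::Error::new(io::ErrorKind::Other, err));
    let mut reader = StreamReader::new(stream);
    let mut file = File::create(dest).await?;
    io::copy(&mut reader, &mut file).await?;
    drop(reader); // release the stream's borrow of `hasher` before finalizing
    Ok(hasher.finalize().into())
}

The caller then renames the temp file to the hex-encoded digest and records it in the database, as the handler above does.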

src/app/api/links.rs

@@ -1,8 +1,8 @@
 use axum::{
     extract::{Path, State},
-    routing::{get, post},
     Json, Router,
 };
+use axum_extra::routing::Resource;
 use http::StatusCode;
 use serde::{Deserialize, Serialize};
 use sqlx::{query, PgPool};
@@ -13,13 +13,13 @@ use uuid::Uuid;
 use crate::error::AppError;
 
 pub fn router(db: PgPool) -> Router {
-    Router::new()
-        .route("/", post(create_link))
-        .route(
-            "/:id",
-            get(get_link_info).put(update_link).delete(delete_link),
-        )
-        .with_state(db)
+    let links = Resource::named("links")
+        .create(create_link)
+        .show(get_link_info)
+        .update(update_link)
+        .destroy(delete_link);
+
+    Router::new().merge(links).with_state(db)
 }
 
 #[derive(Serialize)]
@@ -74,7 +74,7 @@ async fn create_link(
             slug,
             destination: destination.to_string(),
         })),
-        0 => Err(AppError::LinkExists(id)),
+        0 => Err(AppError::ApiLinkExists(id)),
         rows => Err(AppError::ImpossibleAffectedRows(rows)),
     }
 }
@@ -99,7 +99,7 @@ async fn update_link(
 
     match result.rows_affected() {
         1 => Ok(StatusCode::NO_CONTENT),
-        0 => Err(AppError::LinkNotFound(id)),
+        0 => Err(AppError::ApiLinkNotFound(id)),
         rows => Err(AppError::ImpossibleAffectedRows(rows)),
     }
 }
@@ -114,7 +114,7 @@ async fn delete_link(
 
     match result.rows_affected() {
         1 => Ok(StatusCode::NO_CONTENT),
-        0 => Err(AppError::LinkNotFound(id)),
+        0 => Err(AppError::ApiLinkNotFound(id)),
         rows => Err(AppError::ImpossibleAffectedRows(rows)),
     }
 }
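
`Resource::named` from axum-extra swaps the hand-written routes for Rails-style resource conventions. Roughly, the builder above expands to the router below; the parameter name and the exact verb set for `update` come from axum-extra 0.9's conventions, so treat both as assumptions:

use axum::{
    routing::{get, post},
    Router,
};

// Hypothetical stubs standing in for the real handlers in this file.
async fn create_link() {}
async fn get_link_info() {}
async fn update_link() {}
async fn delete_link() {}

// Approximate expansion of Resource::named("links").create(..).show(..).update(..).destroy(..):
fn links_router() -> Router {
    Router::new()
        .route("/links", post(create_link))
        .route(
            "/links/:links_id", // assumed param name; the handlers extract it by position
            get(get_link_info).put(update_link).delete(delete_link),
        )
}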

src/app/api.rs

@@ -1,11 +1,15 @@
 mod files;
 mod links;
 
+use std::sync::Arc;
+
 use axum::Router;
 use sqlx::PgPool;
 
-pub fn router(db: PgPool) -> Router {
+use crate::config::Config;
+
+pub fn router(db: PgPool, config: Arc<Config>) -> Router {
     Router::new()
-        .nest("/files", files::router(db.clone()))
+        .nest("/files", files::router(db.clone(), config))
         .nest("/links", links::router(db))
 }

src/app.rs

@@ -1,6 +1,8 @@
 mod api;
 mod root;
 
+use std::sync::Arc;
+
 use axum::{body::Body, Router};
 use http::Request;
 use sqlx::{postgres::PgConnectOptions, PgPool};
@@ -19,8 +21,10 @@ pub async fn build_app(config: Config) -> eyre::Result<Router> {
     )
     .await?;
 
-    Ok(root::router(db.clone(), config.default_destination)
-        .nest("/api", api::router(db))
+    let config = Arc::new(config);
+
+    Ok(root::router(db.clone(), config.clone())
+        .nest("/api", api::router(db, config))
         .layer(
             TraceLayer::new_for_http()
                 .make_span_with(|request: &Request<Body>| {

src/app/root.rs

@@ -1,39 +1,39 @@
 use std::sync::Arc;
 
 use axum::{
+    body::Body,
     extract::{Path, State},
     response::Redirect,
     routing::get,
-    Router,
+    BoxError, Router,
 };
+use bytes::Bytes;
+use http::{Request, Response};
+use http_body_util::{combinators::UnsyncBoxBody, BodyExt};
+use mime::Mime;
 use sqlx::{query, PgPool};
+use tower_http::services::ServeFile;
 use tracing::{error, field, instrument};
 use ulid::Ulid;
-use url::Url;
 use uuid::Uuid;
 
-use crate::error::AppError;
+use crate::{config::Config, error::AppError};
 
 #[derive(Clone)]
 struct SharedState {
     db: PgPool,
-    default_destination: Arc<Url>,
+    config: Arc<Config>,
 }
 
-pub fn router(db: PgPool, default_destination: Url) -> Router {
+pub fn router(db: PgPool, config: Arc<Config>) -> Router {
     Router::new()
-        .route("/:slug", get(redirect))
-        .with_state(SharedState {
-            db,
-            default_destination: Arc::new(default_destination),
-        })
+        .route("/:slug", get(redirect_link))
+        .route("/f/:key", get(redirect_file))
+        .with_state(SharedState { db, config })
 }
 
-async fn redirect(
-    State(SharedState {
-        db,
-        default_destination,
-    }): State<SharedState>,
+async fn redirect_link(
+    State(SharedState { db, .. }): State<SharedState>,
     Path(slug): Path<String>,
 ) -> Result<Redirect, AppError> {
     let result = query!("SELECT id, destination FROM link WHERE slug = $1", slug)
@@ -41,13 +41,13 @@ async fn redirect(
     .await?
     .map(|r| (Ulid::from(r.id), r.destination));
 
-    Ok(match result {
+    match result {
         Some((id, destination)) => {
             tokio::spawn(increase_visit_count(id, db));
-            Redirect::temporary(&destination)
+            Ok(Redirect::temporary(&destination))
         }
-        None => Redirect::temporary(default_destination.as_str()),
-    })
+        None => Err(AppError::LinkNotFound(slug)),
+    }
 }
 
 #[instrument(skip(db))]
@@ -67,3 +67,33 @@ async fn increase_visit_count(id: Ulid, db: PgPool) {
         _ => {}
     }
 }
+
+async fn redirect_file(
+    State(SharedState { db, config }): State<SharedState>,
+    Path(key): Path<Ulid>,
+    request: Request<Body>,
+) -> Result<Response<UnsyncBoxBody<Bytes, BoxError>>, AppError> {
+    let result = query!(
+        "SELECT file_hash, mime FROM file_key JOIN file ON file_hash = hash WHERE id = $1",
+        Uuid::from(key)
+    )
+    .fetch_optional(&db)
+    .await?
+    .map(|r| (r.file_hash, r.mime));
+
+    match result {
+        Some((file_hash, mime)) => {
+            let mime: Option<Mime> = mime.map_or(None, |m| m.parse().ok());
+            let file_path = config.file_store_dir.join(hex::encode(file_hash));
+            let mut sf = match mime {
+                Some(mime) => ServeFile::new_with_mime(file_path, &mime),
+                None => ServeFile::new(file_path),
+            };
+            match sf.try_call(request).await {
+                Ok(response) => Ok(response.map(|body| body.map_err(Into::into).boxed_unsync())),
+                Err(err) => Err(AppError::Io(err)),
+            }
+        }
+        None => Err(AppError::FileKeyNotFound(key)),
+    }
+}
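
`redirect_file` resolves a key to a content hash, then delegates to tower-http's `ServeFile`, using `try_call` so I/O failures come back as plain `io::Error` for `AppError::Io` instead of disappearing into the service's infallible error type, and boxing the response body into one uniform type. A standalone sketch of that pattern under tower-http 0.5 (path and MIME are placeholders):

use axum::body::Body;
use http::Request;
use tower_http::services::ServeFile;

// Serve a single file with an explicit Content-Type, surfacing I/O errors.
async fn serve_once() -> std::io::Result<()> {
    let mut svc = ServeFile::new_with_mime("files/deadbeef", &mime::APPLICATION_OCTET_STREAM);
    let request = Request::builder().body(Body::empty()).unwrap();
    let response = svc.try_call(request).await?;
    println!("status: {}", response.status()); // 200 if the file exists, 404 otherwise
    Ok(())
}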

src/config.rs

@@ -17,6 +17,11 @@ pub struct Config {
     #[serde(default = "default2_destination")]
     pub default_destination: Url,
+
+    #[serde(default = "default_file_store_dir")]
+    pub file_store_dir: PathBuf,
+    #[serde(default = "default_file_temp_dir")]
+    pub file_temp_dir: PathBuf,
 }
 
 fn default_listen_addr() -> SocketAddr {
@@ -38,3 +43,11 @@ fn default_db_database() -> String {
 fn default2_destination() -> Url {
     "https://goob.cc/r".parse().expect("hardcoded URL is valid")
 }
+
+fn default_file_store_dir() -> PathBuf {
+    PathBuf::from("files")
+}
+
+fn default_file_temp_dir() -> PathBuf {
+    PathBuf::from("temp")
+}
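
The new fields follow the file's existing pattern: optional in the TOML and environment providers, with serde falling back to a hardcoded default. A self-contained sketch of how such a default behaves under figment (struct trimmed to one field):

use std::path::PathBuf;

use figment::{
    providers::{Format, Toml},
    Figment,
};
use serde::Deserialize;

fn default_file_store_dir() -> PathBuf {
    PathBuf::from("files")
}

#[derive(Deserialize)]
struct Config {
    #[serde(default = "default_file_store_dir")]
    file_store_dir: PathBuf,
}

fn main() -> Result<(), figment::Error> {
    // With no config.toml present (or the key unset), serde uses the default fn.
    let config: Config = Figment::new().merge(Toml::file("config.toml")).extract()?;
    assert_eq!(config.file_store_dir, PathBuf::from("files"));
    Ok(())
}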

src/error.rs

@@ -1,4 +1,4 @@
-use axum::response::{IntoResponse, Response};
+use axum::{body::Body, response::IntoResponse};
 use http::StatusCode;
 use tracing::{error, field};
 use ulid::Ulid;
@@ -6,9 +6,13 @@ use ulid::Ulid;
 #[derive(Debug, thiserror::Error)]
 pub enum AppError {
     #[error("link already exists ({0})")]
-    LinkExists(Ulid),
+    ApiLinkExists(Ulid),
     #[error("link not found ({0})")]
-    LinkNotFound(Ulid),
+    ApiLinkNotFound(Ulid),
+    #[error("link not found ({0})")]
+    LinkNotFound(String),
+    #[error("file key not found ({0})")]
+    FileKeyNotFound(Ulid),
     #[error("database returned an impossible number of affected rows ({0})")]
     ImpossibleAffectedRows(u64),
     #[error("database error")]
@@ -20,31 +24,31 @@ pub enum AppError {
 }
 
 impl IntoResponse for AppError {
-    fn into_response(self) -> Response {
+    fn into_response(self) -> axum::http::Response<Body> {
         error!(err = field::display(&self));
         match self {
-            Self::LinkExists(_) => (StatusCode::BAD_REQUEST, "Link already exists").into_response(),
-            Self::LinkNotFound(_) => (StatusCode::NOT_FOUND, "Link not found").into_response(),
+            Self::ApiLinkExists(_) => (StatusCode::BAD_REQUEST, "Link already exists"),
+            Self::ApiLinkNotFound(_) | Self::LinkNotFound(_) => {
+                (StatusCode::NOT_FOUND, "Link not found")
+            }
+            Self::FileKeyNotFound(_) => (StatusCode::NOT_FOUND, "File key not found"),
             Self::ImpossibleAffectedRows(_) => (
                 StatusCode::INTERNAL_SERVER_ERROR,
                 "Database returned an impossible number of affected rows",
-            )
-            .into_response(),
+            ),
             Self::Database(_) => (
                 StatusCode::INTERNAL_SERVER_ERROR,
                 "A database error has occured",
-            )
-            .into_response(),
+            ),
             Self::Io(_) => (
                 StatusCode::INTERNAL_SERVER_ERROR,
                 "An I/O error has occured",
-            )
-            .into_response(),
-            Self::Other(err) => (
+            ),
+            Self::Other(_) => (
                 StatusCode::INTERNAL_SERVER_ERROR,
-                format!("An error has occured:\n{err:?}"),
-            )
-            .into_response(),
-        }
+                "An unknown error has occured",
+            ),
+        }
+        .into_response()
     }
 }

src/main.rs

@@ -7,8 +7,8 @@ use figment::{
     providers::{Env, Format, Toml},
     Figment,
 };
-use tokio::runtime::Runtime;
-use tracing::{debug, field, Level};
+use tokio::{net::TcpListener, runtime::Runtime};
+use tracing::Level;
 use tracing_subscriber::{filter, layer::SubscriberExt, util::SubscriberInitExt};
 
 use self::app::build_app;
@@ -41,16 +41,12 @@ fn main() -> eyre::Result<()> {
     rt.block_on(async move {
         let listen_addr = config.listen_addr;
 
-        let router = build_app(config)
-            .await
-            .context("failed to build app")?
-            .into_make_service();
-
-        debug!(addr = field::display(&listen_addr), "binding");
-
-        axum::Server::try_bind(&listen_addr)
-            .context("unable to bind to server address")?
-            .serve(router)
+        let app = build_app(config).await.context("failed to build app")?;
+
+        let listener = TcpListener::bind(&listen_addr)
+            .await
+            .context("failed to bind listener")?;
+
+        axum::serve(listener, app)
             .await
             .context("server encountered a runtime error")?;