diff --git a/trifid-api/build.rs b/trifid-api/build.rs index 3c284bb..b8a52dd 100644 --- a/trifid-api/build.rs +++ b/trifid-api/build.rs @@ -1,3 +1,3 @@ fn main() { println!("cargo:rerun-if-changed=migrations/"); -} \ No newline at end of file +} diff --git a/trifid-api/src/auth_tokens.rs b/trifid-api/src/auth_tokens.rs index f548044..abfe8cb 100644 --- a/trifid-api/src/auth_tokens.rs +++ b/trifid-api/src/auth_tokens.rs @@ -14,93 +14,125 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::error::Error; use actix_web::HttpRequest; +use std::error::Error; -use sea_orm::{ColumnTrait, Condition, DatabaseConnection, EntityTrait, QueryFilter}; +use crate::timers::expired; use crate::tokens::get_token_type; -use trifid_api_entities::entity::{auth_token, session_token}; +use sea_orm::{ColumnTrait, Condition, DatabaseConnection, EntityTrait, QueryFilter}; use trifid_api_entities::entity::api_key; use trifid_api_entities::entity::api_key_scope; use trifid_api_entities::entity::user; -use crate::timers::expired; +use trifid_api_entities::entity::{auth_token, session_token}; pub enum TokenInfo { SessionToken(SessionTokenInfo), AuthToken(AuthTokenInfo), ApiToken(ApiTokenInfo), - NotPresent + NotPresent, } pub struct SessionTokenInfo { pub token: String, pub user: SessionTokenUser, - pub expires_at: i64 + pub expires_at: i64, } pub struct SessionTokenUser { pub id: String, - pub email: String + pub email: String, } pub struct ApiTokenInfo { pub scopes: Vec, - pub organization: String + pub organization: String, } pub struct AuthTokenInfo { pub token: String, - pub session_info: SessionTokenInfo + pub session_info: SessionTokenInfo, } -pub async fn enforce_session(req: &HttpRequest, db: &DatabaseConnection) -> Result> { - let header = req.headers().get("Authorization").ok_or("Missing authorization header")?; +pub async fn enforce_session( + req: &HttpRequest, + db: &DatabaseConnection, +) -> Result> { + let header = req + .headers() + .get("Authorization") + .ok_or("Missing authorization header")?; let authorization = header.to_str()?; let authorization_split: Vec<&str> = authorization.split(' ').collect(); if authorization_split[0] != "Bearer" { - return Err("Not a bearer token".into()) + return Err("Not a bearer token".into()); } let tokens = &authorization_split[1..]; - let sess_token = tokens.iter().find(|i| get_token_type(i).unwrap_or("n-sess") == "sess").copied().ok_or("Missing session token")?; + let sess_token = tokens + .iter() + .find(|i| get_token_type(i).unwrap_or("n-sess") == "sess") + .copied() + .ok_or("Missing session token")?; - let token: session_token::Model = session_token::Entity::find().filter(session_token::Column::Id.eq(sess_token)).one(db).await?.ok_or("Invalid session token")?; + let token: session_token::Model = session_token::Entity::find() + .filter(session_token::Column::Id.eq(sess_token)) + .one(db) + .await? + .ok_or("Invalid session token")?; if expired(token.expires_on as u64) { return Err("Token expired".into()); } - let user: user::Model = user::Entity::find().filter(user::Column::Id.eq(token.user)).one(db).await?.ok_or("Session token has a nonexistent user")?; + let user: user::Model = user::Entity::find() + .filter(user::Column::Id.eq(token.user)) + .one(db) + .await? 
+ .ok_or("Session token has a nonexistent user")?; Ok(TokenInfo::SessionToken(SessionTokenInfo { token: token.id, user: SessionTokenUser { id: user.id, - email: user.email + email: user.email, }, expires_at: token.expires_on, })) } -pub async fn enforce_2fa(req: &HttpRequest, db: &DatabaseConnection) -> Result> { +pub async fn enforce_2fa( + req: &HttpRequest, + db: &DatabaseConnection, +) -> Result> { let session_data = match enforce_session(req, db).await? { TokenInfo::SessionToken(i) => i, - _ => unreachable!() + _ => unreachable!(), }; - let header = req.headers().get("Authorization").ok_or("Missing authorization header")?; + let header = req + .headers() + .get("Authorization") + .ok_or("Missing authorization header")?; let authorization = header.to_str()?; let authorization_split: Vec<&str> = authorization.split(' ').collect(); if authorization_split[0] != "Bearer" { - return Err("Not a bearer token".into()) + return Err("Not a bearer token".into()); } let tokens = &authorization_split[1..]; - let auth_token = tokens.iter().find(|i| get_token_type(**i).unwrap_or("n-auth") == "auth").copied().ok_or("Missing auth token")?; + let auth_token = tokens + .iter() + .find(|i| get_token_type(i).unwrap_or("n-auth") == "auth") + .copied() + .ok_or("Missing auth token")?; - let token: auth_token::Model = auth_token::Entity::find().filter(auth_token::Column::Id.eq(auth_token)).one(db).await?.ok_or("Invalid session token")?; + let token: auth_token::Model = auth_token::Entity::find() + .filter(auth_token::Column::Id.eq(auth_token)) + .one(db) + .await? + .ok_or("Invalid session token")?; if expired(token.expires_on as u64) { return Err("Token expired".into()); @@ -112,17 +144,28 @@ pub async fn enforce_2fa(req: &HttpRequest, db: &DatabaseConnection) -> Result Result> { - let header = req.headers().get("Authorization").ok_or("Missing authorization header")?; +pub async fn enforce_api_token( + req: &HttpRequest, + scopes: &[&str], + db: &DatabaseConnection, +) -> Result> { + let header = req + .headers() + .get("Authorization") + .ok_or("Missing authorization header")?; let authorization = header.to_str()?; let authorization_split: Vec<&str> = authorization.split(' ').collect(); if authorization_split[0] != "Bearer" { - return Err("Not a bearer token".into()) + return Err("Not a bearer token".into()); } let tokens = &authorization_split[1..]; - let api_token = tokens.iter().find(|i| get_token_type(**i).unwrap_or("n-tfkey") == "tfkey").copied().ok_or("Missing api token")?; + let api_token = tokens + .iter() + .find(|i| get_token_type(i).unwrap_or("n-tfkey") == "tfkey") + .copied() + .ok_or("Missing api token")?; // API tokens are special and have a different form than other keys. // They follow the form: @@ -135,10 +178,19 @@ pub async fn enforce_api_token(req: &HttpRequest, scopes: &[&str], db: &Database let token_id = format!("{}-{}", api_token_split[0], api_token_split[1]); let token_key = api_token_split[2].to_string(); - let token: api_key::Model = api_key::Entity::find().filter( - Condition::all().add(api_key::Column::Id.eq(token_id)).add(api_key::Column::Key.eq(token_key)) - ).one(db).await?.ok_or("Invalid api token")?; - let token_scopes: Vec = api_key_scope::Entity::find().filter(api_key_scope::Column::ApiKey.eq(api_token)).all(db).await?; + let token: api_key::Model = api_key::Entity::find() + .filter( + Condition::all() + .add(api_key::Column::Id.eq(token_id)) + .add(api_key::Column::Key.eq(token_key)), + ) + .one(db) + .await? 
+ .ok_or("Invalid api token")?; + let token_scopes: Vec = api_key_scope::Entity::find() + .filter(api_key_scope::Column::ApiKey.eq(api_token)) + .all(db) + .await?; let token_scopes: Vec<&str> = token_scopes.iter().map(|i| i.scope.as_str()).collect(); for scope in scopes { @@ -151,4 +203,4 @@ pub async fn enforce_api_token(req: &HttpRequest, scopes: &[&str], db: &Database scopes: token_scopes.iter().map(|i| i.to_string()).collect(), organization: token.organization, })) -} \ No newline at end of file +} diff --git a/trifid-api/src/config.rs b/trifid-api/src/config.rs index e3560fb..e1d3f95 100644 --- a/trifid-api/src/config.rs +++ b/trifid-api/src/config.rs @@ -14,11 +14,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::fs; -use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use log::error; use once_cell::sync::Lazy; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; +use std::fs; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; pub static CONFIG: Lazy = Lazy::new(|| { let config_str = match fs::read_to_string("/etc/trifid/config.toml") { @@ -43,7 +43,7 @@ pub struct TrifidConfig { pub database: TrifidConfigDatabase, pub server: TrifidConfigServer, pub tokens: TrifidConfigTokens, - pub crypto: TrifidConfigCryptography + pub crypto: TrifidConfigCryptography, } #[derive(Serialize, Deserialize, Debug)] @@ -62,13 +62,13 @@ pub struct TrifidConfigDatabase { #[serde(default = "time_defaults")] pub max_lifetime: u64, #[serde(default = "sqlx_logging_default")] - pub sqlx_logging: bool + pub sqlx_logging: bool, } #[derive(Serialize, Deserialize, Debug)] pub struct TrifidConfigServer { #[serde(default = "socketaddr_8080")] - pub bind: SocketAddr + pub bind: SocketAddr, } #[derive(Serialize, Deserialize, Debug)] @@ -80,20 +80,38 @@ pub struct TrifidConfigTokens { #[serde(default = "totp_setup_timeout_time")] pub totp_setup_timeout_time_seconds: u64, #[serde(default = "mfa_tokens_expiry_time")] - pub mfa_tokens_expiry_time_seconds: u64 + pub mfa_tokens_expiry_time_seconds: u64, } #[derive(Serialize, Deserialize, Debug)] pub struct TrifidConfigCryptography { - pub data_encryption_key: String + pub data_encryption_key: String, } -fn max_connections_default() -> u32 { 100 } -fn min_connections_default() -> u32 { 5 } -fn time_defaults() -> u64 { 8 } -fn sqlx_logging_default() -> bool { true } -fn socketaddr_8080() -> SocketAddr { SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::from([0, 0, 0, 0]), 8080)) } -fn magic_link_expiry_time() -> u64 { 3600 } // 1 hour -fn session_token_expiry_time() -> u64 { 15780000 } // 6 months -fn totp_setup_timeout_time() -> u64 { 600 } // 10 minutes -fn mfa_tokens_expiry_time() -> u64 { 600 } // 10 minutes \ No newline at end of file +fn max_connections_default() -> u32 { + 100 +} +fn min_connections_default() -> u32 { + 5 +} +fn time_defaults() -> u64 { + 8 +} +fn sqlx_logging_default() -> bool { + true +} +fn socketaddr_8080() -> SocketAddr { + SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::from([0, 0, 0, 0]), 8080)) +} +fn magic_link_expiry_time() -> u64 { + 3600 +} // 1 hour +fn session_token_expiry_time() -> u64 { + 15780000 +} // 6 months +fn totp_setup_timeout_time() -> u64 { + 600 +} // 10 minutes +fn mfa_tokens_expiry_time() -> u64 { + 600 +} // 10 minutes diff --git a/trifid-api/src/crypto.rs b/trifid-api/src/crypto.rs index 9d8dc63..4add179 100644 --- a/trifid-api/src/crypto.rs +++ b/trifid-api/src/crypto.rs @@ -14,25 +14,33 @@ // You should have received a copy of 
the GNU General Public License // along with this program. If not, see . -use std::error::Error; -use aes_gcm::{Aes256Gcm, KeyInit, Nonce}; -use aes_gcm::aead::{Aead, Payload}; -use rand::Rng; -use trifid_pki::rand_core::OsRng; use crate::config::TrifidConfig; +use aes_gcm::aead::{Aead, Payload}; +use aes_gcm::{Aes256Gcm, KeyInit, Nonce}; +use rand::Rng; +use std::error::Error; +use trifid_pki::rand_core::OsRng; pub fn get_cipher_from_config(config: &TrifidConfig) -> Result> { let key_slice = hex::decode(&config.crypto.data_encryption_key)?; Ok(Aes256Gcm::new_from_slice(&key_slice)?) } -pub fn encrypt_with_nonce(plaintext: &[u8], nonce: [u8; 12], cipher: &Aes256Gcm) -> Result, aes_gcm::Error> { +pub fn encrypt_with_nonce( + plaintext: &[u8], + nonce: [u8; 12], + cipher: &Aes256Gcm, +) -> Result, aes_gcm::Error> { let nonce = Nonce::from_slice(&nonce); let ciphertext = cipher.encrypt(nonce, plaintext)?; Ok(ciphertext) } -pub fn decrypt_with_nonce(ciphertext: &[u8], nonce: [u8; 12], cipher: &Aes256Gcm) -> Result, aes_gcm::Error> { +pub fn decrypt_with_nonce( + ciphertext: &[u8], + nonce: [u8; 12], + cipher: &Aes256Gcm, +) -> Result, aes_gcm::Error> { let nonce = Nonce::from_slice(&nonce); let plaintext = cipher.decrypt(nonce, Payload::from(ciphertext))?; Ok(plaintext) @@ -40,4 +48,4 @@ pub fn decrypt_with_nonce(ciphertext: &[u8], nonce: [u8; 12], cipher: &Aes256Gcm pub fn generate_random_iv() -> [u8; 12] { OsRng.gen() -} \ No newline at end of file +} diff --git a/trifid-api/src/cursor.rs b/trifid-api/src/cursor.rs index 91f0a07..14a486f 100644 --- a/trifid-api/src/cursor.rs +++ b/trifid-api/src/cursor.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::error::Error; use base64::Engine; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; +use std::error::Error; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Cursor { - pub page: u64 + pub page: u64, } impl TryFrom for String { @@ -41,9 +41,7 @@ impl TryFrom for Cursor { fn try_from(value: String) -> Result { if value.is_empty() { // If empty, it's page 0 - return Ok(Cursor { - page: 0 - }) + return Ok(Cursor { page: 0 }); } // Base64-decode the value let json_bytes = base64::engine::general_purpose::STANDARD.decode(value)?; @@ -53,4 +51,4 @@ impl TryFrom for Cursor { let cursor = serde_json::from_str(&json_str)?; Ok(cursor) } -} \ No newline at end of file +} diff --git a/trifid-api/src/error.rs b/trifid-api/src/error.rs index 4efa931..73017f9 100644 --- a/trifid-api/src/error.rs +++ b/trifid-api/src/error.rs @@ -15,11 +15,11 @@ // along with this program. If not, see . 
use actix_web::error::{JsonPayloadError, PayloadError}; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct APIErrorsResponse { - pub errors: Vec + pub errors: Vec, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct APIError { @@ -27,10 +27,12 @@ pub struct APIError { pub message: String, #[serde(skip_serializing_if = "is_none")] #[serde(default)] - pub path: Option + pub path: Option, } -fn is_none(o: &Option) -> bool { o.is_none() } +fn is_none(o: &Option) -> bool { + o.is_none() +} impl From<&JsonPayloadError> for APIError { fn from(value: &JsonPayloadError) -> Self { @@ -87,58 +89,44 @@ impl From<&JsonPayloadError> for APIError { impl From<&PayloadError> for APIError { fn from(value: &PayloadError) -> Self { match value { - PayloadError::Incomplete(e) => { - APIError { - code: "ERR_UNEXPECTED_EOF".to_string(), - message: match e { - None => "Payload reached EOF but was incomplete".to_string(), - Some(e) => format!("Payload reached EOF but was incomplete: {}", e) - }, - path: None, - } - } - PayloadError::EncodingCorrupted => { - APIError { - code: "ERR_CORRUPTED_PAYLOAD".to_string(), - message: "Payload content encoding corrupted".to_string(), - path: None, - } - } - PayloadError::Overflow => { - APIError { - code: "ERR_PAYLOAD_OVERFLOW".to_string(), - message: "Payload reached size limit".to_string(), - path: None, - } - } - PayloadError::UnknownLength => { - APIError { - code: "ERR_PAYLOAD_UNKNOWN_LENGTH".to_string(), - message: "Unable to determine payload length".to_string(), - path: None, - } - } - PayloadError::Http2Payload(e) => { - APIError { - code: "ERR_HTTP2_ERROR".to_string(), - message: format!("HTTP/2 error: {}", e), - path: None, - } - } - PayloadError::Io(e) => { - APIError { - code: "ERR_IO_ERROR".to_string(), - message: format!("I/O error: {}", e), - path: None, - } - } - _ => { - APIError { - code: "ERR_UNKNOWN_ERROR".to_string(), - message: "An unknown error has occured".to_string(), - path: None, - } - } + PayloadError::Incomplete(e) => APIError { + code: "ERR_UNEXPECTED_EOF".to_string(), + message: match e { + None => "Payload reached EOF but was incomplete".to_string(), + Some(e) => format!("Payload reached EOF but was incomplete: {}", e), + }, + path: None, + }, + PayloadError::EncodingCorrupted => APIError { + code: "ERR_CORRUPTED_PAYLOAD".to_string(), + message: "Payload content encoding corrupted".to_string(), + path: None, + }, + PayloadError::Overflow => APIError { + code: "ERR_PAYLOAD_OVERFLOW".to_string(), + message: "Payload reached size limit".to_string(), + path: None, + }, + PayloadError::UnknownLength => APIError { + code: "ERR_PAYLOAD_UNKNOWN_LENGTH".to_string(), + message: "Unable to determine payload length".to_string(), + path: None, + }, + PayloadError::Http2Payload(e) => APIError { + code: "ERR_HTTP2_ERROR".to_string(), + message: format!("HTTP/2 error: {}", e), + path: None, + }, + PayloadError::Io(e) => APIError { + code: "ERR_IO_ERROR".to_string(), + message: format!("I/O error: {}", e), + path: None, + }, + _ => APIError { + code: "ERR_UNKNOWN_ERROR".to_string(), + message: "An unknown error has occurred".to_string(), + path: None, + }, } } -} \ No newline at end of file +} diff --git a/trifid-api/src/magic_link.rs b/trifid-api/src/magic_link.rs index 19b6ff8..e09261a 100644 --- a/trifid-api/src/magic_link.rs +++ b/trifid-api/src/magic_link.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with this
program. If not, see . -use std::error::Error; use log::info; +use std::error::Error; pub fn send_magic_link(token: &str) -> Result<(), Box> { // TODO: actually do this info!("sent magic link {}", token); Ok(()) -} \ No newline at end of file +} diff --git a/trifid-api/src/main.rs b/trifid-api/src/main.rs index 1679f4d..a77ee9d 100644 --- a/trifid-api/src/main.rs +++ b/trifid-api/src/main.rs @@ -14,30 +14,33 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::error::Error; -use std::time::Duration; use actix_request_identifier::RequestIdentifier; -use actix_web::{App, HttpResponse, HttpServer, web::{Data, JsonConfig}}; +use actix_web::{ + web::{Data, JsonConfig}, + App, HttpResponse, HttpServer, +}; use log::{info, Level}; use sea_orm::{ConnectOptions, Database, DatabaseConnection}; +use std::error::Error; +use std::time::Duration; -use trifid_api_migration::{Migrator, MigratorTrait}; use crate::config::CONFIG; use crate::error::{APIError, APIErrorsResponse}; use crate::tokens::random_id_no_id; +use trifid_api_migration::{Migrator, MigratorTrait}; -pub mod config; -pub mod routes; -pub mod error; -pub mod tokens; -pub mod timers; -pub mod magic_link; pub mod auth_tokens; -pub mod cursor; +pub mod config; pub mod crypto; +pub mod cursor; +pub mod error; +pub mod magic_link; +pub mod routes; +pub mod timers; +pub mod tokens; pub struct AppState { - pub conn: DatabaseConnection + pub conn: DatabaseConnection, } #[actix_web::main] @@ -61,9 +64,7 @@ async fn main() -> Result<(), Box> { info!("Performing database migration..."); Migrator::up(&db, None).await?; - let data = Data::new(AppState { - conn: db - }); + let data = Data::new(AppState { conn: db }); HttpServer::new(move || { App::new() @@ -73,11 +74,10 @@ async fn main() -> Result<(), Box> { actix_web::error::InternalError::from_response( err, HttpResponse::BadRequest().json(APIErrorsResponse { - errors: vec![ - api_error - ], - }) - ).into() + errors: vec![api_error], + }), + ) + .into() })) .wrap(RequestIdentifier::with_generator(random_id_no_id)) .service(routes::v1::auth::magic_link::magic_link_request) @@ -100,7 +100,11 @@ async fn main() -> Result<(), Box> { .service(routes::v1::hosts::get_host) .service(routes::v1::hosts::delete_host) .service(routes::v1::hosts::edit_host) - }).bind(CONFIG.server.bind)?.run().await?; + .service(routes::v1::hosts::block_host) + }) + .bind(CONFIG.server.bind)? + .run() + .await?; Ok(()) } diff --git a/trifid-api/src/routes/mod.rs b/trifid-api/src/routes/mod.rs index 5dd9fd0..a3a6d96 100644 --- a/trifid-api/src/routes/mod.rs +++ b/trifid-api/src/routes/mod.rs @@ -1 +1 @@ -pub mod v1; \ No newline at end of file +pub mod v1; diff --git a/trifid-api/src/routes/v1/auth/magic_link.rs b/trifid-api/src/routes/v1/auth/magic_link.rs index a363600..0c29e5f 100644 --- a/trifid-api/src/routes/v1/auth/magic_link.rs +++ b/trifid-api/src/routes/v1/auth/magic_link.rs @@ -19,29 +19,29 @@ // This endpoint is considered done. No major features should be added or removed, unless it fixes bugs. // This endpoint requires the `definednetworking` extension to be enabled to be used. 
-use actix_web::{HttpResponse, post}; -use actix_web::web::{Data, Json}; -use log::error; -use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter}; -use serde::{Serialize, Deserialize}; -use trifid_api_entities::entity::user::Entity as UserEntity; -use trifid_api_entities::entity::user; -use crate::AppState; use crate::config::CONFIG; use crate::error::{APIError, APIErrorsResponse}; use crate::magic_link::send_magic_link; use crate::timers::expires_in_seconds; use crate::tokens::random_token; +use crate::AppState; +use actix_web::web::{Data, Json}; +use actix_web::{post, HttpResponse}; +use log::error; +use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter}; +use serde::{Deserialize, Serialize}; +use trifid_api_entities::entity::user; +use trifid_api_entities::entity::user::Entity as UserEntity; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct MagicLinkRequest { - pub email: String + pub email: String, } #[derive(Serialize, Deserialize, Clone, Debug)] pub struct MagicLinkResponse { pub data: MagicLinkResponseData, - pub metadata: MagicLinkResponseMetadata + pub metadata: MagicLinkResponseMetadata, } #[derive(Serialize, Deserialize, Clone, Debug)] pub struct MagicLinkResponseData {} @@ -50,19 +50,23 @@ pub struct MagicLinkResponseMetadata {} #[post("/v1/auth/magic-link")] pub async fn magic_link_request(data: Data, req: Json) -> HttpResponse { - let user: Option = match UserEntity::find().filter(user::Column::Email.eq(&req.email)).one(&data.conn).await { + let user: Option = match UserEntity::find() + .filter(user::Column::Email.eq(&req.email)) + .one(&data.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database request, please try again later.".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: + "There was an error with the database request, please try again later." + .to_string(), + path: None, + }], + }); } }; @@ -70,13 +74,11 @@ pub async fn magic_link_request(data: Data, req: Json u, None => { return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_USER_DOES_NOT_EXIST".to_string(), - message: "That user does not exist.".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_USER_DOES_NOT_EXIST".to_string(), + message: "That user does not exist.".to_string(), + path: None, + }], }) } }; @@ -92,14 +94,14 @@ pub async fn magic_link_request(data: Data, req: Json { error!("error sending magic link: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_ML_ERROR".to_string(), - message: "There was an error sending the magic link email, please try again later.".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_ML_ERROR".to_string(), + message: + "There was an error sending the magic link email, please try again later." 
+ .to_string(), + path: None, + }], + }); } } @@ -110,19 +112,19 @@ pub async fn magic_link_request(data: Data, req: Json { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database request, please try again later.".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: + "There was an error with the database request, please try again later." + .to_string(), + path: None, + }], + }); } } HttpResponse::Ok().json(MagicLinkResponse { data: MagicLinkResponseData {}, - metadata: MagicLinkResponseMetadata {} + metadata: MagicLinkResponseMetadata {}, }) -} \ No newline at end of file +} diff --git a/trifid-api/src/routes/v1/auth/mod.rs b/trifid-api/src/routes/v1/auth/mod.rs index 29a43ea..d4db6ee 100644 --- a/trifid-api/src/routes/v1/auth/mod.rs +++ b/trifid-api/src/routes/v1/auth/mod.rs @@ -1,3 +1,3 @@ pub mod magic_link; +pub mod totp; pub mod verify_magic_link; -pub mod totp; \ No newline at end of file diff --git a/trifid-api/src/routes/v1/auth/totp.rs b/trifid-api/src/routes/v1/auth/totp.rs index a1302ff..045734b 100644 --- a/trifid-api/src/routes/v1/auth/totp.rs +++ b/trifid-api/src/routes/v1/auth/totp.rs @@ -19,95 +19,95 @@ // This endpoint is considered done. No major features should be added or removed, unless it fixes bugs. // This endpoint requires the `definednetworking` extension to be enabled to be used. -use actix_web::{HttpRequest, HttpResponse, post}; -use actix_web::web::{Data, Json}; -use log::{debug, error}; -use serde::{Serialize, Deserialize}; -use trifid_api_entities::entity::totp_authenticator; -use crate::AppState; use crate::auth_tokens::{enforce_session, TokenInfo}; use crate::error::{APIError, APIErrorsResponse}; -use sea_orm::{EntityTrait, QueryFilter, ColumnTrait, IntoActiveModel, ActiveModelTrait}; +use crate::AppState; +use actix_web::web::{Data, Json}; +use actix_web::{post, HttpRequest, HttpResponse}; +use log::{debug, error}; +use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter}; +use serde::{Deserialize, Serialize}; +use trifid_api_entities::entity::totp_authenticator; -use totp_rs::{Secret, TOTP}; -use trifid_api_entities::entity::auth_token; use crate::config::CONFIG; use crate::timers::expires_in_seconds; use crate::tokens::random_token; +use totp_rs::{Secret, TOTP}; +use trifid_api_entities::entity::auth_token; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct TotpRequest { - pub code: String + pub code: String, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct TotpResponse { pub data: TotpResponseData, - pub metadata: TotpResponseMetadata + pub metadata: TotpResponseMetadata, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct TotpResponseData { #[serde(rename = "authToken")] - pub auth_token: String + pub auth_token: String, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct TotpResponseMetadata {} #[post("/v1/auth/totp")] -pub async fn totp_request(req: Json, req_data: HttpRequest, db: Data) -> HttpResponse { +pub async fn totp_request( + req: Json, + req_data: HttpRequest, + db: Data, +) -> HttpResponse { // require a user session let session_token = match enforce_session(&req_data, &db.conn).await { - Ok(r) => { - match r { - TokenInfo::SessionToken(i) => i, - _ => unreachable!() - } - } + Ok(r) => match r { + TokenInfo::SessionToken(i) => i, + _ => unreachable!(), + 
}, Err(e) => { error!("error enforcing session: {}", e); return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_UNAUTHORIZED".to_string(), - message: "Unauthorized".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: "Unauthorized".to_string(), + path: None, + }], }); } }; // determine if the user has a totp authenticator - let auther = match totp_authenticator::Entity::find().filter(totp_authenticator::Column::User.eq(&session_token.user.id)).one(&db.conn).await { + let auther = match totp_authenticator::Entity::find() + .filter(totp_authenticator::Column::User.eq(&session_token.user.id)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database request, please try again later.".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: + "There was an error with the database request, please try again later." + .to_string(), + path: None, + }], }); } }; let auther = match auther { - Some(a) => { - a - }, + Some(a) => a, None => { return HttpResponse::BadRequest().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_USER_NO_TOTP".to_string(), - message: "This user does not have a totp authenticator".to_string(), - path: None, - } - ] + errors: vec![APIError { + code: "ERR_USER_NO_TOTP".to_string(), + message: "This user does not have a totp authenticator".to_string(), + path: None, + }], }); } }; @@ -118,30 +118,26 @@ pub async fn totp_request(req: Json, req_data: HttpRequest, db: Dat Err(e) => { error!("totp url error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_SECRET_ERROR".to_string(), - message: "There was an error parsing the totpmachine. Please try again later.".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_SECRET_ERROR".to_string(), + message: "There was an error parsing the totpmachine. Please try again later." 
+ .to_string(), + path: None, + }], }); } }; - let valid = match totpmachine.check_current(&req.code) { Ok(valid) => valid, Err(e) => { error!("system time error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_TIME_ERROR".to_string(), - message: "There was an with the server-side time clock.".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_TIME_ERROR".to_string(), + message: "There was an error with the server-side time clock.".to_string(), + path: None, + }], }); } }; @@ -150,14 +146,12 @@ pub async fn totp_request(req: Json, req_data: HttpRequest, db: Dat debug!("current: {}", totpmachine.generate_current().unwrap()); error!("user send invalid totp code"); return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_UNAUTHORIZED".to_string(), - message: "Unauthorized".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: "Unauthorized".to_string(), + path: None, + }], + }); } let model: auth_token::Model = auth_token::Model { @@ -172,13 +166,11 @@ pub async fn totp_request(req: Json, req_data: HttpRequest, db: Dat Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error issuing the authentication token.".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error issuing the authentication token.".to_string(), + path: None, + }], }); } } @@ -187,4 +179,4 @@ pub async fn totp_request(req: Json, req_data: HttpRequest, db: Dat data: TotpResponseData { auth_token: token }, metadata: TotpResponseMetadata {}, }) -} \ No newline at end of file +} diff --git a/trifid-api/src/routes/v1/auth/verify_magic_link.rs b/trifid-api/src/routes/v1/auth/verify_magic_link.rs index cd1e93b..884ad75 100644 --- a/trifid-api/src/routes/v1/auth/verify_magic_link.rs +++ b/trifid-api/src/routes/v1/auth/verify_magic_link.rs @@ -19,56 +19,65 @@ // This endpoint is considered done. No major features should be added or removed, unless it fixes bugs. // This endpoint requires the `definednetworking` extension to be enabled to be used.
-use actix_web::{HttpResponse, post}; -use actix_web::web::{Data, Json}; -use log::error; -use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, ModelTrait, QueryFilter}; -use serde::{Serialize, Deserialize}; -use crate::AppState; -use trifid_api_entities::entity::magic_link; -use trifid_api_entities::entity::magic_link::Model; -use trifid_api_entities::entity::session_token; use crate::config::CONFIG; use crate::error::{APIError, APIErrorsResponse}; use crate::timers::{expired, expires_in_seconds}; use crate::tokens::random_token; +use crate::AppState; +use actix_web::web::{Data, Json}; +use actix_web::{post, HttpResponse}; +use log::error; +use sea_orm::{ + ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, ModelTrait, QueryFilter, +}; +use serde::{Deserialize, Serialize}; +use trifid_api_entities::entity::magic_link; +use trifid_api_entities::entity::magic_link::Model; +use trifid_api_entities::entity::session_token; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct VerifyMagicLinkRequest { #[serde(rename = "magicLinkToken")] - pub magic_link_token: String + pub magic_link_token: String, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct VerifyMagicLinkResponse { pub data: VerifyMagicLinkResponseData, - pub metadata: VerifyMagicLinkResponseMetadata + pub metadata: VerifyMagicLinkResponseMetadata, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct VerifyMagicLinkResponseData { #[serde(rename = "sessionToken")] - pub session_token: String + pub session_token: String, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct VerifyMagicLinkResponseMetadata {} #[post("/v1/auth/verify-magic-link")] -pub async fn verify_magic_link_request(db: Data, req: Json) -> HttpResponse { - let link: Option = match magic_link::Entity::find().filter(magic_link::Column::Id.eq(&req.magic_link_token)).one(&db.conn).await { +pub async fn verify_magic_link_request( + db: Data, + req: Json, +) -> HttpResponse { + let link: Option = match magic_link::Entity::find() + .filter(magic_link::Column::Id.eq(&req.magic_link_token)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database request, please try again later.".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: + "There was an error with the database request, please try again later." 
+ .to_string(), + path: None, + }], + }); } }; @@ -76,27 +85,23 @@ pub async fn verify_magic_link_request(db: Data, req: Json l, None => { return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_UNAUTHORIZED".to_string(), - message: "Unauthorized".to_string(), - path: None - } - ] + errors: vec![APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: "Unauthorized".to_string(), + path: None, + }], }) } }; if expired(link.expires_on as u64) { return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_EXPIRED".to_string(), - message: "Magic link token expired".to_string(), - path: None - } - ] - }) + errors: vec![APIError { + code: "ERR_EXPIRED".to_string(), + message: "Magic link token expired".to_string(), + path: None, + }], + }); } let user = link.user.clone(); @@ -106,14 +111,14 @@ pub async fn verify_magic_link_request(db: Data, req: Json { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database request, please try again later.".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: + "There was an error with the database request, please try again later." + .to_string(), + path: None, + }], + }); } } @@ -130,23 +135,21 @@ pub async fn verify_magic_link_request(db: Data, req: Json { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database request, please try again later.".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: + "There was an error with the database request, please try again later." + .to_string(), + path: None, + }], + }); } } - HttpResponse::Ok().json( - VerifyMagicLinkResponse { - data: VerifyMagicLinkResponseData { - session_token: token, - }, - metadata: VerifyMagicLinkResponseMetadata {}, - } - ) -} \ No newline at end of file + HttpResponse::Ok().json(VerifyMagicLinkResponse { + data: VerifyMagicLinkResponseData { + session_token: token, + }, + metadata: VerifyMagicLinkResponseMetadata {}, + }) +} diff --git a/trifid-api/src/routes/v1/hosts.rs b/trifid-api/src/routes/v1/hosts.rs index 152ae12..bf8a26e 100644 --- a/trifid-api/src/routes/v1/hosts.rs +++ b/trifid-api/src/routes/v1/hosts.rs @@ -40,24 +40,27 @@ // This endpoint requires the `definednetworking` extension to be enabled to be used. // This endpoint has additional functionality enabled by the extended_hosts feature flag. 
-use std::net::{Ipv4Addr, SocketAddrV4}; -use std::str::FromStr; -use std::time::{SystemTime, UNIX_EPOCH}; -use actix_web::{HttpRequest, HttpResponse, get, post, delete, put}; -use actix_web::web::{Data, Json, Path, Query}; -use chrono::{TimeZone, Utc}; -use log::{debug, error}; -use sea_orm::{EntityTrait, QueryFilter, ColumnTrait, QueryOrder, PaginatorTrait, IntoActiveModel, ActiveModelTrait, ModelTrait}; -use sea_orm::ActiveValue::Set; -use serde::{Serialize, Deserialize}; -use trifid_api_entities::entity::{host, host_static_address, network, organization}; -use crate::AppState; use crate::auth_tokens::{enforce_2fa, enforce_api_token, TokenInfo}; use crate::cursor::Cursor; use crate::error::{APIError, APIErrorsResponse}; use crate::routes::v1::trifid::SUPPORTED_EXTENSIONS; use crate::timers::TIME_FORMAT; use crate::tokens::random_id; +use crate::AppState; +use actix_web::web::{Data, Json, Path, Query}; +use actix_web::{delete, get, post, put, HttpRequest, HttpResponse}; +use chrono::{TimeZone, Utc}; +use log::{debug, error}; +use sea_orm::ActiveValue::Set; +use sea_orm::{ + ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, ModelTrait, PaginatorTrait, + QueryFilter, QueryOrder, +}; +use serde::{Deserialize, Serialize}; +use std::net::{Ipv4Addr, SocketAddrV4}; +use std::str::FromStr; +use std::time::{SystemTime, UNIX_EPOCH}; +use trifid_api_entities::entity::{host, host_static_address, network, organization}; #[derive(Serialize, Deserialize)] pub struct ListHostsRequestOpts { @@ -66,13 +69,13 @@ pub struct ListHostsRequestOpts { #[serde(default)] pub cursor: String, #[serde(default = "page_default", rename = "pageSize")] - pub page_size: u64 + pub page_size: u64, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ListHostsResponse { pub data: Vec, - pub metadata: ListHostsResponseMetadata + pub metadata: ListHostsResponseMetadata, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -88,13 +91,13 @@ pub struct ListHostsResponseMetadata { #[serde(default, rename = "nextCursor")] pub next_cursor: Option, #[serde(default)] - pub page: Option + pub page: Option, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ListHostsResponseMetadataPage { pub count: u64, - pub start: u64 + pub start: u64, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -121,7 +124,7 @@ pub struct HostResponse { pub created_at: String, #[serde(rename = "isBlocked")] pub is_blocked: bool, - pub metadata: HostResponseMetadata + pub metadata: HostResponseMetadata, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -131,20 +134,31 @@ pub struct HostResponseMetadata { pub version: String, pub platform: String, #[serde(rename = "updateAvailable")] - pub update_available: bool + pub update_available: bool, } -fn page_default() -> u64 { 25 } - +fn page_default() -> u64 { + 25 +} #[get("/v1/hosts")] -pub async fn get_hosts(opts: Query, req_info: HttpRequest, db: Data) -> HttpResponse { +pub async fn get_hosts( + opts: Query, + req_info: HttpRequest, + db: Data, +) -> HttpResponse { // For this endpoint, you either need to be a fully authenticated user OR a token with hosts:list - let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent); - let api_token_info = enforce_api_token(&req_info, &["hosts:list"], &db.conn).await.unwrap_or(TokenInfo::NotPresent); + let session_info = enforce_2fa(&req_info, &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); + let api_token_info = enforce_api_token(&req_info, &["hosts:list"], &db.conn) + .await + 
.unwrap_or(TokenInfo::NotPresent); // If neither are present, throw an error - if matches!(session_info, TokenInfo::NotPresent) && matches!(api_token_info, TokenInfo::NotPresent) { + if matches!(session_info, TokenInfo::NotPresent) + && matches!(api_token_info, TokenInfo::NotPresent) + { return HttpResponse::Unauthorized().json(APIErrorsResponse { errors: vec![ APIError { @@ -153,11 +167,13 @@ pub async fn get_hosts(opts: Query, req_info: HttpRequest, path: None, } ], - }) + }); } // If both are present, throw an error - if matches!(session_info, TokenInfo::AuthToken(_)) && matches!(api_token_info, TokenInfo::ApiToken(_)) { + if matches!(session_info, TokenInfo::AuthToken(_)) + && matches!(api_token_info, TokenInfo::ApiToken(_)) + { return HttpResponse::BadRequest().json(APIErrorsResponse { errors: vec![ APIError { @@ -166,7 +182,7 @@ pub async fn get_hosts(opts: Query, req_info: HttpRequest, path: None } ], - }) + }); } let org_id = match api_token_info { @@ -175,10 +191,14 @@ pub async fn get_hosts(opts: Query, req_info: HttpRequest, // we have a session token, which means we have to do a db request to get the organization that this user owns let user = match session_info { TokenInfo::AuthToken(tkn) => tkn.session_info.user, - _ => unreachable!() + _ => unreachable!(), }; - let org = match organization::Entity::find().filter(organization::Column::Owner.eq(user.id)).one(&db.conn).await { + let org = match organization::Entity::find() + .filter(organization::Column::Owner.eq(user.id)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -205,14 +225,18 @@ pub async fn get_hosts(opts: Query, req_info: HttpRequest, path: None } ], - }) + }); } } }; let net_id; - let net = match network::Entity::find().filter(network::Column::Organization.eq(&org_id)).one(&db.conn).await { + let net = match network::Entity::find() + .filter(network::Column::Organization.eq(&org_id)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -232,14 +256,13 @@ pub async fn get_hosts(opts: Query, req_info: HttpRequest, net_id = net.id; } else { return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_NO_NET".to_string(), - message: "This user does not own any networks. Try using an API token instead.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_NO_NET".to_string(), + message: "This user does not own any networks. Try using an API token instead." 
+ .to_string(), + path: None, + }], + }); } let cursor: Cursor = match opts.cursor.clone().try_into() { @@ -247,18 +270,19 @@ pub async fn get_hosts(opts: Query, req_info: HttpRequest, Err(e) => { error!("invalid cursor: {}", e); return HttpResponse::BadRequest().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_INVALID_CURSOR".to_string(), - message: "The provided cursor was invalid, please try again later.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_INVALID_CURSOR".to_string(), + message: "The provided cursor was invalid, please try again later.".to_string(), + path: None, + }], + }); } }; - let host_pages = host::Entity::find().filter(host::Column::Network.eq(&net_id)).order_by_asc(host::Column::CreatedAt).paginate(&db.conn, opts.page_size); + let host_pages = host::Entity::find() + .filter(host::Column::Network.eq(&net_id)) + .order_by_asc(host::Column::CreatedAt) + .paginate(&db.conn, opts.page_size); let total = match host_pages.num_items().await { Ok(r) => r, @@ -311,7 +335,11 @@ pub async fn get_hosts(opts: Query, req_info: HttpRequest, for u in models { // fetch static addresses - let ips = match host_static_address::Entity::find().filter(host_static_address::Column::Host.eq(&u.id)).all(&db.conn).await { + let ips = match host_static_address::Entity::find() + .filter(host_static_address::Column::Host.eq(&u.id)) + .all(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -334,14 +362,26 @@ pub async fn get_hosts(opts: Query, req_info: HttpRequest, role_id: u.role, name: u.name, ip_address: u.ip, - static_addresses: ips.iter().map(|u| SocketAddrV4::from_str(&u.address).unwrap()).collect(), + static_addresses: ips + .iter() + .map(|u| SocketAddrV4::from_str(&u.address).unwrap()) + .collect(), listen_port: u.listen_port as u16, is_lighthouse: u.is_lighthouse, is_relay: u.is_relay, - created_at: Utc.timestamp_opt(u.created_at, 0).unwrap().format(TIME_FORMAT).to_string(), + created_at: Utc + .timestamp_opt(u.created_at, 0) + .unwrap() + .format(TIME_FORMAT) + .to_string(), is_blocked: u.is_blocked, metadata: HostResponseMetadata { - last_seen_at: Some(Utc.timestamp_opt(u.last_seen_at, 0).unwrap().format(TIME_FORMAT).to_string()), + last_seen_at: Some( + Utc.timestamp_opt(u.last_seen_at, 0) + .unwrap() + .format(TIME_FORMAT) + .to_string(), + ), version: u.last_version.to_string(), platform: u.last_platform, update_available: u.last_out_of_date, @@ -355,20 +395,28 @@ pub async fn get_hosts(opts: Query, req_info: HttpRequest, data: models_mapped, metadata: ListHostsResponseMetadata { total_count: total, - has_next_page: cursor.page+1 != pages, + has_next_page: cursor.page + 1 != pages, has_prev_page: cursor.page != 0, prev_cursor: if cursor.page != 0 { - match (Cursor { page: cursor.page - 1 }).try_into() { + match (Cursor { + page: cursor.page - 1, + }) + .try_into() + { Ok(r) => Some(r), - Err(_) => None + Err(_) => None, } } else { None }, - next_cursor: if cursor.page+1 != pages { - match (Cursor { page: cursor.page + 1 }).try_into() { + next_cursor: if cursor.page + 1 != pages { + match (Cursor { + page: cursor.page + 1, + }) + .try_into() + { Ok(r) => Some(r), - Err(_) => None + Err(_) => None, } } else { None @@ -378,7 +426,9 @@ pub async fn get_hosts(opts: Query, req_info: HttpRequest, count, start: opts.page_size * cursor.page, }) - } else { None }, + } else { + None + }, }, }) } @@ -399,26 +449,36 @@ pub struct CreateHostRequest { #[serde(rename = "isLighthouse")] pub is_lighthouse: bool, 
#[serde(rename = "isRelay")] - pub is_relay: bool + pub is_relay: bool, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct CreateHostResponse { pub data: HostResponse, - pub metadata: CreateHostResponseMetadata + pub metadata: CreateHostResponseMetadata, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct CreateHostResponseMetadata {} #[post("/v1/hosts")] -pub async fn create_hosts_request(req: Json, req_info: HttpRequest, db: Data) -> HttpResponse { +pub async fn create_hosts_request( + req: Json, + req_info: HttpRequest, + db: Data, +) -> HttpResponse { // For this endpoint, you either need to be a fully authenticated user OR a token with hosts:create - let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent); - let api_token_info = enforce_api_token(&req_info, &["hosts:create"], &db.conn).await.unwrap_or(TokenInfo::NotPresent); + let session_info = enforce_2fa(&req_info, &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); + let api_token_info = enforce_api_token(&req_info, &["hosts:create"], &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); // If neither are present, throw an error - if matches!(session_info, TokenInfo::NotPresent) && matches!(api_token_info, TokenInfo::NotPresent) { + if matches!(session_info, TokenInfo::NotPresent) + && matches!(api_token_info, TokenInfo::NotPresent) + { return HttpResponse::Unauthorized().json(APIErrorsResponse { errors: vec![ APIError { @@ -427,11 +487,13 @@ pub async fn create_hosts_request(req: Json, req_info: HttpRe path: None, } ], - }) + }); } // If both are present, throw an error - if matches!(session_info, TokenInfo::AuthToken(_)) && matches!(api_token_info, TokenInfo::ApiToken(_)) { + if matches!(session_info, TokenInfo::AuthToken(_)) + && matches!(api_token_info, TokenInfo::ApiToken(_)) + { return HttpResponse::BadRequest().json(APIErrorsResponse { errors: vec![ APIError { @@ -440,20 +502,23 @@ pub async fn create_hosts_request(req: Json, req_info: HttpRe path: None } ], - }) + }); } - let org_id = match api_token_info { TokenInfo::ApiToken(tkn) => tkn.organization, _ => { // we have a session token, which means we have to do a db request to get the organization that this user owns let user = match session_info { TokenInfo::AuthToken(tkn) => tkn.session_info.user, - _ => unreachable!() + _ => unreachable!(), }; - let org = match organization::Entity::find().filter(organization::Column::Owner.eq(user.id)).one(&db.conn).await { + let org = match organization::Entity::find() + .filter(organization::Column::Owner.eq(user.id)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -480,14 +545,18 @@ pub async fn create_hosts_request(req: Json, req_info: HttpRe path: None } ], - }) + }); } } }; let net_id; - let net = match network::Entity::find().filter(network::Column::Organization.eq(&org_id)).one(&db.conn).await { + let net = match network::Entity::find() + .filter(network::Column::Organization.eq(&org_id)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -507,14 +576,13 @@ pub async fn create_hosts_request(req: Json, req_info: HttpRe net_id = net.id; } else { return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_NO_NET".to_string(), - message: "This user does not own any networks. 
Try using an API token instead.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_NO_NET".to_string(), + message: "This user does not own any networks. Try using an API token instead." + .to_string(), + path: None, + }], + }); } if net_id != req.network_id { @@ -526,31 +594,27 @@ pub async fn create_hosts_request(req: Json, req_info: HttpRe path: None } ], - }) + }); } if req.is_lighthouse && req.is_relay { return HttpResponse::BadRequest().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_CANNOT_BE_RELAY_AND_LIGHTHOUSE".to_string(), - message: "A host cannot be a relay and a lighthouse at the same time.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_CANNOT_BE_RELAY_AND_LIGHTHOUSE".to_string(), + message: "A host cannot be a relay and a lighthouse at the same time.".to_string(), + path: None, + }], + }); } if req.is_lighthouse || req.is_relay && req.static_addresses.is_empty() { return HttpResponse::BadRequest().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_NEEDS_STATIC_ADDR".to_string(), - message: "A relay or lighthouse requires at least one static address.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_NEEDS_STATIC_ADDR".to_string(), + message: "A relay or lighthouse requires at least one static address.".to_string(), + path: None, + }], + }); } let new_host_model = host::Model { @@ -563,20 +627,25 @@ pub async fn create_hosts_request(req: Json, req_info: HttpRe is_lighthouse: req.is_lighthouse, is_relay: req.is_relay, counter: 0, - created_at: SystemTime::now().duration_since(UNIX_EPOCH).expect("time went backwards").as_secs() as i64, + created_at: SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("time went backwards") + .as_secs() as i64, is_blocked: false, last_seen_at: 0, last_version: 0, last_platform: "".to_string(), last_out_of_date: false, }; - let static_addresses: Vec = req.static_addresses.iter().map(|u| { - host_static_address::Model { + let static_addresses: Vec = req + .static_addresses + .iter() + .map(|u| host_static_address::Model { id: random_id("hsaddress"), host: new_host_model.id.clone(), address: u.to_string(), - } - }).collect(); + }) + .collect(); let new_host_model_clone = new_host_model.clone(); let static_addresses_clone = static_addresses.clone(); @@ -587,14 +656,13 @@ pub async fn create_hosts_request(req: Json, req_info: HttpRe Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error creating the new host. Please try again later".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error creating the new host. Please try again later" + .to_string(), + path: None, + }], + }); } } @@ -605,14 +673,13 @@ pub async fn create_hosts_request(req: Json, req_info: HttpRe Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error creating the new host. Please try again later".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error creating the new host. 
Please try again later" + .to_string(), + path: None, + }], + }); } } } @@ -629,10 +696,19 @@ pub async fn create_hosts_request(req: Json, req_info: HttpRe listen_port: req.listen_port, is_lighthouse: req.is_lighthouse, is_relay: req.is_relay, - created_at: Utc.timestamp_opt(new_host_model_clone.created_at, 0).unwrap().format(TIME_FORMAT).to_string(), + created_at: Utc + .timestamp_opt(new_host_model_clone.created_at, 0) + .unwrap() + .format(TIME_FORMAT) + .to_string(), is_blocked: false, metadata: HostResponseMetadata { - last_seen_at: Some(Utc.timestamp_opt(new_host_model_clone.last_seen_at, 0).unwrap().format(TIME_FORMAT).to_string()), + last_seen_at: Some( + Utc.timestamp_opt(new_host_model_clone.last_seen_at, 0) + .unwrap() + .format(TIME_FORMAT) + .to_string(), + ), version: new_host_model_clone.last_version.to_string(), platform: new_host_model_clone.last_platform, update_available: new_host_model_clone.last_out_of_date, @@ -645,7 +721,7 @@ pub async fn create_hosts_request(req: Json, req_info: HttpRe #[derive(Serialize, Deserialize)] pub struct GetHostResponse { pub data: HostResponse, - pub metadata: GetHostResponseMetadata + pub metadata: GetHostResponseMetadata, } #[derive(Serialize, Deserialize)] pub struct GetHostResponseMetadata {} @@ -653,11 +729,17 @@ pub struct GetHostResponseMetadata {} #[get("/v1/hosts/{host_id}")] pub async fn get_host(id: Path, req_info: HttpRequest, db: Data) -> HttpResponse { // For this endpoint, you either need to be a fully authenticated user OR a token with hosts:read - let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent); - let api_token_info = enforce_api_token(&req_info, &["hosts:read"], &db.conn).await.unwrap_or(TokenInfo::NotPresent); + let session_info = enforce_2fa(&req_info, &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); + let api_token_info = enforce_api_token(&req_info, &["hosts:read"], &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); // If neither are present, throw an error - if matches!(session_info, TokenInfo::NotPresent) && matches!(api_token_info, TokenInfo::NotPresent) { + if matches!(session_info, TokenInfo::NotPresent) + && matches!(api_token_info, TokenInfo::NotPresent) + { return HttpResponse::Unauthorized().json(APIErrorsResponse { errors: vec![ APIError { @@ -666,11 +748,13 @@ pub async fn get_host(id: Path, req_info: HttpRequest, db: Data, req_info: HttpRequest, db: Data, req_info: HttpRequest, db: Data tkn.session_info.user, - _ => unreachable!() + _ => unreachable!(), }; - let org = match organization::Entity::find().filter(organization::Column::Owner.eq(user.id)).one(&db.conn).await { + let org = match organization::Entity::find() + .filter(organization::Column::Owner.eq(user.id)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -718,14 +806,18 @@ pub async fn get_host(id: Path, req_info: HttpRequest, db: Data r, Err(e) => { error!("database error: {}", e); @@ -745,29 +837,31 @@ pub async fn get_host(id: Path, req_info: HttpRequest, db: Data h, Err(e) => { error!("Database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database query. Please try again later.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error with the database query. Please try again later." 
+ .to_string(), + path: None, + }], + }); } }; @@ -775,42 +869,44 @@ pub async fn get_host(id: Path, req_info: HttpRequest, db: Data h, None => { return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_UNAUTHORIZED".to_string(), - message: "This resource does not exist or you do not have permission to access it.".to_string(), - path: None - } - ], + errors: vec![APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: + "This resource does not exist or you do not have permission to access it." + .to_string(), + path: None, + }], }) } }; if host.network != net_id { return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_UNAUTHORIZED".to_string(), - message: "This resource does not exist or you do not have permission to access it.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: "This resource does not exist or you do not have permission to access it." + .to_string(), + path: None, + }], + }); } - let static_addresses = match host_static_address::Entity::find().filter(host_static_address::Column::Host.eq(&host.id)).all(&db.conn).await { + let static_addresses = match host_static_address::Entity::find() + .filter(host_static_address::Column::Host.eq(&host.id)) + .all(&db.conn) + .await + { Ok(h) => h, Err(e) => { error!("Database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database query. Please try again later.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error with the database query. Please try again later." 
+ .to_string(), + path: None, + }], + }); } }; @@ -822,14 +918,26 @@ pub async fn get_host(id: Path, req_info: HttpRequest, db: Data, req_info: HttpRequest, db: Data, req_info: HttpRequest, db: Data) -> HttpResponse { +pub async fn delete_host( + id: Path, + req_info: HttpRequest, + db: Data, +) -> HttpResponse { // For this endpoint, you either need to be a fully authenticated user OR a token with hosts:delete - let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent); - let api_token_info = enforce_api_token(&req_info, &["hosts:delete"], &db.conn).await.unwrap_or(TokenInfo::NotPresent); + let session_info = enforce_2fa(&req_info, &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); + let api_token_info = enforce_api_token(&req_info, &["hosts:delete"], &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); // If neither are present, throw an error - if matches!(session_info, TokenInfo::NotPresent) && matches!(api_token_info, TokenInfo::NotPresent) { + if matches!(session_info, TokenInfo::NotPresent) + && matches!(api_token_info, TokenInfo::NotPresent) + { return HttpResponse::Unauthorized().json(APIErrorsResponse { errors: vec![ APIError { @@ -865,11 +983,13 @@ pub async fn delete_host(id: Path, req_info: HttpRequest, db: Data, req_info: HttpRequest, db: Data, req_info: HttpRequest, db: Data tkn.session_info.user, - _ => unreachable!() + _ => unreachable!(), }; - let org = match organization::Entity::find().filter(organization::Column::Owner.eq(user.id)).one(&db.conn).await { + let org = match organization::Entity::find() + .filter(organization::Column::Owner.eq(user.id)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -917,14 +1041,18 @@ pub async fn delete_host(id: Path, req_info: HttpRequest, db: Data r, Err(e) => { error!("database error: {}", e); @@ -944,29 +1072,31 @@ pub async fn delete_host(id: Path, req_info: HttpRequest, db: Data h, Err(e) => { error!("Database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database query. Please try again later.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error with the database query. Please try again later." + .to_string(), + path: None, + }], + }); } }; @@ -974,42 +1104,44 @@ pub async fn delete_host(id: Path, req_info: HttpRequest, db: Data h, None => { return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_UNAUTHORIZED".to_string(), - message: "This resource does not exist or you do not have permission to access it.".to_string(), - path: None - } - ], + errors: vec![APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: + "This resource does not exist or you do not have permission to access it." + .to_string(), + path: None, + }], }) } }; if host.network != net_id { return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_UNAUTHORIZED".to_string(), - message: "This resource does not exist or you do not have permission to access it.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: "This resource does not exist or you do not have permission to access it." 
+ .to_string(), + path: None, + }], + }); } - let static_addresses = match host_static_address::Entity::find().filter(host_static_address::Column::Host.eq(&host.id)).all(&db.conn).await { + let static_addresses = match host_static_address::Entity::find() + .filter(host_static_address::Column::Host.eq(&host.id)) + .all(&db.conn) + .await + { Ok(h) => h, Err(e) => { error!("Database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database query. Please try again later.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error with the database query. Please try again later." + .to_string(), + path: None, + }], + }); } }; @@ -1018,14 +1150,13 @@ pub async fn delete_host(id: Path, req_info: HttpRequest, db: Data { error!("Database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database query. Please try again later.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error with the database query. Please try again later." + .to_string(), + path: None, + }], + }); } } @@ -1035,14 +1166,14 @@ pub async fn delete_host(id: Path, req_info: HttpRequest, db: Data { error!("Database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database query. Please try again later.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: + "There was an error with the database query. Please try again later." 
+ .to_string(), + path: None, + }], + }); } } } @@ -1062,31 +1193,43 @@ pub struct EditHostRequest { // t+features:extended_hosts pub name: Option, // t+features:extended_hosts - pub ip: Option + pub ip: Option, } #[derive(Serialize, Deserialize)] pub struct EditHostExtensionQuery { - pub extension: Option + pub extension: Option, } #[derive(Serialize, Deserialize)] pub struct EditHostResponse { pub data: HostResponse, - pub metadata: EditHostResponseMetadata + pub metadata: EditHostResponseMetadata, } #[derive(Serialize, Deserialize)] pub struct EditHostResponseMetadata {} #[put("/v1/hosts/{host_id}")] -pub async fn edit_host(id: Path, query: Query, req: Json, req_info: HttpRequest, db: Data) -> HttpResponse { +pub async fn edit_host( + id: Path, + query: Query, + req: Json, + req_info: HttpRequest, + db: Data, +) -> HttpResponse { // For this endpoint, you either need to be a fully authenticated user OR a token with hosts:edit - let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent); - let api_token_info = enforce_api_token(&req_info, &["hosts:edit"], &db.conn).await.unwrap_or(TokenInfo::NotPresent); + let session_info = enforce_2fa(&req_info, &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); + let api_token_info = enforce_api_token(&req_info, &["hosts:edit"], &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); // If neither are present, throw an error - if matches!(session_info, TokenInfo::NotPresent) && matches!(api_token_info, TokenInfo::NotPresent) { + if matches!(session_info, TokenInfo::NotPresent) + && matches!(api_token_info, TokenInfo::NotPresent) + { return HttpResponse::Unauthorized().json(APIErrorsResponse { errors: vec![ APIError { @@ -1095,11 +1238,13 @@ pub async fn edit_host(id: Path, query: Query, r path: None, } ], - }) + }); } // If both are present, throw an error - if matches!(session_info, TokenInfo::AuthToken(_)) && matches!(api_token_info, TokenInfo::ApiToken(_)) { + if matches!(session_info, TokenInfo::AuthToken(_)) + && matches!(api_token_info, TokenInfo::ApiToken(_)) + { return HttpResponse::BadRequest().json(APIErrorsResponse { errors: vec![ APIError { @@ -1108,7 +1253,7 @@ pub async fn edit_host(id: Path, query: Query, r path: None } ], - }) + }); } let org_id = match api_token_info { @@ -1117,10 +1262,14 @@ pub async fn edit_host(id: Path, query: Query, r // we have a session token, which means we have to do a db request to get the organization that this user owns let user = match session_info { TokenInfo::AuthToken(tkn) => tkn.session_info.user, - _ => unreachable!() + _ => unreachable!(), }; - let org = match organization::Entity::find().filter(organization::Column::Owner.eq(user.id)).one(&db.conn).await { + let org = match organization::Entity::find() + .filter(organization::Column::Owner.eq(user.id)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -1147,14 +1296,18 @@ pub async fn edit_host(id: Path, query: Query, r path: None } ], - }) + }); } } }; let net_id; - let net = match network::Entity::find().filter(network::Column::Organization.eq(&org_id)).one(&db.conn).await { + let net = match network::Entity::find() + .filter(network::Column::Organization.eq(&org_id)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -1174,29 +1327,31 @@ pub async fn edit_host(id: Path, query: Query, r net_id = net.id; } else { return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: 
"ERR_NO_NET".to_string(), - message: "This user does not own any networks. Try using an API token instead.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_NO_NET".to_string(), + message: "This user does not own any networks. Try using an API token instead." + .to_string(), + path: None, + }], + }); } - let host = match host::Entity::find().filter(host::Column::Id.eq(id.into_inner())).one(&db.conn).await { + let host = match host::Entity::find() + .filter(host::Column::Id.eq(id.into_inner())) + .one(&db.conn) + .await + { Ok(h) => h, Err(e) => { error!("Database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database query. Please try again later.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error with the database query. Please try again later." + .to_string(), + path: None, + }], + }); } }; @@ -1204,42 +1359,44 @@ pub async fn edit_host(id: Path, query: Query, r Some(h) => h, None => { return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_UNAUTHORIZED".to_string(), - message: "This resource does not exist or you do not have permission to access it.".to_string(), - path: None - } - ], + errors: vec![APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: + "This resource does not exist or you do not have permission to access it." + .to_string(), + path: None, + }], }) } }; if host.network != net_id { return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_UNAUTHORIZED".to_string(), - message: "This resource does not exist or you do not have permission to access it.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: "This resource does not exist or you do not have permission to access it." + .to_string(), + path: None, + }], + }); } - let static_addresses = match host_static_address::Entity::find().filter(host_static_address::Column::Host.eq(&host.id)).all(&db.conn).await { + let static_addresses = match host_static_address::Entity::find() + .filter(host_static_address::Column::Host.eq(&host.id)) + .all(&db.conn) + .await + { Ok(h) => h, Err(e) => { error!("Database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database query. Please try again later.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error with the database query. Please try again later." + .to_string(), + path: None, + }], + }); } }; @@ -1249,26 +1406,35 @@ pub async fn edit_host(id: Path, query: Query, r Err(e) => { error!("Database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database query. Please try again later.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: + "There was an error with the database query. Please try again later." 
+ .to_string(), + path: None, + }], + }); } } } - let mut host_clone = host.clone(); + let host_clone = host.clone(); let mut host_active_model = host_clone.into_active_model(); host_active_model.listen_port = Set(req.listen_port as i32); - debug!("{:?} {} {:?} {:?} {}", query.extension, SUPPORTED_EXTENSIONS.contains(&"extended_hosts"), req.name, req.ip, query.extension == Some("extended_hosts".to_string())); + debug!( + "{:?} {} {:?} {:?} {}", + query.extension, + SUPPORTED_EXTENSIONS.contains(&"extended_hosts"), + req.name, + req.ip, + query.extension == Some("extended_hosts".to_string()) + ); - if query.extension == Some("extended_hosts".to_string()) && SUPPORTED_EXTENSIONS.contains(&"extended_hosts") { + if query.extension == Some("extended_hosts".to_string()) + && SUPPORTED_EXTENSIONS.contains(&"extended_hosts") + { if let Some(new_host_name) = req.name.clone() { debug!("updated host name"); host_active_model.name = Set(new_host_name); @@ -1279,30 +1445,30 @@ pub async fn edit_host(id: Path, query: Query, r } } - let host = match host_active_model.update(&db.conn).await { Ok(h) => h, Err(e) => { error!("Database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database query. Please try again later.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error with the database query. Please try again later." + .to_string(), + path: None, + }], + }); } }; - let static_addresses: Vec = req.static_addresses.iter().map(|u| { - host_static_address::Model { + let static_addresses: Vec = req + .static_addresses + .iter() + .map(|u| host_static_address::Model { id: random_id("hsaddress"), host: host.id.clone(), address: u.to_string(), - } - }).collect(); + }) + .collect(); for rule in &static_addresses { let active_model = rule.clone().into_active_model(); @@ -1311,14 +1477,13 @@ pub async fn edit_host(id: Path, query: Query, r Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error creating the new host. Please try again later".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error creating the new host. 
Please try again later" + .to_string(), + path: None, + }], + }); } } } @@ -1335,10 +1500,19 @@ pub async fn edit_host(id: Path, query: Query, r listen_port: host.listen_port as u16, is_lighthouse: host.is_lighthouse, is_relay: host.is_relay, - created_at: Utc.timestamp_opt(host.created_at, 0).unwrap().format(TIME_FORMAT).to_string(), + created_at: Utc + .timestamp_opt(host.created_at, 0) + .unwrap() + .format(TIME_FORMAT) + .to_string(), is_blocked: host.is_blocked, metadata: HostResponseMetadata { - last_seen_at: Some(Utc.timestamp_opt(host.last_seen_at, 0).unwrap().format(TIME_FORMAT).to_string()), + last_seen_at: Some( + Utc.timestamp_opt(host.last_seen_at, 0) + .unwrap() + .format(TIME_FORMAT) + .to_string(), + ), version: host.last_version.to_string(), platform: host.last_platform, update_available: host.last_out_of_date, @@ -1346,4 +1520,263 @@ pub async fn edit_host(id: Path, query: Query, r }, metadata: EditHostResponseMetadata {}, }) -} \ No newline at end of file +} + +#[derive(Serialize, Deserialize)] +pub struct BlockHostResponse { + pub data: BlockHostResponseData, + pub metadata: BlockHostResponseMetadata, +} + +#[derive(Serialize, Deserialize)] +pub struct BlockHostResponseData { + pub host: HostResponse, +} + +#[derive(Serialize, Deserialize)] +pub struct BlockHostResponseMetadata {} + +#[post("/v1/hosts/{host_id}/block")] +pub async fn block_host( + id: Path, + req_info: HttpRequest, + db: Data, +) -> HttpResponse { + let session_info = enforce_2fa(&req_info, &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); + let api_token_info = enforce_api_token(&req_info, &["hosts:block"], &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); + + // If neither are present, throw an error + if matches!(session_info, TokenInfo::NotPresent) + && matches!(api_token_info, TokenInfo::NotPresent) + { + return HttpResponse::Unauthorized().json(APIErrorsResponse { + errors: vec![ + APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: "This endpoint requires either a fully authenticated user or a token with the hosts:block scope".to_string(), + path: None, + } + ], + }); + } + + // If both are present, throw an error + if matches!(session_info, TokenInfo::AuthToken(_)) + && matches!(api_token_info, TokenInfo::ApiToken(_)) + { + return HttpResponse::BadRequest().json(APIErrorsResponse { + errors: vec![ + APIError { + code: "ERR_AMBIGUOUS_AUTHENTICATION".to_string(), + message: "Both a user token and an API token with the proper scope was provided. 
Please only provide one.".to_string(), + path: None + } + ], + }); + } + + let org_id = match api_token_info { + TokenInfo::ApiToken(tkn) => tkn.organization, + _ => { + // we have a session token, which means we have to do a db request to get the organization that this user owns + let user = match session_info { + TokenInfo::AuthToken(tkn) => tkn.session_info.user, + _ => unreachable!(), + }; + + let org = match organization::Entity::find() + .filter(organization::Column::Owner.eq(user.id)) + .one(&db.conn) + .await + { + Ok(r) => r, + Err(e) => { + error!("database error: {}", e); + return HttpResponse::InternalServerError().json(APIErrorsResponse { + errors: vec![ + APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error performing the database request, please try again later.".to_string(), + path: None, + } + ], + }); + } + }; + + if let Some(org) = org { + org.id + } else { + return HttpResponse::Unauthorized().json(APIErrorsResponse { + errors: vec![ + APIError { + code: "ERR_NO_ORG".to_string(), + message: "This user does not own any organizations. Try using an API token instead.".to_string(), + path: None + } + ], + }); + } + } + }; + + let net_id; + + let net = match network::Entity::find() + .filter(network::Column::Organization.eq(&org_id)) + .one(&db.conn) + .await + { + Ok(r) => r, + Err(e) => { + error!("database error: {}", e); + return HttpResponse::InternalServerError().json(APIErrorsResponse { + errors: vec![ + APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error performing the database request, please try again later.".to_string(), + path: None, + } + ], + }); + } + }; + + if let Some(net) = net { + net_id = net.id; + } else { + return HttpResponse::Unauthorized().json(APIErrorsResponse { + errors: vec![APIError { + code: "ERR_NO_NET".to_string(), + message: "This user does not own any networks. Try using an API token instead." + .to_string(), + path: None, + }], + }); + } + + let host = match host::Entity::find() + .filter(host::Column::Id.eq(id.into_inner())) + .one(&db.conn) + .await + { + Ok(h) => h, + Err(e) => { + error!("Database error: {}", e); + return HttpResponse::InternalServerError().json(APIErrorsResponse { + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error with the database query. Please try again later." + .to_string(), + path: None, + }], + }); + } + }; + + let host = match host { + Some(h) => h, + None => { + return HttpResponse::Unauthorized().json(APIErrorsResponse { + errors: vec![APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: + "This resource does not exist or you do not have permission to access it." + .to_string(), + path: None, + }], + }) + } + }; + + if host.network != net_id { + return HttpResponse::Unauthorized().json(APIErrorsResponse { + errors: vec![APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: "This resource does not exist or you do not have permission to access it." + .to_string(), + path: None, + }], + }); + } + + let mut host_active = host.into_active_model(); + + host_active.is_blocked = Set(true); + + let host = match host_active.update(&db.conn).await { + Ok(h) => h, + Err(e) => { + error!("Database error: {}", e); + return HttpResponse::InternalServerError().json(APIErrorsResponse { + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error with the database query. Please try again later." 
+ .to_string(), + path: None, + }], + }); + } + }; + + let static_addresses = match host_static_address::Entity::find() + .filter(host_static_address::Column::Host.eq(&host.id)) + .all(&db.conn) + .await + { + Ok(h) => h, + Err(e) => { + error!("Database error: {}", e); + return HttpResponse::InternalServerError().json(APIErrorsResponse { + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error with the database query. Please try again later." + .to_string(), + path: None, + }], + }); + } + }; + + HttpResponse::Ok().json(BlockHostResponse { + data: BlockHostResponseData { + host: HostResponse { + id: host.id, + organization_id: org_id, + network_id: net_id, + role_id: host.role, + name: host.name, + ip_address: host.ip.to_string(), + static_addresses: static_addresses + .iter() + .map(|u| SocketAddrV4::from_str(&u.address).unwrap()) + .collect(), + listen_port: host.listen_port as u16, + is_lighthouse: host.is_lighthouse, + is_relay: host.is_relay, + created_at: Utc + .timestamp_opt(host.created_at, 0) + .unwrap() + .format(TIME_FORMAT) + .to_string(), + is_blocked: host.is_blocked, + metadata: HostResponseMetadata { + last_seen_at: Some( + Utc.timestamp_opt(host.last_seen_at, 0) + .unwrap() + .format(TIME_FORMAT) + .to_string(), + ), + version: host.last_version.to_string(), + platform: host.last_platform, + update_available: host.last_out_of_date, + }, + }, + }, + metadata: BlockHostResponseMetadata {}, + }) +} diff --git a/trifid-api/src/routes/v1/mod.rs b/trifid-api/src/routes/v1/mod.rs index 011b224..8b65d8c 100644 --- a/trifid-api/src/routes/v1/mod.rs +++ b/trifid-api/src/routes/v1/mod.rs @@ -1,9 +1,9 @@ pub mod auth; -pub mod signup; -pub mod totp_authenticators; -pub mod verify_totp_authenticators; +pub mod hosts; pub mod networks; pub mod organization; pub mod roles; +pub mod signup; +pub mod totp_authenticators; pub mod trifid; -pub mod hosts; \ No newline at end of file +pub mod verify_totp_authenticators; diff --git a/trifid-api/src/routes/v1/networks.rs b/trifid-api/src/routes/v1/networks.rs index 8a2c010..9a0497a 100644 --- a/trifid-api/src/routes/v1/networks.rs +++ b/trifid-api/src/routes/v1/networks.rs @@ -24,24 +24,24 @@ // This endpoint is considered done. No major features should be added or removed, unless it fixes bugs. // This endpoint requires the `definednetworking` extension to be enabled to be used. 
-use serde::{Serialize, Deserialize}; -use actix_web::{get, HttpRequest, HttpResponse}; +use crate::auth_tokens::{enforce_2fa, enforce_api_token, TokenInfo}; +use crate::cursor::Cursor; +use crate::error::{APIError, APIErrorsResponse}; +use crate::timers::TIME_FORMAT; +use crate::AppState; use actix_web::web::{Data, Path, Query}; +use actix_web::{get, HttpRequest, HttpResponse}; use chrono::{TimeZone, Utc}; use log::error; use sea_orm::{ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, QueryOrder}; -use crate::AppState; -use crate::auth_tokens::{enforce_2fa, enforce_api_token, TokenInfo}; -use crate::error::{APIError, APIErrorsResponse}; -use trifid_api_entities::entity::organization; +use serde::{Deserialize, Serialize}; use trifid_api_entities::entity::network; -use crate::cursor::Cursor; -use crate::timers::TIME_FORMAT; +use trifid_api_entities::entity::organization; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct GetNetworksResponse { pub data: Vec, - pub metadata: GetNetworksResponseMetadata + pub metadata: GetNetworksResponseMetadata, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -56,7 +56,7 @@ pub struct GetNetworksResponseData { pub created_at: String, // 2023-03-22T18:55:47.009Z pub name: String, #[serde(rename = "lighthousesAsRelays")] - pub lighthouses_as_relays: bool + pub lighthouses_as_relays: bool, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -72,16 +72,18 @@ pub struct GetNetworksResponseMetadata { #[serde(default, rename = "nextCursor")] pub next_cursor: Option, #[serde(default)] - pub page: Option + pub page: Option, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct GetNetworksResponseMetadataPage { pub count: u64, - pub start: u64 + pub start: u64, } -fn u64_25() -> u64 { 25 } +fn u64_25() -> u64 { + 25 +} #[derive(Serialize, Deserialize, Debug, Clone)] pub struct GetNetworksQueryParams { @@ -90,17 +92,27 @@ pub struct GetNetworksQueryParams { #[serde(default)] pub cursor: String, #[serde(default = "u64_25", rename = "pageSize")] - pub page_size: u64 + pub page_size: u64, } #[get("/v1/networks")] -pub async fn get_networks(opts: Query, req_info: HttpRequest, db: Data) -> HttpResponse { +pub async fn get_networks( + opts: Query, + req_info: HttpRequest, + db: Data, +) -> HttpResponse { // For this endpoint, you either need to be a fully authenticated user OR a token with networks:list - let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent); - let api_token_info = enforce_api_token(&req_info, &["networks:list"], &db.conn).await.unwrap_or(TokenInfo::NotPresent); + let session_info = enforce_2fa(&req_info, &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); + let api_token_info = enforce_api_token(&req_info, &["networks:list"], &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); // If neither are present, throw an error - if matches!(session_info, TokenInfo::NotPresent) && matches!(api_token_info, TokenInfo::NotPresent) { + if matches!(session_info, TokenInfo::NotPresent) + && matches!(api_token_info, TokenInfo::NotPresent) + { return HttpResponse::Unauthorized().json(APIErrorsResponse { errors: vec![ APIError { @@ -109,11 +121,13 @@ pub async fn get_networks(opts: Query, req_info: HttpReq path: None, } ], - }) + }); } // If both are present, throw an error - if matches!(session_info, TokenInfo::AuthToken(_)) && matches!(api_token_info, TokenInfo::ApiToken(_)) { + if matches!(session_info, TokenInfo::AuthToken(_)) + && matches!(api_token_info, TokenInfo::ApiToken(_)) + { return 
HttpResponse::BadRequest().json(APIErrorsResponse { errors: vec![ APIError { @@ -122,7 +136,7 @@ pub async fn get_networks(opts: Query, req_info: HttpReq path: None } ], - }) + }); } let org = match api_token_info { @@ -131,10 +145,14 @@ pub async fn get_networks(opts: Query, req_info: HttpReq // we have a session token, which means we have to do a db request to get the organization that this user owns let user = match session_info { TokenInfo::AuthToken(tkn) => tkn.session_info.user, - _ => unreachable!() + _ => unreachable!(), }; - let org = match organization::Entity::find().filter(organization::Column::Owner.eq(user.id)).one(&db.conn).await { + let org = match organization::Entity::find() + .filter(organization::Column::Owner.eq(user.id)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -161,7 +179,7 @@ pub async fn get_networks(opts: Query, req_info: HttpReq path: None } ], - }) + }); } } }; @@ -171,18 +189,19 @@ pub async fn get_networks(opts: Query, req_info: HttpReq Err(e) => { error!("invalid cursor: {}", e); return HttpResponse::BadRequest().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_INVALID_CURSOR".to_string(), - message: "The provided cursor was invalid, please try again later.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_INVALID_CURSOR".to_string(), + message: "The provided cursor was invalid, please try again later.".to_string(), + path: None, + }], + }); } }; - let network_pages = network::Entity::find().filter(network::Column::Organization.eq(org)).order_by_asc(network::Column::CreatedAt).paginate(&db.conn, opts.page_size); + let network_pages = network::Entity::find() + .filter(network::Column::Organization.eq(org)) + .order_by_asc(network::Column::CreatedAt) + .paginate(&db.conn, opts.page_size); let total = match network_pages.num_items().await { Ok(r) => r, @@ -230,17 +249,22 @@ pub async fn get_networks(opts: Query, req_info: HttpReq }); } }; - let models_mapped: Vec = models.iter().map(|u| { - GetNetworksResponseData { + let models_mapped: Vec = models + .iter() + .map(|u| GetNetworksResponseData { id: u.id.clone(), cidr: u.cidr.clone(), organization_id: u.organization.clone(), signing_ca_id: u.signing_ca.clone(), - created_at: Utc.timestamp_opt(u.created_at, 0).unwrap().format(TIME_FORMAT).to_string(), + created_at: Utc + .timestamp_opt(u.created_at, 0) + .unwrap() + .format(TIME_FORMAT) + .to_string(), name: u.name.clone(), lighthouses_as_relays: u.lighthouses_as_relays, - } - }).collect(); + }) + .collect(); let count = models_mapped.len() as u64; @@ -248,20 +272,28 @@ pub async fn get_networks(opts: Query, req_info: HttpReq data: models_mapped, metadata: GetNetworksResponseMetadata { total_count: total, - has_next_page: cursor.page+1 != pages, + has_next_page: cursor.page + 1 != pages, has_prev_page: cursor.page != 0, prev_cursor: if cursor.page != 0 { - match (Cursor { page: cursor.page - 1 }).try_into() { + match (Cursor { + page: cursor.page - 1, + }) + .try_into() + { Ok(r) => Some(r), - Err(_) => None + Err(_) => None, } } else { None }, - next_cursor: if cursor.page+1 != pages { - match (Cursor { page: cursor.page + 1 }).try_into() { + next_cursor: if cursor.page + 1 != pages { + match (Cursor { + page: cursor.page + 1, + }) + .try_into() + { Ok(r) => Some(r), - Err(_) => None + Err(_) => None, } } else { None @@ -271,19 +303,31 @@ pub async fn get_networks(opts: Query, req_info: HttpReq count, start: opts.page_size * cursor.page, }) - } else { None }, + } 
else { + None + }, }, }) } #[get("/v1/networks/{network_id}")] -pub async fn get_network_request(net: Path, req_info: HttpRequest, db: Data) -> HttpResponse { +pub async fn get_network_request( + net: Path, + req_info: HttpRequest, + db: Data, +) -> HttpResponse { // For this endpoint, you either need to be a fully authenticated user OR a token with networks:list - let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent); - let api_token_info = enforce_api_token(&req_info, &["networks:read"], &db.conn).await.unwrap_or(TokenInfo::NotPresent); + let session_info = enforce_2fa(&req_info, &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); + let api_token_info = enforce_api_token(&req_info, &["networks:read"], &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); // If neither are present, throw an error - if matches!(session_info, TokenInfo::NotPresent) && matches!(api_token_info, TokenInfo::NotPresent) { + if matches!(session_info, TokenInfo::NotPresent) + && matches!(api_token_info, TokenInfo::NotPresent) + { return HttpResponse::Unauthorized().json(APIErrorsResponse { errors: vec![ APIError { @@ -292,11 +336,13 @@ pub async fn get_network_request(net: Path, req_info: HttpRequest, db: D path: None, } ], - }) + }); } // If both are present, throw an error - if matches!(session_info, TokenInfo::AuthToken(_)) && matches!(api_token_info, TokenInfo::ApiToken(_)) { + if matches!(session_info, TokenInfo::AuthToken(_)) + && matches!(api_token_info, TokenInfo::ApiToken(_)) + { return HttpResponse::BadRequest().json(APIErrorsResponse { errors: vec![ APIError { @@ -305,10 +351,14 @@ pub async fn get_network_request(net: Path, req_info: HttpRequest, db: D path: None } ], - }) + }); } - let network: Option = match network::Entity::find().filter(network::Column::Id.eq(net.into_inner())).one(&db.conn).await { + let network: Option = match network::Entity::find() + .filter(network::Column::Id.eq(net.into_inner())) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -331,7 +381,11 @@ pub async fn get_network_request(net: Path, req_info: HttpRequest, db: D cidr: network.cidr, organization_id: network.organization, signing_ca_id: network.signing_ca, - created_at: Utc.timestamp_opt(network.created_at, 0).unwrap().format(TIME_FORMAT).to_string(), + created_at: Utc + .timestamp_opt(network.created_at, 0) + .unwrap() + .format(TIME_FORMAT) + .to_string(), name: network.name, lighthouses_as_relays: network.lighthouses_as_relays, }, @@ -339,22 +393,19 @@ pub async fn get_network_request(net: Path, req_info: HttpRequest, db: D }) } else { HttpResponse::NotFound().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_MISSING_NETWORK".to_string(), - message: "Network does not exist".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_MISSING_NETWORK".to_string(), + message: "Network does not exist".to_string(), + path: None, + }], }) - } } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct GetNetworkResponse { pub data: GetNetworksResponseData, - pub metadata: GetNetworkResponseMetadata + pub metadata: GetNetworkResponseMetadata, } #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct GetNetworkResponseMetadata {} \ No newline at end of file +pub struct GetNetworkResponseMetadata {} diff --git a/trifid-api/src/routes/v1/organization.rs b/trifid-api/src/routes/v1/organization.rs index 1be91ca..bd11080 100644 --- a/trifid-api/src/routes/v1/organization.rs +++ 
b/trifid-api/src/routes/v1/organization.rs @@ -19,59 +19,66 @@ // While this endpoint is considered done, help is wanted with reverse engineering the original API. Major features should not be added or removed unless it is replacing this endpoint with the correct, DN-compatible endpoint. // This endpoint requires the `definednetworking` extension to be enabled to be used. -use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use actix_web::{HttpRequest, HttpResponse}; -use actix_web::web::{Data, Json}; -use serde::{Serialize, Deserialize}; -use crate::AppState; -use actix_web::post; -use log::error; -use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter}; -use trifid_pki::cert::{NebulaCertificate, NebulaCertificateDetails, serialize_x25519_private}; -use trifid_pki::ed25519_dalek::SigningKey; -use trifid_pki::rand_core::OsRng; -use trifid_api_entities::entity::{network, organization, signing_ca}; use crate::auth_tokens::{enforce_2fa, TokenInfo}; use crate::config::CONFIG; use crate::crypto::{encrypt_with_nonce, generate_random_iv, get_cipher_from_config}; use crate::error::{APIError, APIErrorsResponse}; use crate::tokens::random_id; +use crate::AppState; +use actix_web::post; +use actix_web::web::{Data, Json}; +use actix_web::{HttpRequest, HttpResponse}; +use log::error; +use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter}; +use serde::{Deserialize, Serialize}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use trifid_api_entities::entity::{network, organization, signing_ca}; +use trifid_pki::cert::{serialize_x25519_private, NebulaCertificate, NebulaCertificateDetails}; +use trifid_pki::ed25519_dalek::SigningKey; +use trifid_pki::rand_core::OsRng; #[derive(Serialize, Deserialize)] pub struct OrgCreateRequest { - pub cidr: String + pub cidr: String, } #[derive(Serialize, Deserialize)] pub struct OrgCreateResponse { pub organization: String, pub ca: String, - pub network: String + pub network: String, } #[post("/v1/organization")] -pub async fn create_org_request(req: Json, req_info: HttpRequest, db: Data) -> HttpResponse { +pub async fn create_org_request( + req: Json, + req_info: HttpRequest, + db: Data, +) -> HttpResponse { // For this endpoint, you need to be a fully authenticated user - let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent); - + let session_info = enforce_2fa(&req_info, &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); // we have a session token, which means we have to do a db request to get the organization that this user owns let user = match session_info { TokenInfo::AuthToken(tkn) => tkn.session_info.user, _ => { return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_UNAUTHORIZED".to_string(), - message: "Unauthorized".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: "Unauthorized".to_string(), + path: None, + }], }) } }; - let org = match organization::Entity::find().filter(organization::Column::Owner.eq(&user.id)).one(&db.conn).await { + let org = match organization::Entity::find() + .filter(organization::Column::Owner.eq(&user.id)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -89,14 +96,12 @@ pub async fn create_org_request(req: Json, req_info: HttpReque if org.is_some() { return HttpResponse::BadRequest().json(APIErrorsResponse { - errors: vec![ - APIError { - code: 
"ERR_USER_ALREADY_OWNS_ORG".to_string(), - message: "This user already owns an organization".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_USER_ALREADY_OWNS_ORG".to_string(), + message: "This user already owns an organization".to_string(), + path: None, + }], + }); } let org = organization::Model { @@ -201,7 +206,12 @@ pub async fn create_org_request(req: Json, req_info: HttpReque organization: org.id.clone(), cert: ca_key_encrypted, key: ca_crt, - expires: cert.details.not_after.duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs() as i64, + expires: cert + .details + .not_after + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_secs() as i64, nonce: iv_hex, }; @@ -210,7 +220,10 @@ pub async fn create_org_request(req: Json, req_info: HttpReque cidr: req.cidr.clone(), organization: org.id.clone(), signing_ca: signing_ca.id.clone(), - created_at: SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs() as i64, + created_at: SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_secs() as i64, name: "Network1".to_string(), lighthouses_as_relays: true, }; @@ -274,4 +287,4 @@ pub async fn create_org_request(req: Json, req_info: HttpReque ca: new_signing_ca_id, network: new_network_id, }) -} \ No newline at end of file +} diff --git a/trifid-api/src/routes/v1/roles.rs b/trifid-api/src/routes/v1/roles.rs index 990dfb9..b1a5c0a 100644 --- a/trifid-api/src/routes/v1/roles.rs +++ b/trifid-api/src/routes/v1/roles.rs @@ -35,24 +35,27 @@ // This endpoint has full parity with the original API. It has been recreated from the original API documentation. // This endpoint is considered done. No major features should be added or removed, unless it fixes bugs. 
-use std::time::{SystemTime, UNIX_EPOCH}; -use actix_web::{get, HttpRequest, HttpResponse, post, put}; +use crate::auth_tokens::{enforce_2fa, enforce_api_token, TokenInfo}; +use crate::cursor::Cursor; +use crate::error::{APIError, APIErrorsResponse}; +use crate::timers::TIME_FORMAT; +use crate::tokens::random_id; +use crate::AppState; +use actix_web::delete; use actix_web::web::{Data, Json, Path, Query}; +use actix_web::{get, post, put, HttpRequest, HttpResponse}; use chrono::{TimeZone, Utc}; use log::error; -use serde::{Deserialize, Serialize}; -use crate::AppState; -use crate::auth_tokens::{enforce_2fa, enforce_api_token, TokenInfo}; -use crate::error::{APIError, APIErrorsResponse}; -use trifid_api_entities::entity::organization; -use sea_orm::{EntityTrait, QueryFilter, ColumnTrait, IntoActiveModel, ActiveModelTrait, QueryOrder, PaginatorTrait, ModelTrait}; -use trifid_api_entities::entity::firewall_rule; -use trifid_api_entities::entity::role; -use crate::cursor::Cursor; -use crate::tokens::random_id; -use actix_web::delete; use sea_orm::ActiveValue::Set; -use crate::timers::TIME_FORMAT; +use sea_orm::{ + ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, ModelTrait, PaginatorTrait, + QueryFilter, QueryOrder, +}; +use serde::{Deserialize, Serialize}; +use std::time::{SystemTime, UNIX_EPOCH}; +use trifid_api_entities::entity::firewall_rule; +use trifid_api_entities::entity::organization; +use trifid_api_entities::entity::role; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct CreateRoleRequest { @@ -61,7 +64,7 @@ pub struct CreateRoleRequest { #[serde(default)] pub description: String, #[serde(default, rename = "firewallRules")] - pub firewall_rules: Vec + pub firewall_rules: Vec, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -70,7 +73,7 @@ pub struct UpdateRoleRequest { #[serde(default)] pub description: String, #[serde(default, rename = "firewallRules")] - pub firewall_rules: Vec + pub firewall_rules: Vec, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -93,19 +96,19 @@ pub enum RoleProtocol { #[serde(rename = "UDP")] Udp, #[serde(rename = "ICMP")] - Icmp + Icmp, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct RolePortRange { pub from: u16, - pub to: u16 + pub to: u16, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct RoleCreateResponse { pub data: RoleResponse, - pub metadata: RoleCreateResponseMetadata + pub metadata: RoleCreateResponseMetadata, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -118,20 +121,30 @@ pub struct RoleResponse { #[serde(rename = "createdAt")] pub created_at: String, #[serde(rename = "modifiedAt")] - pub modified_at: String + pub modified_at: String, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct RoleCreateResponseMetadata {} #[post("/v1/roles")] -pub async fn create_role_request(req: Json, req_info: HttpRequest, db: Data) -> HttpResponse { +pub async fn create_role_request( + req: Json, + req_info: HttpRequest, + db: Data, +) -> HttpResponse { // For this endpoint, you either need to be a fully authenticated user OR a token with roles:create - let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent); - let api_token_info = enforce_api_token(&req_info, &["roles:create"], &db.conn).await.unwrap_or(TokenInfo::NotPresent); + let session_info = enforce_2fa(&req_info, &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); + let api_token_info = enforce_api_token(&req_info, &["roles:create"], &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); // If 
neither are present, throw an error - if matches!(session_info, TokenInfo::NotPresent) && matches!(api_token_info, TokenInfo::NotPresent) { + if matches!(session_info, TokenInfo::NotPresent) + && matches!(api_token_info, TokenInfo::NotPresent) + { return HttpResponse::Unauthorized().json(APIErrorsResponse { errors: vec![ APIError { @@ -140,11 +153,13 @@ pub async fn create_role_request(req: Json, req_info: HttpReq path: None, } ], - }) + }); } // If both are present, throw an error - if matches!(session_info, TokenInfo::AuthToken(_)) && matches!(api_token_info, TokenInfo::ApiToken(_)) { + if matches!(session_info, TokenInfo::AuthToken(_)) + && matches!(api_token_info, TokenInfo::ApiToken(_)) + { return HttpResponse::BadRequest().json(APIErrorsResponse { errors: vec![ APIError { @@ -153,7 +168,7 @@ pub async fn create_role_request(req: Json, req_info: HttpReq path: None } ], - }) + }); } let org = match api_token_info { @@ -162,10 +177,14 @@ pub async fn create_role_request(req: Json, req_info: HttpReq // we have a session token, which means we have to do a db request to get the organization that this user owns let user = match session_info { TokenInfo::AuthToken(tkn) => tkn.session_info.user, - _ => unreachable!() + _ => unreachable!(), }; - let org = match organization::Entity::find().filter(organization::Column::Owner.eq(user.id)).one(&db.conn).await { + let org = match organization::Entity::find() + .filter(organization::Column::Owner.eq(user.id)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -192,7 +211,7 @@ pub async fn create_role_request(req: Json, req_info: HttpReq path: None } ], - }) + }); } } }; @@ -202,20 +221,36 @@ pub async fn create_role_request(req: Json, req_info: HttpReq name: req.name.clone(), description: req.description.clone(), organization: org, - created_at: SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs() as i64, - modified_at: SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs() as i64, + created_at: SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_secs() as i64, + modified_at: SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_secs() as i64, }; - let firewall_rules: Vec = req.firewall_rules.iter().map(|i| { - firewall_rule::Model { + let firewall_rules: Vec = req + .firewall_rules + .iter() + .map(|i| firewall_rule::Model { id: random_id("rule"), role: new_role_model.id.clone(), protocol: i.protocol.to_string(), description: i.description.clone(), allowed_role_id: i.allowed_role_id.clone(), - port_range_from: i.port_range.as_ref().unwrap_or(&RolePortRange { from: 0, to: 65535 }).from as i32, - port_range_to: i.port_range.as_ref().unwrap_or(&RolePortRange { from: 0, to: 65535 }).to as i32, - } - }).collect(); + port_range_from: i + .port_range + .as_ref() + .unwrap_or(&RolePortRange { from: 0, to: 65535 }) + .from as i32, + port_range_to: i + .port_range + .as_ref() + .unwrap_or(&RolePortRange { from: 0, to: 65535 }) + .to as i32, + }) + .collect(); let new_role_model_clone = new_role_model.clone(); let firewall_rules_clone = firewall_rules.clone(); @@ -226,14 +261,13 @@ pub async fn create_role_request(req: Json, req_info: HttpReq Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error creating the new role. 
Please try again later".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error creating the new role. Please try again later" + .to_string(), + path: None, + }], + }); } } @@ -244,14 +278,13 @@ pub async fn create_role_request(req: Json, req_info: HttpReq Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error creating the new role. Please try again later".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error creating the new role. Please try again later" + .to_string(), + path: None, + }], + }); } } } @@ -262,8 +295,16 @@ pub async fn create_role_request(req: Json, req_info: HttpReq name: Some(new_role_model_clone.name.clone()), description: Some(new_role_model_clone.description), firewall_rules: req.firewall_rules.clone(), - created_at: Utc.timestamp_opt(new_role_model_clone.created_at, 0).unwrap().format(TIME_FORMAT).to_string(), - modified_at: Utc.timestamp_opt(new_role_model_clone.modified_at, 0).unwrap().format(TIME_FORMAT).to_string(), + created_at: Utc + .timestamp_opt(new_role_model_clone.created_at, 0) + .unwrap() + .format(TIME_FORMAT) + .to_string(), + modified_at: Utc + .timestamp_opt(new_role_model_clone.modified_at, 0) + .unwrap() + .format(TIME_FORMAT) + .to_string(), }, metadata: RoleCreateResponseMetadata {}, }) @@ -275,7 +316,7 @@ impl ToString for RoleProtocol { RoleProtocol::Any => "ANY".to_string(), RoleProtocol::Tcp => "TCP".to_string(), RoleProtocol::Udp => "UDP".to_string(), - RoleProtocol::Icmp => "ICMP".to_string() + RoleProtocol::Icmp => "ICMP".to_string(), } } } @@ -287,15 +328,17 @@ pub struct ListRolesRequestOpts { #[serde(default)] pub cursor: String, #[serde(default = "page_default", rename = "pageSize")] - pub page_size: u64 + pub page_size: u64, } -fn page_default() -> u64 { 25 } +fn page_default() -> u64 { + 25 +} #[derive(Serialize, Deserialize, Debug, Clone)] pub struct GetRolesResponse { pub data: Vec, - pub metadata: GetRolesResponseMetadata + pub metadata: GetRolesResponseMetadata, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -311,23 +354,33 @@ pub struct GetRolesResponseMetadata { #[serde(default, rename = "nextCursor")] pub next_cursor: Option, #[serde(default)] - pub page: Option + pub page: Option, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct GetRolesResponseMetadataPage { pub count: u64, - pub start: u64 + pub start: u64, } #[get("/v1/roles")] -pub async fn get_roles(opts: Query, req_info: HttpRequest, db: Data) -> HttpResponse { +pub async fn get_roles( + opts: Query, + req_info: HttpRequest, + db: Data, +) -> HttpResponse { // For this endpoint, you either need to be a fully authenticated user OR a token with roles:list - let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent); - let api_token_info = enforce_api_token(&req_info, &["roles:list"], &db.conn).await.unwrap_or(TokenInfo::NotPresent); + let session_info = enforce_2fa(&req_info, &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); + let api_token_info = enforce_api_token(&req_info, &["roles:list"], &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); // If neither are present, throw an error - if matches!(session_info, TokenInfo::NotPresent) && matches!(api_token_info, TokenInfo::NotPresent) { + if 
matches!(session_info, TokenInfo::NotPresent) + && matches!(api_token_info, TokenInfo::NotPresent) + { return HttpResponse::Unauthorized().json(APIErrorsResponse { errors: vec![ APIError { @@ -336,11 +389,13 @@ pub async fn get_roles(opts: Query, req_info: HttpRequest, path: None, } ], - }) + }); } // If both are present, throw an error - if matches!(session_info, TokenInfo::AuthToken(_)) && matches!(api_token_info, TokenInfo::ApiToken(_)) { + if matches!(session_info, TokenInfo::AuthToken(_)) + && matches!(api_token_info, TokenInfo::ApiToken(_)) + { return HttpResponse::BadRequest().json(APIErrorsResponse { errors: vec![ APIError { @@ -349,7 +404,7 @@ pub async fn get_roles(opts: Query, req_info: HttpRequest, path: None } ], - }) + }); } let org = match api_token_info { @@ -358,10 +413,14 @@ pub async fn get_roles(opts: Query, req_info: HttpRequest, // we have a session token, which means we have to do a db request to get the organization that this user owns let user = match session_info { TokenInfo::AuthToken(tkn) => tkn.session_info.user, - _ => unreachable!() + _ => unreachable!(), }; - let org = match organization::Entity::find().filter(organization::Column::Owner.eq(user.id)).one(&db.conn).await { + let org = match organization::Entity::find() + .filter(organization::Column::Owner.eq(user.id)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -388,7 +447,7 @@ pub async fn get_roles(opts: Query, req_info: HttpRequest, path: None } ], - }) + }); } } }; @@ -398,18 +457,19 @@ pub async fn get_roles(opts: Query, req_info: HttpRequest, Err(e) => { error!("invalid cursor: {}", e); return HttpResponse::BadRequest().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_INVALID_CURSOR".to_string(), - message: "The provided cursor was invalid, please try again later.".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_INVALID_CURSOR".to_string(), + message: "The provided cursor was invalid, please try again later.".to_string(), + path: None, + }], + }); } }; - let network_pages = role::Entity::find().filter(role::Column::Organization.eq(org)).order_by_asc(role::Column::CreatedAt).paginate(&db.conn, opts.page_size); + let network_pages = role::Entity::find() + .filter(role::Column::Organization.eq(org)) + .order_by_asc(role::Column::CreatedAt) + .paginate(&db.conn, opts.page_size); let total = match network_pages.num_items().await { Ok(r) => r, @@ -462,7 +522,11 @@ pub async fn get_roles(opts: Query, req_info: HttpRequest, for u in models { // fetch firewall rules - let rules = match firewall_rule::Entity::find().filter(firewall_rule::Column::Role.eq(&u.id)).all(&db.conn).await { + let rules = match firewall_rule::Entity::find() + .filter(firewall_rule::Column::Role.eq(&u.id)) + .all(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -478,41 +542,52 @@ pub async fn get_roles(opts: Query, req_info: HttpRequest, } }; - let rules: Vec = rules.iter().map(|r| { - let protocol = match r.protocol.as_str() { - "ANY" => RoleProtocol::Any, - "TCP" => RoleProtocol::Tcp, - "UDP" => RoleProtocol::Udp, - "ICMP" => RoleProtocol::Icmp, - _ => unreachable!("database has been corrupted or manually edited") - }; + let rules: Vec = rules + .iter() + .map(|r| { + let protocol = match r.protocol.as_str() { + "ANY" => RoleProtocol::Any, + "TCP" => RoleProtocol::Tcp, + "UDP" => RoleProtocol::Udp, + "ICMP" => RoleProtocol::Icmp, + _ => unreachable!("database has been corrupted or manually edited"), + }; + let 
port_range = if r.port_range_from == 0 && r.port_range_to == 65535 + || matches!(protocol, RoleProtocol::Icmp) + { + None + } else { + Some(RolePortRange { + from: r.port_range_from as u16, + to: r.port_range_to as u16, + }) + }; - - let port_range = if r.port_range_from == 0 && r.port_range_to == 65535 || matches!(protocol, RoleProtocol::Icmp) { - None - } else { - Some(RolePortRange { - from: r.port_range_from as u16, - to: r.port_range_to as u16, - }) - }; - - RoleFirewallRule { - protocol, - description: r.description.clone(), - allowed_role_id: r.allowed_role_id.clone(), - port_range, - } - }).collect(); + RoleFirewallRule { + protocol, + description: r.description.clone(), + allowed_role_id: r.allowed_role_id.clone(), + port_range, + } + }) + .collect(); models_mapped.push(RoleResponse { id: Some(u.id.clone()), name: Some(u.name), description: Some(u.description), firewall_rules: rules, - created_at: Utc.timestamp_opt(u.created_at, 0).unwrap().format(TIME_FORMAT).to_string(), - modified_at: Utc.timestamp_opt(u.modified_at, 0).unwrap().format(TIME_FORMAT).to_string(), + created_at: Utc + .timestamp_opt(u.created_at, 0) + .unwrap() + .format(TIME_FORMAT) + .to_string(), + modified_at: Utc + .timestamp_opt(u.modified_at, 0) + .unwrap() + .format(TIME_FORMAT) + .to_string(), }) } @@ -522,20 +597,28 @@ pub async fn get_roles(opts: Query, req_info: HttpRequest, data: models_mapped, metadata: GetRolesResponseMetadata { total_count: total, - has_next_page: cursor.page+1 != pages, + has_next_page: cursor.page + 1 != pages, has_prev_page: cursor.page != 0, prev_cursor: if cursor.page != 0 { - match (Cursor { page: cursor.page - 1 }).try_into() { + match (Cursor { + page: cursor.page - 1, + }) + .try_into() + { Ok(r) => Some(r), - Err(_) => None + Err(_) => None, } } else { None }, - next_cursor: if cursor.page+1 != pages { - match (Cursor { page: cursor.page + 1 }).try_into() { + next_cursor: if cursor.page + 1 != pages { + match (Cursor { + page: cursor.page + 1, + }) + .try_into() + { Ok(r) => Some(r), - Err(_) => None + Err(_) => None, } } else { None @@ -545,19 +628,31 @@ pub async fn get_roles(opts: Query, req_info: HttpRequest, count, start: opts.page_size * cursor.page, }) - } else { None }, + } else { + None + }, }, }) } #[get("/v1/roles/{role_id}")] -pub async fn get_role(net: Path, req_info: HttpRequest, db: Data) -> HttpResponse { +pub async fn get_role( + net: Path, + req_info: HttpRequest, + db: Data, +) -> HttpResponse { // For this endpoint, you either need to be a fully authenticated user OR a token with roles:read - let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent); - let api_token_info = enforce_api_token(&req_info, &["roles:read"], &db.conn).await.unwrap_or(TokenInfo::NotPresent); + let session_info = enforce_2fa(&req_info, &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); + let api_token_info = enforce_api_token(&req_info, &["roles:read"], &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); // If neither are present, throw an error - if matches!(session_info, TokenInfo::NotPresent) && matches!(api_token_info, TokenInfo::NotPresent) { + if matches!(session_info, TokenInfo::NotPresent) + && matches!(api_token_info, TokenInfo::NotPresent) + { return HttpResponse::Unauthorized().json(APIErrorsResponse { errors: vec![ APIError { @@ -566,11 +661,13 @@ pub async fn get_role(net: Path, req_info: HttpRequest, db: Data, req_info: HttpRequest, db: Data = match 
role::Entity::find().filter(role::Column::Id.eq(net.into_inner())).one(&db.conn).await { + let role: Option = match role::Entity::find() + .filter(role::Column::Id.eq(net.into_inner())) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -598,8 +699,13 @@ pub async fn get_role(net: Path, req_info: HttpRequest, db: Data r, Err(e) => { error!("database error: {}", e); @@ -615,34 +721,36 @@ pub async fn get_role(net: Path, req_info: HttpRequest, db: Data = rules.iter().map(|r| { - let protocol = match r.protocol.as_str() { - "ANY" => RoleProtocol::Any, - "TCP" => RoleProtocol::Tcp, - "UDP" => RoleProtocol::Udp, - "ICMP" => RoleProtocol::Icmp, - _ => unreachable!("database has been corrupted or manually edited") - }; + let rules: Vec = rules + .iter() + .map(|r| { + let protocol = match r.protocol.as_str() { + "ANY" => RoleProtocol::Any, + "TCP" => RoleProtocol::Tcp, + "UDP" => RoleProtocol::Udp, + "ICMP" => RoleProtocol::Icmp, + _ => unreachable!("database has been corrupted or manually edited"), + }; + let port_range = if r.port_range_from == 0 && r.port_range_to == 65535 + || matches!(protocol, RoleProtocol::Icmp) + { + None + } else { + Some(RolePortRange { + from: r.port_range_from as u16, + to: r.port_range_to as u16, + }) + }; - - let port_range = if r.port_range_from == 0 && r.port_range_to == 65535 || matches!(protocol, RoleProtocol::Icmp) { - None - } else { - Some(RolePortRange { - from: r.port_range_from as u16, - to: r.port_range_to as u16, - }) - }; - - RoleFirewallRule { - protocol, - description: r.description.clone(), - allowed_role_id: r.allowed_role_id.clone(), - port_range, - } - }).collect(); - + RoleFirewallRule { + protocol, + description: r.description.clone(), + allowed_role_id: r.allowed_role_id.clone(), + port_range, + } + }) + .collect(); HttpResponse::Ok().json(GetRoleResponse { data: RoleResponse { @@ -650,41 +758,56 @@ pub async fn get_role(net: Path, req_info: HttpRequest, db: Data, req_info: HttpRequest, db: Data) -> HttpResponse { +pub async fn delete_role( + role: Path, + req_info: HttpRequest, + db: Data, +) -> HttpResponse { // For this endpoint, you either need to be a fully authenticated user OR a token with roles:delete - let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent); - let api_token_info = enforce_api_token(&req_info, &["roles:delete"], &db.conn).await.unwrap_or(TokenInfo::NotPresent); + let session_info = enforce_2fa(&req_info, &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); + let api_token_info = enforce_api_token(&req_info, &["roles:delete"], &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); // If neither are present, throw an error - if matches!(session_info, TokenInfo::NotPresent) && matches!(api_token_info, TokenInfo::NotPresent) { + if matches!(session_info, TokenInfo::NotPresent) + && matches!(api_token_info, TokenInfo::NotPresent) + { return HttpResponse::Unauthorized().json(APIErrorsResponse { errors: vec![ APIError { @@ -693,11 +816,13 @@ pub async fn delete_role(role: Path, req_info: HttpRequest, db: Data, req_info: HttpRequest, db: Data = match role::Entity::find().filter(role::Column::Id.eq(role.into_inner())).one(&db.conn).await { + let role: Option = match role::Entity::find() + .filter(role::Column::Id.eq(role.into_inner())) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -748,13 +877,11 @@ pub async fn delete_role(role: Path, req_info: HttpRequest, db: Data, req_info: HttpRequest, db: Data + pub 
firewall_rules: Vec, } #[put("/v1/roles/{role_id}")] -pub async fn update_role_request(role: Path, req: Json, req_info: HttpRequest, db: Data) -> HttpResponse { +pub async fn update_role_request( + role: Path, + req: Json, + req_info: HttpRequest, + db: Data, +) -> HttpResponse { // For this endpoint, you either need to be a fully authenticated user OR a token with roles:create - let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent); - let api_token_info = enforce_api_token(&req_info, &["roles:create"], &db.conn).await.unwrap_or(TokenInfo::NotPresent); + let session_info = enforce_2fa(&req_info, &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); + let api_token_info = enforce_api_token(&req_info, &["roles:create"], &db.conn) + .await + .unwrap_or(TokenInfo::NotPresent); // If neither are present, throw an error - if matches!(session_info, TokenInfo::NotPresent) && matches!(api_token_info, TokenInfo::NotPresent) { + if matches!(session_info, TokenInfo::NotPresent) + && matches!(api_token_info, TokenInfo::NotPresent) + { return HttpResponse::Unauthorized().json(APIErrorsResponse { errors: vec![ APIError { @@ -794,11 +932,13 @@ pub async fn update_role_request(role: Path, req: Json, req: Json tkn.organization, + match api_token_info { + TokenInfo::ApiToken(_) => (), _ => { // we have a session token, which means we have to do a db request to get the organization that this user owns let user = match session_info { TokenInfo::AuthToken(tkn) => tkn.session_info.user, - _ => unreachable!() + _ => unreachable!(), }; - let org = match organization::Entity::find().filter(organization::Column::Owner.eq(user.id)).one(&db.conn).await { + let org = match organization::Entity::find() + .filter(organization::Column::Owner.eq(user.id)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -835,9 +979,7 @@ pub async fn update_role_request(role: Path, req: Json, req: Json = match firewall_rule::Entity::find().filter(firewall_rule::Column::Role.eq(role.clone())).all(&db.conn).await { + let existing_rules: Vec = match firewall_rule::Entity::find() + .filter(firewall_rule::Column::Role.eq(role.clone())) + .all(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); @@ -885,7 +1031,11 @@ pub async fn update_role_request(role: Path, req: Json r, Err(e) => { error!("database error: {}", e); @@ -905,20 +1055,23 @@ pub async fn update_role_request(role: Path, req: Json r, None => { return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_UNAUTHORIZED".to_string(), - message: "This resource does not exist or you do not have permission to access it.".to_string(), - path: None - } - ] + errors: vec![APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: + "This resource does not exist or you do not have permission to access it." 
+ .to_string(), + path: None, + }], }) } }; let mut role_active_model = role.clone().into_active_model(); - role_active_model.modified_at = Set(SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs() as i64); + role_active_model.modified_at = Set(SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_secs() as i64); role_active_model.description = Set(req.description.clone()); let role = match role_active_model.update(&db.conn).await { @@ -937,17 +1090,27 @@ pub async fn update_role_request(role: Path, req: Json = req.firewall_rules.iter().map(|i| { - firewall_rule::Model { + let firewall_rules: Vec = req + .firewall_rules + .iter() + .map(|i| firewall_rule::Model { id: random_id("rule"), role: role.id.clone(), protocol: i.protocol.to_string(), description: i.description.clone(), allowed_role_id: i.allowed_role_id.clone(), - port_range_from: i.port_range.as_ref().unwrap_or(&RolePortRange { from: 0, to: 65535 }).from as i32, - port_range_to: i.port_range.as_ref().unwrap_or(&RolePortRange { from: 0, to: 65535 }).to as i32, - } - }).collect(); + port_range_from: i + .port_range + .as_ref() + .unwrap_or(&RolePortRange { from: 0, to: 65535 }) + .from as i32, + port_range_to: i + .port_range + .as_ref() + .unwrap_or(&RolePortRange { from: 0, to: 65535 }) + .to as i32, + }) + .collect(); let firewall_rules_clone = firewall_rules.clone(); @@ -958,14 +1121,13 @@ pub async fn update_role_request(role: Path, req: Json { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error creating the new role. Please try again later".to_string(), - path: None - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error creating the new role. Please try again later" + .to_string(), + path: None, + }], + }); } } } @@ -976,8 +1138,16 @@ pub async fn update_role_request(role: Path, req: Json, - pub metadata: SignupResponseMetadata + pub metadata: SignupResponseMetadata, } #[derive(Serialize, Deserialize, Clone, Debug)] pub struct SignupResponseData {} @@ -50,37 +50,39 @@ pub struct SignupResponseMetadata {} #[post("/v1/signup")] pub async fn signup_request(data: Data, req: Json) -> HttpResponse { - let user: Vec = match UserEntity::find().filter(user::Column::Email.eq(&req.email)).all(&data.conn).await { + let user: Vec = match UserEntity::find() + .filter(user::Column::Email.eq(&req.email)) + .all(&data.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database request, please try again later.".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: + "There was an error with the database request, please try again later." 
+ .to_string(), + path: None, + }], + }); } }; if !user.is_empty() { return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_USER_EXISTS".to_string(), - message: "That user already exists.".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_USER_EXISTS".to_string(), + message: "That user already exists.".to_string(), + path: None, + }], + }); } let model = user::Model { id: random_id("user"), - email: req.email.clone() + email: req.email.clone(), }; let id = model.id.clone(); @@ -91,14 +93,14 @@ pub async fn signup_request(data: Data, req: Json) -> H Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database request, please try again later.".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: + "There was an error with the database request, please try again later." + .to_string(), + path: None, + }], + }); } } @@ -113,14 +115,14 @@ pub async fn signup_request(data: Data, req: Json) -> H Err(e) => { error!("error sending magic link: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_ML_ERROR".to_string(), - message: "There was an error sending the magic link email, please try again later.".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_ML_ERROR".to_string(), + message: + "There was an error sending the magic link email, please try again later." + .to_string(), + path: None, + }], + }); } } @@ -131,19 +133,19 @@ pub async fn signup_request(data: Data, req: Json) -> H Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database request, please try again later.".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: + "There was an error with the database request, please try again later." + .to_string(), + path: None, + }], + }); } } HttpResponse::Ok().json(SignupResponse { data: None, - metadata: SignupResponseMetadata {} + metadata: SignupResponseMetadata {}, }) -} \ No newline at end of file +} diff --git a/trifid-api/src/routes/v1/totp_authenticators.rs b/trifid-api/src/routes/v1/totp_authenticators.rs index d20a621..e80b3dc 100644 --- a/trifid-api/src/routes/v1/totp_authenticators.rs +++ b/trifid-api/src/routes/v1/totp_authenticators.rs @@ -19,19 +19,21 @@ // This endpoint is considered done. No major features should be added or removed, unless it fixes bugs. // This endpoint requires the `definednetworking` extension to be enabled to be used. 
-use serde::{Serialize, Deserialize}; -use actix_web::{HttpRequest, HttpResponse, post}; -use actix_web::web::{Data, Json}; -use log::error; -use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, ModelTrait, QueryFilter}; -use totp_rs::{Algorithm, Secret, TOTP}; -use crate::AppState; use crate::auth_tokens::{enforce_session, TokenInfo}; -use crate::error::{APIError, APIErrorsResponse}; -use trifid_api_entities::entity::totp_authenticator; use crate::config::CONFIG; +use crate::error::{APIError, APIErrorsResponse}; use crate::timers::expires_in_seconds; -use crate::tokens::{random_token}; +use crate::tokens::random_token; +use crate::AppState; +use actix_web::web::{Data, Json}; +use actix_web::{post, HttpRequest, HttpResponse}; +use log::error; +use sea_orm::{ + ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, ModelTrait, QueryFilter, +}; +use serde::{Deserialize, Serialize}; +use totp_rs::{Algorithm, Secret, TOTP}; +use trifid_api_entities::entity::totp_authenticator; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct TotpAuthenticatorsRequest {} @@ -54,55 +56,57 @@ pub struct TotpAuthenticatorsResponse { } #[post("/v1/totp-authenticators")] -pub async fn totp_authenticators_request(db: Data, req_data: HttpRequest, _req: Json) -> HttpResponse { +pub async fn totp_authenticators_request( + db: Data, + req_data: HttpRequest, + _req: Json, +) -> HttpResponse { // require a user session let session_token = match enforce_session(&req_data, &db.conn).await { - Ok(r) => { - match r { - TokenInfo::SessionToken(i) => i, - _ => unreachable!() - } - } + Ok(r) => match r { + TokenInfo::SessionToken(i) => i, + _ => unreachable!(), + }, Err(e) => { error!("error enforcing session: {}", e); return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_UNAUTHORIZED".to_string(), - message: "Unauthorized".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: "Unauthorized".to_string(), + path: None, + }], }); } }; // determine if the user has a totp authenticator - let auther = match totp_authenticator::Entity::find().filter(totp_authenticator::Column::User.eq(&session_token.user.id)).one(&db.conn).await { + let auther = match totp_authenticator::Entity::find() + .filter(totp_authenticator::Column::User.eq(&session_token.user.id)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database request, please try again later.".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: + "There was an error with the database request, please try again later." 
+ .to_string(), + path: None, + }], }); } }; if let Some(auther) = auther { if auther.verified { return HttpResponse::BadRequest().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_ALREADY_HAS_TOTP".to_string(), - message: "This user already has a totp authenticator".to_string(), - path: None, - } - ] + errors: vec![APIError { + code: "ERR_ALREADY_HAS_TOTP".to_string(), + message: "This user already has a totp authenticator".to_string(), + path: None, + }], }); } match auther.delete(&db.conn).await { @@ -110,38 +114,48 @@ pub async fn totp_authenticators_request(db: Data, req_data: HttpReque Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database request, please try again later.".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: + "There was an error with the database request, please try again later." + .to_string(), + path: None, + }], + }); } } } let secret = Secret::generate_secret(); - let totpmachine = match TOTP::new(Algorithm::SHA1, 6, 1, 30, secret.to_bytes().expect("Invalid randomized data"), Some("trifid-api".to_string()), session_token.user.email) { + let totpmachine = match TOTP::new( + Algorithm::SHA1, + 6, + 1, + 30, + secret.to_bytes().expect("Invalid randomized data"), + Some("trifid-api".to_string()), + session_token.user.email, + ) { Ok(m) => m, Err(e) => { error!("totp machine create error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_SECRET_ERR".to_string(), - message: "There was an error configuring the authenticator, please try again later.".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_SECRET_ERR".to_string(), + message: + "There was an error configuring the authenticator, please try again later." + .to_string(), + path: None, + }], }); } }; let model = totp_authenticator::Model { id: random_token("totp"), - secret: Secret::Raw(totpmachine.secret.clone()).to_encoded().to_string(), + secret: Secret::Raw(totpmachine.secret.clone()) + .to_encoded() + .to_string(), url: totpmachine.get_url(), verified: false, expires_on: expires_in_seconds(CONFIG.tokens.totp_setup_timeout_time_seconds) as i64, @@ -157,14 +171,14 @@ pub async fn totp_authenticators_request(db: Data, req_data: HttpReque Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database request, please try again later.".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: + "There was an error with the database request, please try again later." 
+ .to_string(), + path: None, + }], + }); } } @@ -176,4 +190,4 @@ pub async fn totp_authenticators_request(db: Data, req_data: HttpReque }, metadata: TotpAuthenticatorsResponseMetadata {}, }) -} \ No newline at end of file +} diff --git a/trifid-api/src/routes/v1/trifid.rs b/trifid-api/src/routes/v1/trifid.rs index a7f9fb7..3445887 100644 --- a/trifid-api/src/routes/v1/trifid.rs +++ b/trifid-api/src/routes/v1/trifid.rs @@ -32,14 +32,19 @@ // If the request returns a non-200 response, or does not follow the typical TrifidExtensions schema, that server should be assumed to only support t+features:definednetworking. // Endpoint specs (#REQTYPE) can indicate they require a feature by adding t+features:[feature] -use actix_web::{HttpResponse, get}; +use actix_web::{get, HttpResponse}; use serde::{Deserialize, Serialize}; -pub const SUPPORTED_EXTENSIONS: &[&str] = &["definednetworking", "trifidextensions", "extended_roles", "extended_hosts"]; +pub const SUPPORTED_EXTENSIONS: &[&str] = &[ + "definednetworking", + "trifidextensions", + "extended_roles", + "extended_hosts", +]; #[derive(Serialize, Deserialize)] pub struct TrifidExtensionsResponse { - pub extensions: Vec + pub extensions: Vec, } #[get("/v1/trifid_extensions")] @@ -47,4 +52,4 @@ pub async fn trifid_extensions() -> HttpResponse { HttpResponse::Ok().json(TrifidExtensionsResponse { extensions: SUPPORTED_EXTENSIONS.iter().map(|u| u.to_string()).collect(), }) -} \ No newline at end of file +} diff --git a/trifid-api/src/routes/v1/verify_totp_authenticators.rs b/trifid-api/src/routes/v1/verify_totp_authenticators.rs index 2199c1d..418fead 100644 --- a/trifid-api/src/routes/v1/verify_totp_authenticators.rs +++ b/trifid-api/src/routes/v1/verify_totp_authenticators.rs @@ -19,81 +19,85 @@ // This endpoint is considered done. No major features should be added or removed, unless it fixes bugs. // This endpoint requires the `definednetworking` extension to be enabled to be used. 
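// --- Illustrative sketch (not part of the patch) ---------------------------
// The comments in trifid.rs above describe the client side of extension
// discovery: query GET /v1/trifid_extensions, and treat a non-200 response or
// a body that does not follow the TrifidExtensionsResponse schema as a server
// that only supports `definednetworking`. A minimal, hedged sketch of that
// rule follows; the `extensions_from_reply` helper and the use of
// serde/serde_json here are assumptions of this sketch, not code from
// trifid-api itself.
use serde::Deserialize;

// Mirrors the response schema defined in trifid.rs.
#[derive(Deserialize)]
struct TrifidExtensionsResponse {
    extensions: Vec<String>,
}

// Decide which extensions a server supports from its raw HTTP reply.
fn extensions_from_reply(status: u16, body: &str) -> Vec<String> {
    if status != 200 {
        // Non-200 response: assume only the baseline extension.
        return vec!["definednetworking".to_string()];
    }
    match serde_json::from_str::<TrifidExtensionsResponse>(body) {
        Ok(resp) => resp.extensions,
        // Body does not follow the TrifidExtensions schema: same fallback.
        Err(_) => vec!["definednetworking".to_string()],
    }
}

fn main() {
    // A server running this patch would reply with something like:
    let body = r#"{"extensions":["definednetworking","trifidextensions","extended_roles","extended_hosts"]}"#;
    assert_eq!(extensions_from_reply(200, body).len(), 4);
    // An older or non-conforming server falls back to the baseline.
    assert_eq!(
        extensions_from_reply(404, ""),
        vec!["definednetworking".to_string()]
    );
}
// ---------------------------------------------------------------------------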
-use actix_web::{HttpRequest, HttpResponse, post}; -use actix_web::web::{Data, Json}; -use log::{debug, error}; -use serde::{Serialize, Deserialize}; -use trifid_api_entities::entity::totp_authenticator; -use crate::AppState; use crate::auth_tokens::{enforce_session, TokenInfo}; -use crate::error::{APIError, APIErrorsResponse}; -use sea_orm::{EntityTrait, QueryFilter, ColumnTrait, IntoActiveModel, ActiveModelTrait}; -use sea_orm::ActiveValue::Set; -use totp_rs::{Secret, TOTP}; -use trifid_api_entities::entity::auth_token; use crate::config::CONFIG; +use crate::error::{APIError, APIErrorsResponse}; use crate::timers::expires_in_seconds; use crate::tokens::random_token; +use crate::AppState; +use actix_web::web::{Data, Json}; +use actix_web::{post, HttpRequest, HttpResponse}; +use log::{debug, error}; +use sea_orm::ActiveValue::Set; +use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter}; +use serde::{Deserialize, Serialize}; +use totp_rs::{Secret, TOTP}; +use trifid_api_entities::entity::auth_token; +use trifid_api_entities::entity::totp_authenticator; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct VerifyTotpAuthenticatorsRequest { #[serde(rename = "totpToken")] pub totp_token: String, - pub code: String + pub code: String, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct VerifyTotpAuthenticatorsResponse { pub data: VerifyTotpAuthenticatorsResponseData, - pub metadata: VerifyTotpAuthenticatorsResponseMetadata + pub metadata: VerifyTotpAuthenticatorsResponseMetadata, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct VerifyTotpAuthenticatorsResponseData { #[serde(rename = "authToken")] - pub auth_token: String + pub auth_token: String, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct VerifyTotpAuthenticatorsResponseMetadata {} #[post("/v1/verify-totp-authenticators")] -pub async fn verify_totp_authenticators_request(req: Json, req_data: HttpRequest, db: Data) -> HttpResponse { +pub async fn verify_totp_authenticators_request( + req: Json, + req_data: HttpRequest, + db: Data, +) -> HttpResponse { // require a user session let session_token = match enforce_session(&req_data, &db.conn).await { - Ok(r) => { - match r { - TokenInfo::SessionToken(i) => i, - _ => unreachable!() - } - } + Ok(r) => match r { + TokenInfo::SessionToken(i) => i, + _ => unreachable!(), + }, Err(e) => { error!("error enforcing session: {}", e); return HttpResponse::Unauthorized().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_UNAUTHORIZED".to_string(), - message: "Unauthorized".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_UNAUTHORIZED".to_string(), + message: "Unauthorized".to_string(), + path: None, + }], }); } }; // determine if the user has a totp authenticator - let auther = match totp_authenticator::Entity::find().filter(totp_authenticator::Column::Id.eq(&req.totp_token)).one(&db.conn).await { + let auther = match totp_authenticator::Entity::find() + .filter(totp_authenticator::Column::Id.eq(&req.totp_token)) + .one(&db.conn) + .await + { Ok(r) => r, Err(e) => { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error with the database request, please try again later.".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: + "There was an error with the database request, please try again later." 
+ .to_string(), + path: None, + }], }); } }; @@ -101,26 +105,22 @@ pub async fn verify_totp_authenticators_request(req: Json { if a.verified { return HttpResponse::BadRequest().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_ALREADY_HAS_TOTP".to_string(), - message: "This user already has a totp authenticator".to_string(), - path: None, - } - ] + errors: vec![APIError { + code: "ERR_ALREADY_HAS_TOTP".to_string(), + message: "This user already has a totp authenticator".to_string(), + path: None, + }], }); } a - }, + } None => { return HttpResponse::BadRequest().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_USER_NO_TOTP".to_string(), - message: "This user does not have a totp authenticator".to_string(), - path: None, - } - ] + errors: vec![APIError { + code: "ERR_USER_NO_TOTP".to_string(), + message: "This user does not have a totp authenticator".to_string(), + path: None, + }], }); } }; @@ -131,30 +131,26 @@ pub async fn verify_totp_authenticators_request(req: Json { error!("totp url error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_SECRET_ERROR".to_string(), - message: "There was an error parsing the totpmachine. Please try again later.".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_SECRET_ERROR".to_string(), + message: "There was an error parsing the totpmachine. Please try again later." + .to_string(), + path: None, + }], }); } }; - let valid = match totpmachine.check_current(&req.code) { Ok(valid) => valid, Err(e) => { error!("system time error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_TIME_ERROR".to_string(), - message: "There was an with the server-side time clock.".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_TIME_ERROR".to_string(), + message: "There was an with the server-side time clock.".to_string(), + path: None, + }], }); } }; @@ -163,14 +159,12 @@ pub async fn verify_totp_authenticators_request(req: Json { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error updating the totpmachine, please try again later.".to_string(), - path: None, - } - ], - }) + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error updating the totpmachine, please try again later." 
+ .to_string(), + path: None, + }], + }); } } @@ -205,13 +198,11 @@ pub async fn verify_totp_authenticators_request(req: Json { error!("database error: {}", e); return HttpResponse::InternalServerError().json(APIErrorsResponse { - errors: vec![ - APIError { - code: "ERR_DB_ERROR".to_string(), - message: "There was an error issuing the authentication token.".to_string(), - path: None, - } - ], + errors: vec![APIError { + code: "ERR_DB_ERROR".to_string(), + message: "There was an error issuing the authentication token.".to_string(), + path: None, + }], }); } } @@ -220,4 +211,4 @@ pub async fn verify_totp_authenticators_request(req: Json u64 { - (SystemTime::now() + Duration::from_secs(seconds)).duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs() + (SystemTime::now() + Duration::from_secs(seconds)) + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_secs() } pub fn expired(time: u64) -> bool { UNIX_EPOCH + Duration::from_secs(time) < SystemTime::now() -} \ No newline at end of file +} diff --git a/trifid-api/src/tokens.rs b/trifid-api/src/tokens.rs index 2ab6afa..cb276f1 100644 --- a/trifid-api/src/tokens.rs +++ b/trifid-api/src/tokens.rs @@ -20,7 +20,8 @@ use rand::Rng; pub const ID_CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; pub const ID_LEN: u32 = 26; -pub const TOKEN_CHARSET: &[u8] = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_"; +pub const TOKEN_CHARSET: &[u8] = + b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_"; pub const TOKEN_LEN: u32 = 43; // 26 @@ -38,16 +39,22 @@ pub fn random_id_no_id() -> HeaderValue { // 43 // format: [TYPE]-[43 chars] pub fn random_token(identifier: &str) -> String { - format!("{}-{}", identifier, random_with_charset(TOKEN_LEN, TOKEN_CHARSET)) + format!( + "{}-{}", + identifier, + random_with_charset(TOKEN_LEN, TOKEN_CHARSET) + ) } fn random_with_charset(len: u32, charset: &[u8]) -> String { - (0..len).map(|_| { - let idx = rand::thread_rng().gen_range(0..charset.len()); - charset[idx] as char - }).collect() + (0..len) + .map(|_| { + let idx = rand::thread_rng().gen_range(0..charset.len()); + charset[idx] as char + }) + .collect() } pub fn get_token_type(token: &str) -> Option<&str> { - token.split('-').collect::>().get(0).copied() -} \ No newline at end of file + token.split('-').collect::>().first().copied() +} diff --git a/trifid-api/trifid_api_entities/src/lib.rs b/trifid-api/trifid_api_entities/src/lib.rs index bccca66..e8c3d6a 100644 --- a/trifid-api/trifid_api_entities/src/lib.rs +++ b/trifid-api/trifid_api_entities/src/lib.rs @@ -1 +1 @@ -pub mod entity; \ No newline at end of file +pub mod entity; diff --git a/trifid-api/trifid_api_migration/src/m20230402_162601_create_table_users.rs b/trifid-api/trifid_api_migration/src/m20230402_162601_create_table_users.rs index 1e04c6a..abbe7dd 100644 --- a/trifid-api/trifid_api_migration/src/m20230402_162601_create_table_users.rs +++ b/trifid-api/trifid_api_migration/src/m20230402_162601_create_table_users.rs @@ -13,12 +13,15 @@ impl MigrationTrait for Migration { .if_not_exists() .col(ColumnDef::new(User::Id).string().not_null().primary_key()) .col(ColumnDef::new(User::Email).string().not_null().unique_key()) - .to_owned() - ).await + .to_owned(), + ) + .await } async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.drop_table(Table::drop().table(User::Table).to_owned()).await + manager + .drop_table(Table::drop().table(User::Table).to_owned()) + .await } } @@ -27,5 +30,5 @@ impl MigrationTrait for 
Migration { pub enum User { Table, Id, - Email + Email, } diff --git a/trifid-api/trifid_api_migration/src/m20230402_183515_create_table_magic_links.rs b/trifid-api/trifid_api_migration/src/m20230402_183515_create_table_magic_links.rs index 43c70df..4cd2bb6 100644 --- a/trifid-api/trifid_api_migration/src/m20230402_183515_create_table_magic_links.rs +++ b/trifid-api/trifid_api_migration/src/m20230402_183515_create_table_magic_links.rs @@ -1,6 +1,5 @@ -use sea_orm_migration::prelude::*; use crate::m20230402_162601_create_table_users::User; - +use sea_orm_migration::prelude::*; #[derive(DeriveMigrationName)] pub struct Migration; @@ -8,26 +7,40 @@ pub struct Migration; #[async_trait::async_trait] impl MigrationTrait for Migration { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.create_table( - Table::create() - .table(MagicLink::Table) - .if_not_exists() - .col(ColumnDef::new(MagicLink::Id).string().not_null().primary_key()) - .col(ColumnDef::new(MagicLink::User).string().not_null()) - .col(ColumnDef::new(MagicLink::ExpiresOn).big_integer().not_null()) - .foreign_key( - ForeignKey::create() - .name("fk_magiclink_user_users_id") - .from(MagicLink::Table, MagicLink::User) - .to(User::Table, User::Id) - .on_delete(ForeignKeyAction::Cascade) - .on_update(ForeignKeyAction::Cascade) - ).to_owned() - ).await + manager + .create_table( + Table::create() + .table(MagicLink::Table) + .if_not_exists() + .col( + ColumnDef::new(MagicLink::Id) + .string() + .not_null() + .primary_key(), + ) + .col(ColumnDef::new(MagicLink::User).string().not_null()) + .col( + ColumnDef::new(MagicLink::ExpiresOn) + .big_integer() + .not_null(), + ) + .foreign_key( + ForeignKey::create() + .name("fk_magiclink_user_users_id") + .from(MagicLink::Table, MagicLink::User) + .to(User::Table, User::Id) + .on_delete(ForeignKeyAction::Cascade) + .on_update(ForeignKeyAction::Cascade), + ) + .to_owned(), + ) + .await } async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.drop_table(Table::drop().table(MagicLink::Table).to_owned()).await + manager + .drop_table(Table::drop().table(MagicLink::Table).to_owned()) + .await } } @@ -37,5 +50,5 @@ pub enum MagicLink { Table, Id, User, - ExpiresOn + ExpiresOn, } diff --git a/trifid-api/trifid_api_migration/src/m20230402_213712_create_table_session_tokens.rs b/trifid-api/trifid_api_migration/src/m20230402_213712_create_table_session_tokens.rs index 8c51606..128d7e5 100644 --- a/trifid-api/trifid_api_migration/src/m20230402_213712_create_table_session_tokens.rs +++ b/trifid-api/trifid_api_migration/src/m20230402_213712_create_table_session_tokens.rs @@ -1,5 +1,5 @@ -use sea_orm_migration::prelude::*; use crate::m20230402_162601_create_table_users::User; +use sea_orm_migration::prelude::*; #[derive(DeriveMigrationName)] pub struct Migration; @@ -7,26 +7,39 @@ pub struct Migration; #[async_trait::async_trait] impl MigrationTrait for Migration { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.create_table( - Table::create() - .table(SessionToken::Table) - .if_not_exists() - .col(ColumnDef::new(SessionToken::Id).string().not_null().primary_key()) - .col(ColumnDef::new(SessionToken::User).string().not_null()) - .col(ColumnDef::new(SessionToken::ExpiresOn).big_integer().not_null()) - .foreign_key( - ForeignKey::create() - .from(SessionToken::Table, SessionToken::User) - .to(User::Table, User::Id) - .on_delete(ForeignKeyAction::Cascade) - .on_update(ForeignKeyAction::Cascade) - ) - .to_owned() - ).await + manager + 
.create_table( + Table::create() + .table(SessionToken::Table) + .if_not_exists() + .col( + ColumnDef::new(SessionToken::Id) + .string() + .not_null() + .primary_key(), + ) + .col(ColumnDef::new(SessionToken::User).string().not_null()) + .col( + ColumnDef::new(SessionToken::ExpiresOn) + .big_integer() + .not_null(), + ) + .foreign_key( + ForeignKey::create() + .from(SessionToken::Table, SessionToken::User) + .to(User::Table, User::Id) + .on_delete(ForeignKeyAction::Cascade) + .on_update(ForeignKeyAction::Cascade), + ) + .to_owned(), + ) + .await } async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.drop_table(Table::drop().table(SessionToken::Table).to_owned()).await + manager + .drop_table(Table::drop().table(SessionToken::Table).to_owned()) + .await } } @@ -36,5 +49,5 @@ pub enum SessionToken { Table, Id, User, - ExpiresOn + ExpiresOn, } diff --git a/trifid-api/trifid_api_migration/src/m20230402_232316_create_table_organizations.rs b/trifid-api/trifid_api_migration/src/m20230402_232316_create_table_organizations.rs index 86d18cd..90e82c0 100644 --- a/trifid-api/trifid_api_migration/src/m20230402_232316_create_table_organizations.rs +++ b/trifid-api/trifid_api_migration/src/m20230402_232316_create_table_organizations.rs @@ -1,5 +1,5 @@ -use sea_orm_migration::prelude::*; use crate::m20230402_162601_create_table_users::User; +use sea_orm_migration::prelude::*; #[derive(DeriveMigrationName)] pub struct Migration; @@ -7,23 +7,39 @@ pub struct Migration; #[async_trait::async_trait] impl MigrationTrait for Migration { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.create_table( - Table::create().table(Organization::Table) - .col(ColumnDef::new(Organization::Id).string().not_null().primary_key()) - .col(ColumnDef::new(Organization::Name).string().not_null()) - .col(ColumnDef::new(Organization::Owner).string().not_null().unique_key()) - .foreign_key( - ForeignKey::create() - .from(Organization::Table, Organization::Owner) - .to(User::Table, User::Id) - .on_delete(ForeignKeyAction::Cascade) - .on_update(ForeignKeyAction::Cascade) - ).to_owned() - ).await + manager + .create_table( + Table::create() + .table(Organization::Table) + .col( + ColumnDef::new(Organization::Id) + .string() + .not_null() + .primary_key(), + ) + .col(ColumnDef::new(Organization::Name).string().not_null()) + .col( + ColumnDef::new(Organization::Owner) + .string() + .not_null() + .unique_key(), + ) + .foreign_key( + ForeignKey::create() + .from(Organization::Table, Organization::Owner) + .to(User::Table, User::Id) + .on_delete(ForeignKeyAction::Cascade) + .on_update(ForeignKeyAction::Cascade), + ) + .to_owned(), + ) + .await } async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.drop_table(Table::drop().table(Organization::Table).to_owned()).await + manager + .drop_table(Table::drop().table(Organization::Table).to_owned()) + .await } } @@ -33,5 +49,5 @@ pub enum Organization { Table, Id, Name, - Owner + Owner, } diff --git a/trifid-api/trifid_api_migration/src/m20230402_233043_create_table_api_keys.rs b/trifid-api/trifid_api_migration/src/m20230402_233043_create_table_api_keys.rs index c0d36c3..250545b 100644 --- a/trifid-api/trifid_api_migration/src/m20230402_233043_create_table_api_keys.rs +++ b/trifid-api/trifid_api_migration/src/m20230402_233043_create_table_api_keys.rs @@ -1,5 +1,5 @@ -use sea_orm_migration::prelude::*; use crate::m20230402_232316_create_table_organizations::Organization; +use sea_orm_migration::prelude::*; 
#[derive(DeriveMigrationName)] pub struct Migration; @@ -7,25 +7,29 @@ pub struct Migration; #[async_trait::async_trait] impl MigrationTrait for Migration { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.create_table( - Table::create() - .table(ApiKey::Table) - .col(ColumnDef::new(ApiKey::Id).string().not_null().primary_key()) - .col(ColumnDef::new(ApiKey::Key).string().not_null().unique_key()) - .col(ColumnDef::new(ApiKey::Organization).string().not_null()) - .foreign_key( - ForeignKey::create() - .from(ApiKey::Table, ApiKey::Organization) - .to(Organization::Table, Organization::Id) - .on_delete(ForeignKeyAction::Cascade) - .on_update(ForeignKeyAction::Cascade) - ) - .to_owned() - ).await + manager + .create_table( + Table::create() + .table(ApiKey::Table) + .col(ColumnDef::new(ApiKey::Id).string().not_null().primary_key()) + .col(ColumnDef::new(ApiKey::Key).string().not_null().unique_key()) + .col(ColumnDef::new(ApiKey::Organization).string().not_null()) + .foreign_key( + ForeignKey::create() + .from(ApiKey::Table, ApiKey::Organization) + .to(Organization::Table, Organization::Id) + .on_delete(ForeignKeyAction::Cascade) + .on_update(ForeignKeyAction::Cascade), + ) + .to_owned(), + ) + .await } async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.drop_table(Table::drop().table(ApiKey::Table).to_owned()).await + manager + .drop_table(Table::drop().table(ApiKey::Table).to_owned()) + .await } } @@ -35,5 +39,5 @@ pub enum ApiKey { Table, Id, Key, - Organization + Organization, } diff --git a/trifid-api/trifid_api_migration/src/m20230402_233047_create_table_api_keys_scopes.rs b/trifid-api/trifid_api_migration/src/m20230402_233047_create_table_api_keys_scopes.rs index 359aa24..233c6b4 100644 --- a/trifid-api/trifid_api_migration/src/m20230402_233047_create_table_api_keys_scopes.rs +++ b/trifid-api/trifid_api_migration/src/m20230402_233047_create_table_api_keys_scopes.rs @@ -1,5 +1,5 @@ -use sea_orm_migration::prelude::*; use crate::m20230402_233043_create_table_api_keys::ApiKey; +use sea_orm_migration::prelude::*; #[derive(DeriveMigrationName)] pub struct Migration; @@ -7,24 +7,34 @@ pub struct Migration; #[async_trait::async_trait] impl MigrationTrait for Migration { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.create_table( - Table::create() - .table(ApiKeyScope::Table) - .col(ColumnDef::new(ApiKeyScope::Id).string().not_null().primary_key()) - .col(ColumnDef::new(ApiKeyScope::Scope).string().not_null()) - .col(ColumnDef::new(ApiKeyScope::ApiKey).string().not_null()) - .foreign_key( - ForeignKey::create() - .from(ApiKeyScope::Table, ApiKeyScope::ApiKey) - .to(ApiKey::Table, ApiKey::Id) - .on_delete(ForeignKeyAction::Cascade) - .on_update(ForeignKeyAction::Cascade) - ).to_owned() - ).await + manager + .create_table( + Table::create() + .table(ApiKeyScope::Table) + .col( + ColumnDef::new(ApiKeyScope::Id) + .string() + .not_null() + .primary_key(), + ) + .col(ColumnDef::new(ApiKeyScope::Scope).string().not_null()) + .col(ColumnDef::new(ApiKeyScope::ApiKey).string().not_null()) + .foreign_key( + ForeignKey::create() + .from(ApiKeyScope::Table, ApiKeyScope::ApiKey) + .to(ApiKey::Table, ApiKey::Id) + .on_delete(ForeignKeyAction::Cascade) + .on_update(ForeignKeyAction::Cascade), + ) + .to_owned(), + ) + .await } async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.drop_table(Table::drop().table(ApiKeyScope::Table).to_owned()).await + manager + 
.drop_table(Table::drop().table(ApiKeyScope::Table).to_owned()) + .await } } @@ -34,5 +44,5 @@ pub enum ApiKeyScope { Table, Id, Scope, - ApiKey + ApiKey, } diff --git a/trifid-api/trifid_api_migration/src/m20230402_234025_create_table_totp_authenticators.rs b/trifid-api/trifid_api_migration/src/m20230402_234025_create_table_totp_authenticators.rs index 72b2379..31d5324 100644 --- a/trifid-api/trifid_api_migration/src/m20230402_234025_create_table_totp_authenticators.rs +++ b/trifid-api/trifid_api_migration/src/m20230402_234025_create_table_totp_authenticators.rs @@ -1,5 +1,5 @@ -use sea_orm_migration::prelude::*; use crate::m20230402_162601_create_table_users::User; +use sea_orm_migration::prelude::*; #[derive(DeriveMigrationName)] pub struct Migration; @@ -7,27 +7,60 @@ pub struct Migration; #[async_trait::async_trait] impl MigrationTrait for Migration { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.create_table( - Table::create() - .table(TotpAuthenticator::Table) - .col(ColumnDef::new(TotpAuthenticator::Id).string().not_null().primary_key()) - .col(ColumnDef::new(TotpAuthenticator::Secret).string().not_null().unique_key()) - .col(ColumnDef::new(TotpAuthenticator::Url).string().not_null().unique_key()) - .col(ColumnDef::new(TotpAuthenticator::Verified).boolean().not_null()) - .col(ColumnDef::new(TotpAuthenticator::ExpiresOn).big_integer().not_null()) - .col(ColumnDef::new(TotpAuthenticator::User).string().not_null().unique_key()) - .foreign_key( - ForeignKey::create() - .from(TotpAuthenticator::Table, TotpAuthenticator::User) - .to(User::Table, User::Id) - .on_delete(ForeignKeyAction::Cascade) - .on_update(ForeignKeyAction::Cascade) - ).to_owned() - ).await + manager + .create_table( + Table::create() + .table(TotpAuthenticator::Table) + .col( + ColumnDef::new(TotpAuthenticator::Id) + .string() + .not_null() + .primary_key(), + ) + .col( + ColumnDef::new(TotpAuthenticator::Secret) + .string() + .not_null() + .unique_key(), + ) + .col( + ColumnDef::new(TotpAuthenticator::Url) + .string() + .not_null() + .unique_key(), + ) + .col( + ColumnDef::new(TotpAuthenticator::Verified) + .boolean() + .not_null(), + ) + .col( + ColumnDef::new(TotpAuthenticator::ExpiresOn) + .big_integer() + .not_null(), + ) + .col( + ColumnDef::new(TotpAuthenticator::User) + .string() + .not_null() + .unique_key(), + ) + .foreign_key( + ForeignKey::create() + .from(TotpAuthenticator::Table, TotpAuthenticator::User) + .to(User::Table, User::Id) + .on_delete(ForeignKeyAction::Cascade) + .on_update(ForeignKeyAction::Cascade), + ) + .to_owned(), + ) + .await } async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.drop_table(Table::drop().table(TotpAuthenticator::Table).to_owned()).await + manager + .drop_table(Table::drop().table(TotpAuthenticator::Table).to_owned()) + .await } } @@ -40,5 +73,5 @@ pub enum TotpAuthenticator { Url, Verified, ExpiresOn, - User + User, } diff --git a/trifid-api/trifid_api_migration/src/m20230403_002256_create_table_auth_tokens.rs b/trifid-api/trifid_api_migration/src/m20230403_002256_create_table_auth_tokens.rs index 12a0a67..03c4c89 100644 --- a/trifid-api/trifid_api_migration/src/m20230403_002256_create_table_auth_tokens.rs +++ b/trifid-api/trifid_api_migration/src/m20230403_002256_create_table_auth_tokens.rs @@ -1,5 +1,5 @@ -use sea_orm_migration::prelude::*; use crate::m20230402_162601_create_table_users::User; +use sea_orm_migration::prelude::*; #[derive(DeriveMigrationName)] pub struct Migration; @@ -7,26 +7,39 @@ pub struct 
Migration; #[async_trait::async_trait] impl MigrationTrait for Migration { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.create_table( - Table::create() - .table(AuthToken::Table) - .if_not_exists() - .col(ColumnDef::new(AuthToken::Id).string().not_null().primary_key()) - .col(ColumnDef::new(AuthToken::User).string().not_null()) - .col(ColumnDef::new(AuthToken::ExpiresOn).big_integer().not_null()) - .foreign_key( - ForeignKey::create() - .from(AuthToken::Table, AuthToken::User) - .to(User::Table, User::Id) - .on_delete(ForeignKeyAction::Cascade) - .on_update(ForeignKeyAction::Cascade) - ) - .to_owned() - ).await + manager + .create_table( + Table::create() + .table(AuthToken::Table) + .if_not_exists() + .col( + ColumnDef::new(AuthToken::Id) + .string() + .not_null() + .primary_key(), + ) + .col(ColumnDef::new(AuthToken::User).string().not_null()) + .col( + ColumnDef::new(AuthToken::ExpiresOn) + .big_integer() + .not_null(), + ) + .foreign_key( + ForeignKey::create() + .from(AuthToken::Table, AuthToken::User) + .to(User::Table, User::Id) + .on_delete(ForeignKeyAction::Cascade) + .on_update(ForeignKeyAction::Cascade), + ) + .to_owned(), + ) + .await } async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.drop_table(Table::drop().table(AuthToken::Table).to_owned()).await + manager + .drop_table(Table::drop().table(AuthToken::Table).to_owned()) + .await } } @@ -36,5 +49,5 @@ pub enum AuthToken { Table, Id, User, - ExpiresOn + ExpiresOn, } diff --git a/trifid-api/trifid_api_migration/src/m20230403_142517_create_table_signing_cas.rs b/trifid-api/trifid_api_migration/src/m20230403_142517_create_table_signing_cas.rs index 084dcd0..8e02a78 100644 --- a/trifid-api/trifid_api_migration/src/m20230403_142517_create_table_signing_cas.rs +++ b/trifid-api/trifid_api_migration/src/m20230403_142517_create_table_signing_cas.rs @@ -6,21 +6,40 @@ pub struct Migration; #[async_trait::async_trait] impl MigrationTrait for Migration { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.create_table( - Table::create() - .table(SigningCA::Table) - .col(ColumnDef::new(SigningCA::Id).string().not_null().primary_key()) - .col(ColumnDef::new(SigningCA::Organization).string().not_null()) - .col(ColumnDef::new(SigningCA::Cert).string().not_null()) - .col(ColumnDef::new(SigningCA::Key).string().not_null().unique_key()) - .col(ColumnDef::new(SigningCA::Expires).big_integer().not_null()) - .col(ColumnDef::new(SigningCA::Nonce).string().not_null().unique_key()) - .to_owned() - ).await + manager + .create_table( + Table::create() + .table(SigningCA::Table) + .col( + ColumnDef::new(SigningCA::Id) + .string() + .not_null() + .primary_key(), + ) + .col(ColumnDef::new(SigningCA::Organization).string().not_null()) + .col(ColumnDef::new(SigningCA::Cert).string().not_null()) + .col( + ColumnDef::new(SigningCA::Key) + .string() + .not_null() + .unique_key(), + ) + .col(ColumnDef::new(SigningCA::Expires).big_integer().not_null()) + .col( + ColumnDef::new(SigningCA::Nonce) + .string() + .not_null() + .unique_key(), + ) + .to_owned(), + ) + .await } async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.drop_table(Table::drop().table(SigningCA::Table).to_owned()).await + manager + .drop_table(Table::drop().table(SigningCA::Table).to_owned()) + .await } } @@ -33,5 +52,5 @@ pub enum SigningCA { Cert, Key, Expires, - Nonce + Nonce, } diff --git a/trifid-api/trifid_api_migration/src/m20230403_173431_create_table_networks.rs 
b/trifid-api/trifid_api_migration/src/m20230403_173431_create_table_networks.rs index a85fcd4..696ba62 100644 --- a/trifid-api/trifid_api_migration/src/m20230403_173431_create_table_networks.rs +++ b/trifid-api/trifid_api_migration/src/m20230403_173431_create_table_networks.rs @@ -1,6 +1,6 @@ -use sea_orm_migration::prelude::*; use crate::m20230402_232316_create_table_organizations::Organization; use crate::m20230403_142517_create_table_signing_cas::SigningCA; +use sea_orm_migration::prelude::*; #[derive(DeriveMigrationName)] pub struct Migration; @@ -8,36 +8,59 @@ pub struct Migration; #[async_trait::async_trait] impl MigrationTrait for Migration { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.create_table( - Table::create() - .table(Network::Table) - .col(ColumnDef::new(Network::Id).string().not_null().primary_key()) - .col(ColumnDef::new(Network::Cidr).string().not_null()) - .col(ColumnDef::new(Network::Organization).string().not_null().unique_key()) - .col(ColumnDef::new(Network::SigningCA).string().not_null().unique_key()) - .col(ColumnDef::new(Network::CreatedAt).big_integer().not_null()) - .col(ColumnDef::new(Network::Name).string().not_null()) - .col(ColumnDef::new(Network::LighthousesAsRelays).boolean().not_null()) - .foreign_key( - ForeignKey::create() - .from(Network::Table, Network::Organization) - .to(Organization::Table, Organization::Id) - .on_delete(ForeignKeyAction::Cascade) - .on_update(ForeignKeyAction::Cascade) - ) - .foreign_key( - ForeignKey::create() - .from(Network::Table, Network::SigningCA) - .to(SigningCA::Table, SigningCA::Id) - .on_delete(ForeignKeyAction::Cascade) - .on_update(ForeignKeyAction::Cascade) - ) - .to_owned() - ).await + manager + .create_table( + Table::create() + .table(Network::Table) + .col( + ColumnDef::new(Network::Id) + .string() + .not_null() + .primary_key(), + ) + .col(ColumnDef::new(Network::Cidr).string().not_null()) + .col( + ColumnDef::new(Network::Organization) + .string() + .not_null() + .unique_key(), + ) + .col( + ColumnDef::new(Network::SigningCA) + .string() + .not_null() + .unique_key(), + ) + .col(ColumnDef::new(Network::CreatedAt).big_integer().not_null()) + .col(ColumnDef::new(Network::Name).string().not_null()) + .col( + ColumnDef::new(Network::LighthousesAsRelays) + .boolean() + .not_null(), + ) + .foreign_key( + ForeignKey::create() + .from(Network::Table, Network::Organization) + .to(Organization::Table, Organization::Id) + .on_delete(ForeignKeyAction::Cascade) + .on_update(ForeignKeyAction::Cascade), + ) + .foreign_key( + ForeignKey::create() + .from(Network::Table, Network::SigningCA) + .to(SigningCA::Table, SigningCA::Id) + .on_delete(ForeignKeyAction::Cascade) + .on_update(ForeignKeyAction::Cascade), + ) + .to_owned(), + ) + .await } async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.drop_table(Table::drop().table(Network::Table).to_owned()).await + manager + .drop_table(Table::drop().table(Network::Table).to_owned()) + .await } } @@ -51,5 +74,5 @@ pub enum Network { SigningCA, CreatedAt, Name, - LighthousesAsRelays + LighthousesAsRelays, } diff --git a/trifid-api/trifid_api_migration/src/m20230404_133809_create_table_roles.rs b/trifid-api/trifid_api_migration/src/m20230404_133809_create_table_roles.rs index 1cecc8a..eaa340b 100644 --- a/trifid-api/trifid_api_migration/src/m20230404_133809_create_table_roles.rs +++ b/trifid-api/trifid_api_migration/src/m20230404_133809_create_table_roles.rs @@ -1,5 +1,5 @@ -use sea_orm_migration::prelude::*; use 
crate::m20230402_232316_create_table_organizations::Organization; +use sea_orm_migration::prelude::*; #[derive(DeriveMigrationName)] pub struct Migration; @@ -7,27 +7,32 @@ pub struct Migration; #[async_trait::async_trait] impl MigrationTrait for Migration { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.create_table( - Table::create() - .table(Role::Table) - .col(ColumnDef::new(Role::Id).string().not_null().primary_key()) - .col(ColumnDef::new(Role::Name).string().not_null().unique_key()) - .col(ColumnDef::new(Role::Description).string().not_null()) - .col(ColumnDef::new(Role::Organization).string().not_null()) - .col(ColumnDef::new(Role::CreatedAt).big_integer().not_null()) - .col(ColumnDef::new(Role::ModifiedAt).big_integer().not_null()) - .foreign_key( - ForeignKey::create() - .from(Role::Table, Role::Organization) - .to(Organization::Table, Organization::Id) - .on_update(ForeignKeyAction::Cascade) - .on_delete(ForeignKeyAction::Cascade) - ).to_owned() - ).await + manager + .create_table( + Table::create() + .table(Role::Table) + .col(ColumnDef::new(Role::Id).string().not_null().primary_key()) + .col(ColumnDef::new(Role::Name).string().not_null().unique_key()) + .col(ColumnDef::new(Role::Description).string().not_null()) + .col(ColumnDef::new(Role::Organization).string().not_null()) + .col(ColumnDef::new(Role::CreatedAt).big_integer().not_null()) + .col(ColumnDef::new(Role::ModifiedAt).big_integer().not_null()) + .foreign_key( + ForeignKey::create() + .from(Role::Table, Role::Organization) + .to(Organization::Table, Organization::Id) + .on_update(ForeignKeyAction::Cascade) + .on_delete(ForeignKeyAction::Cascade), + ) + .to_owned(), + ) + .await } async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.drop_table(Table::drop().table(Role::Table).to_owned()).await + manager + .drop_table(Table::drop().table(Role::Table).to_owned()) + .await } } @@ -40,5 +45,5 @@ pub enum Role { Description, Organization, CreatedAt, - ModifiedAt + ModifiedAt, } diff --git a/trifid-api/trifid_api_migration/src/m20230404_133813_create_table_firewall_rules.rs b/trifid-api/trifid_api_migration/src/m20230404_133813_create_table_firewall_rules.rs index a44f3b2..ec3a5aa 100644 --- a/trifid-api/trifid_api_migration/src/m20230404_133813_create_table_firewall_rules.rs +++ b/trifid-api/trifid_api_migration/src/m20230404_133813_create_table_firewall_rules.rs @@ -1,5 +1,5 @@ -use sea_orm_migration::prelude::*; use crate::m20230404_133809_create_table_roles::Role; +use sea_orm_migration::prelude::*; #[derive(DeriveMigrationName)] pub struct Migration; @@ -7,35 +7,57 @@ pub struct Migration; #[async_trait::async_trait] impl MigrationTrait for Migration { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.create_table( - Table::create() - .table(FirewallRule::Table) - .col(ColumnDef::new(FirewallRule::Id).string().not_null().primary_key()) - .col(ColumnDef::new(FirewallRule::Role).string().not_null()) - .col(ColumnDef::new(FirewallRule::Protocol).string().not_null()) - .col(ColumnDef::new(FirewallRule::Description).string().not_null()) - .col(ColumnDef::new(FirewallRule::AllowedRoleID).string().null()) - .col(ColumnDef::new(FirewallRule::PortRangeFrom).integer().not_null()) - .col(ColumnDef::new(FirewallRule::PortRangeTo).integer().not_null()) - .foreign_key( - ForeignKey::create() - .from(FirewallRule::Table, FirewallRule::Role) - .to(Role::Table, Role::Id) - .on_delete(ForeignKeyAction::Cascade) - .on_update(ForeignKeyAction::Cascade) - ) - 
.foreign_key( - ForeignKey::create() - .from(FirewallRule::Table, FirewallRule::AllowedRoleID) - .to(Role::Table, Role::Id) - .on_delete(ForeignKeyAction::Cascade) - .on_delete(ForeignKeyAction::Cascade) - ).to_owned() - ).await + manager + .create_table( + Table::create() + .table(FirewallRule::Table) + .col( + ColumnDef::new(FirewallRule::Id) + .string() + .not_null() + .primary_key(), + ) + .col(ColumnDef::new(FirewallRule::Role).string().not_null()) + .col(ColumnDef::new(FirewallRule::Protocol).string().not_null()) + .col( + ColumnDef::new(FirewallRule::Description) + .string() + .not_null(), + ) + .col(ColumnDef::new(FirewallRule::AllowedRoleID).string().null()) + .col( + ColumnDef::new(FirewallRule::PortRangeFrom) + .integer() + .not_null(), + ) + .col( + ColumnDef::new(FirewallRule::PortRangeTo) + .integer() + .not_null(), + ) + .foreign_key( + ForeignKey::create() + .from(FirewallRule::Table, FirewallRule::Role) + .to(Role::Table, Role::Id) + .on_delete(ForeignKeyAction::Cascade) + .on_update(ForeignKeyAction::Cascade), + ) + .foreign_key( + ForeignKey::create() + .from(FirewallRule::Table, FirewallRule::AllowedRoleID) + .to(Role::Table, Role::Id) + .on_delete(ForeignKeyAction::Cascade) + .on_delete(ForeignKeyAction::Cascade), + ) + .to_owned(), + ) + .await } async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.drop_table(Table::drop().table(FirewallRule::Table).to_owned()).await + manager + .drop_table(Table::drop().table(FirewallRule::Table).to_owned()) + .await } } @@ -49,5 +71,5 @@ pub enum FirewallRule { Description, AllowedRoleID, PortRangeFrom, - PortRangeTo + PortRangeTo, } diff --git a/trifid-api/trifid_api_migration/src/m20230427_170037_create_table_hosts.rs b/trifid-api/trifid_api_migration/src/m20230427_170037_create_table_hosts.rs index c1de66f..b3ce88c 100644 --- a/trifid-api/trifid_api_migration/src/m20230427_170037_create_table_hosts.rs +++ b/trifid-api/trifid_api_migration/src/m20230427_170037_create_table_hosts.rs @@ -1,6 +1,6 @@ -use sea_orm_migration::prelude::*; use crate::m20230403_173431_create_table_networks::Network; use crate::m20230404_133809_create_table_roles::Role; +use sea_orm_migration::prelude::*; #[derive(DeriveMigrationName)] pub struct Migration; @@ -8,56 +8,58 @@ pub struct Migration; #[async_trait::async_trait] impl MigrationTrait for Migration { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.create_table( - Table::create() - .table(Host::Table) - .col(ColumnDef::new(Host::Id).string().not_null().primary_key()) - .col(ColumnDef::new(Host::Name).string().not_null()) - .col(ColumnDef::new(Host::Network).string().not_null()) - .col(ColumnDef::new(Host::Role).string().not_null()) - .col(ColumnDef::new(Host::IP).string().not_null()) - .col(ColumnDef::new(Host::ListenPort).unsigned().not_null()) - .col(ColumnDef::new(Host::IsLighthouse).boolean().not_null()) - .col(ColumnDef::new(Host::IsRelay).boolean().not_null()) - .col(ColumnDef::new(Host::Counter).unsigned().not_null()) - .col(ColumnDef::new(Host::CreatedAt).big_integer().not_null()) - .col(ColumnDef::new(Host::IsBlocked).boolean().not_null()) - .col(ColumnDef::new(Host::LastSeenAt).big_integer().not_null()) - .col(ColumnDef::new(Host::LastVersion).integer().not_null()) - .col(ColumnDef::new(Host::LastPlatform).string().not_null()) - .col(ColumnDef::new(Host::LastOutOfDate).boolean().not_null()) - .foreign_key( - ForeignKey::create() - .from(Host::Table, Host::Network) - .to(Network::Table, Network::Id) - 
.on_update(ForeignKeyAction::Cascade) - .on_delete(ForeignKeyAction::Cascade) - ) - .foreign_key( - ForeignKey::create() - .from(Host::Table, Host::Role) - .to(Role::Table, Role::Id) - .on_update(ForeignKeyAction::Cascade) - .on_delete(ForeignKeyAction::Cascade) - ) - .index( - Index::create() - .name("idx-hosts-net-name-unique") - .table(Host::Table) - .col(Host::Network) - .col(Host::Name) - .unique() - ) - .index( - Index::create() - .name("idx-hosts-net-ip-unique") - .table(Host::Table) - .col(Host::Network) - .col(Host::IP) - .unique() - ) - .to_owned() - ).await + manager + .create_table( + Table::create() + .table(Host::Table) + .col(ColumnDef::new(Host::Id).string().not_null().primary_key()) + .col(ColumnDef::new(Host::Name).string().not_null()) + .col(ColumnDef::new(Host::Network).string().not_null()) + .col(ColumnDef::new(Host::Role).string().not_null()) + .col(ColumnDef::new(Host::IP).string().not_null()) + .col(ColumnDef::new(Host::ListenPort).unsigned().not_null()) + .col(ColumnDef::new(Host::IsLighthouse).boolean().not_null()) + .col(ColumnDef::new(Host::IsRelay).boolean().not_null()) + .col(ColumnDef::new(Host::Counter).unsigned().not_null()) + .col(ColumnDef::new(Host::CreatedAt).big_integer().not_null()) + .col(ColumnDef::new(Host::IsBlocked).boolean().not_null()) + .col(ColumnDef::new(Host::LastSeenAt).big_integer().not_null()) + .col(ColumnDef::new(Host::LastVersion).integer().not_null()) + .col(ColumnDef::new(Host::LastPlatform).string().not_null()) + .col(ColumnDef::new(Host::LastOutOfDate).boolean().not_null()) + .foreign_key( + ForeignKey::create() + .from(Host::Table, Host::Network) + .to(Network::Table, Network::Id) + .on_update(ForeignKeyAction::Cascade) + .on_delete(ForeignKeyAction::Cascade), + ) + .foreign_key( + ForeignKey::create() + .from(Host::Table, Host::Role) + .to(Role::Table, Role::Id) + .on_update(ForeignKeyAction::Cascade) + .on_delete(ForeignKeyAction::Cascade), + ) + .index( + Index::create() + .name("idx-hosts-net-name-unique") + .table(Host::Table) + .col(Host::Network) + .col(Host::Name) + .unique(), + ) + .index( + Index::create() + .name("idx-hosts-net-ip-unique") + .table(Host::Table) + .col(Host::Network) + .col(Host::IP) + .unique(), + ) + .to_owned(), + ) + .await } async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { @@ -85,5 +87,5 @@ pub enum Host { LastSeenAt, LastVersion, LastPlatform, - LastOutOfDate + LastOutOfDate, } diff --git a/trifid-api/trifid_api_migration/src/m20230427_171517_create_table_hosts_static_addresses.rs b/trifid-api/trifid_api_migration/src/m20230427_171517_create_table_hosts_static_addresses.rs index 7de7d76..f0d9166 100644 --- a/trifid-api/trifid_api_migration/src/m20230427_171517_create_table_hosts_static_addresses.rs +++ b/trifid-api/trifid_api_migration/src/m20230427_171517_create_table_hosts_static_addresses.rs @@ -1,5 +1,5 @@ -use sea_orm_migration::prelude::*; use crate::m20230427_170037_create_table_hosts::Host; +use sea_orm_migration::prelude::*; #[derive(DeriveMigrationName)] pub struct Migration; @@ -11,15 +11,24 @@ impl MigrationTrait for Migration { .create_table( Table::create() .table(HostStaticAddress::Table) - .col(ColumnDef::new(HostStaticAddress::Id).string().not_null().primary_key()) + .col( + ColumnDef::new(HostStaticAddress::Id) + .string() + .not_null() + .primary_key(), + ) .col(ColumnDef::new(HostStaticAddress::Host).string().not_null()) - .col(ColumnDef::new(HostStaticAddress::Address).string().not_null()) + .col( + ColumnDef::new(HostStaticAddress::Address) + .string() + 
.not_null(), + ) .foreign_key( ForeignKey::create() .from(HostStaticAddress::Table, HostStaticAddress::Host) .to(Host::Table, Host::Id) .on_update(ForeignKeyAction::Cascade) - .on_delete(ForeignKeyAction::Cascade) + .on_delete(ForeignKeyAction::Cascade), ) .to_owned(), ) @@ -39,5 +48,5 @@ pub enum HostStaticAddress { Table, Id, Host, - Address + Address, } diff --git a/trifid-api/trifid_api_migration/src/m20230427_171529_create_table_hosts_config_overrides.rs b/trifid-api/trifid_api_migration/src/m20230427_171529_create_table_hosts_config_overrides.rs index 86ee883..082d3a6 100644 --- a/trifid-api/trifid_api_migration/src/m20230427_171529_create_table_hosts_config_overrides.rs +++ b/trifid-api/trifid_api_migration/src/m20230427_171529_create_table_hosts_config_overrides.rs @@ -1,5 +1,5 @@ -use sea_orm_migration::prelude::*; use crate::m20230427_170037_create_table_hosts::Host; +use sea_orm_migration::prelude::*; #[derive(DeriveMigrationName)] pub struct Migration; @@ -7,30 +7,41 @@ pub struct Migration; #[async_trait::async_trait] impl MigrationTrait for Migration { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { - manager.create_table( - Table::create() - .table(HostConfigOverride::Table) - .col(ColumnDef::new(HostConfigOverride::Id).string().not_null().primary_key()) - .col(ColumnDef::new(HostConfigOverride::Key).string().not_null()) - .col(ColumnDef::new(HostConfigOverride::Value).string().not_null()) - .col(ColumnDef::new(HostConfigOverride::Host).string().not_null()) - .foreign_key( - ForeignKey::create() - .from(HostConfigOverride::Table, HostConfigOverride::Host) - .to(Host::Table, Host::Id) - .on_delete(ForeignKeyAction::Cascade) - .on_update(ForeignKeyAction::Cascade) - ) - .index( - Index::create() - .name("idx_hosts_config_overrides-key-host-unique") - .table(HostConfigOverride::Table) - .col(HostConfigOverride::Key) - .col(HostConfigOverride::Id) - .unique() - ) - .to_owned() - ).await + manager + .create_table( + Table::create() + .table(HostConfigOverride::Table) + .col( + ColumnDef::new(HostConfigOverride::Id) + .string() + .not_null() + .primary_key(), + ) + .col(ColumnDef::new(HostConfigOverride::Key).string().not_null()) + .col( + ColumnDef::new(HostConfigOverride::Value) + .string() + .not_null(), + ) + .col(ColumnDef::new(HostConfigOverride::Host).string().not_null()) + .foreign_key( + ForeignKey::create() + .from(HostConfigOverride::Table, HostConfigOverride::Host) + .to(Host::Table, Host::Id) + .on_delete(ForeignKeyAction::Cascade) + .on_update(ForeignKeyAction::Cascade), + ) + .index( + Index::create() + .name("idx_hosts_config_overrides-key-host-unique") + .table(HostConfigOverride::Table) + .col(HostConfigOverride::Key) + .col(HostConfigOverride::Id) + .unique(), + ) + .to_owned(), + ) + .await } async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { diff --git a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/mod.rs b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/mod.rs index 41382c0..35ee697 100644 --- a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/mod.rs +++ b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/mod.rs @@ -1,16 +1,16 @@ //! `SeaORM` Entity. 
Generated by sea-orm-codegen 0.11.2 -pub mod prelude ; +pub mod prelude; -pub mod api_key ; -pub mod api_key_scope ; -pub mod auth_token ; -pub mod firewall_rule ; -pub mod magic_link ; -pub mod network ; -pub mod organization ; -pub mod role ; -pub mod session_token ; -pub mod signing_ca ; -pub mod totp_authenticator ; -pub mod user ; \ No newline at end of file +pub mod api_key; +pub mod api_key_scope; +pub mod auth_token; +pub mod firewall_rule; +pub mod magic_link; +pub mod network; +pub mod organization; +pub mod role; +pub mod session_token; +pub mod signing_ca; +pub mod totp_authenticator; +pub mod user; diff --git a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/network.rs b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/network.rs index 832f0e8..7d575e6 100644 --- a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/network.rs +++ b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/network.rs @@ -1,15 +1,52 @@ //! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.2 +use sea_orm::entity::prelude::*; +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "network")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub id: String, + pub cidr: String, + #[sea_orm(unique)] + pub organization: String, + #[sea_orm(unique)] + pub signing_ca: String, + pub created_at: i64, + pub name: String, + pub lighthouses_as_relays: bool, +} -use sea_orm :: entity :: prelude :: * ; +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::organization::Entity", + from = "Column::Organization", + to = "super::organization::Column::Id", + on_update = "Cascade", + on_delete = "Cascade" + )] + Organization, + #[sea_orm( + belongs_to = "super::signing_ca::Entity", + from = "Column::SigningCa", + to = "super::signing_ca::Column::Id", + on_update = "Cascade", + on_delete = "Cascade" + )] + SigningCa, +} -# [derive (Clone , Debug , PartialEq , DeriveEntityModel , Eq)] # [sea_orm (table_name = "network")] pub struct Model { # [sea_orm (primary_key , auto_increment = false)] pub id : String , pub cidr : String , # [sea_orm (unique)] pub organization : String , # [sea_orm (unique)] pub signing_ca : String , pub created_at : i64 , pub name : String , pub lighthouses_as_relays : bool , } +impl Related for Entity { + fn to() -> RelationDef { + Relation::Organization.def() + } +} -# [derive (Copy , Clone , Debug , EnumIter , DeriveRelation)] pub enum Relation { # [sea_orm (belongs_to = "super::organization::Entity" , from = "Column::Organization" , to = "super::organization::Column::Id" , on_update = "Cascade" , on_delete = "Cascade" ,)] Organization , # [sea_orm (belongs_to = "super::signing_ca::Entity" , from = "Column::SigningCa" , to = "super::signing_ca::Column::Id" , on_update = "Cascade" , on_delete = "Cascade" ,)] SigningCa , } +impl Related for Entity { + fn to() -> RelationDef { + Relation::SigningCa.def() + } +} -impl Related < super :: organization :: Entity > for Entity { fn to () -> RelationDef { Relation :: Organization . def () } } - -impl Related < super :: signing_ca :: Entity > for Entity { fn to () -> RelationDef { Relation :: SigningCa . 
def () } } - -impl ActiveModelBehavior for ActiveModel { } \ No newline at end of file +impl ActiveModelBehavior for ActiveModel {} diff --git a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/organization.rs b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/organization.rs index fdaf8e3..25d4ff9 100644 --- a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/organization.rs +++ b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/organization.rs @@ -1,19 +1,57 @@ //! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.2 +use sea_orm::entity::prelude::*; +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "organization")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub id: String, + pub name: String, + #[sea_orm(unique)] + pub owner: String, +} -use sea_orm :: entity :: prelude :: * ; +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm(has_many = "super::api_key::Entity")] + ApiKey, + #[sea_orm(has_one = "super::network::Entity")] + Network, + #[sea_orm(has_many = "super::role::Entity")] + Role, + #[sea_orm( + belongs_to = "super::user::Entity", + from = "Column::Owner", + to = "super::user::Column::Id", + on_update = "Cascade", + on_delete = "Cascade" + )] + User, +} -# [derive (Clone , Debug , PartialEq , DeriveEntityModel , Eq)] # [sea_orm (table_name = "organization")] pub struct Model { # [sea_orm (primary_key , auto_increment = false)] pub id : String , pub name : String , # [sea_orm (unique)] pub owner : String , } +impl Related for Entity { + fn to() -> RelationDef { + Relation::ApiKey.def() + } +} -# [derive (Copy , Clone , Debug , EnumIter , DeriveRelation)] pub enum Relation { # [sea_orm (has_many = "super::api_key::Entity")] ApiKey , # [sea_orm (has_one = "super::network::Entity")] Network , # [sea_orm (has_many = "super::role::Entity")] Role , # [sea_orm (belongs_to = "super::user::Entity" , from = "Column::Owner" , to = "super::user::Column::Id" , on_update = "Cascade" , on_delete = "Cascade" ,)] User , } +impl Related for Entity { + fn to() -> RelationDef { + Relation::Network.def() + } +} -impl Related < super :: api_key :: Entity > for Entity { fn to () -> RelationDef { Relation :: ApiKey . def () } } +impl Related for Entity { + fn to() -> RelationDef { + Relation::Role.def() + } +} -impl Related < super :: network :: Entity > for Entity { fn to () -> RelationDef { Relation :: Network . def () } } +impl Related for Entity { + fn to() -> RelationDef { + Relation::User.def() + } +} -impl Related < super :: role :: Entity > for Entity { fn to () -> RelationDef { Relation :: Role . def () } } - -impl Related < super :: user :: Entity > for Entity { fn to () -> RelationDef { Relation :: User . def () } } - -impl ActiveModelBehavior for ActiveModel { } \ No newline at end of file +impl ActiveModelBehavior for ActiveModel {} diff --git a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/prelude.rs b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/prelude.rs index 2314fbc..93cce0c 100644 --- a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/prelude.rs +++ b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/prelude.rs @@ -1,14 +1,14 @@ //! `SeaORM` Entity. 
Generated by sea-orm-codegen 0.11.2 -pub use super :: api_key :: Entity as ApiKey ; -pub use super :: api_key_scope :: Entity as ApiKeyScope ; -pub use super :: auth_token :: Entity as AuthToken ; -pub use super :: firewall_rule :: Entity as FirewallRule ; -pub use super :: magic_link :: Entity as MagicLink ; -pub use super :: network :: Entity as Network ; -pub use super :: organization :: Entity as Organization ; -pub use super :: role :: Entity as Role ; -pub use super :: session_token :: Entity as SessionToken ; -pub use super :: signing_ca :: Entity as SigningCa ; -pub use super :: totp_authenticator :: Entity as TotpAuthenticator ; -pub use super :: user :: Entity as User ; \ No newline at end of file +pub use super::api_key::Entity as ApiKey; +pub use super::api_key_scope::Entity as ApiKeyScope; +pub use super::auth_token::Entity as AuthToken; +pub use super::firewall_rule::Entity as FirewallRule; +pub use super::magic_link::Entity as MagicLink; +pub use super::network::Entity as Network; +pub use super::organization::Entity as Organization; +pub use super::role::Entity as Role; +pub use super::session_token::Entity as SessionToken; +pub use super::signing_ca::Entity as SigningCa; +pub use super::totp_authenticator::Entity as TotpAuthenticator; +pub use super::user::Entity as User; diff --git a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/role.rs b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/role.rs index 9ce3a13..b31b820 100644 --- a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/role.rs +++ b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/role.rs @@ -1,13 +1,36 @@ //! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.2 +use sea_orm::entity::prelude::*; +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "role")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub id: String, + #[sea_orm(unique)] + pub name: String, + pub description: String, + pub organization: String, + pub created_at: i64, + pub modified_at: i64, +} -use sea_orm :: entity :: prelude :: * ; +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::organization::Entity", + from = "Column::Organization", + to = "super::organization::Column::Id", + on_update = "Cascade", + on_delete = "Cascade" + )] + Organization, +} -# [derive (Clone , Debug , PartialEq , DeriveEntityModel , Eq)] # [sea_orm (table_name = "role")] pub struct Model { # [sea_orm (primary_key , auto_increment = false)] pub id : String , # [sea_orm (unique)] pub name : String , pub description : String , pub organization : String , pub created_at : i64 , pub modified_at : i64 , } +impl Related for Entity { + fn to() -> RelationDef { + Relation::Organization.def() + } +} -# [derive (Copy , Clone , Debug , EnumIter , DeriveRelation)] pub enum Relation { # [sea_orm (belongs_to = "super::organization::Entity" , from = "Column::Organization" , to = "super::organization::Column::Id" , on_update = "Cascade" , on_delete = "Cascade" ,)] Organization , } - -impl Related < super :: organization :: Entity > for Entity { fn to () -> RelationDef { Relation :: Organization . 
def () } } - -impl ActiveModelBehavior for ActiveModel { } \ No newline at end of file +impl ActiveModelBehavior for ActiveModel {} diff --git a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/session_token.rs b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/session_token.rs index 23d37ba..a648966 100644 --- a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/session_token.rs +++ b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/session_token.rs @@ -1,13 +1,32 @@ //! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.2 +use sea_orm::entity::prelude::*; +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "session_token")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub id: String, + pub user: String, + pub expires_on: i64, +} -use sea_orm :: entity :: prelude :: * ; +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::user::Entity", + from = "Column::User", + to = "super::user::Column::Id", + on_update = "Cascade", + on_delete = "Cascade" + )] + User, +} -# [derive (Clone , Debug , PartialEq , DeriveEntityModel , Eq)] # [sea_orm (table_name = "session_token")] pub struct Model { # [sea_orm (primary_key , auto_increment = false)] pub id : String , pub user : String , pub expires_on : i64 , } +impl Related for Entity { + fn to() -> RelationDef { + Relation::User.def() + } +} -# [derive (Copy , Clone , Debug , EnumIter , DeriveRelation)] pub enum Relation { # [sea_orm (belongs_to = "super::user::Entity" , from = "Column::User" , to = "super::user::Column::Id" , on_update = "Cascade" , on_delete = "Cascade" ,)] User , } - -impl Related < super :: user :: Entity > for Entity { fn to () -> RelationDef { Relation :: User . def () } } - -impl ActiveModelBehavior for ActiveModel { } \ No newline at end of file +impl ActiveModelBehavior for ActiveModel {} diff --git a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/signing_ca.rs b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/signing_ca.rs index 72516a6..d0770a9 100644 --- a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/signing_ca.rs +++ b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/signing_ca.rs @@ -1,13 +1,31 @@ //! `SeaORM` Entity. 
Generated by sea-orm-codegen 0.11.2 +use sea_orm::entity::prelude::*; +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "signing_ca")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub id: String, + pub organization: String, + pub cert: String, + #[sea_orm(unique)] + pub key: String, + pub expires: i64, + #[sea_orm(unique)] + pub nonce: String, +} -use sea_orm :: entity :: prelude :: * ; +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm(has_one = "super::network::Entity")] + Network, +} -# [derive (Clone , Debug , PartialEq , DeriveEntityModel , Eq)] # [sea_orm (table_name = "signing_ca")] pub struct Model { # [sea_orm (primary_key , auto_increment = false)] pub id : String , pub organization : String , pub cert : String , # [sea_orm (unique)] pub key : String , pub expires : i64 , # [sea_orm (unique)] pub nonce : String , } +impl Related for Entity { + fn to() -> RelationDef { + Relation::Network.def() + } +} -# [derive (Copy , Clone , Debug , EnumIter , DeriveRelation)] pub enum Relation { # [sea_orm (has_one = "super::network::Entity")] Network , } - -impl Related < super :: network :: Entity > for Entity { fn to () -> RelationDef { Relation :: Network . def () } } - -impl ActiveModelBehavior for ActiveModel { } \ No newline at end of file +impl ActiveModelBehavior for ActiveModel {} diff --git a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/totp_authenticator.rs b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/totp_authenticator.rs index 630c391..5437596 100644 --- a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/totp_authenticator.rs +++ b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/totp_authenticator.rs @@ -1,13 +1,38 @@ //! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.2 +use sea_orm::entity::prelude::*; +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "totp_authenticator")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub id: String, + #[sea_orm(unique)] + pub secret: String, + #[sea_orm(unique)] + pub url: String, + pub verified: bool, + pub expires_on: i64, + #[sea_orm(unique)] + pub user: String, +} -use sea_orm :: entity :: prelude :: * ; +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::user::Entity", + from = "Column::User", + to = "super::user::Column::Id", + on_update = "Cascade", + on_delete = "Cascade" + )] + User, +} -# [derive (Clone , Debug , PartialEq , DeriveEntityModel , Eq)] # [sea_orm (table_name = "totp_authenticator")] pub struct Model { # [sea_orm (primary_key , auto_increment = false)] pub id : String , # [sea_orm (unique)] pub secret : String , # [sea_orm (unique)] pub url : String , pub verified : bool , pub expires_on : i64 , # [sea_orm (unique)] pub user : String , } +impl Related for Entity { + fn to() -> RelationDef { + Relation::User.def() + } +} -# [derive (Copy , Clone , Debug , EnumIter , DeriveRelation)] pub enum Relation { # [sea_orm (belongs_to = "super::user::Entity" , from = "Column::User" , to = "super::user::Column::Id" , on_update = "Cascade" , on_delete = "Cascade" ,)] User , } - -impl Related < super :: user :: Entity > for Entity { fn to () -> RelationDef { Relation :: User . 
def () } } - -impl ActiveModelBehavior for ActiveModel { } \ No newline at end of file +impl ActiveModelBehavior for ActiveModel {} diff --git a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/user.rs b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/user.rs index 23fc564..0d7b61f 100644 --- a/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/user.rs +++ b/trifid-api/trifid_api_migration/trifid_api_entities/src/entity/user.rs @@ -1,21 +1,58 @@ //! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.2 +use sea_orm::entity::prelude::*; +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)] +#[sea_orm(table_name = "user")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub id: String, + #[sea_orm(unique)] + pub email: String, +} -use sea_orm :: entity :: prelude :: * ; +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm(has_many = "super::auth_token::Entity")] + AuthToken, + #[sea_orm(has_many = "super::magic_link::Entity")] + MagicLink, + #[sea_orm(has_one = "super::organization::Entity")] + Organization, + #[sea_orm(has_many = "super::session_token::Entity")] + SessionToken, + #[sea_orm(has_one = "super::totp_authenticator::Entity")] + TotpAuthenticator, +} -# [derive (Clone , Debug , PartialEq , DeriveEntityModel , Eq)] # [sea_orm (table_name = "user")] pub struct Model { # [sea_orm (primary_key , auto_increment = false)] pub id : String , # [sea_orm (unique)] pub email : String , } +impl Related for Entity { + fn to() -> RelationDef { + Relation::AuthToken.def() + } +} -# [derive (Copy , Clone , Debug , EnumIter , DeriveRelation)] pub enum Relation { # [sea_orm (has_many = "super::auth_token::Entity")] AuthToken , # [sea_orm (has_many = "super::magic_link::Entity")] MagicLink , # [sea_orm (has_one = "super::organization::Entity")] Organization , # [sea_orm (has_many = "super::session_token::Entity")] SessionToken , # [sea_orm (has_one = "super::totp_authenticator::Entity")] TotpAuthenticator , } +impl Related for Entity { + fn to() -> RelationDef { + Relation::MagicLink.def() + } +} -impl Related < super :: auth_token :: Entity > for Entity { fn to () -> RelationDef { Relation :: AuthToken . def () } } +impl Related for Entity { + fn to() -> RelationDef { + Relation::Organization.def() + } +} -impl Related < super :: magic_link :: Entity > for Entity { fn to () -> RelationDef { Relation :: MagicLink . def () } } +impl Related for Entity { + fn to() -> RelationDef { + Relation::SessionToken.def() + } +} -impl Related < super :: organization :: Entity > for Entity { fn to () -> RelationDef { Relation :: Organization . def () } } +impl Related for Entity { + fn to() -> RelationDef { + Relation::TotpAuthenticator.def() + } +} -impl Related < super :: session_token :: Entity > for Entity { fn to () -> RelationDef { Relation :: SessionToken . def () } } - -impl Related < super :: totp_authenticator :: Entity > for Entity { fn to () -> RelationDef { Relation :: TotpAuthenticator . def () } } - -impl ActiveModelBehavior for ActiveModel { } \ No newline at end of file +impl ActiveModelBehavior for ActiveModel {}
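
The migration files touched above only change formatting; applying them is done through sea-orm-migration. The sketch below is illustrative only and assumes trifid_api_migration exposes the conventional `Migrator` type that a `sea-orm-migration` crate's lib.rs normally defines (the Migrator itself is not part of this diff):

use sea_orm::{Database, DbErr};
use sea_orm_migration::MigratorTrait;
use trifid_api_migration::Migrator; // assumed re-export from the migration crate's lib.rs

async fn run_migrations(url: &str) -> Result<(), DbErr> {
    // Connect and apply every pending `up()` step (firewall_rule, host,
    // host_static_address, host_config_override, ...) in timestamp order.
    let db = Database::connect(url).await?;
    Migrator::up(&db, None).await
}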
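
Similarly, a minimal sketch of how the regenerated entities and their `Related` impls are exercised at query time; the helper name and the `DatabaseConnection` handle are assumptions, not code from this change:

use sea_orm::{ColumnTrait, DatabaseConnection, DbErr, EntityTrait, ModelTrait, QueryFilter};
use trifid_api_entities::entity::{session_token, user};

// Hypothetical helper: fetch every session token belonging to the user with the
// given address, looked up via the unique `email` column declared in user.rs above.
async fn sessions_for_email(
    db: &DatabaseConnection,
    email: &str,
) -> Result<Vec<session_token::Model>, DbErr> {
    let Some(u) = user::Entity::find()
        .filter(user::Column::Email.eq(email))
        .one(db)
        .await?
    else {
        return Ok(Vec::new());
    };

    // `Related<session_token::Entity>` (generated above) supplies the join condition.
    u.find_related(session_token::Entity).all(db).await
}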