host blocking + general code cleanup

c0repwn3r 2023-05-10 20:32:19 -04:00
parent a068741986
commit 6d01d8703b
Signed by: core
GPG Key ID: FDBF740DADDCEECF
49 changed files with 2745 additions and 1598 deletions

View File

@@ -1,3 +1,3 @@
fn main() {
println!("cargo:rerun-if-changed=migrations/");
}

View File

@@ -14,93 +14,125 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use actix_web::HttpRequest;
use std::error::Error;
use crate::timers::expired;
use crate::tokens::get_token_type;
use sea_orm::{ColumnTrait, Condition, DatabaseConnection, EntityTrait, QueryFilter};
use trifid_api_entities::entity::api_key;
use trifid_api_entities::entity::api_key_scope;
use trifid_api_entities::entity::user;
use trifid_api_entities::entity::{auth_token, session_token};
pub enum TokenInfo {
SessionToken(SessionTokenInfo),
AuthToken(AuthTokenInfo),
ApiToken(ApiTokenInfo),
NotPresent,
}
pub struct SessionTokenInfo {
pub token: String,
pub user: SessionTokenUser,
pub expires_at: i64,
}
pub struct SessionTokenUser {
pub id: String,
pub email: String,
}
pub struct ApiTokenInfo {
pub scopes: Vec<String>,
pub organization: String,
}
pub struct AuthTokenInfo {
pub token: String,
pub session_info: SessionTokenInfo,
}
pub async fn enforce_session(
req: &HttpRequest,
db: &DatabaseConnection,
) -> Result<TokenInfo, Box<dyn Error>> {
let header = req
.headers()
.get("Authorization")
.ok_or("Missing authorization header")?;
let authorization = header.to_str()?;
let authorization_split: Vec<&str> = authorization.split(' ').collect();
if authorization_split[0] != "Bearer" {
return Err("Not a bearer token".into());
}
let tokens = &authorization_split[1..];
let sess_token = tokens
.iter()
.find(|i| get_token_type(i).unwrap_or("n-sess") == "sess")
.copied()
.ok_or("Missing session token")?;
let token: session_token::Model = session_token::Entity::find()
.filter(session_token::Column::Id.eq(sess_token))
.one(db)
.await?
.ok_or("Invalid session token")?;
if expired(token.expires_on as u64) {
return Err("Token expired".into());
}
let user: user::Model = user::Entity::find()
.filter(user::Column::Id.eq(token.user))
.one(db)
.await?
.ok_or("Session token has a nonexistent user")?;
Ok(TokenInfo::SessionToken(SessionTokenInfo {
token: token.id,
user: SessionTokenUser {
id: user.id,
email: user.email,
},
expires_at: token.expires_on,
}))
}
pub async fn enforce_2fa(
req: &HttpRequest,
db: &DatabaseConnection,
) -> Result<TokenInfo, Box<dyn Error>> {
let session_data = match enforce_session(req, db).await? {
TokenInfo::SessionToken(i) => i,
_ => unreachable!(),
};
let header = req
.headers()
.get("Authorization")
.ok_or("Missing authorization header")?;
let authorization = header.to_str()?;
let authorization_split: Vec<&str> = authorization.split(' ').collect();
if authorization_split[0] != "Bearer" {
return Err("Not a bearer token".into());
}
let tokens = &authorization_split[1..];
let auth_token = tokens
.iter()
.find(|i| get_token_type(i).unwrap_or("n-auth") == "auth")
.copied()
.ok_or("Missing auth token")?;
let token: auth_token::Model = auth_token::Entity::find()
.filter(auth_token::Column::Id.eq(auth_token))
.one(db)
.await?
.ok_or("Invalid session token")?;
if expired(token.expires_on as u64) {
return Err("Token expired".into());
@@ -112,17 +144,28 @@ pub async fn enforce_2fa(req: &HttpRequest, db: &DatabaseConnection) -> Result<T
}))
}
pub async fn enforce_api_token(
req: &HttpRequest,
scopes: &[&str],
db: &DatabaseConnection,
) -> Result<TokenInfo, Box<dyn Error>> {
let header = req
.headers()
.get("Authorization")
.ok_or("Missing authorization header")?;
let authorization = header.to_str()?;
let authorization_split: Vec<&str> = authorization.split(' ').collect();
if authorization_split[0] != "Bearer" {
return Err("Not a bearer token".into());
}
let tokens = &authorization_split[1..];
let api_token = tokens
.iter()
.find(|i| get_token_type(i).unwrap_or("n-tfkey") == "tfkey")
.copied()
.ok_or("Missing api token")?;
// API tokens are special and have a different form than other keys.
// They follow the form:
@@ -135,10 +178,19 @@ pub async fn enforce_api_token(req: &HttpRequest, scopes: &[&str], db: &Database
let token_id = format!("{}-{}", api_token_split[0], api_token_split[1]);
let token_key = api_token_split[2].to_string();
let token: api_key::Model = api_key::Entity::find()
.filter(
Condition::all()
.add(api_key::Column::Id.eq(token_id))
.add(api_key::Column::Key.eq(token_key)),
)
.one(db)
.await?
.ok_or("Invalid api token")?;
let token_scopes: Vec<api_key_scope::Model> = api_key_scope::Entity::find()
.filter(api_key_scope::Column::ApiKey.eq(api_token))
.all(db)
.await?;
let token_scopes: Vec<&str> = token_scopes.iter().map(|i| i.scope.as_str()).collect();
for scope in scopes {
@@ -151,4 +203,4 @@ pub async fn enforce_api_token(req: &HttpRequest, scopes: &[&str], db: &Database
scopes: token_scopes.iter().map(|i| i.to_string()).collect(),
organization: token.organization,
}))
}
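For context, here is a minimal sketch (not part of this commit) of how a route handler is expected to consume these helpers, following the same match-and-401 pattern the endpoints later in this diff use. The route path, handler name, and the "example:read" scope are hypothetical.

use actix_web::{get, web::Data, HttpRequest, HttpResponse};

// Hypothetical illustration only; real handlers live under routes::v1.
#[get("/v1/example")]
pub async fn example_handler(req: HttpRequest, db: Data<crate::AppState>) -> HttpResponse {
    // enforce_api_token checks the bearer token and the listed scopes against the database.
    match crate::auth_tokens::enforce_api_token(&req, &["example:read"], &db.conn).await {
        Ok(crate::auth_tokens::TokenInfo::ApiToken(info)) => {
            HttpResponse::Ok().body(format!("organization {}", info.organization))
        }
        _ => HttpResponse::Unauthorized().finish(),
    }
}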

View File

@@ -14,11 +14,11 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use log::error;
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
use std::fs;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
pub static CONFIG: Lazy<TrifidConfig> = Lazy::new(|| {
let config_str = match fs::read_to_string("/etc/trifid/config.toml") {
@@ -43,7 +43,7 @@ pub struct TrifidConfig {
pub database: TrifidConfigDatabase,
pub server: TrifidConfigServer,
pub tokens: TrifidConfigTokens,
pub crypto: TrifidConfigCryptography,
}
#[derive(Serialize, Deserialize, Debug)]
@@ -62,13 +62,13 @@ pub struct TrifidConfigDatabase {
#[serde(default = "time_defaults")]
pub max_lifetime: u64,
#[serde(default = "sqlx_logging_default")]
pub sqlx_logging: bool,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct TrifidConfigServer {
#[serde(default = "socketaddr_8080")]
pub bind: SocketAddr,
}
#[derive(Serialize, Deserialize, Debug)]
@@ -80,20 +80,38 @@ pub struct TrifidConfigTokens {
#[serde(default = "totp_setup_timeout_time")]
pub totp_setup_timeout_time_seconds: u64,
#[serde(default = "mfa_tokens_expiry_time")]
pub mfa_tokens_expiry_time_seconds: u64,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct TrifidConfigCryptography {
pub data_encryption_key: String,
}
fn max_connections_default() -> u32 {
100
}
fn min_connections_default() -> u32 {
5
}
fn time_defaults() -> u64 {
8
}
fn sqlx_logging_default() -> bool {
true
}
fn socketaddr_8080() -> SocketAddr {
SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::from([0, 0, 0, 0]), 8080))
}
fn magic_link_expiry_time() -> u64 {
3600
} // 1 hour
fn session_token_expiry_time() -> u64 {
15780000
} // 6 months
fn totp_setup_timeout_time() -> u64 {
600
} // 10 minutes
fn mfa_tokens_expiry_time() -> u64 {
600
} // 10 minutes
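A small sanity-check sketch (not from the commit) of how the serde defaults above behave: a section with every defaulted field omitted falls back to the default functions. serde_json is used here only for brevity; the real file at /etc/trifid/config.toml is TOML, and serde_json being available is an assumption.

#[cfg(test)]
mod default_tests {
    use super::*;

    #[test]
    fn server_bind_defaults_to_0_0_0_0_8080() {
        // An empty object exercises #[serde(default = "socketaddr_8080")].
        let server: TrifidConfigServer = serde_json::from_str("{}").unwrap();
        assert_eq!(server.bind, socketaddr_8080());
    }
}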

View File

@@ -14,25 +14,33 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::config::TrifidConfig;
use aes_gcm::aead::{Aead, Payload};
use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
use rand::Rng;
use std::error::Error;
use trifid_pki::rand_core::OsRng;
pub fn get_cipher_from_config(config: &TrifidConfig) -> Result<Aes256Gcm, Box<dyn Error>> {
let key_slice = hex::decode(&config.crypto.data_encryption_key)?;
Ok(Aes256Gcm::new_from_slice(&key_slice)?)
}
pub fn encrypt_with_nonce(
plaintext: &[u8],
nonce: [u8; 12],
cipher: &Aes256Gcm,
) -> Result<Vec<u8>, aes_gcm::Error> {
let nonce = Nonce::from_slice(&nonce);
let ciphertext = cipher.encrypt(nonce, plaintext)?;
Ok(ciphertext)
}
pub fn decrypt_with_nonce(
ciphertext: &[u8],
nonce: [u8; 12],
cipher: &Aes256Gcm,
) -> Result<Vec<u8>, aes_gcm::Error> {
let nonce = Nonce::from_slice(&nonce);
let plaintext = cipher.decrypt(nonce, Payload::from(ciphertext))?;
Ok(plaintext)
@@ -40,4 +48,4 @@ pub fn decrypt_with_nonce(ciphertext: &[u8], nonce: [u8; 12], cipher: &Aes256Gcm
pub fn generate_random_iv() -> [u8; 12] {
OsRng.gen()
}
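A minimal round-trip sketch (not from the commit) using the helpers above; the all-zero 32-byte key is a throwaway example value, not a real data_encryption_key.

#[cfg(test)]
mod round_trip_tests {
    use super::*;
    use aes_gcm::{Aes256Gcm, KeyInit};

    #[test]
    fn encrypt_decrypt_round_trip() {
        // Throwaway key; production code derives the cipher via get_cipher_from_config.
        let cipher = Aes256Gcm::new_from_slice(&[0u8; 32]).unwrap();
        let iv = generate_random_iv();
        let ciphertext = encrypt_with_nonce(b"hello", iv, &cipher).unwrap();
        let plaintext = decrypt_with_nonce(&ciphertext, iv, &cipher).unwrap();
        assert_eq!(plaintext, b"hello".to_vec());
    }
}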

View File

@@ -14,13 +14,13 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use base64::Engine;
use serde::{Deserialize, Serialize};
use std::error::Error;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Cursor {
pub page: u64,
}
impl TryFrom<Cursor> for String {
@@ -41,9 +41,7 @@ impl TryFrom<String> for Cursor {
fn try_from(value: String) -> Result<Self, Self::Error> {
if value.is_empty() {
// If empty, it's page 0
return Ok(Cursor { page: 0 });
}
// Base64-decode the value
let json_bytes = base64::engine::general_purpose::STANDARD.decode(value)?;
@@ -53,4 +51,4 @@ impl TryFrom<String> for Cursor {
let cursor = serde_json::from_str(&json_str)?;
Ok(cursor)
}
}
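A round-trip sketch (not from the commit) of the cursor encoding, assuming the String conversion above mirrors the decode path shown here (compact JSON, then STANDARD base64); under that assumption page 2 encodes to "eyJwYWdlIjoyfQ==".

#[cfg(test)]
mod cursor_tests {
    use super::*;

    #[test]
    fn cursor_round_trips_through_base64_json() {
        let encoded: String = Cursor { page: 2 }.try_into().unwrap();
        let decoded: Cursor = encoded.try_into().unwrap();
        assert_eq!(decoded.page, 2);
        // An empty cursor string is treated as page 0 by the TryFrom<String> impl above.
        let first: Cursor = String::new().try_into().unwrap();
        assert_eq!(first.page, 0);
    }
}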

View File

@@ -15,11 +15,11 @@
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use actix_web::error::{JsonPayloadError, PayloadError};
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct APIErrorsResponse {
pub errors: Vec<APIError>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct APIError {
@@ -27,10 +27,12 @@ pub struct APIError {
pub message: String,
#[serde(skip_serializing_if = "is_none")]
#[serde(default)]
pub path: Option<String>,
}
fn is_none<T>(o: &Option<T>) -> bool {
o.is_none()
}
impl From<&JsonPayloadError> for APIError {
fn from(value: &JsonPayloadError) -> Self {
@@ -87,58 +89,44 @@ impl From<&JsonPayloadError> for APIError {
impl From<&PayloadError> for APIError {
fn from(value: &PayloadError) -> Self {
match value {
PayloadError::Incomplete(e) => APIError {
code: "ERR_UNEXPECTED_EOF".to_string(),
message: match e {
None => "Payload reached EOF but was incomplete".to_string(),
Some(e) => format!("Payload reached EOF but was incomplete: {}", e),
},
path: None,
},
PayloadError::EncodingCorrupted => APIError {
code: "ERR_CORRUPTED_PAYLOAD".to_string(),
message: "Payload content encoding corrupted".to_string(),
path: None,
},
PayloadError::Overflow => APIError {
code: "ERR_PAYLOAD_OVERFLOW".to_string(),
message: "Payload reached size limit".to_string(),
path: None,
},
PayloadError::UnknownLength => APIError {
code: "ERR_PAYLOAD_UNKNOWN_LENGTH".to_string(),
message: "Unable to determine payload length".to_string(),
path: None,
},
PayloadError::Http2Payload(e) => APIError {
code: "ERR_HTTP2_ERROR".to_string(),
message: format!("HTTP/2 error: {}", e),
path: None,
},
PayloadError::Io(e) => APIError {
code: "ERR_IO_ERROR".to_string(),
message: format!("I/O error: {}", e),
path: None,
},
_ => APIError {
code: "ERR_UNKNOWN_ERROR".to_string(),
message: "An unknown error has occurred".to_string(),
path: None,
},
}
}
}
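A serialization sketch (not from the commit) of the error envelope these conversions feed into; it assumes `code` is declared ahead of `message` in APIError, as the constructors above suggest, and that serde_json is available as elsewhere in the crate.

#[cfg(test)]
mod error_shape_tests {
    use super::*;

    #[test]
    fn none_path_is_omitted_from_the_json_body() {
        let body = serde_json::to_string(&APIErrorsResponse {
            errors: vec![APIError {
                code: "ERR_EXAMPLE".to_string(),
                message: "Example error".to_string(),
                path: None,
            }],
        })
        .unwrap();
        // path: None is dropped entirely because of skip_serializing_if = "is_none".
        assert_eq!(
            body,
            r#"{"errors":[{"code":"ERR_EXAMPLE","message":"Example error"}]}"#
        );
    }
}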

View File

@@ -14,12 +14,12 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use log::info;
use std::error::Error;
pub fn send_magic_link(token: &str) -> Result<(), Box<dyn Error>> {
// TODO: actually do this
info!("sent magic link {}", token);
Ok(())
}

View File

@@ -14,30 +14,33 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use actix_request_identifier::RequestIdentifier;
use actix_web::{
web::{Data, JsonConfig},
App, HttpResponse, HttpServer,
};
use log::{info, Level};
use sea_orm::{ConnectOptions, Database, DatabaseConnection};
use std::error::Error;
use std::time::Duration;
use trifid_api_migration::{Migrator, MigratorTrait};
use crate::config::CONFIG;
use crate::error::{APIError, APIErrorsResponse};
use crate::tokens::random_id_no_id;
pub mod auth_tokens;
pub mod config;
pub mod crypto;
pub mod cursor;
pub mod error;
pub mod magic_link;
pub mod routes;
pub mod timers;
pub mod tokens;
pub struct AppState {
pub conn: DatabaseConnection,
}
#[actix_web::main]
@@ -61,9 +64,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
info!("Performing database migration...");
Migrator::up(&db, None).await?;
let data = Data::new(AppState { conn: db });
HttpServer::new(move || {
App::new()
@@ -73,11 +74,10 @@ async fn main() -> Result<(), Box<dyn Error>> {
actix_web::error::InternalError::from_response(
err,
HttpResponse::BadRequest().json(APIErrorsResponse {
errors: vec![api_error],
}),
)
.into()
}))
.wrap(RequestIdentifier::with_generator(random_id_no_id))
.service(routes::v1::auth::magic_link::magic_link_request)
@@ -100,7 +100,11 @@ async fn main() -> Result<(), Box<dyn Error>> {
.service(routes::v1::hosts::get_host)
.service(routes::v1::hosts::delete_host)
.service(routes::v1::hosts::edit_host)
.service(routes::v1::hosts::block_host)
})
.bind(CONFIG.server.bind)?
.run()
.await?;
Ok(())
}

View File

@@ -1 +1 @@
pub mod v1;

View File

@@ -19,29 +19,29 @@
// This endpoint is considered done. No major features should be added or removed, unless it fixes bugs.
// This endpoint requires the `definednetworking` extension to be enabled to be used.
use crate::config::CONFIG;
use crate::error::{APIError, APIErrorsResponse};
use crate::magic_link::send_magic_link;
use crate::timers::expires_in_seconds;
use crate::tokens::random_token;
use crate::AppState;
use actix_web::web::{Data, Json};
use actix_web::{post, HttpResponse};
use log::error;
use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter};
use serde::{Deserialize, Serialize};
use trifid_api_entities::entity::user;
use trifid_api_entities::entity::user::Entity as UserEntity;
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct MagicLinkRequest {
pub email: String,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct MagicLinkResponse {
pub data: MagicLinkResponseData,
pub metadata: MagicLinkResponseMetadata,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct MagicLinkResponseData {}
@@ -50,19 +50,23 @@ pub struct MagicLinkResponseMetadata {}
#[post("/v1/auth/magic-link")]
pub async fn magic_link_request(data: Data<AppState>, req: Json<MagicLinkRequest>) -> HttpResponse {
let user: Option<user::Model> = match UserEntity::find()
.filter(user::Column::Email.eq(&req.email))
.one(&data.conn)
.await
{
Ok(r) => r,
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
};
@@ -70,13 +74,11 @@ pub async fn magic_link_request(data: Data<AppState>, req: Json<MagicLinkRequest
Some(u) => u,
None => {
return HttpResponse::Unauthorized().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_USER_DOES_NOT_EXIST".to_string(),
message: "That user does not exist.".to_string(),
path: None,
}],
})
}
};
@@ -92,14 +94,14 @@ pub async fn magic_link_request(data: Data<AppState>, req: Json<MagicLinkRequest
Err(e) => {
error!("error sending magic link: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_ML_ERROR".to_string(),
message:
"There was an error sending the magic link email, please try again later."
.to_string(),
path: None,
}],
});
}
}
@@ -110,19 +112,19 @@ pub async fn magic_link_request(data: Data<AppState>, req: Json<MagicLinkRequest
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
}
HttpResponse::Ok().json(MagicLinkResponse {
data: MagicLinkResponseData {},
metadata: MagicLinkResponseMetadata {},
})
}

View File

@@ -1,3 +1,3 @@
pub mod magic_link;
pub mod totp;
pub mod verify_magic_link;

View File

@@ -19,95 +19,95 @@
// This endpoint is considered done. No major features should be added or removed, unless it fixes bugs.
// This endpoint requires the `definednetworking` extension to be enabled to be used.
use crate::auth_tokens::{enforce_session, TokenInfo};
use crate::error::{APIError, APIErrorsResponse};
use crate::AppState;
use actix_web::web::{Data, Json};
use actix_web::{post, HttpRequest, HttpResponse};
use log::{debug, error};
use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter};
use serde::{Deserialize, Serialize};
use trifid_api_entities::entity::totp_authenticator;
use crate::config::CONFIG;
use crate::timers::expires_in_seconds;
use crate::tokens::random_token;
use totp_rs::{Secret, TOTP};
use trifid_api_entities::entity::auth_token;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TotpRequest {
pub code: String,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TotpResponse {
pub data: TotpResponseData,
pub metadata: TotpResponseMetadata,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TotpResponseData {
#[serde(rename = "authToken")]
pub auth_token: String,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TotpResponseMetadata {}
#[post("/v1/auth/totp")]
pub async fn totp_request(
req: Json<TotpRequest>,
req_data: HttpRequest,
db: Data<AppState>,
) -> HttpResponse {
// require a user session
let session_token = match enforce_session(&req_data, &db.conn).await {
Ok(r) => match r {
TokenInfo::SessionToken(i) => i,
_ => unreachable!(),
},
Err(e) => {
error!("error enforcing session: {}", e);
return HttpResponse::Unauthorized().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_UNAUTHORIZED".to_string(),
message: "Unauthorized".to_string(),
path: None,
}],
});
}
};
// determine if the user has a totp authenticator
let auther = match totp_authenticator::Entity::find()
.filter(totp_authenticator::Column::User.eq(&session_token.user.id))
.one(&db.conn)
.await
{
Ok(r) => r,
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
};
let auther = match auther {
Some(a) => a,
None => {
return HttpResponse::BadRequest().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_USER_NO_TOTP".to_string(),
message: "This user does not have a totp authenticator".to_string(),
path: None,
}],
});
}
};
@@ -118,30 +118,26 @@ pub async fn totp_request(req: Json<TotpRequest>, req_data: HttpRequest, db: Dat
Err(e) => {
error!("totp url error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_SECRET_ERROR".to_string(),
message: "There was an error parsing the totpmachine. Please try again later."
.to_string(),
path: None,
}],
});
}
};
let valid = match totpmachine.check_current(&req.code) {
Ok(valid) => valid,
Err(e) => {
error!("system time error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_TIME_ERROR".to_string(),
message: "There was an error with the server-side time clock.".to_string(),
path: None,
}],
});
}
};
@@ -150,14 +146,12 @@ pub async fn totp_request(req: Json<TotpRequest>, req_data: HttpRequest, db: Dat
debug!("current: {}", totpmachine.generate_current().unwrap());
error!("user sent invalid totp code");
return HttpResponse::Unauthorized().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_UNAUTHORIZED".to_string(),
message: "Unauthorized".to_string(),
path: None,
}],
});
}
let model: auth_token::Model = auth_token::Model {
@@ -172,13 +166,11 @@ pub async fn totp_request(req: Json<TotpRequest>, req_data: HttpRequest, db: Dat
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message: "There was an error issuing the authentication token.".to_string(),
path: None,
}],
});
}
}
@@ -187,4 +179,4 @@ pub async fn totp_request(req: Json<TotpRequest>, req_data: HttpRequest, db: Dat
data: TotpResponseData { auth_token: token },
metadata: TotpResponseMetadata {},
})
}
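A wire-format sketch (not from the commit) of this endpoint's request and response bodies, derived from the serde structs above; the token value is a made-up placeholder and serde_json as a dev dependency is an assumption.

#[cfg(test)]
mod totp_shape_tests {
    use super::*;

    #[test]
    fn request_and_response_bodies_match_the_serde_structs() {
        let req: TotpRequest = serde_json::from_str(r#"{"code":"123456"}"#).unwrap();
        assert_eq!(req.code, "123456");

        let resp = serde_json::to_string(&TotpResponse {
            data: TotpResponseData {
                auth_token: "example-auth-token".to_string(),
            },
            metadata: TotpResponseMetadata {},
        })
        .unwrap();
        // The auth_token field is renamed to authToken on the wire.
        assert_eq!(
            resp,
            r#"{"data":{"authToken":"example-auth-token"},"metadata":{}}"#
        );
    }
}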

View File

@@ -19,56 +19,65 @@
// This endpoint is considered done. No major features should be added or removed, unless it fixes bugs.
// This endpoint requires the `definednetworking` extension to be enabled to be used.
use crate::config::CONFIG;
use crate::error::{APIError, APIErrorsResponse};
use crate::timers::{expired, expires_in_seconds};
use crate::tokens::random_token;
use crate::AppState;
use actix_web::web::{Data, Json};
use actix_web::{post, HttpResponse};
use log::error;
use sea_orm::{
ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, ModelTrait, QueryFilter,
};
use serde::{Deserialize, Serialize};
use trifid_api_entities::entity::magic_link;
use trifid_api_entities::entity::magic_link::Model;
use trifid_api_entities::entity::session_token;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct VerifyMagicLinkRequest {
#[serde(rename = "magicLinkToken")]
pub magic_link_token: String,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct VerifyMagicLinkResponse {
pub data: VerifyMagicLinkResponseData,
pub metadata: VerifyMagicLinkResponseMetadata,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct VerifyMagicLinkResponseData {
#[serde(rename = "sessionToken")]
pub session_token: String,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct VerifyMagicLinkResponseMetadata {}
#[post("/v1/auth/verify-magic-link")]
pub async fn verify_magic_link_request(
db: Data<AppState>,
req: Json<VerifyMagicLinkRequest>,
) -> HttpResponse {
let link: Option<Model> = match magic_link::Entity::find()
.filter(magic_link::Column::Id.eq(&req.magic_link_token))
.one(&db.conn)
.await
{
Ok(r) => r,
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
};
@@ -76,27 +85,23 @@ pub async fn verify_magic_link_request(db: Data<AppState>, req: Json<VerifyMagic
Some(l) => l,
None => {
return HttpResponse::Unauthorized().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_UNAUTHORIZED".to_string(),
message: "Unauthorized".to_string(),
path: None,
}],
})
}
};
if expired(link.expires_on as u64) {
return HttpResponse::Unauthorized().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_EXPIRED".to_string(),
message: "Magic link token expired".to_string(),
path: None,
}],
});
}
let user = link.user.clone();
@@ -106,14 +111,14 @@ pub async fn verify_magic_link_request(db: Data<AppState>, req: Json<VerifyMagic
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
}
@@ -130,23 +135,21 @@ pub async fn verify_magic_link_request(db: Data<AppState>, req: Json<VerifyMagic
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
}
HttpResponse::Ok().json(VerifyMagicLinkResponse {
data: VerifyMagicLinkResponseData {
session_token: token,
},
metadata: VerifyMagicLinkResponseMetadata {},
})
}

File diff suppressed because it is too large

View File

@@ -1,9 +1,9 @@
pub mod auth;
pub mod hosts;
pub mod networks;
pub mod organization;
pub mod roles;
pub mod signup;
pub mod totp_authenticators;
pub mod trifid;
pub mod verify_totp_authenticators;

View File

@ -24,24 +24,24 @@
// This endpoint is considered done. No major features should be added or removed, unless it fixes bugs. // This endpoint is considered done. No major features should be added or removed, unless it fixes bugs.
// This endpoint requires the `definednetworking` extension to be enabled to be used. // This endpoint requires the `definednetworking` extension to be enabled to be used.
use serde::{Serialize, Deserialize}; use crate::auth_tokens::{enforce_2fa, enforce_api_token, TokenInfo};
use actix_web::{get, HttpRequest, HttpResponse}; use crate::cursor::Cursor;
use crate::error::{APIError, APIErrorsResponse};
use crate::timers::TIME_FORMAT;
use crate::AppState;
use actix_web::web::{Data, Path, Query}; use actix_web::web::{Data, Path, Query};
use actix_web::{get, HttpRequest, HttpResponse};
use chrono::{TimeZone, Utc}; use chrono::{TimeZone, Utc};
use log::error; use log::error;
use sea_orm::{ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, QueryOrder}; use sea_orm::{ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, QueryOrder};
use crate::AppState; use serde::{Deserialize, Serialize};
use crate::auth_tokens::{enforce_2fa, enforce_api_token, TokenInfo};
use crate::error::{APIError, APIErrorsResponse};
use trifid_api_entities::entity::organization;
use trifid_api_entities::entity::network; use trifid_api_entities::entity::network;
use crate::cursor::Cursor; use trifid_api_entities::entity::organization;
use crate::timers::TIME_FORMAT;
#[derive(Serialize, Deserialize, Debug, Clone)] #[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GetNetworksResponse { pub struct GetNetworksResponse {
pub data: Vec<GetNetworksResponseData>, pub data: Vec<GetNetworksResponseData>,
pub metadata: GetNetworksResponseMetadata pub metadata: GetNetworksResponseMetadata,
} }
#[derive(Serialize, Deserialize, Debug, Clone)] #[derive(Serialize, Deserialize, Debug, Clone)]
@ -56,7 +56,7 @@ pub struct GetNetworksResponseData {
pub created_at: String, // 2023-03-22T18:55:47.009Z pub created_at: String, // 2023-03-22T18:55:47.009Z
pub name: String, pub name: String,
#[serde(rename = "lighthousesAsRelays")] #[serde(rename = "lighthousesAsRelays")]
pub lighthouses_as_relays: bool pub lighthouses_as_relays: bool,
} }
#[derive(Serialize, Deserialize, Debug, Clone)] #[derive(Serialize, Deserialize, Debug, Clone)]
@ -72,16 +72,18 @@ pub struct GetNetworksResponseMetadata {
#[serde(default, rename = "nextCursor")] #[serde(default, rename = "nextCursor")]
pub next_cursor: Option<String>, pub next_cursor: Option<String>,
#[serde(default)] #[serde(default)]
pub page: Option<GetNetworksResponseMetadataPage> pub page: Option<GetNetworksResponseMetadataPage>,
} }
#[derive(Serialize, Deserialize, Debug, Clone)] #[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GetNetworksResponseMetadataPage { pub struct GetNetworksResponseMetadataPage {
pub count: u64, pub count: u64,
pub start: u64 pub start: u64,
} }
fn u64_25() -> u64 { 25 } fn u64_25() -> u64 {
25
}
#[derive(Serialize, Deserialize, Debug, Clone)] #[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GetNetworksQueryParams { pub struct GetNetworksQueryParams {
@ -90,17 +92,27 @@ pub struct GetNetworksQueryParams {
#[serde(default)] #[serde(default)]
pub cursor: String, pub cursor: String,
#[serde(default = "u64_25", rename = "pageSize")] #[serde(default = "u64_25", rename = "pageSize")]
pub page_size: u64 pub page_size: u64,
} }
#[get("/v1/networks")] #[get("/v1/networks")]
pub async fn get_networks(opts: Query<GetNetworksQueryParams>, req_info: HttpRequest, db: Data<AppState>) -> HttpResponse { pub async fn get_networks(
opts: Query<GetNetworksQueryParams>,
req_info: HttpRequest,
db: Data<AppState>,
) -> HttpResponse {
// For this endpoint, you either need to be a fully authenticated user OR a token with networks:list // For this endpoint, you either need to be a fully authenticated user OR a token with networks:list
let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent); let session_info = enforce_2fa(&req_info, &db.conn)
let api_token_info = enforce_api_token(&req_info, &["networks:list"], &db.conn).await.unwrap_or(TokenInfo::NotPresent); .await
.unwrap_or(TokenInfo::NotPresent);
let api_token_info = enforce_api_token(&req_info, &["networks:list"], &db.conn)
.await
.unwrap_or(TokenInfo::NotPresent);
// If neither are present, throw an error // If neither are present, throw an error
if matches!(session_info, TokenInfo::NotPresent) && matches!(api_token_info, TokenInfo::NotPresent) { if matches!(session_info, TokenInfo::NotPresent)
&& matches!(api_token_info, TokenInfo::NotPresent)
{
return HttpResponse::Unauthorized().json(APIErrorsResponse { return HttpResponse::Unauthorized().json(APIErrorsResponse {
errors: vec![ errors: vec![
APIError { APIError {
@ -109,11 +121,13 @@ pub async fn get_networks(opts: Query<GetNetworksQueryParams>, req_info: HttpReq
path: None, path: None,
} }
], ],
}) });
} }
// If both are present, throw an error // If both are present, throw an error
if matches!(session_info, TokenInfo::AuthToken(_)) && matches!(api_token_info, TokenInfo::ApiToken(_)) { if matches!(session_info, TokenInfo::AuthToken(_))
&& matches!(api_token_info, TokenInfo::ApiToken(_))
{
return HttpResponse::BadRequest().json(APIErrorsResponse { return HttpResponse::BadRequest().json(APIErrorsResponse {
errors: vec![ errors: vec![
APIError { APIError {
@ -122,7 +136,7 @@ pub async fn get_networks(opts: Query<GetNetworksQueryParams>, req_info: HttpReq
path: None path: None
} }
], ],
}) });
} }
let org = match api_token_info { let org = match api_token_info {
@ -131,10 +145,14 @@ pub async fn get_networks(opts: Query<GetNetworksQueryParams>, req_info: HttpReq
// we have a session token, which means we have to do a db request to get the organization that this user owns // we have a session token, which means we have to do a db request to get the organization that this user owns
let user = match session_info { let user = match session_info {
TokenInfo::AuthToken(tkn) => tkn.session_info.user, TokenInfo::AuthToken(tkn) => tkn.session_info.user,
_ => unreachable!() _ => unreachable!(),
}; };
let org = match organization::Entity::find().filter(organization::Column::Owner.eq(user.id)).one(&db.conn).await { let org = match organization::Entity::find()
.filter(organization::Column::Owner.eq(user.id))
.one(&db.conn)
.await
{
Ok(r) => r, Ok(r) => r,
Err(e) => { Err(e) => {
error!("database error: {}", e); error!("database error: {}", e);
@ -161,7 +179,7 @@ pub async fn get_networks(opts: Query<GetNetworksQueryParams>, req_info: HttpReq
path: None path: None
} }
], ],
}) });
} }
} }
}; };
@ -171,18 +189,19 @@ pub async fn get_networks(opts: Query<GetNetworksQueryParams>, req_info: HttpReq
Err(e) => { Err(e) => {
error!("invalid cursor: {}", e); error!("invalid cursor: {}", e);
return HttpResponse::BadRequest().json(APIErrorsResponse { return HttpResponse::BadRequest().json(APIErrorsResponse {
errors: vec![ errors: vec![APIError {
APIError { code: "ERR_INVALID_CURSOR".to_string(),
code: "ERR_INVALID_CURSOR".to_string(), message: "The provided cursor was invalid, please try again later.".to_string(),
message: "The provided cursor was invalid, please try again later.".to_string(), path: None,
path: None }],
} });
],
})
} }
}; };
let network_pages = network::Entity::find().filter(network::Column::Organization.eq(org)).order_by_asc(network::Column::CreatedAt).paginate(&db.conn, opts.page_size); let network_pages = network::Entity::find()
.filter(network::Column::Organization.eq(org))
.order_by_asc(network::Column::CreatedAt)
.paginate(&db.conn, opts.page_size);
let total = match network_pages.num_items().await { let total = match network_pages.num_items().await {
Ok(r) => r, Ok(r) => r,
@ -230,17 +249,22 @@ pub async fn get_networks(opts: Query<GetNetworksQueryParams>, req_info: HttpReq
}); });
} }
}; };
let models_mapped: Vec<GetNetworksResponseData> = models.iter().map(|u| { let models_mapped: Vec<GetNetworksResponseData> = models
GetNetworksResponseData { .iter()
.map(|u| GetNetworksResponseData {
id: u.id.clone(), id: u.id.clone(),
cidr: u.cidr.clone(), cidr: u.cidr.clone(),
organization_id: u.organization.clone(), organization_id: u.organization.clone(),
signing_ca_id: u.signing_ca.clone(), signing_ca_id: u.signing_ca.clone(),
created_at: Utc.timestamp_opt(u.created_at, 0).unwrap().format(TIME_FORMAT).to_string(), created_at: Utc
.timestamp_opt(u.created_at, 0)
.unwrap()
.format(TIME_FORMAT)
.to_string(),
name: u.name.clone(), name: u.name.clone(),
lighthouses_as_relays: u.lighthouses_as_relays, lighthouses_as_relays: u.lighthouses_as_relays,
} })
}).collect(); .collect();
let count = models_mapped.len() as u64; let count = models_mapped.len() as u64;
@ -248,20 +272,28 @@ pub async fn get_networks(opts: Query<GetNetworksQueryParams>, req_info: HttpReq
data: models_mapped, data: models_mapped,
metadata: GetNetworksResponseMetadata { metadata: GetNetworksResponseMetadata {
total_count: total, total_count: total,
has_next_page: cursor.page+1 != pages, has_next_page: cursor.page + 1 != pages,
has_prev_page: cursor.page != 0, has_prev_page: cursor.page != 0,
prev_cursor: if cursor.page != 0 { prev_cursor: if cursor.page != 0 {
match (Cursor { page: cursor.page - 1 }).try_into() { match (Cursor {
page: cursor.page - 1,
})
.try_into()
{
Ok(r) => Some(r), Ok(r) => Some(r),
Err(_) => None Err(_) => None,
} }
} else { } else {
None None
}, },
next_cursor: if cursor.page+1 != pages { next_cursor: if cursor.page + 1 != pages {
match (Cursor { page: cursor.page + 1 }).try_into() { match (Cursor {
page: cursor.page + 1,
})
.try_into()
{
Ok(r) => Some(r), Ok(r) => Some(r),
Err(_) => None Err(_) => None,
} }
} else { } else {
None None
@ -271,19 +303,31 @@ pub async fn get_networks(opts: Query<GetNetworksQueryParams>, req_info: HttpReq
count, count,
start: opts.page_size * cursor.page, start: opts.page_size * cursor.page,
}) })
} else { None }, } else {
None
},
}, },
}) })
} }
#[get("/v1/networks/{network_id}")] #[get("/v1/networks/{network_id}")]
pub async fn get_network_request(net: Path<String>, req_info: HttpRequest, db: Data<AppState>) -> HttpResponse { pub async fn get_network_request(
net: Path<String>,
req_info: HttpRequest,
db: Data<AppState>,
) -> HttpResponse {
// For this endpoint, you either need to be a fully authenticated user OR a token with networks:list // For this endpoint, you either need to be a fully authenticated user OR a token with networks:list
let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent); let session_info = enforce_2fa(&req_info, &db.conn)
let api_token_info = enforce_api_token(&req_info, &["networks:read"], &db.conn).await.unwrap_or(TokenInfo::NotPresent); .await
.unwrap_or(TokenInfo::NotPresent);
let api_token_info = enforce_api_token(&req_info, &["networks:read"], &db.conn)
.await
.unwrap_or(TokenInfo::NotPresent);
// If neither are present, throw an error // If neither are present, throw an error
if matches!(session_info, TokenInfo::NotPresent) && matches!(api_token_info, TokenInfo::NotPresent) { if matches!(session_info, TokenInfo::NotPresent)
&& matches!(api_token_info, TokenInfo::NotPresent)
{
return HttpResponse::Unauthorized().json(APIErrorsResponse {
errors: vec![
APIError {
@ -292,11 +336,13 @@ pub async fn get_network_request(net: Path<String>, req_info: HttpRequest, db: D
path: None,
}
],
});
}
// If both are present, throw an error
if matches!(session_info, TokenInfo::AuthToken(_))
&& matches!(api_token_info, TokenInfo::ApiToken(_))
{
return HttpResponse::BadRequest().json(APIErrorsResponse {
errors: vec![
APIError {
@ -305,10 +351,14 @@ pub async fn get_network_request(net: Path<String>, req_info: HttpRequest, db: D
path: None
}
],
});
}
let network: Option<network::Model> = match network::Entity::find()
.filter(network::Column::Id.eq(net.into_inner()))
.one(&db.conn)
.await
{
Ok(r) => r,
Err(e) => {
error!("database error: {}", e);
@ -331,7 +381,11 @@ pub async fn get_network_request(net: Path<String>, req_info: HttpRequest, db: D
cidr: network.cidr,
organization_id: network.organization,
signing_ca_id: network.signing_ca,
created_at: Utc
.timestamp_opt(network.created_at, 0)
.unwrap()
.format(TIME_FORMAT)
.to_string(),
name: network.name,
lighthouses_as_relays: network.lighthouses_as_relays,
},
@ -339,22 +393,19 @@ pub async fn get_network_request(net: Path<String>, req_info: HttpRequest, db: D
})
} else {
HttpResponse::NotFound().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_MISSING_NETWORK".to_string(),
message: "Network does not exist".to_string(),
path: None,
}],
})
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GetNetworkResponse {
pub data: GetNetworksResponseData,
pub metadata: GetNetworkResponseMetadata,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GetNetworkResponseMetadata {}

View File

@ -19,59 +19,66 @@
// While this endpoint is considered done, help is wanted with reverse engineering the original API. Major features should not be added or removed unless it is replacing this endpoint with the correct, DN-compatible endpoint.
// This endpoint requires the `definednetworking` extension to be enabled to be used.
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use actix_web::{HttpRequest, HttpResponse};
use actix_web::web::{Data, Json};
use serde::{Serialize, Deserialize};
use crate::AppState;
use actix_web::post;
use log::error;
use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter};
use trifid_pki::cert::{NebulaCertificate, NebulaCertificateDetails, serialize_x25519_private};
use trifid_pki::ed25519_dalek::SigningKey;
use trifid_pki::rand_core::OsRng;
use trifid_api_entities::entity::{network, organization, signing_ca};
use crate::auth_tokens::{enforce_2fa, TokenInfo};
use crate::config::CONFIG;
use crate::crypto::{encrypt_with_nonce, generate_random_iv, get_cipher_from_config};
use crate::error::{APIError, APIErrorsResponse};
use crate::tokens::random_id;
use crate::AppState;
use actix_web::post;
use actix_web::web::{Data, Json};
use actix_web::{HttpRequest, HttpResponse};
use log::error;
use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter};
use serde::{Deserialize, Serialize};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use trifid_api_entities::entity::{network, organization, signing_ca};
use trifid_pki::cert::{serialize_x25519_private, NebulaCertificate, NebulaCertificateDetails};
use trifid_pki::ed25519_dalek::SigningKey;
use trifid_pki::rand_core::OsRng;
#[derive(Serialize, Deserialize)]
pub struct OrgCreateRequest {
pub cidr: String,
}
#[derive(Serialize, Deserialize)]
pub struct OrgCreateResponse {
pub organization: String,
pub ca: String,
pub network: String,
}
#[post("/v1/organization")] #[post("/v1/organization")]
pub async fn create_org_request(req: Json<OrgCreateRequest>, req_info: HttpRequest, db: Data<AppState>) -> HttpResponse { pub async fn create_org_request(
req: Json<OrgCreateRequest>,
req_info: HttpRequest,
db: Data<AppState>,
) -> HttpResponse {
// For this endpoint, you need to be a fully authenticated user
let session_info = enforce_2fa(&req_info, &db.conn)
.await
.unwrap_or(TokenInfo::NotPresent);
// we have a session token, which means we have to do a db request to get the organization that this user owns
let user = match session_info {
TokenInfo::AuthToken(tkn) => tkn.session_info.user,
_ => {
return HttpResponse::Unauthorized().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_UNAUTHORIZED".to_string(),
message: "Unauthorized".to_string(),
path: None,
}],
})
}
};
let org = match organization::Entity::find()
.filter(organization::Column::Owner.eq(&user.id))
.one(&db.conn)
.await
{
Ok(r) => r,
Err(e) => {
error!("database error: {}", e);
@ -89,14 +96,12 @@ pub async fn create_org_request(req: Json<OrgCreateRequest>, req_info: HttpReque
if org.is_some() {
return HttpResponse::BadRequest().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_USER_ALREADY_OWNS_ORG".to_string(),
message: "This user already owns an organization".to_string(),
path: None,
}],
});
}
let org = organization::Model {
@ -201,7 +206,12 @@ pub async fn create_org_request(req: Json<OrgCreateRequest>, req_info: HttpReque
organization: org.id.clone(),
cert: ca_key_encrypted,
key: ca_crt,
expires: cert
.details
.not_after
.duration_since(UNIX_EPOCH)
.expect("Time went backwards")
.as_secs() as i64,
nonce: iv_hex,
};
@ -210,7 +220,10 @@ pub async fn create_org_request(req: Json<OrgCreateRequest>, req_info: HttpReque
cidr: req.cidr.clone(),
organization: org.id.clone(),
signing_ca: signing_ca.id.clone(),
created_at: SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("Time went backwards")
.as_secs() as i64,
name: "Network1".to_string(), name: "Network1".to_string(),
lighthouses_as_relays: true, lighthouses_as_relays: true,
}; };
@ -274,4 +287,4 @@ pub async fn create_org_request(req: Json<OrgCreateRequest>, req_info: HttpReque
ca: new_signing_ca_id,
network: new_network_id,
})
}

File diff suppressed because it is too large Load Diff

View File

@ -19,29 +19,29 @@
// This endpoint is considered done. No major features should be added or removed, unless it fixes bugs.
// This endpoint requires the `definednetworking` extension to be enabled to be used.
use actix_web::{HttpResponse, post};
use actix_web::web::{Data, Json};
use log::error;
use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter};
use serde::{Serialize, Deserialize};
use trifid_api_entities::entity::user::Entity as UserEntity;
use trifid_api_entities::entity::user;
use crate::AppState;
use crate::config::CONFIG;
use crate::error::{APIError, APIErrorsResponse};
use crate::magic_link::send_magic_link;
use crate::timers::expires_in_seconds;
use crate::tokens::{random_id, random_token};
use crate::AppState;
use actix_web::web::{Data, Json};
use actix_web::{post, HttpResponse};
use log::error;
use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter};
use serde::{Deserialize, Serialize};
use trifid_api_entities::entity::user;
use trifid_api_entities::entity::user::Entity as UserEntity;
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SignupRequest {
pub email: String,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SignupResponse {
pub data: Option<SignupResponseData>,
pub metadata: SignupResponseMetadata,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SignupResponseData {}
@ -50,37 +50,39 @@ pub struct SignupResponseMetadata {}
#[post("/v1/signup")] #[post("/v1/signup")]
pub async fn signup_request(data: Data<AppState>, req: Json<SignupRequest>) -> HttpResponse { pub async fn signup_request(data: Data<AppState>, req: Json<SignupRequest>) -> HttpResponse {
let user: Vec<user::Model> = match UserEntity::find().filter(user::Column::Email.eq(&req.email)).all(&data.conn).await { let user: Vec<user::Model> = match UserEntity::find()
.filter(user::Column::Email.eq(&req.email))
.all(&data.conn)
.await
{
Ok(r) => r,
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
};
if !user.is_empty() {
return HttpResponse::Unauthorized().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_USER_EXISTS".to_string(),
message: "That user already exists.".to_string(),
path: None,
}],
});
}
let model = user::Model {
id: random_id("user"),
email: req.email.clone(),
};
let id = model.id.clone();
@ -91,14 +93,14 @@ pub async fn signup_request(data: Data<AppState>, req: Json<SignupRequest>) -> H
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
}
@ -113,14 +115,14 @@ pub async fn signup_request(data: Data<AppState>, req: Json<SignupRequest>) -> H
Err(e) => {
error!("error sending magic link: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_ML_ERROR".to_string(),
message:
"There was an error sending the magic link email, please try again later."
.to_string(),
path: None,
}],
});
}
}
@ -131,19 +133,19 @@ pub async fn signup_request(data: Data<AppState>, req: Json<SignupRequest>) -> H
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
}
HttpResponse::Ok().json(SignupResponse {
data: None,
metadata: SignupResponseMetadata {},
})
}

View File

@ -19,19 +19,21 @@
// This endpoint is considered done. No major features should be added or removed, unless it fixes bugs.
// This endpoint requires the `definednetworking` extension to be enabled to be used.
use serde::{Serialize, Deserialize};
use actix_web::{HttpRequest, HttpResponse, post};
use actix_web::web::{Data, Json};
use log::error;
use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, ModelTrait, QueryFilter};
use totp_rs::{Algorithm, Secret, TOTP};
use crate::AppState;
use crate::auth_tokens::{enforce_session, TokenInfo};
use crate::error::{APIError, APIErrorsResponse};
use trifid_api_entities::entity::totp_authenticator;
use crate::config::CONFIG;
use crate::error::{APIError, APIErrorsResponse};
use crate::timers::expires_in_seconds;
use crate::tokens::random_token;
use crate::AppState;
use actix_web::web::{Data, Json};
use actix_web::{post, HttpRequest, HttpResponse};
use log::error;
use sea_orm::{
ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, ModelTrait, QueryFilter,
};
use serde::{Deserialize, Serialize};
use totp_rs::{Algorithm, Secret, TOTP};
use trifid_api_entities::entity::totp_authenticator;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TotpAuthenticatorsRequest {}
@ -54,55 +56,57 @@ pub struct TotpAuthenticatorsResponse {
}
#[post("/v1/totp-authenticators")] #[post("/v1/totp-authenticators")]
pub async fn totp_authenticators_request(db: Data<AppState>, req_data: HttpRequest, _req: Json<TotpAuthenticatorsRequest>) -> HttpResponse { pub async fn totp_authenticators_request(
db: Data<AppState>,
req_data: HttpRequest,
_req: Json<TotpAuthenticatorsRequest>,
) -> HttpResponse {
// require a user session
let session_token = match enforce_session(&req_data, &db.conn).await {
Ok(r) => match r {
TokenInfo::SessionToken(i) => i,
_ => unreachable!(),
},
Err(e) => {
error!("error enforcing session: {}", e);
return HttpResponse::Unauthorized().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_UNAUTHORIZED".to_string(),
message: "Unauthorized".to_string(),
path: None,
}],
});
}
};
// determine if the user has a totp authenticator
let auther = match totp_authenticator::Entity::find()
.filter(totp_authenticator::Column::User.eq(&session_token.user.id))
.one(&db.conn)
.await
{
Ok(r) => r,
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
};
if let Some(auther) = auther {
if auther.verified {
return HttpResponse::BadRequest().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_ALREADY_HAS_TOTP".to_string(),
message: "This user already has a totp authenticator".to_string(),
path: None,
}],
});
}
match auther.delete(&db.conn).await {
@ -110,38 +114,48 @@ pub async fn totp_authenticators_request(db: Data<AppState>, req_data: HttpReque
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
}
}
let secret = Secret::generate_secret();
let totpmachine = match TOTP::new(
Algorithm::SHA1,
6,
1,
30,
secret.to_bytes().expect("Invalid randomized data"),
Some("trifid-api".to_string()),
session_token.user.email,
) {
Ok(m) => m,
Err(e) => {
error!("totp machine create error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_SECRET_ERR".to_string(),
message:
"There was an error configuring the authenticator, please try again later."
.to_string(),
path: None,
}],
});
}
};
let model = totp_authenticator::Model {
id: random_token("totp"),
secret: Secret::Raw(totpmachine.secret.clone())
.to_encoded()
.to_string(),
url: totpmachine.get_url(),
verified: false,
expires_on: expires_in_seconds(CONFIG.tokens.totp_setup_timeout_time_seconds) as i64,
@ -157,14 +171,14 @@ pub async fn totp_authenticators_request(db: Data<AppState>, req_data: HttpReque
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
}
@ -176,4 +190,4 @@ pub async fn totp_authenticators_request(db: Data<AppState>, req_data: HttpReque
},
metadata: TotpAuthenticatorsResponseMetadata {},
})
}

View File

@ -32,14 +32,19 @@
// If the request returns a non-200 response, or does not follow the typical TrifidExtensions schema, that server should be assumed to only support t+features:definednetworking.
// Endpoint specs (#REQTYPE) can indicate they require a feature by adding t+features:[feature]
use actix_web::{get, HttpResponse};
use serde::{Deserialize, Serialize};
pub const SUPPORTED_EXTENSIONS: &[&str] = &[
"definednetworking",
"trifidextensions",
"extended_roles",
"extended_hosts",
];
#[derive(Serialize, Deserialize)]
pub struct TrifidExtensionsResponse {
pub extensions: Vec<String>,
}
#[get("/v1/trifid_extensions")] #[get("/v1/trifid_extensions")]
@ -47,4 +52,4 @@ pub async fn trifid_extensions() -> HttpResponse {
HttpResponse::Ok().json(TrifidExtensionsResponse { HttpResponse::Ok().json(TrifidExtensionsResponse {
extensions: SUPPORTED_EXTENSIONS.iter().map(|u| u.to_string()).collect(), extensions: SUPPORTED_EXTENSIONS.iter().map(|u| u.to_string()).collect(),
}) })
} }
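The comment at the top of this file pins down the fallback rule for clients: anything other than a 200 response carrying the expected schema means the server is assumed to support only definednetworking. As a hedged, client-side sketch of that rule (not part of this commit; the helper name, the local response struct, and the use of serde_json are assumptions made for illustration):

use serde::Deserialize;

#[derive(Deserialize)]
struct TrifidExtensionsResponse {
    extensions: Vec<String>,
}

// Interpret a GET /v1/trifid_extensions reply; any failure falls back to the
// baseline feature set, as the comment above prescribes.
fn supported_extensions(status: u16, body: &str) -> Vec<String> {
    if status != 200 {
        return vec!["definednetworking".to_string()];
    }
    serde_json::from_str::<TrifidExtensionsResponse>(body)
        .map(|r| r.extensions)
        .unwrap_or_else(|_| vec!["definednetworking".to_string()])
}

fn main() {
    let body = r#"{"extensions":["definednetworking","trifidextensions"]}"#;
    assert_eq!(supported_extensions(200, body).len(), 2);
    assert_eq!(supported_extensions(500, "{}"), vec!["definednetworking".to_string()]);
}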

View File

@ -19,81 +19,85 @@
// This endpoint is considered done. No major features should be added or removed, unless it fixes bugs.
// This endpoint requires the `definednetworking` extension to be enabled to be used.
use actix_web::{HttpRequest, HttpResponse, post};
use actix_web::web::{Data, Json};
use log::{debug, error};
use serde::{Serialize, Deserialize};
use trifid_api_entities::entity::totp_authenticator;
use crate::AppState;
use crate::auth_tokens::{enforce_session, TokenInfo};
use crate::error::{APIError, APIErrorsResponse};
use sea_orm::{EntityTrait, QueryFilter, ColumnTrait, IntoActiveModel, ActiveModelTrait};
use sea_orm::ActiveValue::Set;
use totp_rs::{Secret, TOTP};
use trifid_api_entities::entity::auth_token;
use crate::config::CONFIG;
use crate::error::{APIError, APIErrorsResponse};
use crate::timers::expires_in_seconds;
use crate::tokens::random_token;
use crate::AppState;
use actix_web::web::{Data, Json};
use actix_web::{post, HttpRequest, HttpResponse};
use log::{debug, error};
use sea_orm::ActiveValue::Set;
use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter};
use serde::{Deserialize, Serialize};
use totp_rs::{Secret, TOTP};
use trifid_api_entities::entity::auth_token;
use trifid_api_entities::entity::totp_authenticator;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct VerifyTotpAuthenticatorsRequest {
#[serde(rename = "totpToken")]
pub totp_token: String,
pub code: String,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct VerifyTotpAuthenticatorsResponse {
pub data: VerifyTotpAuthenticatorsResponseData,
pub metadata: VerifyTotpAuthenticatorsResponseMetadata,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct VerifyTotpAuthenticatorsResponseData {
#[serde(rename = "authToken")]
pub auth_token: String,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct VerifyTotpAuthenticatorsResponseMetadata {}
#[post("/v1/verify-totp-authenticators")] #[post("/v1/verify-totp-authenticators")]
pub async fn verify_totp_authenticators_request(req: Json<VerifyTotpAuthenticatorsRequest>, req_data: HttpRequest, db: Data<AppState>) -> HttpResponse { pub async fn verify_totp_authenticators_request(
req: Json<VerifyTotpAuthenticatorsRequest>,
req_data: HttpRequest,
db: Data<AppState>,
) -> HttpResponse {
// require a user session
let session_token = match enforce_session(&req_data, &db.conn).await {
Ok(r) => match r {
TokenInfo::SessionToken(i) => i,
_ => unreachable!(),
},
Err(e) => {
error!("error enforcing session: {}", e);
return HttpResponse::Unauthorized().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_UNAUTHORIZED".to_string(),
message: "Unauthorized".to_string(),
path: None,
}],
});
}
};
// determine if the user has a totp authenticator
let auther = match totp_authenticator::Entity::find()
.filter(totp_authenticator::Column::Id.eq(&req.totp_token))
.one(&db.conn)
.await
{
Ok(r) => r,
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
};
@ -101,26 +105,22 @@ pub async fn verify_totp_authenticators_request(req: Json<VerifyTotpAuthenticato
Some(a) => {
if a.verified {
return HttpResponse::BadRequest().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_ALREADY_HAS_TOTP".to_string(),
message: "This user already has a totp authenticator".to_string(),
path: None,
}],
});
}
a
}
None => {
return HttpResponse::BadRequest().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_USER_NO_TOTP".to_string(),
message: "This user does not have a totp authenticator".to_string(),
path: None,
}],
});
}
};
@ -131,30 +131,26 @@ pub async fn verify_totp_authenticators_request(req: Json<VerifyTotpAuthenticato
Err(e) => {
error!("totp url error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_SECRET_ERROR".to_string(),
message: "There was an error parsing the totpmachine. Please try again later."
.to_string(),
path: None,
}],
});
}
};
let valid = match totpmachine.check_current(&req.code) {
Ok(valid) => valid,
Err(e) => {
error!("system time error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_TIME_ERROR".to_string(),
message: "There was an with the server-side time clock.".to_string(),
path: None,
}],
});
}
};
@ -163,14 +159,12 @@ pub async fn verify_totp_authenticators_request(req: Json<VerifyTotpAuthenticato
debug!("current: {}", totpmachine.generate_current().unwrap()); debug!("current: {}", totpmachine.generate_current().unwrap());
error!("user send invalid totp code"); error!("user send invalid totp code");
return HttpResponse::Unauthorized().json(APIErrorsResponse { return HttpResponse::Unauthorized().json(APIErrorsResponse {
errors: vec![ errors: vec![APIError {
APIError { code: "ERR_UNAUTHORIZED".to_string(),
code: "ERR_UNAUTHORIZED".to_string(), message: "Unauthorized".to_string(),
message: "Unauthorized".to_string(), path: None,
path: None, }],
} });
],
})
} }
let mut active_model = auther.into_active_model(); let mut active_model = auther.into_active_model();
@ -182,14 +176,13 @@ pub async fn verify_totp_authenticators_request(req: Json<VerifyTotpAuthenticato
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message: "There was an error updating the totpmachine, please try again later."
.to_string(),
path: None,
}],
});
}
}
@ -205,13 +198,11 @@ pub async fn verify_totp_authenticators_request(req: Json<VerifyTotpAuthenticato
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message: "There was an error issuing the authentication token.".to_string(),
path: None,
}],
});
}
}
@ -220,4 +211,4 @@ pub async fn verify_totp_authenticators_request(req: Json<VerifyTotpAuthenticato
data: VerifyTotpAuthenticatorsResponseData { auth_token: token },
metadata: VerifyTotpAuthenticatorsResponseMetadata {},
})
}

View File

@ -19,9 +19,12 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH};
pub const TIME_FORMAT: &str = "%Y-%m-%dT%H:%M:%S%.f%:z";
pub fn expires_in_seconds(seconds: u64) -> u64 {
(SystemTime::now() + Duration::from_secs(seconds))
.duration_since(UNIX_EPOCH)
.expect("Time went backwards")
.as_secs()
}
pub fn expired(time: u64) -> bool {
UNIX_EPOCH + Duration::from_secs(time) < SystemTime::now()
}
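These two helpers are the whole expiry story for tokens in this codebase: stamp a future unix timestamp with expires_in_seconds, then test it later with expired. A self-contained sketch of that pairing (the functions are copied from above so the snippet compiles on its own; the main() harness is illustrative only and not part of the commit):

use std::time::{Duration, SystemTime, UNIX_EPOCH};

fn expires_in_seconds(seconds: u64) -> u64 {
    (SystemTime::now() + Duration::from_secs(seconds))
        .duration_since(UNIX_EPOCH)
        .expect("Time went backwards")
        .as_secs()
}

fn expired(time: u64) -> bool {
    UNIX_EPOCH + Duration::from_secs(time) < SystemTime::now()
}

fn main() {
    // A timestamp 300 seconds in the future has not expired yet...
    let expires_on = expires_in_seconds(300);
    assert!(!expired(expires_on));
    // ...while the unix epoch itself clearly has.
    assert!(expired(0));
}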

View File

@ -20,7 +20,8 @@ use rand::Rng;
pub const ID_CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
pub const ID_LEN: u32 = 26;
pub const TOKEN_CHARSET: &[u8] =
b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_";
pub const TOKEN_LEN: u32 = 43;
// 26
@ -38,16 +39,22 @@ pub fn random_id_no_id() -> HeaderValue {
// 43
// format: [TYPE]-[43 chars]
pub fn random_token(identifier: &str) -> String {
format!(
"{}-{}",
identifier,
random_with_charset(TOKEN_LEN, TOKEN_CHARSET)
)
}
fn random_with_charset(len: u32, charset: &[u8]) -> String {
(0..len)
.map(|_| {
let idx = rand::thread_rng().gen_range(0..charset.len());
charset[idx] as char
})
.collect()
}
pub fn get_token_type(token: &str) -> Option<&str> {
token.split('-').collect::<Vec<&str>>().first().copied()
}
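The token scheme above is [TYPE]-[43 chars], and get_token_type simply reads everything before the first dash. A self-contained sketch of generating and then classifying such a token (the helpers are copied from above so the snippet builds on its own; it needs the rand crate; the main() harness is illustrative and not part of the commit):

use rand::Rng;

const TOKEN_CHARSET: &[u8] = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_";
const TOKEN_LEN: u32 = 43;

fn random_with_charset(len: u32, charset: &[u8]) -> String {
    (0..len)
        .map(|_| {
            let idx = rand::thread_rng().gen_range(0..charset.len());
            charset[idx] as char
        })
        .collect()
}

fn random_token(identifier: &str) -> String {
    format!("{}-{}", identifier, random_with_charset(TOKEN_LEN, TOKEN_CHARSET))
}

fn get_token_type(token: &str) -> Option<&str> {
    token.split('-').collect::<Vec<&str>>().first().copied()
}

fn main() {
    let token = random_token("sess");
    // The prefix before the first '-' identifies what kind of token this is.
    assert_eq!(get_token_type(&token), Some("sess"));
    // "sess-" plus 43 random charset bytes.
    assert_eq!(token.len(), 5 + 43);
}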

View File

@ -1 +1 @@
pub mod entity;

View File

@ -13,12 +13,15 @@ impl MigrationTrait for Migration {
.if_not_exists()
.col(ColumnDef::new(User::Id).string().not_null().primary_key())
.col(ColumnDef::new(User::Email).string().not_null().unique_key())
.to_owned(),
)
.await
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.drop_table(Table::drop().table(User::Table).to_owned())
.await
}
}
@ -27,5 +30,5 @@ impl MigrationTrait for Migration {
pub enum User {
Table,
Id,
Email,
}

View File

@ -1,6 +1,5 @@
use sea_orm_migration::prelude::*;
use crate::m20230402_162601_create_table_users::User; use crate::m20230402_162601_create_table_users::User;
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)] #[derive(DeriveMigrationName)]
pub struct Migration; pub struct Migration;
@ -8,26 +7,40 @@ pub struct Migration;
#[async_trait::async_trait] #[async_trait::async_trait]
impl MigrationTrait for Migration { impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.create_table( manager
Table::create() .create_table(
.table(MagicLink::Table) Table::create()
.if_not_exists() .table(MagicLink::Table)
.col(ColumnDef::new(MagicLink::Id).string().not_null().primary_key()) .if_not_exists()
.col(ColumnDef::new(MagicLink::User).string().not_null()) .col(
.col(ColumnDef::new(MagicLink::ExpiresOn).big_integer().not_null()) ColumnDef::new(MagicLink::Id)
.foreign_key( .string()
ForeignKey::create() .not_null()
.name("fk_magiclink_user_users_id") .primary_key(),
.from(MagicLink::Table, MagicLink::User) )
.to(User::Table, User::Id) .col(ColumnDef::new(MagicLink::User).string().not_null())
.on_delete(ForeignKeyAction::Cascade) .col(
.on_update(ForeignKeyAction::Cascade) ColumnDef::new(MagicLink::ExpiresOn)
).to_owned() .big_integer()
).await .not_null(),
)
.foreign_key(
ForeignKey::create()
.name("fk_magiclink_user_users_id")
.from(MagicLink::Table, MagicLink::User)
.to(User::Table, User::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.to_owned(),
)
.await
} }
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.drop_table(Table::drop().table(MagicLink::Table).to_owned()).await manager
.drop_table(Table::drop().table(MagicLink::Table).to_owned())
.await
} }
} }
@ -37,5 +50,5 @@ pub enum MagicLink {
Table, Table,
Id, Id,
User, User,
ExpiresOn ExpiresOn,
} }

View File

@ -1,5 +1,5 @@
use sea_orm_migration::prelude::*;
use crate::m20230402_162601_create_table_users::User; use crate::m20230402_162601_create_table_users::User;
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)] #[derive(DeriveMigrationName)]
pub struct Migration; pub struct Migration;
@ -7,26 +7,39 @@ pub struct Migration;
#[async_trait::async_trait] #[async_trait::async_trait]
impl MigrationTrait for Migration { impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.create_table( manager
Table::create() .create_table(
.table(SessionToken::Table) Table::create()
.if_not_exists() .table(SessionToken::Table)
.col(ColumnDef::new(SessionToken::Id).string().not_null().primary_key()) .if_not_exists()
.col(ColumnDef::new(SessionToken::User).string().not_null()) .col(
.col(ColumnDef::new(SessionToken::ExpiresOn).big_integer().not_null()) ColumnDef::new(SessionToken::Id)
.foreign_key( .string()
ForeignKey::create() .not_null()
.from(SessionToken::Table, SessionToken::User) .primary_key(),
.to(User::Table, User::Id) )
.on_delete(ForeignKeyAction::Cascade) .col(ColumnDef::new(SessionToken::User).string().not_null())
.on_update(ForeignKeyAction::Cascade) .col(
) ColumnDef::new(SessionToken::ExpiresOn)
.to_owned() .big_integer()
).await .not_null(),
)
.foreign_key(
ForeignKey::create()
.from(SessionToken::Table, SessionToken::User)
.to(User::Table, User::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.to_owned(),
)
.await
} }
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.drop_table(Table::drop().table(SessionToken::Table).to_owned()).await manager
.drop_table(Table::drop().table(SessionToken::Table).to_owned())
.await
} }
} }
@ -36,5 +49,5 @@ pub enum SessionToken {
Table, Table,
Id, Id,
User, User,
ExpiresOn ExpiresOn,
} }

View File

@ -1,5 +1,5 @@
use sea_orm_migration::prelude::*;
use crate::m20230402_162601_create_table_users::User; use crate::m20230402_162601_create_table_users::User;
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)] #[derive(DeriveMigrationName)]
pub struct Migration; pub struct Migration;
@ -7,23 +7,39 @@ pub struct Migration;
#[async_trait::async_trait] #[async_trait::async_trait]
impl MigrationTrait for Migration { impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.create_table( manager
Table::create().table(Organization::Table) .create_table(
.col(ColumnDef::new(Organization::Id).string().not_null().primary_key()) Table::create()
.col(ColumnDef::new(Organization::Name).string().not_null()) .table(Organization::Table)
.col(ColumnDef::new(Organization::Owner).string().not_null().unique_key()) .col(
.foreign_key( ColumnDef::new(Organization::Id)
ForeignKey::create() .string()
.from(Organization::Table, Organization::Owner) .not_null()
.to(User::Table, User::Id) .primary_key(),
.on_delete(ForeignKeyAction::Cascade) )
.on_update(ForeignKeyAction::Cascade) .col(ColumnDef::new(Organization::Name).string().not_null())
).to_owned() .col(
).await ColumnDef::new(Organization::Owner)
.string()
.not_null()
.unique_key(),
)
.foreign_key(
ForeignKey::create()
.from(Organization::Table, Organization::Owner)
.to(User::Table, User::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.to_owned(),
)
.await
} }
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.drop_table(Table::drop().table(Organization::Table).to_owned()).await manager
.drop_table(Table::drop().table(Organization::Table).to_owned())
.await
} }
} }
@ -33,5 +49,5 @@ pub enum Organization {
Table, Table,
Id, Id,
Name, Name,
Owner Owner,
} }

View File

@ -1,5 +1,5 @@
use sea_orm_migration::prelude::*;
use crate::m20230402_232316_create_table_organizations::Organization; use crate::m20230402_232316_create_table_organizations::Organization;
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)] #[derive(DeriveMigrationName)]
pub struct Migration; pub struct Migration;
@ -7,25 +7,29 @@ pub struct Migration;
#[async_trait::async_trait] #[async_trait::async_trait]
impl MigrationTrait for Migration { impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.create_table( manager
Table::create() .create_table(
.table(ApiKey::Table) Table::create()
.col(ColumnDef::new(ApiKey::Id).string().not_null().primary_key()) .table(ApiKey::Table)
.col(ColumnDef::new(ApiKey::Key).string().not_null().unique_key()) .col(ColumnDef::new(ApiKey::Id).string().not_null().primary_key())
.col(ColumnDef::new(ApiKey::Organization).string().not_null()) .col(ColumnDef::new(ApiKey::Key).string().not_null().unique_key())
.foreign_key( .col(ColumnDef::new(ApiKey::Organization).string().not_null())
ForeignKey::create() .foreign_key(
.from(ApiKey::Table, ApiKey::Organization) ForeignKey::create()
.to(Organization::Table, Organization::Id) .from(ApiKey::Table, ApiKey::Organization)
.on_delete(ForeignKeyAction::Cascade) .to(Organization::Table, Organization::Id)
.on_update(ForeignKeyAction::Cascade) .on_delete(ForeignKeyAction::Cascade)
) .on_update(ForeignKeyAction::Cascade),
.to_owned() )
).await .to_owned(),
)
.await
} }
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.drop_table(Table::drop().table(ApiKey::Table).to_owned()).await manager
.drop_table(Table::drop().table(ApiKey::Table).to_owned())
.await
} }
} }
@ -35,5 +39,5 @@ pub enum ApiKey {
Table, Table,
Id, Id,
Key, Key,
Organization Organization,
} }

View File

@ -1,5 +1,5 @@
use sea_orm_migration::prelude::*;
use crate::m20230402_233043_create_table_api_keys::ApiKey; use crate::m20230402_233043_create_table_api_keys::ApiKey;
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)] #[derive(DeriveMigrationName)]
pub struct Migration; pub struct Migration;
@ -7,24 +7,34 @@ pub struct Migration;
#[async_trait::async_trait] #[async_trait::async_trait]
impl MigrationTrait for Migration { impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.create_table( manager
Table::create() .create_table(
.table(ApiKeyScope::Table) Table::create()
.col(ColumnDef::new(ApiKeyScope::Id).string().not_null().primary_key()) .table(ApiKeyScope::Table)
.col(ColumnDef::new(ApiKeyScope::Scope).string().not_null()) .col(
.col(ColumnDef::new(ApiKeyScope::ApiKey).string().not_null()) ColumnDef::new(ApiKeyScope::Id)
.foreign_key( .string()
ForeignKey::create() .not_null()
.from(ApiKeyScope::Table, ApiKeyScope::ApiKey) .primary_key(),
.to(ApiKey::Table, ApiKey::Id) )
.on_delete(ForeignKeyAction::Cascade) .col(ColumnDef::new(ApiKeyScope::Scope).string().not_null())
.on_update(ForeignKeyAction::Cascade) .col(ColumnDef::new(ApiKeyScope::ApiKey).string().not_null())
).to_owned() .foreign_key(
).await ForeignKey::create()
.from(ApiKeyScope::Table, ApiKeyScope::ApiKey)
.to(ApiKey::Table, ApiKey::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.to_owned(),
)
.await
} }
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.drop_table(Table::drop().table(ApiKeyScope::Table).to_owned()).await manager
.drop_table(Table::drop().table(ApiKeyScope::Table).to_owned())
.await
} }
} }
@ -34,5 +44,5 @@ pub enum ApiKeyScope {
Table, Table,
Id, Id,
Scope, Scope,
ApiKey ApiKey,
} }

View File

@ -1,5 +1,5 @@
use sea_orm_migration::prelude::*;
use crate::m20230402_162601_create_table_users::User; use crate::m20230402_162601_create_table_users::User;
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)] #[derive(DeriveMigrationName)]
pub struct Migration; pub struct Migration;
@ -7,27 +7,60 @@ pub struct Migration;
#[async_trait::async_trait] #[async_trait::async_trait]
impl MigrationTrait for Migration { impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.create_table( manager
Table::create() .create_table(
.table(TotpAuthenticator::Table) Table::create()
.col(ColumnDef::new(TotpAuthenticator::Id).string().not_null().primary_key()) .table(TotpAuthenticator::Table)
.col(ColumnDef::new(TotpAuthenticator::Secret).string().not_null().unique_key()) .col(
.col(ColumnDef::new(TotpAuthenticator::Url).string().not_null().unique_key()) ColumnDef::new(TotpAuthenticator::Id)
.col(ColumnDef::new(TotpAuthenticator::Verified).boolean().not_null()) .string()
.col(ColumnDef::new(TotpAuthenticator::ExpiresOn).big_integer().not_null()) .not_null()
.col(ColumnDef::new(TotpAuthenticator::User).string().not_null().unique_key()) .primary_key(),
.foreign_key( )
ForeignKey::create() .col(
.from(TotpAuthenticator::Table, TotpAuthenticator::User) ColumnDef::new(TotpAuthenticator::Secret)
.to(User::Table, User::Id) .string()
.on_delete(ForeignKeyAction::Cascade) .not_null()
.on_update(ForeignKeyAction::Cascade) .unique_key(),
).to_owned() )
).await .col(
ColumnDef::new(TotpAuthenticator::Url)
.string()
.not_null()
.unique_key(),
)
.col(
ColumnDef::new(TotpAuthenticator::Verified)
.boolean()
.not_null(),
)
.col(
ColumnDef::new(TotpAuthenticator::ExpiresOn)
.big_integer()
.not_null(),
)
.col(
ColumnDef::new(TotpAuthenticator::User)
.string()
.not_null()
.unique_key(),
)
.foreign_key(
ForeignKey::create()
.from(TotpAuthenticator::Table, TotpAuthenticator::User)
.to(User::Table, User::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.to_owned(),
)
.await
} }
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.drop_table(Table::drop().table(TotpAuthenticator::Table).to_owned()).await manager
.drop_table(Table::drop().table(TotpAuthenticator::Table).to_owned())
.await
} }
} }
@ -40,5 +73,5 @@ pub enum TotpAuthenticator {
Url, Url,
Verified, Verified,
ExpiresOn, ExpiresOn,
User User,
} }

View File

@ -1,5 +1,5 @@
use sea_orm_migration::prelude::*;
use crate::m20230402_162601_create_table_users::User; use crate::m20230402_162601_create_table_users::User;
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)] #[derive(DeriveMigrationName)]
pub struct Migration; pub struct Migration;
@ -7,26 +7,39 @@ pub struct Migration;
#[async_trait::async_trait] #[async_trait::async_trait]
impl MigrationTrait for Migration { impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.create_table( manager
Table::create() .create_table(
.table(AuthToken::Table) Table::create()
.if_not_exists() .table(AuthToken::Table)
.col(ColumnDef::new(AuthToken::Id).string().not_null().primary_key()) .if_not_exists()
.col(ColumnDef::new(AuthToken::User).string().not_null()) .col(
.col(ColumnDef::new(AuthToken::ExpiresOn).big_integer().not_null()) ColumnDef::new(AuthToken::Id)
.foreign_key( .string()
ForeignKey::create() .not_null()
.from(AuthToken::Table, AuthToken::User) .primary_key(),
.to(User::Table, User::Id) )
.on_delete(ForeignKeyAction::Cascade) .col(ColumnDef::new(AuthToken::User).string().not_null())
.on_update(ForeignKeyAction::Cascade) .col(
) ColumnDef::new(AuthToken::ExpiresOn)
.to_owned() .big_integer()
).await .not_null(),
)
.foreign_key(
ForeignKey::create()
.from(AuthToken::Table, AuthToken::User)
.to(User::Table, User::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.to_owned(),
)
.await
} }
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.drop_table(Table::drop().table(AuthToken::Table).to_owned()).await manager
.drop_table(Table::drop().table(AuthToken::Table).to_owned())
.await
} }
} }
@ -36,5 +49,5 @@ pub enum AuthToken {
Table, Table,
Id, Id,
User, User,
ExpiresOn ExpiresOn,
} }

View File

@ -6,21 +6,40 @@ pub struct Migration;
#[async_trait::async_trait] #[async_trait::async_trait]
impl MigrationTrait for Migration { impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.create_table( manager
Table::create() .create_table(
.table(SigningCA::Table) Table::create()
.col(ColumnDef::new(SigningCA::Id).string().not_null().primary_key()) .table(SigningCA::Table)
.col(ColumnDef::new(SigningCA::Organization).string().not_null()) .col(
.col(ColumnDef::new(SigningCA::Cert).string().not_null()) ColumnDef::new(SigningCA::Id)
.col(ColumnDef::new(SigningCA::Key).string().not_null().unique_key()) .string()
.col(ColumnDef::new(SigningCA::Expires).big_integer().not_null()) .not_null()
.col(ColumnDef::new(SigningCA::Nonce).string().not_null().unique_key()) .primary_key(),
.to_owned() )
).await .col(ColumnDef::new(SigningCA::Organization).string().not_null())
.col(ColumnDef::new(SigningCA::Cert).string().not_null())
.col(
ColumnDef::new(SigningCA::Key)
.string()
.not_null()
.unique_key(),
)
.col(ColumnDef::new(SigningCA::Expires).big_integer().not_null())
.col(
ColumnDef::new(SigningCA::Nonce)
.string()
.not_null()
.unique_key(),
)
.to_owned(),
)
.await
} }
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.drop_table(Table::drop().table(SigningCA::Table).to_owned()).await manager
.drop_table(Table::drop().table(SigningCA::Table).to_owned())
.await
} }
} }
@ -33,5 +52,5 @@ pub enum SigningCA {
Cert, Cert,
Key, Key,
Expires, Expires,
Nonce Nonce,
} }

View File

@ -1,6 +1,6 @@
use sea_orm_migration::prelude::*;
use crate::m20230402_232316_create_table_organizations::Organization; use crate::m20230402_232316_create_table_organizations::Organization;
use crate::m20230403_142517_create_table_signing_cas::SigningCA; use crate::m20230403_142517_create_table_signing_cas::SigningCA;
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)] #[derive(DeriveMigrationName)]
pub struct Migration; pub struct Migration;
@ -8,36 +8,59 @@ pub struct Migration;
#[async_trait::async_trait] #[async_trait::async_trait]
impl MigrationTrait for Migration { impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.create_table( manager
Table::create() .create_table(
.table(Network::Table) Table::create()
.col(ColumnDef::new(Network::Id).string().not_null().primary_key()) .table(Network::Table)
.col(ColumnDef::new(Network::Cidr).string().not_null()) .col(
.col(ColumnDef::new(Network::Organization).string().not_null().unique_key()) ColumnDef::new(Network::Id)
.col(ColumnDef::new(Network::SigningCA).string().not_null().unique_key()) .string()
.col(ColumnDef::new(Network::CreatedAt).big_integer().not_null()) .not_null()
.col(ColumnDef::new(Network::Name).string().not_null()) .primary_key(),
.col(ColumnDef::new(Network::LighthousesAsRelays).boolean().not_null()) )
.foreign_key( .col(ColumnDef::new(Network::Cidr).string().not_null())
ForeignKey::create() .col(
.from(Network::Table, Network::Organization) ColumnDef::new(Network::Organization)
.to(Organization::Table, Organization::Id) .string()
.on_delete(ForeignKeyAction::Cascade) .not_null()
.on_update(ForeignKeyAction::Cascade) .unique_key(),
) )
.foreign_key( .col(
ForeignKey::create() ColumnDef::new(Network::SigningCA)
.from(Network::Table, Network::SigningCA) .string()
.to(SigningCA::Table, SigningCA::Id) .not_null()
.on_delete(ForeignKeyAction::Cascade) .unique_key(),
.on_update(ForeignKeyAction::Cascade) )
) .col(ColumnDef::new(Network::CreatedAt).big_integer().not_null())
.to_owned() .col(ColumnDef::new(Network::Name).string().not_null())
).await .col(
ColumnDef::new(Network::LighthousesAsRelays)
.boolean()
.not_null(),
)
.foreign_key(
ForeignKey::create()
.from(Network::Table, Network::Organization)
.to(Organization::Table, Organization::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.foreign_key(
ForeignKey::create()
.from(Network::Table, Network::SigningCA)
.to(SigningCA::Table, SigningCA::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.to_owned(),
)
.await
} }
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.drop_table(Table::drop().table(Network::Table).to_owned()).await manager
.drop_table(Table::drop().table(Network::Table).to_owned())
.await
} }
} }
@ -51,5 +74,5 @@ pub enum Network {
SigningCA, SigningCA,
CreatedAt, CreatedAt,
Name, Name,
LighthousesAsRelays LighthousesAsRelays,
} }

View File

@@ -1,5 +1,5 @@
use crate::m20230402_232316_create_table_organizations::Organization;
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;
@@ -7,27 +7,32 @@ pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(Role::Table)
                    .col(ColumnDef::new(Role::Id).string().not_null().primary_key())
                    .col(ColumnDef::new(Role::Name).string().not_null().unique_key())
                    .col(ColumnDef::new(Role::Description).string().not_null())
                    .col(ColumnDef::new(Role::Organization).string().not_null())
                    .col(ColumnDef::new(Role::CreatedAt).big_integer().not_null())
                    .col(ColumnDef::new(Role::ModifiedAt).big_integer().not_null())
                    .foreign_key(
                        ForeignKey::create()
                            .from(Role::Table, Role::Organization)
                            .to(Organization::Table, Organization::Id)
                            .on_update(ForeignKeyAction::Cascade)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(Role::Table).to_owned())
            .await
    }
}
@@ -40,5 +45,5 @@ pub enum Role {
    Description,
    Organization,
    CreatedAt,
    ModifiedAt,
}

View File

@@ -1,5 +1,5 @@
use crate::m20230404_133809_create_table_roles::Role;
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;
@@ -7,35 +7,57 @@ pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(FirewallRule::Table)
                    .col(
                        ColumnDef::new(FirewallRule::Id)
                            .string()
                            .not_null()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(FirewallRule::Role).string().not_null())
                    .col(ColumnDef::new(FirewallRule::Protocol).string().not_null())
                    .col(
                        ColumnDef::new(FirewallRule::Description)
                            .string()
                            .not_null(),
                    )
                    .col(ColumnDef::new(FirewallRule::AllowedRoleID).string().null())
                    .col(
                        ColumnDef::new(FirewallRule::PortRangeFrom)
                            .integer()
                            .not_null(),
                    )
                    .col(
                        ColumnDef::new(FirewallRule::PortRangeTo)
                            .integer()
                            .not_null(),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .from(FirewallRule::Table, FirewallRule::Role)
                            .to(Role::Table, Role::Id)
                            .on_delete(ForeignKeyAction::Cascade)
                            .on_update(ForeignKeyAction::Cascade),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .from(FirewallRule::Table, FirewallRule::AllowedRoleID)
                            .to(Role::Table, Role::Id)
                            .on_delete(ForeignKeyAction::Cascade)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
            .await
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(FirewallRule::Table).to_owned())
            .await
    }
}
@@ -49,5 +71,5 @@ pub enum FirewallRule {
    Description,
    AllowedRoleID,
    PortRangeFrom,
    PortRangeTo,
}

View File

@@ -1,6 +1,6 @@
use crate::m20230403_173431_create_table_networks::Network;
use crate::m20230404_133809_create_table_roles::Role;
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;
@@ -8,56 +8,58 @@ pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(Host::Table)
                    .col(ColumnDef::new(Host::Id).string().not_null().primary_key())
                    .col(ColumnDef::new(Host::Name).string().not_null())
                    .col(ColumnDef::new(Host::Network).string().not_null())
                    .col(ColumnDef::new(Host::Role).string().not_null())
                    .col(ColumnDef::new(Host::IP).string().not_null())
                    .col(ColumnDef::new(Host::ListenPort).unsigned().not_null())
                    .col(ColumnDef::new(Host::IsLighthouse).boolean().not_null())
                    .col(ColumnDef::new(Host::IsRelay).boolean().not_null())
                    .col(ColumnDef::new(Host::Counter).unsigned().not_null())
                    .col(ColumnDef::new(Host::CreatedAt).big_integer().not_null())
                    .col(ColumnDef::new(Host::IsBlocked).boolean().not_null())
                    .col(ColumnDef::new(Host::LastSeenAt).big_integer().not_null())
                    .col(ColumnDef::new(Host::LastVersion).integer().not_null())
                    .col(ColumnDef::new(Host::LastPlatform).string().not_null())
                    .col(ColumnDef::new(Host::LastOutOfDate).boolean().not_null())
                    .foreign_key(
                        ForeignKey::create()
                            .from(Host::Table, Host::Network)
                            .to(Network::Table, Network::Id)
                            .on_update(ForeignKeyAction::Cascade)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .from(Host::Table, Host::Role)
                            .to(Role::Table, Role::Id)
                            .on_update(ForeignKeyAction::Cascade)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .index(
                        Index::create()
                            .name("idx-hosts-net-name-unique")
                            .table(Host::Table)
                            .col(Host::Network)
                            .col(Host::Name)
                            .unique(),
                    )
                    .index(
                        Index::create()
                            .name("idx-hosts-net-ip-unique")
                            .table(Host::Table)
                            .col(Host::Network)
                            .col(Host::IP)
                            .unique(),
                    )
                    .to_owned(),
            )
            .await
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
@@ -85,5 +87,5 @@ pub enum Host {
    LastSeenAt,
    LastVersion,
    LastPlatform,
    LastOutOfDate,
}

View File

@@ -1,5 +1,5 @@
use crate::m20230427_170037_create_table_hosts::Host;
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;
@@ -11,15 +11,24 @@ impl MigrationTrait for Migration {
            .create_table(
                Table::create()
                    .table(HostStaticAddress::Table)
                    .col(
                        ColumnDef::new(HostStaticAddress::Id)
                            .string()
                            .not_null()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(HostStaticAddress::Host).string().not_null())
                    .col(
                        ColumnDef::new(HostStaticAddress::Address)
                            .string()
                            .not_null(),
                    )
                    .foreign_key(
                        ForeignKey::create()
                            .from(HostStaticAddress::Table, HostStaticAddress::Host)
                            .to(Host::Table, Host::Id)
                            .on_update(ForeignKeyAction::Cascade)
                            .on_delete(ForeignKeyAction::Cascade),
                    )
                    .to_owned(),
            )
@@ -39,5 +48,5 @@ pub enum HostStaticAddress {
    Table,
    Id,
    Host,
    Address,
}

View File

@@ -1,5 +1,5 @@
use crate::m20230427_170037_create_table_hosts::Host;
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;
@@ -7,30 +7,41 @@ pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(HostConfigOverride::Table)
                    .col(
                        ColumnDef::new(HostConfigOverride::Id)
                            .string()
                            .not_null()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(HostConfigOverride::Key).string().not_null())
                    .col(
                        ColumnDef::new(HostConfigOverride::Value)
                            .string()
                            .not_null(),
                    )
                    .col(ColumnDef::new(HostConfigOverride::Host).string().not_null())
                    .foreign_key(
                        ForeignKey::create()
                            .from(HostConfigOverride::Table, HostConfigOverride::Host)
                            .to(Host::Table, Host::Id)
                            .on_delete(ForeignKeyAction::Cascade)
                            .on_update(ForeignKeyAction::Cascade),
                    )
                    .index(
                        Index::create()
                            .name("idx_hosts_config_overrides-key-host-unique")
                            .table(HostConfigOverride::Table)
                            .col(HostConfigOverride::Key)
                            .col(HostConfigOverride::Id)
                            .unique(),
                    )
                    .to_owned(),
            )
            .await
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {

View File

@@ -1,16 +1,16 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.2

pub mod prelude;

pub mod api_key;
pub mod api_key_scope;
pub mod auth_token;
pub mod firewall_rule;
pub mod magic_link;
pub mod network;
pub mod organization;
pub mod role;
pub mod session_token;
pub mod signing_ca;
pub mod totp_authenticator;
pub mod user;

View File

@@ -1,15 +1,52 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.2

use sea_orm::entity::prelude::*;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)]
#[sea_orm(table_name = "network")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    pub id: String,
    pub cidr: String,
    #[sea_orm(unique)]
    pub organization: String,
    #[sea_orm(unique)]
    pub signing_ca: String,
    pub created_at: i64,
    pub name: String,
    pub lighthouses_as_relays: bool,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(
        belongs_to = "super::organization::Entity",
        from = "Column::Organization",
        to = "super::organization::Column::Id",
        on_update = "Cascade",
        on_delete = "Cascade"
    )]
    Organization,
    #[sea_orm(
        belongs_to = "super::signing_ca::Entity",
        from = "Column::SigningCa",
        to = "super::signing_ca::Column::Id",
        on_update = "Cascade",
        on_delete = "Cascade"
    )]
    SigningCa,
}

impl Related<super::organization::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Organization.def()
    }
}

impl Related<super::signing_ca::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::SigningCa.def()
    }
}

impl ActiveModelBehavior for ActiveModel {}
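A short usage sketch of the relations generated above; the handler context, the database connection, and the lookup id are assumptions, not code from this commit.

use sea_orm::{DatabaseConnection, DbErr, EntityTrait, ModelTrait};
use trifid_api_entities::entity::{network, organization};

// Sketch only: follow Relation::Organization from a loaded network row.
async fn org_for_network(
    db: &DatabaseConnection,
    network_id: &str,
) -> Result<Option<organization::Model>, DbErr> {
    let net = match network::Entity::find_by_id(network_id.to_string()).one(db).await? {
        Some(net) => net,
        None => return Ok(None),
    };
    // find_related uses the Related<super::organization::Entity> impl above.
    net.find_related(organization::Entity).one(db).await
}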

View File

@@ -1,19 +1,57 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.2

use sea_orm::entity::prelude::*;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)]
#[sea_orm(table_name = "organization")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    pub id: String,
    pub name: String,
    #[sea_orm(unique)]
    pub owner: String,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(has_many = "super::api_key::Entity")]
    ApiKey,
    #[sea_orm(has_one = "super::network::Entity")]
    Network,
    #[sea_orm(has_many = "super::role::Entity")]
    Role,
    #[sea_orm(
        belongs_to = "super::user::Entity",
        from = "Column::Owner",
        to = "super::user::Column::Id",
        on_update = "Cascade",
        on_delete = "Cascade"
    )]
    User,
}

impl Related<super::api_key::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::ApiKey.def()
    }
}

impl Related<super::network::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Network.def()
    }
}

impl Related<super::role::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Role.def()
    }
}

impl Related<super::user::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::User.def()
    }
}

impl ActiveModelBehavior for ActiveModel {}

View File

@@ -1,14 +1,14 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.2

pub use super::api_key::Entity as ApiKey;
pub use super::api_key_scope::Entity as ApiKeyScope;
pub use super::auth_token::Entity as AuthToken;
pub use super::firewall_rule::Entity as FirewallRule;
pub use super::magic_link::Entity as MagicLink;
pub use super::network::Entity as Network;
pub use super::organization::Entity as Organization;
pub use super::role::Entity as Role;
pub use super::session_token::Entity as SessionToken;
pub use super::signing_ca::Entity as SigningCa;
pub use super::totp_authenticator::Entity as TotpAuthenticator;
pub use super::user::Entity as User;
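The prelude only re-exports each Entity under its PascalCase name, so call sites can name entities directly. A sketch, with the database handle assumed:

use sea_orm::{DatabaseConnection, DbErr, EntityTrait};
use trifid_api_entities::entity::prelude::*;

// Sketch only: `Network` here is the re-export `super::network::Entity as Network`.
async fn network_count(db: &DatabaseConnection) -> Result<usize, DbErr> {
    Ok(Network::find().all(db).await?.len())
}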

View File

@@ -1,13 +1,36 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.2

use sea_orm::entity::prelude::*;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)]
#[sea_orm(table_name = "role")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    pub id: String,
    #[sea_orm(unique)]
    pub name: String,
    pub description: String,
    pub organization: String,
    pub created_at: i64,
    pub modified_at: i64,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(
        belongs_to = "super::organization::Entity",
        from = "Column::Organization",
        to = "super::organization::Column::Id",
        on_update = "Cascade",
        on_delete = "Cascade"
    )]
    Organization,
}

impl Related<super::organization::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Organization.def()
    }
}

impl ActiveModelBehavior for ActiveModel {}

View File

@@ -1,13 +1,32 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.2

use sea_orm::entity::prelude::*;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)]
#[sea_orm(table_name = "session_token")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    pub id: String,
    pub user: String,
    pub expires_on: i64,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(
        belongs_to = "super::user::Entity",
        from = "Column::User",
        to = "super::user::Column::Id",
        on_update = "Cascade",
        on_delete = "Cascade"
    )]
    User,
}

impl Related<super::user::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::User.def()
    }
}

impl ActiveModelBehavior for ActiveModel {}

View File

@@ -1,13 +1,31 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.2

use sea_orm::entity::prelude::*;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)]
#[sea_orm(table_name = "signing_ca")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    pub id: String,
    pub organization: String,
    pub cert: String,
    #[sea_orm(unique)]
    pub key: String,
    pub expires: i64,
    #[sea_orm(unique)]
    pub nonce: String,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(has_one = "super::network::Entity")]
    Network,
}

impl Related<super::network::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Network.def()
    }
}

impl ActiveModelBehavior for ActiveModel {}

View File

@@ -1,13 +1,38 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.2

use sea_orm::entity::prelude::*;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)]
#[sea_orm(table_name = "totp_authenticator")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    pub id: String,
    #[sea_orm(unique)]
    pub secret: String,
    #[sea_orm(unique)]
    pub url: String,
    pub verified: bool,
    pub expires_on: i64,
    #[sea_orm(unique)]
    pub user: String,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(
        belongs_to = "super::user::Entity",
        from = "Column::User",
        to = "super::user::Column::Id",
        on_update = "Cascade",
        on_delete = "Cascade"
    )]
    User,
}

impl Related<super::user::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::User.def()
    }
}

impl ActiveModelBehavior for ActiveModel {}

View File

@@ -1,21 +1,58 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.11.2

use sea_orm::entity::prelude::*;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)]
#[sea_orm(table_name = "user")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    pub id: String,
    #[sea_orm(unique)]
    pub email: String,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(has_many = "super::auth_token::Entity")]
    AuthToken,
    #[sea_orm(has_many = "super::magic_link::Entity")]
    MagicLink,
    #[sea_orm(has_one = "super::organization::Entity")]
    Organization,
    #[sea_orm(has_many = "super::session_token::Entity")]
    SessionToken,
    #[sea_orm(has_one = "super::totp_authenticator::Entity")]
    TotpAuthenticator,
}

impl Related<super::auth_token::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::AuthToken.def()
    }
}

impl Related<super::magic_link::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::MagicLink.def()
    }
}

impl Related<super::organization::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Organization.def()
    }
}

impl Related<super::session_token::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::SessionToken.def()
    }
}

impl Related<super::totp_authenticator::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::TotpAuthenticator.def()
    }
}

impl ActiveModelBehavior for ActiveModel {}
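The user entity is the hub of most relations above. A sketch of loading users together with their session tokens through Relation::SessionToken; the connection handle and function are assumptions, not part of this commit.

use sea_orm::{DatabaseConnection, DbErr, EntityTrait};
use trifid_api_entities::entity::{session_token, user};

// Sketch only: one joined query pair per the Related<session_token::Entity> impl above.
async fn users_with_sessions(
    db: &DatabaseConnection,
) -> Result<Vec<(user::Model, Vec<session_token::Model>)>, DbErr> {
    user::Entity::find()
        .find_with_related(session_token::Entity)
        .all(db)
        .await
}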