networks and organizations

parent f54d8a11a1
commit 8d4bd51b4e

@@ -216,6 +216,31 @@ dependencies = [
  "generic-array",
 ]
 
+[[package]]
+name = "aes"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241"
+dependencies = [
+ "cfg-if",
+ "cipher",
+ "cpufeatures",
+]
+
+[[package]]
+name = "aes-gcm"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c"
+dependencies = [
+ "aead",
+ "aes",
+ "cipher",
+ "ctr",
+ "ghash",
+ "subtle",
+]
+
 [[package]]
 name = "ahash"
 version = "0.7.6"

@@ -679,30 +704,6 @@ version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 
-[[package]]
-name = "chacha20"
-version = "0.9.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818"
-dependencies = [
- "cfg-if",
- "cipher",
- "cpufeatures",
-]
-
-[[package]]
-name = "chacha20poly1305"
-version = "0.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35"
-dependencies = [
- "aead",
- "chacha20",
- "cipher",
- "poly1305",
- "zeroize",
-]
-
 [[package]]
 name = "chrono"
 version = "0.4.24"

@@ -727,7 +728,6 @@ checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad"
 dependencies = [
  "crypto-common",
  "inout",
- "zeroize",
 ]
 
 [[package]]

@@ -936,6 +936,15 @@ dependencies = [
  "syn 1.0.107",
 ]
 
+[[package]]
+name = "ctr"
+version = "0.9.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835"
+dependencies = [
+ "cipher",
+]
+
 [[package]]
 name = "ctrlc"
 version = "3.2.5"

@@ -1418,6 +1427,16 @@ dependencies = [
  "wasi 0.11.0+wasi-snapshot-preview1",
 ]
 
+[[package]]
+name = "ghash"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40"
+dependencies = [
+ "opaque-debug",
+ "polyval",
+]
+
 [[package]]
 name = "gloo-timers"
 version = "0.2.6"

@@ -2201,11 +2220,12 @@ dependencies = [
 ]
 
 [[package]]
-name = "poly1305"
-version = "0.8.0"
+name = "polyval"
+version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf"
+checksum = "7ef234e08c11dfcb2e56f79fd70f6f2eb7f025c0ce2333e82f4f0518ecad30c6"
 dependencies = [
+ "cfg-if",
  "cpufeatures",
  "opaque-debug",
  "universal-hash",

@@ -3473,8 +3493,8 @@ version = "0.1.0"
 dependencies = [
  "actix-request-identifier",
  "actix-web",
+ "aes-gcm",
  "base64 0.21.0",
- "chacha20poly1305",
  "chrono",
  "hex",
  "log",

@@ -29,4 +29,4 @@ base64 = "0.21.0" # Misc.
 chrono = "0.4.24" # Misc.
 
 trifid-pki = { version = "0.1.9" } # Cryptography
-chacha20poly1305 = "0.10.1" # Cryptography
+aes-gcm = "0.10.1" # Cryptography

@@ -0,0 +1,123 @@
+##########################
+# trifid-api config file #
+##########################
+# trifid-api, an open source reimplementation of the Defined Networking nebula management server.
+# Copyright (C) 2023 c0repwn3r
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+# Please read this file in its entirety to learn what options you do or don't need to change
+# to get a functional trifid-api instance.
+
+#### [database] ####
+# Options related to the PostgreSQL database connection.
+[database]
+# The PostgreSQL connection URL to connect to the database.
+# Example: postgres://username:password@ip:port/database-name.
+# The database provided must exist. Database migrations will be run automatically upon database startup.
+# Url. Required.
+url = "your-database-url-here"
+
+# The maximum number of connections that will be established to the database.
+# This effectively determines the number of requests that trifid-api can process in parallel, as almost every
+# request handler acquires a connection from the pool.
+# Integer. Optional. Default = 100
+# max_connections = 100
+
+# The minimum number of connections that will be established to the database.
+# At least this number of connections will be created and kept idle until needed. If requests have a lot of latency
+# due to acquiring connections from the database, raise this number.
+# Integer. Optional. Default = 5
+# min_connections = 5
+
+# The maximum amount of time (in seconds) that the database pool will wait in order to connect to the database.
+# After this amount of time, the connection will return an error and trifid-api will exit. If you have a very high-latency
+# database connection, raise this number.
+# Integer. Optional. Default = 8
+# connect_timeout = 8
+
+# The maximum amount of time (in seconds) that the database pool will wait in order to acquire a connection from the pool.
+# After this amount of time, the connection will return an error and trifid-api will exit. If you have a very high-latency
+# database connection, raise this number.
+# Integer. Optional. Default = 8
+# acquire_timeout = 8
+
+# The maximum amount of time (in seconds) that a database connection will remain idle before the connection is closed.
+# This only applies if closing this connection would not bring the number of connections below min_connections.
+# Unless you are handling thousands of requests per second, you probably don't need to change this value.
+# Integer. Optional. Default = 8
+# idle_timeout = 8
+
+# The maximum amount of time (in seconds) that a database connection will remain active before it is closed and replaced with a new connection.
+# It is unlikely you ever need to change this value, unless your database takes 5 or more seconds per query, in which case you
+# need a better database.
+# Integer. Optional. Default = 8
+# max_lifetime = 8
+
+# Should sqlx query logging be enabled?
+# Disable this if you are tired of the constant query spam in the logs. Enable for debugging.
+# Boolean. Optional. Default = true
+# sqlx_logging = true
+
+#### [server] ####
+# Configure options for the trifid-api HTTP server.
+[server]
+# What IPs and ports should the trifid-api server listen on?
+# This may need to be changed if you want to bind on a different port or interface.
+# SocketAddr. Optional. Default = 0.0.0.0:8080 (all IPs, port 8080)
+# bind = "0.0.0.0:8080"
+
+#### [tokens] ####
+# Configure options related to the various tokens that may be issued by the trifid-api server.
+[tokens]
+# How long (in seconds) should magic link tokens be valid for?
+# This controls how long links sent to users' email addresses will remain valid for login.
+# The default of 3600 (1 hour) is a sane default and you likely do not need to change this.
+# Integer. Optional. Default = 3600
+# magic_link_expiry_time_seconds = 3600 # 1 hour
+
+# How long (in seconds) should session tokens be valid for?
+# This controls how long it will take before a user will need to re-log in with a magic link, if they do not explicitly
+# log out first.
+# The default of 15780000 (6 months) is a sane default and you likely do not need to change this.
+# Integer. Optional. Default = 15780000
+# session_token_expiry_time_seconds = 15780000 # 6 months
+
+# How long (in seconds) should TOTP setup tokens be valid for?
+# This controls how long a user will have to set up TOTP after starting the setup process before the token is invalidated
+# and they need to try again.
+# The default of 600 (10 minutes) is a sane default and you likely do not need to change this.
+# Integer. Optional. Default = 600
+# totp_setup_timeout_time_seconds = 600 # 10 minutes
+
+# How long (in seconds) should MFA auth tokens be valid for?
+# This controls how long a user will remain logged in before they need to re-input their 2FA code.
+# The default of 600 (10 minutes) is a sane default and you likely do not need to change this.
+# Integer. Optional. Default = 600
+# mfa_tokens_expiry_time_seconds = 600 # 10 minutes
+
+#### [crypto] ####
+# Configure settings related to the cryptography used inside trifid-api.
+[crypto]
+
+# The per-instance data encryption key to protect sensitive data in the instance.
+# YOU ABSOLUTELY NEED TO CHANGE THIS. If you don't change anything else in this file, this should be the one thing you change.
+# This should be a 32-byte hex value. Generate it with `openssl rand -hex 32`, or any other tool of your choice.
+# If you get "InvalidLength" errors while trying to do anything involving organizations, that indicates that this
+# value was improperly generated.
+#
+# ------- WARNING -------
+# Do not change this value in a production instance. It will make existing data inaccessible until changed back.
+# ------- WARNING -------
+data-key = "edd600bcebea461381ea23791b6967c8667e12827ac8b94dc022f189a5dc59a2"
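
The [database] options above describe a standard connection pool. What follows is a minimal sketch (not part of this commit) of how such options are typically wired into sea-orm, which the rest of the diff uses; the TrifidConfigDatabase field names are assumed to mirror the config keys.

use std::time::Duration;
use sea_orm::{ConnectOptions, Database, DatabaseConnection, DbErr};
use crate::config::TrifidConfigDatabase;

// Assumed shape: one field per [database] key, with the defaults documented above.
pub async fn connect(db: &TrifidConfigDatabase) -> Result<DatabaseConnection, DbErr> {
    let mut opts = ConnectOptions::new(db.url.clone());
    opts.max_connections(db.max_connections)                      // default 100
        .min_connections(db.min_connections)                      // default 5
        .connect_timeout(Duration::from_secs(db.connect_timeout)) // default 8 s
        .acquire_timeout(Duration::from_secs(db.acquire_timeout))
        .idle_timeout(Duration::from_secs(db.idle_timeout))
        .max_lifetime(Duration::from_secs(db.max_lifetime))
        .sqlx_logging(db.sqlx_logging);                           // the query-spam switch
    Database::connect(opts).await
}

Raising min_connections trades a little idle memory for lower acquire latency, which is what the comment about request latency is getting at.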
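
The [tokens] values above are plain lifetimes in seconds (3600 ≈ 1 hour, 15780000 ≈ 6 months, 600 = 10 minutes). A small sketch, not part of the commit, of turning one into an absolute expiry timestamp; the CONFIG.tokens field names are assumed to match the config keys (only mfa_tokens_expiry_time_seconds is visible in config.rs further down).

use std::time::{Duration, SystemTime, UNIX_EPOCH};

// Expiry as a unix timestamp: "now" plus the configured lifetime.
fn expiry_unix(lifetime_seconds: u64) -> u64 {
    (SystemTime::now() + Duration::from_secs(lifetime_seconds))
        .duration_since(UNIX_EPOCH)
        .expect("time went backwards")
        .as_secs()
}

// e.g. for a freshly issued magic link (field name assumed):
// let expires_at = expiry_unix(CONFIG.tokens.magic_link_expiry_time_seconds);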
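
If you would rather not shell out to openssl for the data-key, the same value can be produced in Rust. A sketch, not part of the commit; any CSPRNG that yields 32 bytes (64 hex characters) satisfies the length requirement behind the "InvalidLength" note above.

use rand::rngs::OsRng;
use rand::RngCore;

// 32 random bytes -> 64 hex characters, the AES-256 key size trifid-api expects.
fn generate_data_key() -> String {
    let mut key = [0u8; 32];
    OsRng.fill_bytes(&mut key);
    hex::encode(key)
}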
@@ -26,7 +26,8 @@ pub static CONFIG: Lazy<TrifidConfig> = Lazy::new(|| {
 pub struct TrifidConfig {
     pub database: TrifidConfigDatabase,
     pub server: TrifidConfigServer,
-    pub tokens: TrifidConfigTokens
+    pub tokens: TrifidConfigTokens,
+    pub crypto: TrifidConfigCryptography
 }
 
 #[derive(Serialize, Deserialize, Debug)]

@@ -66,6 +67,11 @@ pub struct TrifidConfigTokens {
     pub mfa_tokens_expiry_time_seconds: u64
 }
 
+#[derive(Serialize, Deserialize, Debug)]
+pub struct TrifidConfigCryptography {
+    pub data_encryption_key: String
+}
+
 fn max_connections_default() -> u32 { 100 }
 fn min_connections_default() -> u32 { 5 }
 fn time_defaults() -> u64 { 8 }

@@ -0,0 +1,27 @@
+use std::error::Error;
+use aes_gcm::{Aes256Gcm, KeyInit, Nonce};
+use aes_gcm::aead::{Aead, Payload};
+use rand::Rng;
+use trifid_pki::rand_core::OsRng;
+use crate::config::TrifidConfig;
+
+pub fn get_cipher_from_config(config: &TrifidConfig) -> Result<Aes256Gcm, Box<dyn Error>> {
+    let key_slice = hex::decode(&config.crypto.data_encryption_key)?;
+    Ok(Aes256Gcm::new_from_slice(&key_slice)?)
+}
+
+pub fn encrypt_with_nonce(plaintext: &[u8], nonce: [u8; 12], cipher: &Aes256Gcm) -> Result<Vec<u8>, aes_gcm::Error> {
+    let nonce = Nonce::from_slice(&nonce);
+    let ciphertext = cipher.encrypt(nonce, plaintext)?;
+    Ok(ciphertext)
+}
+
+pub fn decrypt_with_nonce(ciphertext: &[u8], nonce: [u8; 12], cipher: &Aes256Gcm) -> Result<Vec<u8>, aes_gcm::Error> {
+    let nonce = Nonce::from_slice(&nonce);
+    let plaintext = cipher.decrypt(nonce, Payload::from(ciphertext))?;
+    Ok(plaintext)
+}
+
+pub fn generate_random_iv() -> [u8; 12] {
+    OsRng.gen()
+}
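
A round-trip sketch for the helpers above, written as a test that could sit at the bottom of this module; it is not part of the commit and builds its cipher from a throwaway key rather than from TrifidConfig.

#[cfg(test)]
mod tests {
    use super::*;
    use aes_gcm::{Aes256Gcm, KeyInit};

    #[test]
    fn encrypt_decrypt_round_trip() {
        let key = [0x42u8; 32];                        // stand-in for the configured data key
        let cipher = Aes256Gcm::new_from_slice(&key).unwrap();
        let iv = generate_random_iv();                 // fresh 12-byte nonce per encryption
        let ciphertext = encrypt_with_nonce(b"ca key pem bytes", iv, &cipher).unwrap();
        let plaintext = decrypt_with_nonce(&ciphertext, iv, &cipher).unwrap();
        assert_eq!(plaintext, b"ca key pem bytes".to_vec());
    }
}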
@@ -18,6 +18,7 @@ pub mod timers;
 pub mod magic_link;
 pub mod auth_tokens;
 pub mod cursor;
+pub mod crypto;
 
 pub struct AppState {
     pub conn: DatabaseConnection

@@ -70,6 +71,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
             .service(routes::v1::verify_totp_authenticators::verify_totp_authenticators_request)
             .service(routes::v1::auth::totp::totp_request)
             .service(routes::v1::networks::get_networks)
+            .service(routes::v1::organization::create_org_request)
     }).bind(CONFIG.server.bind)?.run().await?;
 
     Ok(())

@@ -2,4 +2,5 @@ pub mod auth;
 pub mod signup;
 pub mod totp_authenticators;
 pub mod verify_totp_authenticators;
 pub mod networks;
+pub mod organization;

@@ -1,7 +1,6 @@
 use serde::{Serialize, Deserialize};
 use actix_web::{get, HttpRequest, HttpResponse};
 use actix_web::web::{Data, Query};
-use chacha20poly1305::consts::P1;
 use chrono::{TimeZone, Utc};
 use log::error;
 use sea_orm::{ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, QueryOrder};

@@ -204,23 +203,25 @@ pub async fn get_networks(opts: Query<GetNetworksQueryParams>, req_info: HttpReq
             });
         }
     };
-    let models_mapped = models.iter().map(|u| {
+    let models_mapped: Vec<GetNetworksResponseData> = models.iter().map(|u| {
         GetNetworksResponseData {
             id: u.id.clone(),
             cidr: u.cidr.clone(),
             organization_id: u.organization.clone(),
             signing_ca_id: u.signing_ca.clone(),
-            created_at: Utc.timestamp_opt(u.created_at, 0).unwrap().format("%Y-%m-%dT%H-%M-%S.%.3fZ").to_string(),
+            created_at: Utc.timestamp_opt(u.created_at, 0).unwrap().format("%Y-%m-%dT%H-%M-%S%.3fZ").to_string(),
             name: u.name.clone(),
             lighthouses_as_relays: u.lighthouses_as_relays,
         }
     }).collect();
 
+    let count = models_mapped.len() as u64;
+
     HttpResponse::Ok().json(GetNetworksResponse {
         data: models_mapped,
         metadata: GetNetworksResponseMetadata {
             total_count: total,
-            has_next_page: cursor.page != pages,
+            has_next_page: cursor.page+1 != pages,
             has_prev_page: cursor.page != 0,
             prev_cursor: if cursor.page != 0 {
                 match (Cursor { page: cursor.page - 1 }).try_into() {

@@ -230,7 +231,7 @@ pub async fn get_networks(opts: Query<GetNetworksQueryParams>, req_info: HttpReq
             } else {
                 None
             },
-            next_cursor: if cursor.page != pages {
+            next_cursor: if cursor.page+1 != pages {
                 match (Cursor { page: cursor.page + 1 }).try_into() {
                     Ok(r) => Some(r),
                     Err(_) => None

@@ -240,7 +241,7 @@ pub async fn get_networks(opts: Query<GetNetworksQueryParams>, req_info: HttpReq
             },
             page: if opts.include_counts {
                 Some(GetNetworksResponseMetadataPage {
-                    count: opts.page_size,
+                    count,
                     start: opts.page_size * cursor.page,
                 })
             } else { None },
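
The two cursor changes above fix an off-by-one: pages are zero-indexed (start = page_size * cursor.page), so assuming `pages` is the total page count, the last valid index is pages - 1 and "is there a next page" has to compare cursor.page + 1 against pages. A standalone sketch, not part of the commit, that just checks the boundary:

fn has_next_page(current_page: u64, pages: u64) -> bool {
    current_page + 1 != pages
}

fn main() {
    // 25 rows with page_size = 10 -> 3 pages, indexed 0, 1, 2.
    let pages = 3u64;
    assert!(has_next_page(0, pages));
    assert!(has_next_page(1, pages));
    assert!(!has_next_page(2, pages)); // the old `2 != 3` check wrongly reported a next page
    println!("pagination boundary checks hold");
}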
@@ -0,0 +1,262 @@
+// !! !! !! THIS IS NOT A DN-COMPATIBLE API! !! !! !!
+// The organization create API has not yet been reverse engineered. This endpoint has nothing to do with the original API
+// and is a complete fabrication for trifid.
+// Help us out! Reverse engineer the actual org create mechanism and get us back to 100% parity!
+// - trifid maintainers
+
+use std::time::{Duration, SystemTime, UNIX_EPOCH};
+use actix_web::{HttpRequest, HttpResponse};
+use actix_web::web::{Data, Json};
+use serde::{Serialize, Deserialize};
+use crate::AppState;
+use actix_web::post;
+use log::error;
+use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter};
+use trifid_pki::cert::{NebulaCertificate, NebulaCertificateDetails, serialize_x25519_private};
+use trifid_pki::ed25519_dalek::SigningKey;
+use trifid_pki::rand_core::OsRng;
+use trifid_api_entities::entity::{network, organization, signing_ca};
+use crate::auth_tokens::{enforce_2fa, enforce_api_token, TokenInfo};
+use crate::config::CONFIG;
+use crate::crypto::{encrypt_with_nonce, generate_random_iv, get_cipher_from_config};
+use crate::error::{APIError, APIErrorsResponse};
+use crate::tokens::random_id;
+
+#[derive(Serialize, Deserialize)]
+pub struct OrgCreateRequest {
+    pub cidr: String
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct OrgCreateResponse {
+    pub organization: String,
+    pub ca: String,
+    pub network: String
+}
+
+#[post("/v1/organization")]
+pub async fn create_org_request(req: Json<OrgCreateRequest>, req_info: HttpRequest, db: Data<AppState>) -> HttpResponse {
+    // For this endpoint, you need to be a fully authenticated user
+    let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent);
+
+
+    // we have a session token, which means we have to do a db request to get the organization that this user owns
+    let user = match session_info {
+        TokenInfo::AuthToken(tkn) => tkn.session_info.user,
+        _ => {
+            return HttpResponse::Unauthorized().json(APIErrorsResponse {
+                errors: vec![
+                    APIError {
+                        code: "ERR_UNAUTHORIZED".to_string(),
+                        message: "Unauthorized".to_string(),
+                        path: None,
+                    }
+                ],
+            })
+        }
+    };
+
+    let org = match organization::Entity::find().filter(organization::Column::Owner.eq(&user.id)).one(&db.conn).await {
+        Ok(r) => r,
+        Err(e) => {
+            error!("database error: {}", e);
+            return HttpResponse::InternalServerError().json(APIErrorsResponse {
+                errors: vec![
+                    APIError {
+                        code: "ERR_DB_ERROR".to_string(),
+                        message: "There was an error performing the database request, please try again later.".to_string(),
+                        path: None,
+                    }
+                ],
+            });
+        }
+    };
+
+    if org.is_some() {
+        return HttpResponse::BadRequest().json(APIErrorsResponse {
+            errors: vec![
+                APIError {
+                    code: "ERR_USER_ALREADY_OWNS_ORG".to_string(),
+                    message: "This user already owns an organization".to_string(),
+                    path: None,
+                }
+            ],
+        })
+    }
+
+    let org = organization::Model {
+        id: random_id("org"),
+        name: format!("{}'s Organization", user.email),
+        owner: user.id.clone(),
+    };
+
+    // Generate the CA keypair
+    let private_key = SigningKey::generate(&mut OsRng);
+    let public_key = private_key.verifying_key();
+
+    let mut cert = NebulaCertificate {
+        details: NebulaCertificateDetails {
+            name: format!("{} Signing CA", org.name),
+            ips: vec![],
+            subnets: vec![],
+            groups: vec![],
+            not_before: SystemTime::now(),
+            not_after: SystemTime::now() + Duration::from_secs(31536000 * 3), // 3 years
+            public_key: public_key.to_bytes(),
+            is_ca: true,
+            issuer: "".to_string(), // Self-signed certificate! No issuer present
+        },
+        signature: vec![],
+    };
+    // Self-sign the CA certificate
+    match cert.sign(&private_key) {
+        Ok(_) => (),
+        Err(e) => {
+            error!("[security] certificate signature error: {}", e);
+            return HttpResponse::InternalServerError().json(APIErrorsResponse {
+                errors: vec![
+                    APIError {
+                        code: "ERR_CERT_SIGNING_ERROR".to_string(),
+                        message: "There was an error signing the Certificate Authority on the server. Please try again later.".to_string(),
+                        path: None,
+                    }
+                ]
+            });
+        }
+    }
+
+    // PEM-encode the CA key
+    let ca_key_pem = serialize_x25519_private(&private_key.to_keypair_bytes());
+    // PEM-encode the CA cert
+    let ca_cert_pem = match cert.serialize_to_pem() {
+        Ok(pem) => pem,
+        Err(e) => {
+            error!("[security] certificate encoding error: {}", e);
+            return HttpResponse::InternalServerError().json(APIErrorsResponse {
+                errors: vec![
+                    APIError {
+                        code: "ERR_CERT_ENCODING_ERROR".to_string(),
+                        message: "There was an error encoding the certificate on the server. Please try again later.".to_string(),
+                        path: None,
+                    }
+                ]
+            });
+        }
+    };
+
+    let iv = generate_random_iv(); // Generate a randomized IV to use for key encryption
+    let iv_hex = hex::encode(iv);
+
+    let cipher = match get_cipher_from_config(&CONFIG) {
+        Ok(pem) => pem,
+        Err(e) => {
+            error!("[security] cipher fetch error: {}", e);
+            return HttpResponse::InternalServerError().json(APIErrorsResponse {
+                errors: vec![
+                    APIError {
+                        code: "ERR_CIPHER_ERROR".to_string(),
+                        message: "There was an error encrypting the organization data. Please try again later.".to_string(),
+                        path: None,
+                    }
+                ]
+            });
+        }
+    };
+
+    let ca_key_encrypted = match encrypt_with_nonce(&ca_key_pem, iv, &cipher) {
+        Ok(key) => hex::encode(key),
+        Err(e) => {
+            error!("[security] certificate encoding error: {}", e);
+            return HttpResponse::InternalServerError().json(APIErrorsResponse {
+                errors: vec![
+                    APIError {
+                        code: "ERR_CERT_ENCODING_ERROR".to_string(),
+                        message: "There was an error encoding the certificate on the server. Please try again later.".to_string(),
+                        path: None,
+                    }
+                ]
+            });
+        }
+    };
+
+    let ca_crt = hex::encode(ca_cert_pem);
+
+    let signing_ca = signing_ca::Model {
+        id: random_id("ca"),
+        organization: org.id.clone(),
+        cert: ca_key_encrypted,
+        key: ca_crt,
+        expires: cert.details.not_after.duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs() as i64,
+        nonce: iv_hex,
+    };
+
+    let network_model = network::Model {
+        id: random_id("network"),
+        cidr: req.cidr.clone(),
+        organization: org.id.clone(),
+        signing_ca: signing_ca.id.clone(),
+        created_at: SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs() as i64,
+        name: "Network1".to_string(),
+        lighthouses_as_relays: true,
+    };
+
+    let new_org_id = org.id.clone();
+    let new_signing_ca_id = signing_ca.id.clone();
+    let new_network_id = network_model.id.clone();
+
+    let org_active_model = org.into_active_model();
+    let signing_ca_active_model = signing_ca.into_active_model();
+    let network_active_model = network_model.into_active_model();
+
+    match org_active_model.insert(&db.conn).await {
+        Ok(_) => (),
+        Err(e) => {
+            error!("database error: {}", e);
+            return HttpResponse::InternalServerError().json(APIErrorsResponse {
+                errors: vec![
+                    APIError {
+                        code: "ERR_DB_ERROR".to_string(),
+                        message: "There was an error performing the database request, please try again later.".to_string(),
+                        path: None,
+                    }
+                ],
+            });
+        }
+    }
+    match signing_ca_active_model.insert(&db.conn).await {
+        Ok(_) => (),
+        Err(e) => {
+            error!("database error: {}", e);
+            return HttpResponse::InternalServerError().json(APIErrorsResponse {
+                errors: vec![
+                    APIError {
+                        code: "ERR_DB_ERROR".to_string(),
+                        message: "There was an error performing the database request, please try again later.".to_string(),
+                        path: None,
+                    }
+                ],
+            });
+        }
+    }
+    match network_active_model.insert(&db.conn).await {
+        Ok(_) => (),
+        Err(e) => {
+            error!("database error: {}", e);
+            return HttpResponse::InternalServerError().json(APIErrorsResponse {
+                errors: vec![
+                    APIError {
+                        code: "ERR_DB_ERROR".to_string(),
+                        message: "There was an error performing the database request, please try again later.".to_string(),
+                        path: None,
+                    }
+                ],
+            });
+        }
+    }
+
+    HttpResponse::Ok().json(OrgCreateResponse {
+        organization: new_org_id,
+        ca: new_signing_ca_id,
+        network: new_network_id,
+    })
+}
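
The handler above stores the CA certificate hex-encoded and the CA private key AES-256-GCM-encrypted, with the hex nonce kept alongside them on signing_ca::Model. A sketch, not part of the commit, of the reverse path a later handler would need; the helper name is hypothetical, and the field meanings follow what is stored above, where the encrypted key ends up in `cert`.

use std::error::Error;
use crate::config::CONFIG;
use crate::crypto::{decrypt_with_nonce, get_cipher_from_config};
use trifid_api_entities::entity::signing_ca;

// Hypothetical helper: recover the CA key PEM written by create_org_request.
pub fn decrypt_ca_key(ca: &signing_ca::Model) -> Result<Vec<u8>, Box<dyn Error>> {
    let cipher = get_cipher_from_config(&CONFIG)?;
    let ciphertext = hex::decode(&ca.cert)?;                               // encrypted key, hex-encoded
    let nonce: [u8; 12] = hex::decode(&ca.nonce)?.as_slice().try_into()?;  // 12-byte AES-GCM nonce
    let pem = decrypt_with_nonce(&ciphertext, nonce, &cipher)
        .map_err(|_| "AES-GCM decryption failed (wrong data-key or corrupted ciphertext)")?;
    Ok(pem)
}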