new work
/ build (push) Failing after 49s Details
/ build_x64 (push) Successful in 2m26s Details
/ build_arm64 (push) Successful in 2m33s Details
/ build_win64 (push) Successful in 2m32s Details

This commit is contained in:
core 2023-11-18 22:51:45 -05:00
parent cec6c72380
commit 53c6fb18fd
Signed by: core
GPG Key ID: FDBF740DADDCEECF
115 changed files with 1715 additions and 2808 deletions

1891
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,12 +1,10 @@
[workspace]
members = [
"trifid-api",
"trifid-api/trifid_api_migration",
"trifid-api/trifid_api_entities",
"tfclient",
"trifid-pki",
"dnapi-rs",
"tfcli",
"nebula-ffi"
"nebula-ffi",
"trifid-api"
]
resolver = "2"

View File

@ -2,8 +2,8 @@ FROM rust:latest
COPY trifid-pki /trifid-pki
COPY dnapi-rs /dnapi-rs
COPY trifid-api /trifid-api
COPY trifid-api-old /trifid-api
RUN cd /trifid-api && cargo build --release && cp target/release/trifid-api /bin/trifid-api
RUN cd /trifid-api-old && cargo build --release && cp target/release/trifid-api-old /bin/trifid-api-old
CMD ["/bin/trifid-api"]

2
database_structure Normal file
View File

@ -0,0 +1,2 @@
Users
\-

View File

@ -82,8 +82,8 @@ const config = {
to: '/docs/intro',
},
{
label: 'trifid-api',
to: '/docs/trifid-api/intro',
label: 'trifid-api-old',
to: '/docs/trifid-api-old/intro',
},
{
label: 'tfweb',

View File

@ -24,7 +24,7 @@ pub struct Args {
#[command(subcommand)]
command: Commands,
#[clap(short, long, env = "TFCLI_SERVER")]
/// The base URL of your trifid-api instance. Defaults to the value in $XDG_CONFIG_HOME/tfcli-server-url.conf or the TFCLI_SERVER environment variable.
/// The base URL of your trifid-api-old instance. Defaults to the value in $XDG_CONFIG_HOME/tfcli-server-url.conf or the TFCLI_SERVER environment variable.
server: Option<Url>
}
@ -69,7 +69,7 @@ pub enum AccountCommands {
#[clap(short, long)]
email: String
},
/// Log in to your account with a magic-link token acquired via email or the trifid-api logs.
/// Log in to your account with a magic-link token acquired via email or the trifid-api-old logs.
MagicLink {
#[clap(short, long)]
magic_link_token: String
@ -103,7 +103,7 @@ pub enum NetworkCommands {
#[derive(Subcommand, Debug)]
pub enum OrgCommands {
/// Create an organization on your trifid-api server. NOTE: This command ONLY works on trifid-api servers. It will NOT work on original DN servers.
/// Create an organization on your trifid-api-old server. NOTE: This command ONLY works on trifid-api-old servers. It will NOT work on original DN servers.
Create {
#[clap(short, long)]
cidr: Ipv4Net
@ -179,7 +179,7 @@ pub enum HostCommands {
#[clap(short, long)]
id: String
},
/// Update a specific host by its ID, changing the listen port and static addresses, as well as the name, ip and role. The name, ip and role updates will only work on trifid-api compatible servers.
/// Update a specific host by its ID, changing the listen port and static addresses, as well as the name, ip and role. The name, ip and role updates will only work on trifid-api-old compatible servers.
Update {
#[clap(short, long)]
id: String,

View File

@ -55,7 +55,7 @@ enum Commands {
server: String,
},
/// Enroll this host using a trifid-api enrollment code
/// Enroll this host using a trifid-api-old enrollment code
Enroll {
#[clap(short, long, default_value = "tfclient")]
/// Service name specified on install

3
trifid-api-old/build.rs Normal file
View File

@ -0,0 +1,3 @@
// Build script: instruct Cargo to re-run this script (and rebuild the crate)
// whenever anything under migrations/ changes, so the compiled binary never
// ships with a stale view of the migration set.
fn main() {
println!("cargo:rerun-if-changed=migrations/");
}

View File

@ -1,7 +1,7 @@
##########################
# trifid-api config file #
# trifid-api-old config file #
##########################
# trifid-api, an open source reimplementation of the Defined Networking nebula management server.
# trifid-api-old, an open source reimplementation of the Defined Networking nebula management server.
# Copyright (C) 2023 c0repwn3r
#
# This program is free software: you can redistribute it and/or modify
@ -18,7 +18,7 @@
# along with this program. If not, see <https:#www.gnu.org/licenses/>.
# Please read this file in its entirety to learn what options you do or don't need to change
# to get a functional trifid-api instance.
# to get a functional trifid-api-old instance.
#### [database] ####
# Options related to the PostgreSQL database connection.
@ -30,7 +30,7 @@
url = "your-database-url-here"
# The maximum number of connections that will be established to the database.
# This will effectively mean the amount of requests that trifid-api can process in parallel, as almost every
# This will effectively mean the amount of requests that trifid-api-old can process in parallel, as almost every
# request handler acquires a connection from the pool.
# Integer. Optional. Default: 100
# max_connections = 100
@ -42,13 +42,13 @@ url = "your-database-url-here"
# min_connections = 5
# The maximum amount of time (in seconds) that the database pool will wait in order to connect to the database.
# After this amount of time, the connection will return an error and trifid-api will exit. If you have a very high-latency
# After this amount of time, the connection will return an error and trifid-api-old will exit. If you have a very high-latency
# database connection, raise this number.
# Integer. Optional. Default = 8
# connect_timeout = 8
# The maximum amount of time (in seconds) that the database pool will wait in order to acquire a connection from the database pool.
# After this amount of time, the connection will return an error and trifid-api will exit. If you have a very high-latency
# After this amount of time, the connection will return an error and trifid-api-old will exit. If you have a very high-latency
# database connection, raise this number.
# Integer. Optional. Default = 8
# acquire_timeout = 8
@ -71,9 +71,9 @@ url = "your-database-url-here"
# sqlx_logging = true
#### [server] ####
# Configure options for the trifid-api HTTP server.
# Configure options for the trifid-api-old HTTP server.
[server]
# What IPs and ports should the trifid-api server listen on?
# What IPs and ports should the trifid-api-old server listen on?
# This may need to be changed if you want to bind on a different port or interface.
# SocketAddr. Optional. Default = 0.0.0.0:8080 (all IPs, port 8080)
# bind = "0.0.0.0:8080"
@ -84,7 +84,7 @@ url = "your-database-url-here"
# workers = 32
#### [tokens] ####
# Configure options related to the various tokens that may be issued by the trifid-api server.
# Configure options related to the various tokens that may be issued by the trifid-api-old server.
[tokens]
# How long (in seconds) should magic link tokens be valid for?
# This controls how long links sent to user's email addresses will remain valid for login.
@ -113,7 +113,7 @@ url = "your-database-url-here"
# mfa_tokens_expiry_time_seconds = 600 # 10 minutes
#### [crypto] ####
# Configure settings related to the cryptography used inside trifid-api
# Configure settings related to the cryptography used inside trifid-api-old
[crypto]
# The per-instance data encryption key to protect sensitive data in the instance.

View File

@ -1,4 +1,4 @@
// trifid-api, an open source reimplementation of the Defined Networking nebula management server.
// trifid-api-old, an open source reimplementation of the Defined Networking nebula management server.
// Copyright (C) 2023 c0repwn3r
//
// This program is free software: you can redistribute it and/or modify

View File

@ -0,0 +1,736 @@
// trifid-api-old, an open source reimplementation of the Defined Networking nebula management server.
// Copyright (C) 2023 c0repwn3r
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use ipnet::{IpNet, Ipv4Net};
use log::error;
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::error::Error;
use std::fs;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::path::PathBuf;
use std::time::SystemTime;
use trifid_pki::cert::deserialize_nebula_certificate_from_pem;
/// Global, lazily-initialized trifid configuration.
///
/// On first access this reads and parses `/etc/trifid/config.toml`. Any read
/// or parse failure is logged via `error!` and terminates the process with
/// exit code 1 — there is no way to run without a valid config.
pub static CONFIG: Lazy<TrifidConfig> = Lazy::new(|| {
    let raw = fs::read_to_string("/etc/trifid/config.toml").unwrap_or_else(|e| {
        error!("Unable to read config file: {}", e);
        std::process::exit(1);
    });
    toml::from_str(&raw).unwrap_or_else(|e| {
        error!("Unable to parse config file: {}", e);
        std::process::exit(1);
    })
});
/// Top-level trifid configuration, deserialized from `/etc/trifid/config.toml`.
/// Section names here become TOML table names, so field names are part of the
/// on-disk format and must not be renamed.
#[derive(Serialize, Debug, Deserialize)]
pub struct TrifidConfig {
pub database: TrifidConfigDatabase,
pub server: TrifidConfigServer,
pub tokens: TrifidConfigTokens,
pub crypto: TrifidConfigCryptography,
}
/// `[database]` section: PostgreSQL connection-pool settings.
/// Every field except `url` has a serde default (see the `*_default` /
/// `time_defaults` functions below), so only `url` is mandatory.
#[derive(Serialize, Deserialize, Debug)]
pub struct TrifidConfigDatabase {
pub url: String,
#[serde(default = "max_connections_default")]
pub max_connections: u32,
#[serde(default = "min_connections_default")]
pub min_connections: u32,
// The four timeouts below all share the same 8-second default.
#[serde(default = "time_defaults")]
pub connect_timeout: u64,
#[serde(default = "time_defaults")]
pub acquire_timeout: u64,
#[serde(default = "time_defaults")]
pub idle_timeout: u64,
#[serde(default = "time_defaults")]
pub max_lifetime: u64,
#[serde(default = "sqlx_logging_default")]
pub sqlx_logging: bool,
}
/// `[server]` section: HTTP listener settings (defaults: 0.0.0.0:8080, 32 workers).
#[derive(Serialize, Deserialize, Debug)]
pub struct TrifidConfigServer {
#[serde(default = "socketaddr_8080")]
pub bind: SocketAddr,
#[serde(default = "default_workers")]
pub workers: usize
}
/// `[tokens]` section: lifetimes (in seconds) of the various issued tokens.
#[derive(Serialize, Deserialize, Debug)]
pub struct TrifidConfigTokens {
#[serde(default = "magic_link_expiry_time")]
pub magic_link_expiry_time_seconds: u64,
#[serde(default = "session_token_expiry_time")]
pub session_token_expiry_time_seconds: u64,
#[serde(default = "totp_setup_timeout_time")]
pub totp_setup_timeout_time_seconds: u64,
#[serde(default = "mfa_tokens_expiry_time")]
pub mfa_tokens_expiry_time_seconds: u64,
// NOTE(review): inconsistently named — every sibling ends in `_seconds`.
// Renaming would change the TOML key, so it is left as-is.
#[serde(default = "enrollment_tokens_expiry_time")]
pub enrollment_tokens_expiry_time: u64,
}
/// `[crypto]` section: instance data-encryption key, keystore path and
/// certificate lifetime.
#[derive(Serialize, Deserialize, Debug)]
pub struct TrifidConfigCryptography {
pub data_encryption_key: String,
pub local_keystore_directory: PathBuf,
#[serde(default = "certs_expiry_time")]
pub certs_expiry_time: u64,
}
// Serde default-value providers for TrifidConfig* fields. Serde requires these
// to be functions referenced by name, hence one tiny function per default.
fn max_connections_default() -> u32 {
100
}
fn min_connections_default() -> u32 {
5
}
// Shared default for all four database timeouts (seconds).
fn time_defaults() -> u64 {
8
}
fn sqlx_logging_default() -> bool {
true
}
// Default bind address: all interfaces, port 8080.
fn socketaddr_8080() -> SocketAddr {
SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::from([0, 0, 0, 0]), 8080))
}
fn magic_link_expiry_time() -> u64 {
3600
} // 1 hour
fn session_token_expiry_time() -> u64 {
15780000
} // 6 months
fn totp_setup_timeout_time() -> u64 {
600
} // 10 minutes
fn mfa_tokens_expiry_time() -> u64 {
600
} // 10 minutes
fn enrollment_tokens_expiry_time() -> u64 {
600
} // 10 minutes
fn certs_expiry_time() -> u64 {
3600 * 24 * 31 * 12 // ~1 year — actually 372 days (12 × 31-day months)
}
/// Root of a generated Nebula node configuration (serialized to YAML/TOML for
/// the nebula client). The `skip_serializing_if` attributes keep emitted
/// configs minimal by omitting fields that hold their default value.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfig {
pub pki: NebulaConfigPki,
#[serde(default = "empty_hashmap")]
#[serde(skip_serializing_if = "is_empty_hashmap")]
pub static_host_map: HashMap<Ipv4Addr, Vec<SocketAddrV4>>,
#[serde(skip_serializing_if = "is_none")]
pub lighthouse: Option<NebulaConfigLighthouse>,
#[serde(skip_serializing_if = "is_none")]
pub listen: Option<NebulaConfigListen>,
#[serde(skip_serializing_if = "is_none")]
pub punchy: Option<NebulaConfigPunchy>,
#[serde(default = "cipher_aes")]
#[serde(skip_serializing_if = "is_cipher_aes")]
pub cipher: NebulaConfigCipher,
#[serde(default = "empty_vec")]
#[serde(skip_serializing_if = "is_empty_vec")]
pub preferred_ranges: Vec<IpNet>,
#[serde(skip_serializing_if = "is_none")]
pub relay: Option<NebulaConfigRelay>,
#[serde(skip_serializing_if = "is_none")]
pub tun: Option<NebulaConfigTun>,
#[serde(skip_serializing_if = "is_none")]
pub logging: Option<NebulaConfigLogging>,
#[serde(skip_serializing_if = "is_none")]
pub sshd: Option<NebulaConfigSshd>,
#[serde(skip_serializing_if = "is_none")]
pub firewall: Option<NebulaConfigFirewall>,
#[serde(default = "u64_1")]
#[serde(skip_serializing_if = "is_u64_1")]
pub routines: u64,
#[serde(default = "none")]
#[serde(skip_serializing_if = "is_none")]
pub stats: Option<NebulaConfigStats>,
#[serde(default = "none")]
#[serde(skip_serializing_if = "is_none")]
pub local_range: Option<Ipv4Net>,
}
/// `pki` section: CA bundle, host certificate and key (all PEM strings).
/// Deliberately does NOT derive PartialEq — a custom impl below compares
/// certificates while ignoring their validity window and signature.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct NebulaConfigPki {
pub ca: String,
pub cert: String,
#[serde(default = "none")]
#[serde(skip_serializing_if = "is_none")]
pub key: Option<String>,
#[serde(default = "empty_vec")]
#[serde(skip_serializing_if = "is_empty_vec")]
pub blocklist: Vec<String>,
#[serde(default = "bool_false")]
#[serde(skip_serializing_if = "is_bool_false")]
pub disconnect_invalid: bool,
}
/// Custom equality for the `pki` section.
///
/// All scalar fields are compared directly; the certificate is compared via
/// `is_cert_equal_ignoring_expiry` so that a freshly re-signed (but otherwise
/// identical) certificate does not register as a config change. If either
/// certificate fails to parse, this falls back to plain string equality.
impl PartialEq for NebulaConfigPki {
    fn eq(&self, other: &Self) -> bool {
        let simple_fields_equal = self.ca == other.ca
            && self.key == other.key
            && self.blocklist == other.blocklist
            && self.disconnect_invalid == other.disconnect_invalid;
        if !simple_fields_equal {
            return false;
        }
        // Certificate comparison; on parse failure fall back to raw equality.
        is_cert_equal_ignoring_expiry(&self.cert, &other.cert)
            .unwrap_or(self.cert == other.cert)
    }
}
/// Determines whether two PEM-encoded Nebula certificates are logically equal,
/// ignoring `not_before`, `not_after` and the signature.
///
/// Exception: if either certificate is already expired, the validity window
/// and signature are compared as well.
///
/// # Errors
/// Returns `Err` if either input cannot be parsed as a PEM certificate.
fn is_cert_equal_ignoring_expiry(me: &str, other: &str) -> Result<bool, Box<dyn Error>> {
    let cert_a = deserialize_nebula_certificate_from_pem(me.as_bytes())?;
    let cert_b = deserialize_nebula_certificate_from_pem(other.as_bytes())?;
    let (a, b) = (&cert_a.details, &cert_b.details);

    // Identity-defining fields must always match.
    let identity_equal = a.is_ca == b.is_ca
        && a.name == b.name
        && a.public_key == b.public_key
        && a.groups == b.groups
        && a.ips == b.ips
        && a.issuer == b.issuer
        && a.subnets == b.subnets;
    if !identity_equal {
        return Ok(false);
    }

    // Once either certificate has expired, the validity window and the
    // signature become significant again.
    let now = SystemTime::now();
    if cert_a.expired(now) || cert_b.expired(now) {
        let validity_equal = a.not_before == b.not_before
            && a.not_after == b.not_after
            && cert_a.signature == cert_b.signature;
        return Ok(validity_equal);
    }

    Ok(true)
}
/// `lighthouse` section: whether this node is/uses a lighthouse, plus DNS and
/// allow-list settings.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigLighthouse {
#[serde(default = "bool_false")]
#[serde(skip_serializing_if = "is_bool_false")]
pub am_lighthouse: bool,
#[serde(default = "bool_false")]
#[serde(skip_serializing_if = "is_bool_false")]
pub serve_dns: bool,
#[serde(skip_serializing_if = "is_none")]
pub dns: Option<NebulaConfigLighthouseDns>,
#[serde(default = "u32_10")]
#[serde(skip_serializing_if = "is_u32_10")]
pub interval: u32,
#[serde(default = "empty_vec")]
#[serde(skip_serializing_if = "is_empty_vec")]
pub hosts: Vec<Ipv4Addr>,
#[serde(default = "empty_hashmap")]
#[serde(skip_serializing_if = "is_empty_hashmap")]
pub remote_allow_list: HashMap<Ipv4Net, bool>,
#[serde(default = "empty_hashmap")]
#[serde(skip_serializing_if = "is_empty_hashmap")]
pub local_allow_list: HashMap<Ipv4Net, bool>, // `interfaces` is not supported
}
/// `lighthouse.dns` sub-section: bind host/port for the lighthouse DNS server
/// (port defaults to 53).
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigLighthouseDns {
#[serde(default = "string_empty")]
#[serde(skip_serializing_if = "is_string_empty")]
pub host: String,
#[serde(default = "u16_53")]
#[serde(skip_serializing_if = "is_u16_53")]
pub port: u16,
}
/// `listen` section: UDP listener host/port and buffer tuning
/// (port 0 = OS-assigned).
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigListen {
#[serde(default = "string_empty")]
#[serde(skip_serializing_if = "is_string_empty")]
pub host: String,
#[serde(default = "u16_0")]
#[serde(skip_serializing_if = "is_u16_0")]
pub port: u16,
#[serde(default = "u32_64")]
#[serde(skip_serializing_if = "is_u32_64")]
pub batch: u32,
#[serde(skip_serializing_if = "is_none")]
pub read_buffer: Option<u32>,
#[serde(skip_serializing_if = "is_none")]
pub write_buffer: Option<u32>,
}
/// `punchy` section: NAT hole-punching behavior (`delay` defaults to "1s").
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigPunchy {
#[serde(default = "bool_false")]
#[serde(skip_serializing_if = "is_bool_false")]
pub punch: bool,
#[serde(default = "bool_false")]
#[serde(skip_serializing_if = "is_bool_false")]
pub respond: bool,
#[serde(default = "string_1s")]
#[serde(skip_serializing_if = "is_string_1s")]
pub delay: String,
}
/// Tunnel cipher; serialized as the lowercase strings nebula expects.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum NebulaConfigCipher {
#[serde(rename = "aes")]
Aes,
#[serde(rename = "chachapoly")]
ChaChaPoly,
}
/// `relay` section: relay hosts to use, and whether this node relays for others.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigRelay {
#[serde(default = "empty_vec")]
#[serde(skip_serializing_if = "is_empty_vec")]
pub relays: Vec<Ipv4Addr>,
#[serde(default = "bool_false")]
#[serde(skip_serializing_if = "is_bool_false")]
pub am_relay: bool,
// Note the default here is true, unlike the other flags.
#[serde(default = "bool_true")]
#[serde(skip_serializing_if = "is_bool_true")]
pub use_relays: bool,
}
/// `tun` section: virtual network-device settings (MTU defaults to 1300,
/// tx_queue to 500).
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigTun {
#[serde(default = "bool_false")]
#[serde(skip_serializing_if = "is_bool_false")]
pub disabled: bool,
#[serde(skip_serializing_if = "is_none")]
pub dev: Option<String>,
#[serde(default = "bool_false")]
#[serde(skip_serializing_if = "is_bool_false")]
pub drop_local_broadcast: bool,
#[serde(default = "bool_false")]
#[serde(skip_serializing_if = "is_bool_false")]
pub drop_multicast: bool,
#[serde(default = "u64_500")]
#[serde(skip_serializing_if = "is_u64_500")]
pub tx_queue: u64,
#[serde(default = "u64_1300")]
#[serde(skip_serializing_if = "is_u64_1300")]
pub mtu: u64,
#[serde(default = "empty_vec")]
#[serde(skip_serializing_if = "is_empty_vec")]
pub routes: Vec<NebulaConfigTunRouteOverride>,
#[serde(default = "empty_vec")]
#[serde(skip_serializing_if = "is_empty_vec")]
pub unsafe_routes: Vec<NebulaConfigTunUnsafeRoute>,
}
/// Per-route MTU override for an in-mesh route.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigTunRouteOverride {
pub mtu: u64,
pub route: Ipv4Net,
}
/// Route to a network outside the mesh, reached via a mesh node (`via`).
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigTunUnsafeRoute {
pub route: Ipv4Net,
pub via: Ipv4Addr,
#[serde(default = "u64_1300")]
#[serde(skip_serializing_if = "is_u64_1300")]
pub mtu: u64,
#[serde(default = "i64_100")]
#[serde(skip_serializing_if = "is_i64_100")]
pub metric: i64,
}
/// `logging` section: level, format and timestamp settings for the nebula client.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigLogging {
#[serde(default = "loglevel_info")]
#[serde(skip_serializing_if = "is_loglevel_info")]
pub level: NebulaConfigLoggingLevel,
#[serde(default = "format_text")]
#[serde(skip_serializing_if = "is_format_text")]
pub format: NebulaConfigLoggingFormat,
#[serde(default = "bool_false")]
#[serde(skip_serializing_if = "is_bool_false")]
pub disable_timestamp: bool,
// Go reference-time layout string (nebula is written in Go).
#[serde(default = "timestamp")]
#[serde(skip_serializing_if = "is_timestamp")]
pub timestamp_format: String,
}
/// Log severity levels, serialized as the lowercase names nebula expects.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum NebulaConfigLoggingLevel {
#[serde(rename = "panic")]
Panic,
#[serde(rename = "fatal")]
Fatal,
#[serde(rename = "error")]
Error,
#[serde(rename = "warning")]
Warning,
#[serde(rename = "info")]
Info,
#[serde(rename = "debug")]
Debug,
}
/// Log output format.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum NebulaConfigLoggingFormat {
#[serde(rename = "json")]
Json,
#[serde(rename = "text")]
Text,
}
/// `sshd` section: nebula's built-in debug SSH server. `listen` and
/// `host_key` have no defaults and are required when this section is present.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigSshd {
#[serde(default = "bool_false")]
#[serde(skip_serializing_if = "is_bool_false")]
pub enabled: bool,
pub listen: SocketAddrV4,
pub host_key: String,
#[serde(default = "empty_vec")]
#[serde(skip_serializing_if = "is_empty_vec")]
pub authorized_users: Vec<NebulaConfigSshdAuthorizedUser>,
}
/// One authorized user for the debug SSH server, with their public keys.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigSshdAuthorizedUser {
pub user: String,
#[serde(default = "empty_vec")]
#[serde(skip_serializing_if = "is_empty_vec")]
pub keys: Vec<String>,
}
/// `stats` section: metrics backend, internally tagged on the `type` key
/// ("graphite" or "prometheus").
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(tag = "type")]
pub enum NebulaConfigStats {
#[serde(rename = "graphite")]
Graphite(NebulaConfigStatsGraphite),
#[serde(rename = "prometheus")]
Prometheus(NebulaConfigStatsPrometheus),
}
/// Graphite metrics settings (`host` and `interval` are required).
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigStatsGraphite {
#[serde(default = "string_nebula")]
#[serde(skip_serializing_if = "is_string_nebula")]
pub prefix: String,
#[serde(default = "protocol_tcp")]
#[serde(skip_serializing_if = "is_protocol_tcp")]
pub protocol: NebulaConfigStatsGraphiteProtocol,
pub host: SocketAddrV4,
pub interval: String,
#[serde(default = "bool_false")]
#[serde(skip_serializing_if = "is_bool_false")]
pub message_metrics: bool,
#[serde(default = "bool_false")]
#[serde(skip_serializing_if = "is_bool_false")]
pub lighthouse_metrics: bool,
}
/// Transport protocol for Graphite submission.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum NebulaConfigStatsGraphiteProtocol {
#[serde(rename = "tcp")]
Tcp,
#[serde(rename = "udp")]
Udp,
}
/// Prometheus metrics settings (`listen`, `path` and `interval` are required).
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigStatsPrometheus {
pub listen: String,
pub path: String,
#[serde(default = "string_nebula")]
#[serde(skip_serializing_if = "is_string_nebula")]
pub namespace: String,
#[serde(default = "string_nebula")]
#[serde(skip_serializing_if = "is_string_nebula")]
pub subsystem: String,
pub interval: String,
#[serde(default = "bool_false")]
#[serde(skip_serializing_if = "is_bool_false")]
pub message_metrics: bool,
#[serde(default = "bool_false")]
#[serde(skip_serializing_if = "is_bool_false")]
pub lighthouse_metrics: bool,
}
/// `firewall` section: conntrack tuning plus inbound/outbound rule lists.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigFirewall {
#[serde(default = "none")]
#[serde(skip_serializing_if = "is_none")]
pub conntrack: Option<NebulaConfigFirewallConntrack>,
#[serde(default = "none")]
#[serde(skip_serializing_if = "is_none")]
pub inbound: Option<Vec<NebulaConfigFirewallRule>>,
#[serde(default = "none")]
#[serde(skip_serializing_if = "is_none")]
pub outbound: Option<Vec<NebulaConfigFirewallRule>>,
}
/// Connection-tracking timeouts, expressed as Go duration strings
/// (defaults: tcp "12m", udp "3m", other "10m").
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigFirewallConntrack {
#[serde(default = "string_12m")]
#[serde(skip_serializing_if = "is_string_12m")]
pub tcp_timeout: String,
#[serde(default = "string_3m")]
#[serde(skip_serializing_if = "is_string_3m")]
pub udp_timeout: String,
#[serde(default = "string_10m")]
#[serde(skip_serializing_if = "is_string_10m")]
pub default_timeout: String,
}
/// One firewall rule; every selector is optional and omitted when unset.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct NebulaConfigFirewallRule {
#[serde(default = "none")]
#[serde(skip_serializing_if = "is_none")]
pub port: Option<String>,
#[serde(default = "none")]
#[serde(skip_serializing_if = "is_none")]
pub proto: Option<String>,
#[serde(default = "none")]
#[serde(skip_serializing_if = "is_none")]
pub ca_name: Option<String>,
#[serde(default = "none")]
#[serde(skip_serializing_if = "is_none")]
pub ca_sha: Option<String>,
#[serde(default = "none")]
#[serde(skip_serializing_if = "is_none")]
pub host: Option<String>,
#[serde(default = "none")]
#[serde(skip_serializing_if = "is_none")]
pub group: Option<String>,
#[serde(default = "none")]
#[serde(skip_serializing_if = "is_none")]
pub groups: Option<Vec<String>>,
#[serde(default = "none")]
#[serde(skip_serializing_if = "is_none")]
pub cidr: Option<String>,
}
// Default values for serde
// Each `foo()` / `is_foo()` pair backs a `#[serde(default = "...")]` /
// `#[serde(skip_serializing_if = "...")]` attribute pair above: the first
// supplies the default, the second suppresses serialization when the field
// still holds that default. Go duration strings: "12m", "3m", "10m".
fn string_12m() -> String {
"12m".to_string()
}
fn is_string_12m(s: &str) -> bool {
s == "12m"
}
fn string_3m() -> String {
"3m".to_string()
}
fn is_string_3m(s: &str) -> bool {
s == "3m"
}
fn string_10m() -> String {
"10m".to_string()
}
fn is_string_10m(s: &str) -> bool {
s == "10m"
}
/// Serde default: an empty `Vec`.
fn empty_vec<T>() -> Vec<T> {
    vec![]
}
/// Serde `skip_serializing_if` helper: true when the collection is empty.
/// Takes `&[T]` rather than `&Vec<T>` (clippy::ptr_arg); serde's generated
/// call sites pass `&Vec<T>`, which deref-coerces to `&[T]` at the call.
fn is_empty_vec<T>(v: &[T]) -> bool {
    v.is_empty()
}
/// Serde default: an empty `HashMap`.
fn empty_hashmap<A, B>() -> HashMap<A, B> {
    HashMap::new()
}
/// Serde `skip_serializing_if` helper: true when the map has no entries.
fn is_empty_hashmap<A, B>(h: &HashMap<A, B>) -> bool {
    h.is_empty()
}
fn bool_false() -> bool {
false
}
fn is_bool_false(b: &bool) -> bool {
!*b
}
fn bool_true() -> bool {
true
}
fn is_bool_true(b: &bool) -> bool {
*b
}
fn u16_53() -> u16 {
53
}
fn is_u16_53(u: &u16) -> bool {
*u == 53
}
fn u32_10() -> u32 {
10
}
fn is_u32_10(u: &u32) -> bool {
*u == 10
}
fn u16_0() -> u16 {
0
}
fn is_u16_0(u: &u16) -> bool {
*u == 0
}
fn u32_64() -> u32 {
64
}
fn is_u32_64(u: &u32) -> bool {
*u == 64
}
fn string_1s() -> String {
"1s".to_string()
}
fn is_string_1s(s: &str) -> bool {
s == "1s"
}
fn cipher_aes() -> NebulaConfigCipher {
NebulaConfigCipher::Aes
}
fn is_cipher_aes(c: &NebulaConfigCipher) -> bool {
matches!(c, NebulaConfigCipher::Aes)
}
fn u64_500() -> u64 {
500
}
fn is_u64_500(u: &u64) -> bool {
*u == 500
}
fn u64_1300() -> u64 {
1300
}
fn is_u64_1300(u: &u64) -> bool {
*u == 1300
}
fn i64_100() -> i64 {
100
}
fn is_i64_100(i: &i64) -> bool {
*i == 100
}
fn loglevel_info() -> NebulaConfigLoggingLevel {
NebulaConfigLoggingLevel::Info
}
fn is_loglevel_info(l: &NebulaConfigLoggingLevel) -> bool {
matches!(l, NebulaConfigLoggingLevel::Info)
}
fn format_text() -> NebulaConfigLoggingFormat {
NebulaConfigLoggingFormat::Text
}
fn is_format_text(f: &NebulaConfigLoggingFormat) -> bool {
matches!(f, NebulaConfigLoggingFormat::Text)
}
fn timestamp() -> String {
"2006-01-02T15:04:05Z07:00".to_string()
}
fn is_timestamp(s: &str) -> bool {
s == "2006-01-02T15:04:05Z07:00"
}
fn u64_1() -> u64 {
1
}
fn is_u64_1(u: &u64) -> bool {
*u == 1
}
fn string_nebula() -> String {
"nebula".to_string()
}
fn is_string_nebula(s: &str) -> bool {
s == "nebula"
}
fn string_empty() -> String {
String::new()
}
fn is_string_empty(s: &str) -> bool {
s.is_empty()
}
fn protocol_tcp() -> NebulaConfigStatsGraphiteProtocol {
NebulaConfigStatsGraphiteProtocol::Tcp
}
fn is_protocol_tcp(p: &NebulaConfigStatsGraphiteProtocol) -> bool {
matches!(p, NebulaConfigStatsGraphiteProtocol::Tcp)
}
fn none<T>() -> Option<T> {
None
}
fn is_none<T>(o: &Option<T>) -> bool {
o.is_none()
}
fn default_workers() -> usize { 32 }

View File

@ -1,4 +1,4 @@
// trifid-api, an open source reimplementation of the Defined Networking nebula management server.
// trifid-api-old, an open source reimplementation of the Defined Networking nebula management server.
// Copyright (C) 2023 c0repwn3r
//
// This program is free software: you can redistribute it and/or modify

View File

@ -1,4 +1,4 @@
// trifid-api, an open source reimplementation of the Defined Networking nebula management server.
// trifid-api-old, an open source reimplementation of the Defined Networking nebula management server.
// Copyright (C) 2023 c0repwn3r
//
// This program is free software: you can redistribute it and/or modify

132
trifid-api-old/src/error.rs Normal file
View File

@ -0,0 +1,132 @@
// trifid-api-old, an open source reimplementation of the Defined Networking nebula management server.
// Copyright (C) 2023 c0repwn3r
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use actix_web::error::{JsonPayloadError, PayloadError};
use serde::{Deserialize, Serialize};
/// Wire-format error envelope returned by every failing API endpoint:
/// `{ "errors": [ ... ] }`.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct APIErrorsResponse {
pub errors: Vec<APIError>,
}
/// One API error: a stable machine-readable `code`, a human-readable
/// `message`, and an optional `path` pointing at the offending request field
/// (omitted from JSON when absent).
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct APIError {
pub code: String,
pub message: String,
#[serde(skip_serializing_if = "is_none")]
#[serde(default)]
pub path: Option<String>,
}
/// Serde `skip_serializing_if` helper: true when the option holds no value.
fn is_none<T>(o: &Option<T>) -> bool {
    matches!(o, None)
}
/// Maps actix-web JSON extraction failures onto the API's wire-format error
/// object, giving each variant a stable machine-readable `code`.
impl From<&JsonPayloadError> for APIError {
    fn from(value: &JsonPayloadError) -> Self {
        match value {
            JsonPayloadError::OverflowKnownLength { length, limit } => {
                APIError {
                    code: "ERR_PAYLOAD_OVERFLOW_KNOWN_LENGTH".to_string(),
                    message: format!("Payload size is bigger than allowed & content length header set. (length: {}, limit: {})", length, limit),
                    path: None
                }
            },
            JsonPayloadError::Overflow { limit } => {
                APIError {
                    code: "ERR_PAYLOAD_OVERFLOW".to_string(),
                    message: format!("Payload size is bigger than allowed but no content-length header is set. (limit: {})", limit),
                    path: None
                }
            },
            JsonPayloadError::ContentType => {
                APIError {
                    code: "ERR_NOT_JSON".to_string(),
                    message: "Content-Type header not set to expected application/json".to_string(),
                    path: None,
                }
            },
            JsonPayloadError::Deserialize(e) => {
                APIError {
                    code: "ERR_JSON_DESERIALIZE".to_string(),
                    message: format!("Error deserializing JSON: {}", e),
                    path: None,
                }
            },
            JsonPayloadError::Serialize(e) => {
                APIError {
                    code: "ERR_JSON_SERIALIZE".to_string(),
                    message: format!("Error serializing JSON: {}", e),
                    path: None,
                }
            },
            JsonPayloadError::Payload(e) => {
                // Transport-level payload errors delegate to From<&PayloadError>.
                e.into()
            },
            // JsonPayloadError is #[non_exhaustive]; keep a catch-all so new
            // actix variants degrade gracefully instead of breaking the build.
            _ => {
                APIError {
                    code: "ERR_UNKNOWN_ERROR".to_string(),
                    // Fixed typo in user-facing message: "occured" -> "occurred".
                    message: "An unknown error has occurred".to_string(),
                    path: None,
                }
            }
        }
    }
}
/// Maps actix-web transport-level payload failures onto the API's wire-format
/// error object, giving each variant a stable machine-readable `code`.
impl From<&PayloadError> for APIError {
    fn from(value: &PayloadError) -> Self {
        match value {
            PayloadError::Incomplete(e) => APIError {
                code: "ERR_UNEXPECTED_EOF".to_string(),
                // Include the underlying I/O error when one is attached.
                message: match e {
                    None => "Payload reached EOF but was incomplete".to_string(),
                    Some(e) => format!("Payload reached EOF but was incomplete: {}", e),
                },
                path: None,
            },
            PayloadError::EncodingCorrupted => APIError {
                code: "ERR_CORRUPTED_PAYLOAD".to_string(),
                message: "Payload content encoding corrupted".to_string(),
                path: None,
            },
            PayloadError::Overflow => APIError {
                code: "ERR_PAYLOAD_OVERFLOW".to_string(),
                message: "Payload reached size limit".to_string(),
                path: None,
            },
            PayloadError::UnknownLength => APIError {
                code: "ERR_PAYLOAD_UNKNOWN_LENGTH".to_string(),
                message: "Unable to determine payload length".to_string(),
                path: None,
            },
            PayloadError::Http2Payload(e) => APIError {
                code: "ERR_HTTP2_ERROR".to_string(),
                message: format!("HTTP/2 error: {}", e),
                path: None,
            },
            PayloadError::Io(e) => APIError {
                code: "ERR_IO_ERROR".to_string(),
                message: format!("I/O error: {}", e),
                path: None,
            },
            // PayloadError is #[non_exhaustive]; catch-all for future variants.
            _ => APIError {
                code: "ERR_UNKNOWN_ERROR".to_string(),
                // Fixed typo in user-facing message: "occured" -> "occurred".
                message: "An unknown error has occurred".to_string(),
                path: None,
            },
        }
    }
}

View File

@ -1,4 +1,4 @@
// trifid-api, an open source reimplementation of the Defined Networking nebula management server.
// trifid-api-old, an open source reimplementation of the Defined Networking nebula management server.
// Copyright (C) 2023 c0repwn3r
//
// This program is free software: you can redistribute it and/or modify

View File

@ -0,0 +1,49 @@
use std::fmt::{Display, Formatter};
use actix_web::{HttpRequest, HttpResponse, Responder, ResponseError};
use actix_web::body::EitherBody;
use actix_web::web::Json;
use log::error;
use sea_orm::DbErr;
use crate::error::{APIError, APIErrorsResponse};
// Newtype wrapper for successful handler results; forwards to the inner Responder.
pub struct OkResponse<T: Responder>(T);
// Newtype wrapper carrying the API error envelope for failed handler results.
#[derive(Debug)]
pub struct ErrResponse(APIErrorsResponse);
// Success case: delegate straight to the wrapped value's Responder impl.
impl<T: Responder> Responder for OkResponse<T> {
type Body = T::Body;
fn respond_to(self, req: &HttpRequest) -> HttpResponse<Self::Body> {
self.0.respond_to(req)
}
}
// Error case: serialize the APIErrorsResponse envelope as a JSON body.
// NOTE(review): this responds via Json's default status — confirm callers
// set the intended HTTP status elsewhere.
impl Responder for ErrResponse {
type Body = EitherBody<String>;
fn respond_to(self, req: &HttpRequest) -> HttpResponse<Self::Body> {
Json(self.0).respond_to(req)
}
}
impl From<DbErr> for ErrResponse {
fn from(value: DbErr) -> Self {
error!("database error: {}", value);
Self(APIErrorsResponse { errors: vec![
APIError {
code: "ERR_DB_ERROR".to_string(),
message: "There was an error performing the database query. Please try again later.".to_string(),
path: None,
}
] })
}
}
impl Display for ErrResponse {
    /// Human-readable form is the `Debug` rendering of the wrapped error list
    /// (needed so `ErrResponse` can implement `ResponseError`).
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Equivalent to `write!(f, "{:?}", self.0)`, expressed directly.
        std::fmt::Debug::fmt(&self.0, f)
    }
}
// Lets `ErrResponse` be used with actix-web's error plumbing (e.g. as the `Err`
// type of a handler returning `Result<_, ErrResponse>`).
// NOTE(review): this relies entirely on `ResponseError`'s defaults, which emit
// the `Display` output (a `Debug` dump of the inner errors) as a plain-text
// 500 response — not the JSON produced by the `Responder` impl above. Confirm
// that is intended for error paths routed through this trait.
impl ResponseError for ErrResponse {}

View File

@ -0,0 +1,2 @@
// API route modules, grouped by version prefix.
pub mod v1;
pub mod v2;

View File

@ -1,4 +1,4 @@
// trifid-api, an open source reimplementation of the Defined Networking nebula management server.
// trifid-api-old, an open source reimplementation of the Defined Networking nebula management server.
// Copyright (C) 2023 c0repwn3r
//
// This program is free software: you can redistribute it and/or modify

View File

@ -1,4 +1,4 @@
// trifid-api, an open source reimplementation of the Defined Networking nebula management server.
// trifid-api-old, an open source reimplementation of the Defined Networking nebula management server.
// Copyright (C) 2023 c0repwn3r
//
// This program is free software: you can redistribute it and/or modify

View File

@ -1,4 +1,4 @@
// trifid-api, an open source reimplementation of the Defined Networking nebula management server.
// trifid-api-old, an open source reimplementation of the Defined Networking nebula management server.
// Copyright (C) 2023 c0repwn3r
//
// This program is free software: you can redistribute it and/or modify

View File

@ -1,4 +1,4 @@
// trifid-api, an open source reimplementation of the Defined Networking nebula management server.
// trifid-api-old, an open source reimplementation of the Defined Networking nebula management server.
// Copyright (C) 2023 c0repwn3r
//
// This program is free software: you can redistribute it and/or modify

View File

@ -0,0 +1,10 @@
// Handler modules for the v1 API surface; each file hosts the endpoints for
// the route family its name describes.
pub mod auth;
pub mod dnclient;
pub mod hosts;
pub mod networks;
pub mod organization;
pub mod roles;
pub mod signup;
pub mod totp_authenticators;
pub mod trifid;
pub mod verify_totp_authenticators;

View File

@ -1,4 +1,4 @@
// trifid-api, an open source reimplementation of the Defined Networking nebula management server.
// trifid-api-old, an open source reimplementation of the Defined Networking nebula management server.
// Copyright (C) 2023 c0repwn3r
//
// This program is free software: you can redistribute it and/or modify

View File

@ -1,4 +1,4 @@
// trifid-api, an open source reimplementation of the Defined Networking nebula management server.
// trifid-api-old, an open source reimplementation of the Defined Networking nebula management server.
// Copyright (C) 2023 c0repwn3r
//
// This program is free software: you can redistribute it and/or modify
@ -15,7 +15,7 @@
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
//#POST /v1/organization t+parity:none t+type:fabricated t+status:done t+status:want-reveng t+feature:definednetworking
// This is NOT a DN-compatible API. The organization create API has not yet been reverse engineered. This endpoint is a complete fabrication of trifid-api.
// This is NOT a DN-compatible API. The organization create API has not yet been reverse engineered. This endpoint is a complete fabrication of trifid-api-old.
// While this endpoint is considered done, help is wanted with reverse engineering the original API. Major features should not be added or removed unless it is replacing this endpoint with the correct, DN-compatible endpoint.
// This endpoint requires the `definednetworking` extension to be enabled to be used.

View File

@ -1,4 +1,4 @@
// trifid-api, an open source reimplementation of the Defined Networking nebula management server.
// trifid-api-old, an open source reimplementation of the Defined Networking nebula management server.
// Copyright (C) 2023 c0repwn3r
//
// This program is free software: you can redistribute it and/or modify

View File

@ -0,0 +1,151 @@
// trifid-api-old, an open source reimplementation of the Defined Networking nebula management server.
// Copyright (C) 2023 c0repwn3r
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
//#POST /v1/signup t+parity:full t+type:reverse_engineered t+status:done t+features:definednetworking
// This endpoint has full parity with the original API. It has been reverse-engineered from the original API as the original API docs do not have this item.
// This endpoint is considered done. No major features should be added or removed, unless it fixes bugs.
// This endpoint requires the `definednetworking` extension to be enabled to be used.
use crate::config::CONFIG;
use crate::error::{APIError, APIErrorsResponse};
use crate::magic_link::send_magic_link;
use crate::timers::expires_in_seconds;
use crate::tokens::{random_id, random_token};
use crate::AppState;
use actix_web::web::{Data, Json};
use actix_web::{post, HttpResponse};
use log::error;
use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, QueryFilter};
use serde::{Deserialize, Serialize};
use trifid_api_entities::entity::user;
use trifid_api_entities::entity::user::Entity as UserEntity;
/// Request body for `POST /v1/signup`.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SignupRequest {
    // Email to register; must not already belong to an existing user. A magic
    // link is issued for the new account (see `signup_request`).
    pub email: String,
}
/// Response body for `POST /v1/signup`. Both fields are currently empty —
/// success is conveyed by the 200 status alone (see the handler's final
/// `HttpResponse::Ok()`).
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SignupResponse {
    pub data: Option<SignupResponseData>,
    pub metadata: SignupResponseMetadata,
}
/// Placeholder `data` payload for `SignupResponse`; intentionally empty.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SignupResponseData {}
/// Placeholder `metadata` payload for `SignupResponse`; intentionally empty.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SignupResponseMetadata {}
#[post("/v1/signup")]
pub async fn signup_request(data: Data<AppState>, req: Json<SignupRequest>) -> HttpResponse {
let user: Vec<user::Model> = match UserEntity::find()
.filter(user::Column::Email.eq(&req.email))
.all(&data.conn)
.await
{
Ok(r) => r,
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
};
if !user.is_empty() {
return HttpResponse::Unauthorized().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_USER_EXISTS".to_string(),
message: "That user already exists.".to_string(),
path: None,
}],
});
}
let model = user::Model {
id: random_id("user"),
email: req.email.clone(),
};
let id = model.id.clone();
let active_model = model.into_active_model();
match active_model.insert(&data.conn).await {
Ok(_) => (),
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
}
let model = trifid_api_entities::entity::magic_link::Model {
id: random_token("ml"),
user: id,
expires_on: expires_in_seconds(CONFIG.tokens.magic_link_expiry_time_seconds) as i64,
};
match send_magic_link(&model.id) {
Ok(_) => (),
Err(e) => {
error!("error sending magic link: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_ML_ERROR".to_string(),
message:
"There was an error sending the magic link email, please try again later."
.to_string(),
path: None,
}],
});
}
}
let active_model = model.into_active_model();
match active_model.insert(&data.conn).await {
Ok(_) => (),
Err(e) => {
error!("database error: {}", e);
return HttpResponse::InternalServerError().json(APIErrorsResponse {
errors: vec![APIError {
code: "ERR_DB_ERROR".to_string(),
message:
"There was an error with the database request, please try again later."
.to_string(),
path: None,
}],
});
}
}
HttpResponse::Ok().json(SignupResponse {
data: None,
metadata: SignupResponseMetadata {},
})
}