code cleanup

Author: c0repwn3r 2023-05-15 14:51:27 -04:00
parent 3a2319d0c0
commit a2a94ad801
Signed by: core
GPG Key ID: FDBF740DADDCEECF
11 changed files with 308 additions and 243 deletions

View File

@@ -2,7 +2,6 @@
 use base64_serde::base64_serde_type;
 use serde::{Deserialize, Serialize};
-use serde_with::serde_as;
 
 /// The version 1 `DNClient` API endpoint
 pub const ENDPOINT_V1: &str = "/v1/dnclient";
@@ -201,9 +200,9 @@ pub struct APIError {
 pub type APIErrors = Vec<APIError>;
 
 mod b64_as {
-    use serde::{Serialize, Deserialize};
-    use serde::{Deserializer, Serializer};
     use base64::Engine;
+    use serde::{Deserialize, Serialize};
+    use serde::{Deserializer, Serializer};
 
     pub fn serialize<S: Serializer>(v: &Vec<u8>, s: S) -> Result<S::Ok, S::Error> {
         let base64 = base64::engine::general_purpose::STANDARD.encode(v);
@@ -212,13 +211,13 @@ mod b64_as {
     pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<u8>, D::Error> {
         let base64 = <Option<String>>::deserialize(d)?;
-        match base64 {
-            Some(v) => {
-                base64::engine::general_purpose::STANDARD.decode(v.as_bytes())
-                    .map(|v| v)
-                    .map_err(|e| serde::de::Error::custom(e))
-            },
-            None => Ok(vec![]),
-        }
+        base64.map_or_else(
+            || Ok(vec![]),
+            |v| {
+                base64::engine::general_purpose::STANDARD
+                    .decode(v.as_bytes())
+                    .map_err(serde::de::Error::custom)
+            },
+        )
     }
 }
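
For reference, a minimal standalone sketch of how a serde field codec like the `b64_as` module above is typically wired up through `#[serde(with = "...")]`, assuming serde (with derive), serde_json and base64 0.21; the `SignedBlob` struct and its field name are illustrative, not part of dnapi-rs:

use serde::{Deserialize, Serialize};

// Compact re-statement of a base64 field codec, matching the shape of `b64_as` above.
mod b64 {
    use base64::Engine;
    use serde::{Deserialize, Deserializer, Serializer};

    pub fn serialize<S: Serializer>(v: &Vec<u8>, s: S) -> Result<S::Ok, S::Error> {
        s.serialize_str(&base64::engine::general_purpose::STANDARD.encode(v))
    }

    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<u8>, D::Error> {
        // A missing/None value decodes to an empty buffer, like the module above.
        <Option<String>>::deserialize(d)?.map_or_else(
            || Ok(vec![]),
            |v| {
                base64::engine::general_purpose::STANDARD
                    .decode(v.as_bytes())
                    .map_err(serde::de::Error::custom)
            },
        )
    }
}

// Illustrative struct; the real API types live in dnapi-rs.
#[derive(Serialize, Deserialize, Debug)]
struct SignedBlob {
    #[serde(with = "b64")]
    payload: Vec<u8>,
}

fn main() -> Result<(), serde_json::Error> {
    let blob = SignedBlob { payload: b"hello".to_vec() };
    let json = serde_json::to_string(&blob)?; // {"payload":"aGVsbG8="}
    let back: SignedBlob = serde_json::from_str(&json)?;
    assert_eq!(back.payload, b"hello");
    Ok(())
}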

View File

@@ -6,7 +6,7 @@ use std::thread;
 use crate::apiworker::{apiworker_main, APIWorkerMessage};
 use crate::config::load_config;
-use crate::nebulaworker::{nebulaworker_main, NebulaWorkerMessage};
+use crate::nebulaworker::NebulaWorkerMessage;
 use crate::socketworker::{socketworker_main, SocketWorkerMessage};
 use crate::timerworker::{timer_main, TimerWorkerMessage};
 use crate::util::check_server_url;
@@ -28,7 +28,7 @@ pub fn daemon_main(name: String, server: String) {
     let (tx_api, rx_api) = mpsc::channel::<APIWorkerMessage>();
     let (tx_socket, rx_socket) = mpsc::channel::<SocketWorkerMessage>();
-    let (tx_nebula, rx_nebula) = mpsc::channel::<NebulaWorkerMessage>();
+    let (tx_nebula, _rx_nebula) = mpsc::channel::<NebulaWorkerMessage>();
     let (tx_timer, rx_timer) = mpsc::channel::<TimerWorkerMessage>();
 
     let transmitter = ThreadMessageSender {
@@ -108,9 +108,9 @@ pub fn daemon_main(name: String, server: String) {
     });
 
     info!("Starting Nebula thread...");
-    let config_nebula = config.clone();
-    let transmitter_nebula = transmitter.clone();
-    let name_nebula = name.clone();
+    let _config_nebula = config.clone();
+    let _transmitter_nebula = transmitter.clone();
+    let _name_nebula = name.clone();
     //let nebula_thread = thread::spawn(move || {
    //    nebulaworker_main(config_nebula, name_nebula, transmitter_nebula, rx_nebula);
     //});
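
For context, the daemon's worker threads communicate over std `mpsc` channels like the ones set up above; a minimal sketch of that pattern under those assumptions (the `WorkerMessage` enum and the worker body are illustrative, not the real tfclient worker types):

use std::sync::mpsc;
use std::thread;

// Illustrative message type; the real daemon uses APIWorkerMessage, NebulaWorkerMessage, etc.
enum WorkerMessage {
    DoWork(String),
    Shutdown,
}

fn worker_main(rx: mpsc::Receiver<WorkerMessage>) {
    // The loop ends on Shutdown or when every sender has been dropped.
    for msg in rx {
        match msg {
            WorkerMessage::DoWork(task) => println!("working on {task}"),
            WorkerMessage::Shutdown => break,
        }
    }
}

fn main() {
    // An unused receiver can be bound as `_rx` (as done for the Nebula channel above)
    // to keep the sender alive without spawning that worker yet.
    let (tx, rx) = mpsc::channel::<WorkerMessage>();

    let handle = thread::spawn(move || worker_main(rx));

    tx.send(WorkerMessage::DoWork("sync config".to_string())).unwrap();
    tx.send(WorkerMessage::Shutdown).unwrap();
    handle.join().unwrap();
}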

View File

@@ -6,19 +6,26 @@ use std::time::{Duration, SystemTime};
 use actix_web::web::Data;
 
-use crate::config::{NebulaConfig, NebulaConfigCipher, NebulaConfigLighthouse, NebulaConfigListen, NebulaConfigPki, NebulaConfigPunchy, NebulaConfigRelay, NebulaConfigTun, CONFIG, NebulaConfigFirewall, NebulaConfigFirewallRule};
-use crate::crypto::{decrypt_with_nonce, encrypt_with_nonce, get_cipher_from_config};
+use crate::config::{
+    NebulaConfig, NebulaConfigCipher, NebulaConfigFirewall, NebulaConfigFirewallRule,
+    NebulaConfigLighthouse, NebulaConfigListen, NebulaConfigPki, NebulaConfigPunchy,
+    NebulaConfigRelay, NebulaConfigTun, CONFIG,
+};
+use crate::crypto::{decrypt_with_nonce, get_cipher_from_config};
+use crate::keystore::keystore_init;
 use crate::AppState;
 use ed25519_dalek::SigningKey;
 use ipnet::Ipv4Net;
 use log::{debug, error};
 use sea_orm::{ColumnTrait, EntityTrait, QueryFilter};
-use trifid_api_entities::entity::{firewall_rule, host, host_config_override, host_static_address, network, organization, signing_ca};
+use trifid_api_entities::entity::{
+    firewall_rule, host, host_config_override, host_static_address, network, organization,
+    signing_ca,
+};
 use trifid_pki::cert::{
     deserialize_ed25519_private, deserialize_nebula_certificate_from_pem, NebulaCertificate,
     NebulaCertificateDetails,
 };
-use crate::keystore::keystore_init;
 
 pub struct CodegenRequiredInfo {
     pub host: host::Model,
@@ -32,14 +39,17 @@ pub struct CodegenRequiredInfo {
     pub relay_ips: Vec<Ipv4Addr>,
     pub lighthouse_ips: Vec<Ipv4Addr>,
     pub blocked_hosts: Vec<String>,
-    pub firewall_rules: Vec<NebulaConfigFirewallRule>
+    pub firewall_rules: Vec<NebulaConfigFirewallRule>,
 }
 
 pub async fn generate_config(
-    data: &Data<AppState>,
+    _data: &Data<AppState>,
     info: &CodegenRequiredInfo,
 ) -> Result<(NebulaConfig, NebulaCertificate), Box<dyn Error>> {
-    debug!("chk: deserialize CA cert {:x?}", hex::decode(&info.ca.cert)?);
+    debug!(
+        "chk: deserialize CA cert {:x?}",
+        hex::decode(&info.ca.cert)?
+    );
     // decode the CA data
     let ca_cert = deserialize_nebula_certificate_from_pem(&hex::decode(&info.ca.cert)?)?;
@@ -53,9 +63,7 @@ pub async fn generate_config(
             )
             .unwrap()],
             subnets: vec![],
-            groups: vec![
-                format!("role:{}", info.host.role)
-            ],
+            groups: vec![format!("role:{}", info.host.role)],
             not_before: SystemTime::now(),
             not_after: SystemTime::now() + Duration::from_secs(CONFIG.crypto.certs_expiry_time),
             public_key: info.dh_pubkey.clone().try_into().unwrap(),
@@ -172,8 +180,7 @@ pub async fn generate_config(
         firewall: Some(NebulaConfigFirewall {
             conntrack: None,
             inbound: Some(info.firewall_rules.clone()),
-            outbound: Some(vec![
-                NebulaConfigFirewallRule {
+            outbound: Some(vec![NebulaConfigFirewallRule {
                 port: Some("any".to_string()),
                 proto: Some("any".to_string()),
                 ca_name: None,
@@ -182,8 +189,7 @@ pub async fn generate_config(
                 group: None,
                 groups: None,
                 cidr: None,
-            }
-            ]),
+            }]),
         }),
         routines: 0,
         stats: None,
@@ -310,19 +316,35 @@ pub async fn collect_info<'a>(
     let best_ca = best_ca.unwrap();
 
     // pull our role's firewall rules
-    let firewall_rules = trifid_api_entities::entity::firewall_rule::Entity::find().filter(firewall_rule::Column::Role.eq(&host.id)).all(&db.conn).await?;
-    let firewall_rules = firewall_rules.iter().map(|u| {
-        NebulaConfigFirewallRule {
-            port: Some(if u.port_range_from == 0 && u.port_range_to == 65535 { "any".to_string() } else { format!("{}-{}", u.port_range_from, u.port_range_to) }),
+    let firewall_rules = trifid_api_entities::entity::firewall_rule::Entity::find()
+        .filter(firewall_rule::Column::Role.eq(&host.id))
+        .all(&db.conn)
+        .await?;
+    let firewall_rules = firewall_rules
+        .iter()
+        .map(|u| NebulaConfigFirewallRule {
+            port: Some(if u.port_range_from == 0 && u.port_range_to == 65535 {
+                "any".to_string()
+            } else {
+                format!("{}-{}", u.port_range_from, u.port_range_to)
+            }),
             proto: Some(u.protocol.clone()),
             ca_name: None,
             ca_sha: None,
-            host: if u.allowed_role_id.is_some() { None } else { Some("any".to_string()) },
-            groups: if u.allowed_role_id.is_some() { Some(vec![format!("role:{}", u.allowed_role_id.clone().unwrap())])} else { None },
+            host: if u.allowed_role_id.is_some() {
+                None
+            } else {
+                Some("any".to_string())
+            },
+            groups: if u.allowed_role_id.is_some() {
+                Some(vec![format!("role:{}", u.allowed_role_id.clone().unwrap())])
+            } else {
+                None
+            },
             group: None,
             cidr: None,
-        }
-    }).collect();
+        })
+        .collect();
 
     Ok(CodegenRequiredInfo {
         host,
@@ -336,6 +358,6 @@ pub async fn collect_info<'a>(
         relay_ips: relays,
         lighthouse_ips: lighthouses,
         blocked_hosts,
-        firewall_rules
+        firewall_rules,
     })
 }
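
For context, a self-contained sketch of the firewall-rule mapping that collect_info performs above, using simplified stand-ins for the database row and the Nebula rule struct (the field set is trimmed; these are not the real trifid-api types):

// Simplified stand-ins for the database row and the Nebula firewall rule struct.
struct FirewallRuleRow {
    port_range_from: u16,
    port_range_to: u16,
    protocol: String,
    allowed_role_id: Option<String>,
}

#[derive(Debug)]
struct FirewallRule {
    port: Option<String>,
    proto: Option<String>,
    host: Option<String>,
    groups: Option<Vec<String>>,
}

fn to_nebula_rule(u: &FirewallRuleRow) -> FirewallRule {
    FirewallRule {
        // The full 0-65535 range collapses to "any"; otherwise keep the explicit range.
        port: Some(if u.port_range_from == 0 && u.port_range_to == 65535 {
            "any".to_string()
        } else {
            format!("{}-{}", u.port_range_from, u.port_range_to)
        }),
        proto: Some(u.protocol.clone()),
        // A rule scoped to a role matches on the role group; otherwise it allows any host.
        host: if u.allowed_role_id.is_some() { None } else { Some("any".to_string()) },
        groups: u.allowed_role_id.as_ref().map(|r| vec![format!("role:{r}")]),
    }
}

fn main() {
    let row = FirewallRuleRow {
        port_range_from: 0,
        port_range_to: 65535,
        protocol: "tcp".to_string(),
        allowed_role_id: Some("admins".to_string()),
    };
    println!("{:?}", to_nebula_rule(&row));
}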

View File

@@ -24,7 +24,7 @@ use std::fs;
 use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
 use std::path::PathBuf;
 use std::time::SystemTime;
-use derivative::Derivative;
 use trifid_pki::cert::deserialize_nebula_certificate_from_pem;
 
 pub static CONFIG: Lazy<TrifidConfig> = Lazy::new(|| {
@@ -194,20 +194,24 @@ pub struct NebulaConfigPki {
 impl PartialEq for NebulaConfigPki {
     fn eq(&self, other: &Self) -> bool {
-        if self.ca != other.ca { return false; }
-        if self.key != other.key { return false; }
-        if self.blocklist != other.blocklist { return false; }
-        if self.disconnect_invalid != other.disconnect_invalid { return false; }
+        if self.ca != other.ca {
+            return false;
+        }
+        if self.key != other.key {
+            return false;
+        }
+        if self.blocklist != other.blocklist {
+            return false;
+        }
+        if self.disconnect_invalid != other.disconnect_invalid {
+            return false;
+        }
         // cert logic
         // if the cert is invalid, fallback to just checking equality
         match is_cert_equal_ignoring_expiry(&self.cert, &other.cert) {
-            Ok(res) => {
-                res
-            },
-            Err(_) => {
-                self.cert == other.cert
-            }
+            Ok(res) => res,
+            Err(_) => self.cert == other.cert,
         }
     }
 }
@@ -220,18 +224,38 @@ fn is_cert_equal_ignoring_expiry(me: &str, other: &str) -> Result<bool, Box<dyn
     let cert_a = deserialize_nebula_certificate_from_pem(me.as_bytes())?;
     let cert_b = deserialize_nebula_certificate_from_pem(other.as_bytes())?;
 
-    if cert_a.details.is_ca != cert_b.details.is_ca { return Ok(false); }
-    if cert_a.details.name != cert_b.details.name { return Ok(false); }
-    if cert_a.details.public_key != cert_b.details.public_key { return Ok(false); }
-    if cert_a.details.groups != cert_b.details.groups { return Ok(false); }
-    if cert_a.details.ips != cert_b.details.ips { return Ok(false); }
-    if cert_a.details.issuer != cert_b.details.issuer { return Ok(false); }
-    if cert_a.details.subnets != cert_b.details.subnets { return Ok(false); }
+    if cert_a.details.is_ca != cert_b.details.is_ca {
+        return Ok(false);
+    }
+    if cert_a.details.name != cert_b.details.name {
+        return Ok(false);
+    }
+    if cert_a.details.public_key != cert_b.details.public_key {
+        return Ok(false);
+    }
+    if cert_a.details.groups != cert_b.details.groups {
+        return Ok(false);
+    }
+    if cert_a.details.ips != cert_b.details.ips {
+        return Ok(false);
+    }
+    if cert_a.details.issuer != cert_b.details.issuer {
+        return Ok(false);
+    }
+    if cert_a.details.subnets != cert_b.details.subnets {
+        return Ok(false);
+    }
 
     if cert_a.expired(SystemTime::now()) || cert_b.expired(SystemTime::now()) {
-        if cert_a.details.not_before != cert_b.details.not_before { return Ok(false); }
-        if cert_a.details.not_after != cert_b.details.not_after { return Ok(false); }
-        if cert_a.signature != cert_b.signature { return Ok(false); }
+        if cert_a.details.not_before != cert_b.details.not_before {
+            return Ok(false);
+        }
+        if cert_a.details.not_after != cert_b.details.not_after {
+            return Ok(false);
+        }
+        if cert_a.signature != cert_b.signature {
+            return Ok(false);
+        }
     }
 
     Ok(true)
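
For context, a simplified sketch of the "equal ignoring expiry" comparison used above, with cut-down stand-in types; only a couple of identity fields are kept here, whereas the real implementation also compares public key, groups, IPs, issuer and subnets:

use std::time::{Duration, SystemTime};

// Simplified stand-ins for the trifid_pki certificate types.
struct CertDetails {
    name: String,
    is_ca: bool,
    not_before: SystemTime,
    not_after: SystemTime,
}

struct Cert {
    details: CertDetails,
    signature: Vec<u8>,
}

impl Cert {
    fn expired(&self, now: SystemTime) -> bool {
        now < self.details.not_before || now > self.details.not_after
    }
}

// Identity fields always have to match; the validity window and signature are only
// compared once one of the certificates has actually expired.
fn cert_equal_ignoring_expiry(a: &Cert, b: &Cert) -> bool {
    if a.details.is_ca != b.details.is_ca {
        return false;
    }
    if a.details.name != b.details.name {
        return false;
    }
    let now = SystemTime::now();
    if a.expired(now) || b.expired(now) {
        if a.details.not_before != b.details.not_before
            || a.details.not_after != b.details.not_after
            || a.signature != b.signature
        {
            return false;
        }
    }
    true
}

fn main() {
    let now = SystemTime::now();
    let mk = |not_after: SystemTime, sig: u8| Cert {
        details: CertDetails {
            name: "host1".to_string(),
            is_ca: false,
            not_before: now,
            not_after,
        },
        signature: vec![sig],
    };
    // Both certs are currently valid, so differing expiry dates and signatures are ignored.
    let a = mk(now + Duration::from_secs(3600), 1);
    let b = mk(now + Duration::from_secs(7200), 2);
    assert!(cert_equal_ignoring_expiry(&a, &b));
}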

View File

@@ -1,9 +1,9 @@
 use crate::config::{NebulaConfig, CONFIG};
 use ed25519_dalek::{SigningKey, VerifyingKey};
+use log::debug;
 use serde::{Deserialize, Serialize};
 use std::error::Error;
 use std::fs;
-use log::debug;
 use trifid_pki::cert::NebulaCertificate;
 use trifid_pki::x25519_dalek::PublicKey;
@@ -22,9 +22,7 @@ pub fn keystore_init() -> Result<Keystore, Box<dyn Error>> {
     ks_fp.push("tfks.toml");
 
     if !ks_fp.exists() {
-        return Ok(Keystore {
-            hosts: vec![]
-        })
+        return Ok(Keystore { hosts: vec![] });
     }
 
     let f_str = fs::read_to_string(ks_fp)?;

View File

@@ -26,7 +26,7 @@ use std::time::Duration;
 use crate::config::CONFIG;
 use crate::error::{APIError, APIErrorsResponse};
-use crate::keystore::{keystore_init, Keystore};
+use crate::keystore::keystore_init;
 use crate::tokens::random_id_no_id;
 use trifid_api_migration::{Migrator, MigratorTrait};
@@ -43,7 +43,7 @@ pub mod timers;
 pub mod tokens;
 
 pub struct AppState {
-    pub conn: DatabaseConnection
+    pub conn: DatabaseConnection,
 }
 
 #[actix_web::main]
@@ -52,7 +52,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     info!("Creating keystore...");
-    let keystore = keystore_init()?;
+    let _keystore = keystore_init()?;
 
     info!("Connecting to database at {}...", CONFIG.database.url);
@@ -76,7 +76,10 @@ async fn main() -> Result<(), Box<dyn Error>> {
     HttpServer::new(move || {
         App::new()
             .app_data(data.clone())
-            .app_data(JsonConfig::default().content_type_required(false).error_handler(|err, _req| {
+            .app_data(
+                JsonConfig::default()
+                    .content_type_required(false)
+                    .error_handler(|err, _req| {
                         let api_error: APIError = (&err).into();
                         actix_web::error::InternalError::from_response(
                             err,
@@ -85,7 +88,8 @@ async fn main() -> Result<(), Box<dyn Error>> {
                             }),
                         )
                         .into()
-            }))
+                    }),
+            )
             .wrap(RequestIdentifier::with_generator(random_id_no_id))
             .service(routes::v1::auth::magic_link::magic_link_request)
             .service(routes::v1::signup::signup_request)

View File

@@ -1,28 +1,34 @@
-use actix_web::{HttpRequest, HttpResponse, post};
-use actix_web::web::{Data, Json};
-use base64::Engine;
-use dnapi_rs::message::{APIError, APIErrors, CheckForUpdateResponse, CheckForUpdateResponseWrapper, DoUpdateRequest, DoUpdateResponse, EnrollResponse, RequestV1, RequestWrapper, SignedResponse, SignedResponseWrapper};
-use ed25519_dalek::{Signature, Signer, SigningKey, Verifier, VerifyingKey};
-use log::{debug, error};
-use trifid_pki::x25519_dalek::PublicKey;
-use crate::AppState;
 use crate::codegen::{collect_info, generate_config};
 use crate::keystore::{keystore_flush, keystore_init, KSCert, KSClientKey, KSConfig, KSSigningKey};
-use std::clone::Clone;
+use crate::AppState;
+use actix_web::web::{Data, Json};
+use actix_web::{post, HttpRequest, HttpResponse};
+use base64::Engine;
 use dnapi_rs::credentials::ed25519_public_keys_to_pem;
+use dnapi_rs::message::{
+    APIError, CheckForUpdateResponse, CheckForUpdateResponseWrapper, DoUpdateRequest,
+    DoUpdateResponse, EnrollResponse, RequestV1, RequestWrapper, SignedResponse,
+    SignedResponseWrapper,
+};
+use ed25519_dalek::{Signature, Signer, SigningKey, Verifier, VerifyingKey};
+use log::{debug, error};
 use rand::rngs::OsRng;
+use std::clone::Clone;
 use trifid_pki::cert::{deserialize_ed25519_public, deserialize_x25519_public};
+use trifid_pki::x25519_dalek::PublicKey;
 
 #[post("/v1/dnclient")]
-pub async fn dnclient(req: Json<RequestV1>, req_info: HttpRequest, db: Data<AppState>) -> HttpResponse {
+pub async fn dnclient(
+    req: Json<RequestV1>,
+    _req_info: HttpRequest,
+    db: Data<AppState>,
+) -> HttpResponse {
     if req.version != 1 {
-        return HttpResponse::BadRequest().json(vec![
-            APIError {
+        return HttpResponse::BadRequest().json(vec![APIError {
             code: "ERR_UNSUPPORTED_VERSION".to_string(),
             message: "This server does not support the requested DNClient version.".to_string(),
             path: None,
-            }
-        ])
+        }]);
     }
 
     // verify the signature
@@ -49,44 +55,57 @@ pub async fn dnclient(req: Json<RequestV1>, req_info: HttpRequest, db: Data<AppS
     let host_in_ks = match host_in_ks {
         Some(host) => host,
         None => {
-            return HttpResponse::Unauthorized().json(vec![
-                APIError {
+            return HttpResponse::Unauthorized().json(vec![APIError {
                 code: "ERR_HOST_ERROR".to_string(),
-                message: "The host does not exist or you do not have permission to access it.".to_string(),
+                message: "The host does not exist or you do not have permission to access it."
+                    .to_string(),
                 path: None,
-                }
-            ])
+            }])
         }
     };
 
-    let client_keys = host_in_ks.client_keys.iter().find(|u| u.id == req.counter as u64).unwrap();
-    let client_keys_2 = host_in_ks.client_keys.iter().find(|u| u.id == host_in_ks.current_client_key).unwrap();
+    let client_keys = host_in_ks
+        .client_keys
+        .iter()
+        .find(|u| u.id == req.counter as u64)
+        .unwrap();
+    let client_keys_2 = host_in_ks
+        .client_keys
+        .iter()
+        .find(|u| u.id == host_in_ks.current_client_key)
+        .unwrap();
 
     let signature = match Signature::from_slice(&req.signature) {
         Ok(sig) => sig,
         Err(e) => {
             error!("signature load error: {}", e);
             // Be intentionally vague as the signature is invalid.
-            return HttpResponse::Unauthorized().json(vec![
-                APIError {
+            return HttpResponse::Unauthorized().json(vec![APIError {
                 code: "ERR_HOST_ERROR".to_string(),
-                message: "The host does not exist or you do not have permission to access it.".to_string(),
+                message: "The host does not exist or you do not have permission to access it."
+                    .to_string(),
                 path: None,
-                }
-            ])
+            }]);
         }
     };
 
-    if client_keys.ed_pub.verify(req.message.as_bytes(), &signature).is_err() && client_keys_2.ed_pub.verify(req.message.as_bytes(), &signature).is_err() {
+    if client_keys
+        .ed_pub
+        .verify(req.message.as_bytes(), &signature)
+        .is_err()
+        && client_keys_2
+            .ed_pub
+            .verify(req.message.as_bytes(), &signature)
+            .is_err()
+    {
         // Be intentionally vague as the message is invalid.
         debug!("! invalid signature");
-        return HttpResponse::Unauthorized().json(vec![
-            APIError {
+        return HttpResponse::Unauthorized().json(vec![APIError {
             code: "ERR_HOST_ERROR".to_string(),
-            message: "The host does not exist or you do not have permission to access it.".to_string(),
+            message: "The host does not exist or you do not have permission to access it."
+                .to_string(),
             path: None,
-            }
-        ])
+        }]);
     }
 
     // Sig OK
@@ -98,13 +117,11 @@ pub async fn dnclient(req: Json<RequestV1>, req_info: HttpRequest, db: Data<AppS
         Ok(msg) => msg,
         Err(e) => {
             error!("b64 decode error: {}", e);
-            return HttpResponse::BadRequest().json(vec![
-                APIError {
+            return HttpResponse::BadRequest().json(vec![APIError {
                 code: "ERR_INVALID_MESSAGE".to_string(),
                 message: "Error while decoding message from base64.".to_string(),
-                path: None
-                }
-            ])
+                path: None,
+            }]);
         }
     };
@@ -116,13 +133,11 @@ pub async fn dnclient(req: Json<RequestV1>, req_info: HttpRequest, db: Data<AppS
         Ok(msg) => msg,
         Err(e) => {
            error!("msg decode error: {}", e);
-            return HttpResponse::BadRequest().json(vec![
-                APIError {
+            return HttpResponse::BadRequest().json(vec![APIError {
                 code: "ERR_INVALID_MESSAGE".to_string(),
                 message: "Error while decoding message from JSON.".to_string(),
-                path: None
-                }
-            ])
+                path: None,
+            }]);
         }
     };
@@ -142,7 +157,7 @@ pub async fn dnclient(req: Json<RequestV1>, req_info: HttpRequest, db: Data<AppS
     };
 
     // codegen: handoff to dedicated codegen module, we have collected all information
-    let (cfg, cert) = match generate_config(&db, &info).await {
+    let (cfg, _cert) = match generate_config(&db, &info).await {
         Ok(cfg) => cfg,
         Err(e) => {
             error!("error generating configuration: {}", e);
@@ -156,39 +171,41 @@ pub async fn dnclient(req: Json<RequestV1>, req_info: HttpRequest, db: Data<AppS
         }
     };
 
-    let current_cfg = host_in_ks.config.iter().find(|u| u.id == host_in_ks.current_config);
+    let current_cfg = host_in_ks
+        .config
+        .iter()
+        .find(|u| u.id == host_in_ks.current_config);
-    let config_update_avail = current_cfg.map(|u| u.config.clone()) != Some(cfg.clone()) || req.counter < host_in_ks.current_config as u32;
+    let config_update_avail = current_cfg.map(|u| u.config.clone()) != Some(cfg.clone())
+        || req.counter < host_in_ks.current_config as u32;
 
     return match req_w.message_type.as_str() {
         "CheckForUpdate" => {
             // value ignored here
             HttpResponse::Ok().json(CheckForUpdateResponseWrapper {
-                data: CheckForUpdateResponse { update_available: config_update_avail },
-            })
+                data: CheckForUpdateResponse {
+                    update_available: config_update_avail,
                 },
+            })
+        }
         "DoUpdate" => {
             if !config_update_avail {
-                return HttpResponse::BadRequest().json(vec![
-                    APIError {
+                return HttpResponse::BadRequest().json(vec![APIError {
                     code: "ERR_NO_UPDATE_AVAILABLE".to_string(),
                     message: "There is no new configuration available.".to_string(),
-                    path: None
-                    }
-                ])
+                    path: None,
+                }]);
             }
 
             let do_update_req: DoUpdateRequest = match serde_json::from_slice(&req_w.value) {
                 Ok(req) => req,
                 Err(e) => {
                     error!("DoUpdate deserialization error: {}", e);
-                    return HttpResponse::BadRequest().json(vec![
-                        APIError {
+                    return HttpResponse::BadRequest().json(vec![APIError {
                         code: "ERR_REQ_DESERIALIZE_ERROR".to_string(),
                         message: "There was an error deserializing the update request.".to_string(),
-                        path: None
-                        }
-                    ])
+                        path: None,
+                    }]);
                 }
             };
@@ -196,13 +213,11 @@ pub async fn dnclient(req: Json<RequestV1>, req_info: HttpRequest, db: Data<AppS
                 Ok(pk) => pk,
                 Err(e) => {
                     error!("PEM decode error: {}", e);
-                    return HttpResponse::BadRequest().json(vec![
-                        APIError {
+                    return HttpResponse::BadRequest().json(vec![APIError {
                         code: "ERR_BAD_PK".to_string(),
                         message: "There was an error deserializing the DHPK.".to_string(),
-                        path: None
-                        }
-                    ])
+                        path: None,
+                    }]);
                 }
             };
@@ -227,7 +242,8 @@ pub async fn dnclient(req: Json<RequestV1>, req_info: HttpRequest, db: Data<AppS
                     return HttpResponse::InternalServerError().json(EnrollResponse::Error {
                         errors: vec![APIError {
                             code: "ERR_CFG_GENERATION_ERROR".to_string(),
-                            message: "There was an error generating the host configuration.".to_string(),
+                            message: "There was an error generating the host configuration."
+                                .to_string(),
                             path: None,
                         }],
                     });
@@ -258,13 +274,11 @@ pub async fn dnclient(req: Json<RequestV1>, req_info: HttpRequest, db: Data<AppS
                 Ok(r) => r,
                 Err(e) => {
                     error!("DH pubkey deserialize error: {}", e);
-                    return HttpResponse::BadRequest().json(vec![
-                        APIError {
+                    return HttpResponse::BadRequest().json(vec![APIError {
                         code: "ERR_DH_INVALID".to_string(),
                         message: "There was an error deserializing the DH pubkey.".to_string(),
-                        path: None
-                        }
-                    ])
+                        path: None,
+                    }]);
                 }
             };
@@ -272,17 +286,15 @@ pub async fn dnclient(req: Json<RequestV1>, req_info: HttpRequest, db: Data<AppS
                 Ok(r) => r,
                 Err(e) => {
                     error!("ED pubkey deserialize error: {}", e);
-                    return HttpResponse::BadRequest().json(vec![
-                        APIError {
+                    return HttpResponse::BadRequest().json(vec![APIError {
                        code: "ERR_ED_INVALID".to_string(),
                         message: "There was an error deserializing the ED pubkey.".to_string(),
-                        path: None
-                        }
-                    ])
+                        path: None,
+                    }]);
                 }
             };
 
-            let dh_pubkey_typed: [u8; 32] = dh_pubkey.clone().try_into().unwrap();
+            let dh_pubkey_typed: [u8; 32] = dh_pubkey.try_into().unwrap();
 
             ks.client_keys.push(KSClientKey {
                 id: ks.current_client_key + 1,
@@ -297,32 +309,33 @@ pub async fn dnclient(req: Json<RequestV1>, req_info: HttpRequest, db: Data<AppS
                 Ok(_) => (),
                 Err(e) => {
                     error!("keystore save error: {}", e);
-                    return HttpResponse::InternalServerError().json(vec![
-                        APIError {
+                    return HttpResponse::InternalServerError().json(vec![APIError {
                         code: "ERR_SAVE_ERR".to_string(),
                         message: "There was an error saving the keystore.".to_string(),
-                        path: None
-                        }
-                    ])
+                        path: None,
+                    }]);
                 }
             }
 
            // get the signing key that the client last trusted based on its current config version
            // this is their current counter
-            let signing_key = host_in_ks.signing_keys.iter().find(|u| u.id == (req.counter as u64) - 1).unwrap();
+            let signing_key = host_in_ks
+                .signing_keys
+                .iter()
+                .find(|u| u.id == (req.counter as u64) - 1)
+                .unwrap();
 
             let msg = DoUpdateResponse {
                 config: match serde_yaml::to_string(&cfg) {
                     Ok(c_str) => c_str.as_bytes().to_vec(),
                     Err(e) => {
                         error!("config serialization error: {}", e);
-                        return HttpResponse::InternalServerError().json(vec![
-                            APIError {
+                        return HttpResponse::InternalServerError().json(vec![APIError {
                            code: "ERR_CFG_SERIALIZATION".to_string(),
-                            message: "There was an error serializing the new configuration.".to_string(),
-                            path: None
-                            }
-                        ])
+                            message: "There was an error serializing the new configuration."
+                                .to_string(),
+                            path: None,
+                        }]);
                     }
                 },
                 counter: host_in_ks.current_config as u32,
@@ -334,13 +347,12 @@ pub async fn dnclient(req: Json<RequestV1>, req_info: HttpRequest, db: Data<AppS
                 Ok(b) => b,
                 Err(e) => {
                     error!("response serialization error: {}", e);
-                    return HttpResponse::InternalServerError().json(vec![
-                        APIError {
+                    return HttpResponse::InternalServerError().json(vec![APIError {
                        code: "ERR_CFG_SERIALIZATION".to_string(),
-                        message: "There was an error serializing the new configuration.".to_string(),
-                        path: None
-                        }
-                    ])
+                        message: "There was an error serializing the new configuration."
+                            .to_string(),
+                        path: None,
+                    }]);
                 }
             };
@@ -350,20 +362,14 @@ pub async fn dnclient(req: Json<RequestV1>, req_info: HttpRequest, db: Data<AppS
                 signature: signing_key.key.sign(&msg_bytes).to_vec(),
             };
 
-            let resp_w = SignedResponseWrapper {
-                data: resp,
-            };
+            let resp_w = SignedResponseWrapper { data: resp };
 
             HttpResponse::Ok().json(resp_w)
-        },
-        _ => {
-            HttpResponse::BadRequest().json(vec![
-                APIError {
+        }
+        _ => HttpResponse::BadRequest().json(vec![APIError {
             code: "ERR_UNSUPPORTED_METHOD".to_string(),
             message: "This server does not support that method yet.".to_string(),
-            path: None
-            }
-        ])
-        }
-    }
+            path: None,
+        }]),
+    };
 }
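
For context, a minimal sketch of the sign-and-verify flow the handler above relies on, assuming ed25519-dalek 2.x with the rand_core feature and a compatible OsRng; the keys and the message literal are illustrative:

use ed25519_dalek::{Signature, Signer, SigningKey, Verifier, VerifyingKey};
use rand::rngs::OsRng;

fn main() {
    // Two client keys, mirroring the "current" and "counter-indexed" keys checked above.
    let old_key = SigningKey::generate(&mut OsRng);
    let new_key = SigningKey::generate(&mut OsRng);
    let old_pub: VerifyingKey = old_key.verifying_key();
    let new_pub: VerifyingKey = new_key.verifying_key();

    let message = b"{\"message_type\":\"CheckForUpdate\",\"value\":\"\"}";
    let signature: Signature = new_key.sign(message);

    // Accept the request if either trusted public key verifies the signature.
    let ok = old_pub.verify(message, &signature).is_ok()
        || new_pub.verify(message, &signature).is_ok();
    assert!(ok);

    // Signatures travel as raw bytes on the wire and are rebuilt with from_slice.
    let raw = signature.to_bytes();
    let rebuilt = Signature::from_slice(&raw).expect("64-byte signature");
    assert!(new_pub.verify(message, &rebuilt).is_ok());
}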

View File

@@ -1,4 +1,5 @@
 pub mod auth;
+pub mod dnclient;
 pub mod hosts;
 pub mod networks;
 pub mod organization;
@@ -7,4 +8,3 @@ pub mod signup;
 pub mod totp_authenticators;
 pub mod trifid;
 pub mod verify_totp_authenticators;
-pub mod dnclient;

View File

@@ -33,7 +33,7 @@ use sea_orm::{ActiveModelTrait, ColumnTrait, EntityTrait, IntoActiveModel, Query
 use serde::{Deserialize, Serialize};
 use std::time::{Duration, SystemTime, UNIX_EPOCH};
 use trifid_api_entities::entity::{network, organization, signing_ca};
-use trifid_pki::cert::{serialize_x25519_private, NebulaCertificate, NebulaCertificateDetails, serialize_ed25519_private};
+use trifid_pki::cert::{serialize_ed25519_private, NebulaCertificate, NebulaCertificateDetails};
 use trifid_pki::ed25519_dalek::SigningKey;
 use trifid_pki::rand_core::OsRng;

View File

@@ -4,12 +4,15 @@ use dnapi_rs::message::{
     APIError, EnrollRequest, EnrollResponse, EnrollResponseData, EnrollResponseDataOrg,
 };
 use ed25519_dalek::{SigningKey, VerifyingKey};
-use log::{debug, error, trace};
+use log::{debug, error};
 use rand::rngs::OsRng;
 use sea_orm::{ColumnTrait, EntityTrait, ModelTrait, QueryFilter};
 
 use crate::codegen::{collect_info, generate_config};
-use crate::keystore::{KSCert, KSClientKey, KSConfig, KSSigningKey, KeystoreHostInformation, keystore_flush, keystore_init};
+use crate::keystore::{
+    keystore_flush, keystore_init, KSCert, KSClientKey, KSConfig, KSSigningKey,
+    KeystoreHostInformation,
+};
 use crate::AppState;
 use trifid_api_entities::entity::host_enrollment_code;
 use trifid_pki::cert::{
@@ -198,13 +201,11 @@ pub async fn enroll(
         Ok(_) => (),
         Err(e) => {
             error!("keystore save error: {}", e);
-            return HttpResponse::InternalServerError().json(vec![
-                APIError {
+            return HttpResponse::InternalServerError().json(vec![APIError {
                 code: "ERR_SAVE_ERR".to_string(),
                 message: "There was an error saving the keystore.".to_string(),
                 path: None,
-                }
-            ]);
+            }]);
         }
     }
@@ -214,19 +215,30 @@ pub async fn enroll(
                 Ok(cfg) => cfg.as_bytes().to_vec(),
                 Err(e) => {
                     error!("serialization error: {}", e);
-                    return HttpResponse::BadRequest().json(vec![
-                        APIError {
+                    return HttpResponse::BadRequest().json(vec![APIError {
                         code: "ERR_ED_INVALID".to_string(),
                         message: "There was an error deserializing the ED pubkey.".to_string(),
                         path: None,
-                        }
-                    ]);
+                    }]);
                 }
             },
             host_id: enroll_info.host.clone(),
             counter: host.current_config as u32,
-            trusted_keys: serialize_ed25519_public(host.signing_keys.iter().find(|u| u.id == host.current_signing_key).unwrap().key.verifying_key().as_bytes().as_slice()).to_vec(),
-            organization: EnrollResponseDataOrg { id: info.organization.id.clone(), name: info.organization.name.clone() },
+            trusted_keys: serialize_ed25519_public(
+                host.signing_keys
+                    .iter()
+                    .find(|u| u.id == host.current_signing_key)
+                    .unwrap()
+                    .key
+                    .verifying_key()
+                    .as_bytes()
+                    .as_slice(),
+            )
+            .to_vec(),
+            organization: EnrollResponseDataOrg {
+                id: info.organization.id.clone(),
+                name: info.organization.name.clone(),
+            },
         },
     })
 }

View File

@@ -43,7 +43,7 @@ impl NebulaCAPool {
         match pool.add_ca_certificate(pem::encode(&cert).as_bytes()) {
             Ok(did_expire) => {
                 if did_expire {
-                    pool.expired = true
+                    pool.expired = true;
                 }
             }
             Err(e) => return Err(e),
@@ -71,7 +71,7 @@ impl NebulaCAPool {
         let expired = cert.expired(SystemTime::now());
 
         if expired {
-            self.expired = true
+            self.expired = true;
         }
 
         self.cas.insert(fingerprint, cert);