From fe67c34fd54f25579b323c6c898568ba09bfb1cc Mon Sep 17 00:00:00 2001
From: c0repwn3r
Date: Thu, 27 Apr 2023 20:40:56 -0400
Subject: [PATCH] /v1/hosts

---
 trifid-api/src/main.rs                          |   1 +
 trifid-api/src/routes/v1/hosts.rs               | 358 ++++++++++++++++++
 trifid-api/src/routes/v1/mod.rs                 |   3 +-
 .../trifid_api_entities/src/entity/host.rs      |   1 +
 .../m20230427_170037_create_table_hosts.rs      |   4 +-
 5 files changed, 365 insertions(+), 2 deletions(-)
 create mode 100644 trifid-api/src/routes/v1/hosts.rs

diff --git a/trifid-api/src/main.rs b/trifid-api/src/main.rs
index 05a68bf..12365dd 100644
--- a/trifid-api/src/main.rs
+++ b/trifid-api/src/main.rs
@@ -95,6 +95,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
             .service(routes::v1::roles::delete_role)
             .service(routes::v1::roles::update_role_request)
             .service(routes::v1::trifid::trifid_extensions)
+            .service(routes::v1::hosts::get_hosts)
     }).bind(CONFIG.server.bind)?.run().await?;
 
     Ok(())
diff --git a/trifid-api/src/routes/v1/hosts.rs b/trifid-api/src/routes/v1/hosts.rs
new file mode 100644
index 0000000..04a783d
--- /dev/null
+++ b/trifid-api/src/routes/v1/hosts.rs
@@ -0,0 +1,358 @@
+// trifid-api, an open source reimplementation of the Defined Networking nebula management server.
+// Copyright (C) 2023 c0repwn3r
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+//
+//#GET /v1/hosts t+parity:full t+type:documented t+status:done t+feature:definednetworking
+// This endpoint has full parity with the original API. It has been recreated from the original API documentation.
+// This endpoint is considered done. No major features should be added or removed, unless it fixes bugs.
+// This endpoint requires the `definednetworking` extension to be enabled to be used.
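+//
+// Illustrative usage sketch (not taken from the upstream documentation; all values below are placeholders
+// chosen to match the request/response structs defined in this file):
+//
+//   GET /v1/hosts?includeCounts=true&pageSize=25&cursor=
+//
+//   200 OK
+//   {
+//     "data": [
+//       {
+//         "id": "...", "organizationID": "...", "networkID": "...", "roleID": "...",
+//         "name": "example-host", "ipAddress": "...", "staticAddresses": [],
+//         "listenPort": 0, "isLighthouse": false, "isRelay": false,
+//         "createdAt": "", "isBlocked": false,
+//         "metadata": { "lastSeenAt": null, "version": "", "platform": "", "updateAvailable": false }
+//       }
+//     ],
+//     "metadata": {
+//       "totalCount": 1, "hasNextPage": false, "hasPrevPage": false,
+//       "prevCursor": null, "nextCursor": null, "page": { "count": 1, "start": 0 }
+//     }
+//   }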
+
+use std::net::SocketAddrV4;
+use std::str::FromStr;
+use actix_web::{HttpRequest, HttpResponse, get};
+use actix_web::web::{Data, Query};
+use log::error;
+use sea_orm::{EntityTrait, QueryFilter, ColumnTrait, QueryOrder, PaginatorTrait};
+use serde::{Serialize, Deserialize};
+use trifid_api_entities::entity::{host, host_static_address, network, organization};
+use crate::AppState;
+use crate::auth_tokens::{enforce_2fa, enforce_api_token, TokenInfo};
+use crate::cursor::Cursor;
+use crate::error::{APIError, APIErrorsResponse};
+
+#[derive(Serialize, Deserialize)]
+pub struct ListHostsRequestOpts {
+    #[serde(default, rename = "includeCounts")]
+    pub include_counts: bool,
+    #[serde(default)]
+    pub cursor: String,
+    #[serde(default = "page_default", rename = "pageSize")]
+    pub page_size: u64
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct ListHostsResponse {
+    pub data: Vec<HostResponse>,
+    pub metadata: ListHostsResponseMetadata
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct ListHostsResponseMetadata {
+    #[serde(rename = "totalCount")]
+    pub total_count: u64,
+    #[serde(rename = "hasNextPage")]
+    pub has_next_page: bool,
+    #[serde(rename = "hasPrevPage")]
+    pub has_prev_page: bool,
+    #[serde(default, rename = "prevCursor")]
+    pub prev_cursor: Option<String>,
+    #[serde(default, rename = "nextCursor")]
+    pub next_cursor: Option<String>,
+    #[serde(default)]
+    pub page: Option<ListHostsResponseMetadataPage>
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct ListHostsResponseMetadataPage {
+    pub count: u64,
+    pub start: u64
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct HostResponse {
+    pub id: String,
+    #[serde(rename = "organizationID")]
+    pub organization_id: String,
+    #[serde(rename = "networkID")]
+    pub network_id: String,
+    #[serde(rename = "roleID")]
+    pub role_id: String,
+    pub name: String,
+    #[serde(rename = "ipAddress")]
+    pub ip_address: String,
+    #[serde(rename = "staticAddresses")]
+    pub static_addresses: Vec<SocketAddrV4>,
+    #[serde(rename = "listenPort")]
+    pub listen_port: u16,
+    #[serde(rename = "isLighthouse")]
+    pub is_lighthouse: bool,
+    #[serde(rename = "isRelay")]
+    pub is_relay: bool,
+    #[serde(rename = "createdAt")]
+    pub created_at: String,
+    #[serde(rename = "isBlocked")]
+    pub is_blocked: bool,
+    pub metadata: HostResponseMetadata
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct HostResponseMetadata {
+    #[serde(rename = "lastSeenAt")]
+    pub last_seen_at: Option<String>,
+    pub version: String,
+    pub platform: String,
+    #[serde(rename = "updateAvailable")]
+    pub update_available: bool
+}
+
+fn page_default() -> u64 { 25 }
+
+
+#[get("/v1/hosts")]
+pub async fn get_hosts(opts: Query<ListHostsRequestOpts>, req_info: HttpRequest, db: Data<AppState>) -> HttpResponse {
+    // For this endpoint, you either need to be a fully authenticated user OR an API token with the hosts:list scope
+    let session_info = enforce_2fa(&req_info, &db.conn).await.unwrap_or(TokenInfo::NotPresent);
+    let api_token_info = enforce_api_token(&req_info, &["hosts:list"], &db.conn).await.unwrap_or(TokenInfo::NotPresent);
+
+    // If neither is present, throw an error
+    if matches!(session_info, TokenInfo::NotPresent) && matches!(api_token_info, TokenInfo::NotPresent) {
+        return HttpResponse::Unauthorized().json(APIErrorsResponse {
+            errors: vec![
+                APIError {
+                    code: "ERR_UNAUTHORIZED".to_string(),
+                    message: "This endpoint requires either a fully authenticated user or a token with the hosts:list scope".to_string(),
+                    path: None,
+                }
+            ],
+        })
+    }
+
+    // If both are present, throw an error
+    if matches!(session_info, TokenInfo::AuthToken(_)) &&
+        matches!(api_token_info, TokenInfo::ApiToken(_)) {
+        return HttpResponse::BadRequest().json(APIErrorsResponse {
+            errors: vec![
+                APIError {
+                    code: "ERR_AMBIGUOUS_AUTHENTICATION".to_string(),
+                    message: "Both a user token and an API token with the proper scope were provided. Please provide only one.".to_string(),
+                    path: None
+                }
+            ],
+        })
+    }
+
+    let org_id = match api_token_info {
+        TokenInfo::ApiToken(tkn) => tkn.organization,
+        _ => {
+            // we have a session token, which means we have to do a db request to get the organization that this user owns
+            let user = match session_info {
+                TokenInfo::AuthToken(tkn) => tkn.session_info.user,
+                _ => unreachable!()
+            };
+
+            let org = match organization::Entity::find().filter(organization::Column::Owner.eq(user.id)).one(&db.conn).await {
+                Ok(r) => r,
+                Err(e) => {
+                    error!("database error: {}", e);
+                    return HttpResponse::InternalServerError().json(APIErrorsResponse {
+                        errors: vec![
+                            APIError {
+                                code: "ERR_DB_ERROR".to_string(),
+                                message: "There was an error performing the database request, please try again later.".to_string(),
+                                path: None,
+                            }
+                        ],
+                    });
+                }
+            };
+
+            if let Some(org) = org {
+                org.id
+            } else {
+                return HttpResponse::Unauthorized().json(APIErrorsResponse {
+                    errors: vec![
+                        APIError {
+                            code: "ERR_NO_ORG".to_string(),
+                            message: "This user does not own any organizations. Try using an API token instead.".to_string(),
+                            path: None
+                        }
+                    ],
+                })
+            }
+        }
+    };
+
+    let net_id;
+
+    let net = match network::Entity::find().filter(network::Column::Organization.eq(&org_id)).one(&db.conn).await {
+        Ok(r) => r,
+        Err(e) => {
+            error!("database error: {}", e);
+            return HttpResponse::InternalServerError().json(APIErrorsResponse {
+                errors: vec![
+                    APIError {
+                        code: "ERR_DB_ERROR".to_string(),
+                        message: "There was an error performing the database request, please try again later.".to_string(),
+                        path: None,
+                    }
+                ],
+            });
+        }
+    };
+
+    if let Some(net) = net {
+        net_id = net.id;
+    } else {
+        return HttpResponse::Unauthorized().json(APIErrorsResponse {
+            errors: vec![
+                APIError {
+                    code: "ERR_NO_NET".to_string(),
+                    message: "This user does not own any networks. Try using an API token instead.".to_string(),
+                    path: None
+                }
+            ],
+        })
+    }
+
+    let cursor: Cursor = match opts.cursor.clone().try_into() {
+        Ok(r) => r,
+        Err(e) => {
+            error!("invalid cursor: {}", e);
+            return HttpResponse::BadRequest().json(APIErrorsResponse {
+                errors: vec![
+                    APIError {
+                        code: "ERR_INVALID_CURSOR".to_string(),
+                        message: "The provided cursor was invalid, please try again later.".to_string(),
+                        path: None
+                    }
+                ],
+            })
+        }
+    };
+
+    let host_pages = host::Entity::find().filter(host::Column::Network.eq(net_id)).order_by_asc(host::Column::CreatedAt).paginate(&db.conn, opts.page_size);
+
+    let total = match host_pages.num_items().await {
+        Ok(r) => r,
+        Err(e) => {
+            error!("database error: {}", e);
+            return HttpResponse::InternalServerError().json(APIErrorsResponse {
+                errors: vec![
+                    APIError {
+                        code: "ERR_DB_ERROR".to_string(),
+                        message: "There was an error performing the database request, please try again later.".to_string(),
+                        path: None,
+                    }
+                ],
+            });
+        }
+    };
+    let pages = match host_pages.num_pages().await {
+        Ok(r) => r,
+        Err(e) => {
+            error!("database error: {}", e);
+            return HttpResponse::InternalServerError().json(APIErrorsResponse {
+                errors: vec![
+                    APIError {
+                        code: "ERR_DB_ERROR".to_string(),
+                        message: "There was an error performing the database request, please try again later.".to_string(),
+                        path: None,
+                    }
+                ],
+            });
+        }
+    };
+
+    let models = match host_pages.fetch_page(cursor.page).await {
+        Ok(r) => r,
+        Err(e) => {
+            error!("database error: {}", e);
+            return HttpResponse::InternalServerError().json(APIErrorsResponse {
+                errors: vec![
+                    APIError {
+                        code: "ERR_DB_ERROR".to_string(),
+                        message: "There was an error performing the database request, please try again later.".to_string(),
+                        path: None,
+                    }
+                ],
+            });
+        }
+    };
+
+    let mut models_mapped: Vec<HostResponse> = vec![];
+
+    for u in models {
+        // fetch static addresses
+        let ips = match host_static_address::Entity::find().filter(host_static_address::Column::Host.eq(&u.id)).all(&db.conn).await {
+            Ok(r) => r,
+            Err(e) => {
+                error!("database error: {}", e);
+                return HttpResponse::InternalServerError().json(APIErrorsResponse {
+                    errors: vec![
+                        APIError {
+                            code: "ERR_DB_ERROR".to_string(),
+                            message: "There was an error performing the database request, please try again later.".to_string(),
+                            path: None,
+                        }
+                    ],
+                });
+            }
+        };
+
+        models_mapped.push(HostResponse {
+            id: u.id,
+            organization_id: org_id.clone(),
+            network_id: u.network,
+            role_id: u.role,
+            name: u.name,
+            ip_address: u.ip,
+            static_addresses: ips.iter().map(|addr| SocketAddrV4::from_str(&addr.address).unwrap()).collect(),
+            listen_port: u.listen_port as u16,
+            is_lighthouse: false,
+            is_relay: false,
+            created_at: "".to_string(),
+            is_blocked: false,
+            metadata: HostResponseMetadata {
+                last_seen_at: None,
+                version: "".to_string(),
+                platform: "".to_string(),
+                update_available: false,
+            },
+        })
+    }
+
+    let count = models_mapped.len() as u64;
+
+    HttpResponse::Ok().json(ListHostsResponse {
+        data: models_mapped,
+        metadata: ListHostsResponseMetadata {
+            total_count: total,
+            has_next_page: cursor.page + 1 != pages,
+            has_prev_page: cursor.page != 0,
+            prev_cursor: if cursor.page != 0 {
+                match (Cursor { page: cursor.page - 1 }).try_into() {
+                    Ok(r) => Some(r),
+                    Err(_) => None
+                }
+            } else {
+                None
+            },
+            next_cursor: if cursor.page + 1 != pages {
+                match (Cursor { page: cursor.page + 1 }).try_into() {
+                    Ok(r) => Some(r),
+                    Err(_) => None
+                }
+            } else {
+                None
+            },
+            page: if opts.include_counts {
+                Some(ListHostsResponseMetadataPage {
+                    count,
+                    start: opts.page_size * cursor.page,
+                })
+            } else { None },
+        },
+    })
+}
\ No newline at end of file
diff --git a/trifid-api/src/routes/v1/mod.rs b/trifid-api/src/routes/v1/mod.rs
index ba01da3..011b224 100644
--- a/trifid-api/src/routes/v1/mod.rs
+++ b/trifid-api/src/routes/v1/mod.rs
@@ -5,4 +5,5 @@ pub mod verify_totp_authenticators;
 pub mod networks;
 pub mod organization;
 pub mod roles;
-pub mod trifid;
\ No newline at end of file
+pub mod trifid;
+pub mod hosts;
\ No newline at end of file
diff --git a/trifid-api/trifid_api_entities/src/entity/host.rs b/trifid-api/trifid_api_entities/src/entity/host.rs
index f2d689a..6dc6594 100644
--- a/trifid-api/trifid_api_entities/src/entity/host.rs
+++ b/trifid-api/trifid_api_entities/src/entity/host.rs
@@ -15,6 +15,7 @@ pub struct Model {
     pub is_lighthouse: bool,
     pub is_relay: bool,
     pub counter: i32,
+    pub created_at: i64,
 }
 
 #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
diff --git a/trifid-api/trifid_api_migration/src/m20230427_170037_create_table_hosts.rs b/trifid-api/trifid_api_migration/src/m20230427_170037_create_table_hosts.rs
index 9ad0b96..c867ef6 100644
--- a/trifid-api/trifid_api_migration/src/m20230427_170037_create_table_hosts.rs
+++ b/trifid-api/trifid_api_migration/src/m20230427_170037_create_table_hosts.rs
@@ -20,6 +20,7 @@ impl MigrationTrait for Migration {
                 .col(ColumnDef::new(Host::IsLighthouse).boolean().not_null())
                 .col(ColumnDef::new(Host::IsRelay).boolean().not_null())
                 .col(ColumnDef::new(Host::Counter).unsigned().not_null())
+                .col(ColumnDef::new(Host::CreatedAt).big_integer().not_null())
                 .foreign_key(
                     ForeignKey::create()
                         .from(Host::Table, Host::Network)
@@ -65,5 +66,6 @@ pub enum Host {
     ListenPort,
     IsLighthouse,
     IsRelay,
-    Counter
+    Counter,
+    CreatedAt
 }