diff --git a/.gitignore b/.gitignore
index 1de5659..3208d77 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
-target
\ No newline at end of file
+target
+pg_data
\ No newline at end of file
diff --git a/.gitmodules b/.gitmodules
deleted file mode 100644
index 5eabc69..0000000
--- a/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "dnapi"]
- path = dnapi
- url = https://github.com/DefinedNet/dnapi
diff --git a/.idea/trifid.iml b/.idea/trifid.iml
index bef05f9..ecc578a 100644
--- a/.idea/trifid.iml
+++ b/.idea/trifid.iml
@@ -5,6 +5,7 @@
+
diff --git a/Cargo.lock b/Cargo.lock
index 63782ef..66579ec 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -81,18 +81,18 @@ checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.107",
]
[[package]]
name = "async-trait"
-version = "0.1.64"
+version = "0.1.68"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2"
+checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 2.0.11",
]
[[package]]
@@ -154,6 +154,16 @@ version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a"
+[[package]]
+name = "base64-serde"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba368df5de76a5bea49aaf0cf1b39ccfbbef176924d1ba5db3e4135216cbe3c7"
+dependencies = [
+ "base64 0.21.0",
+ "serde",
+]
+
[[package]]
name = "base64ct"
version = "1.5.3"
@@ -219,9 +229,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "chrono"
-version = "0.4.23"
+version = "0.4.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f"
+checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b"
dependencies = [
"iana-time-zone",
"js-sys",
@@ -242,6 +252,43 @@ dependencies = [
"inout",
]
+[[package]]
+name = "clap"
+version = "4.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ce38afc168d8665cfc75c7b1dd9672e50716a137f433f070991619744a67342a"
+dependencies = [
+ "bitflags",
+ "clap_derive",
+ "clap_lex",
+ "is-terminal",
+ "once_cell",
+ "strsim",
+ "termcolor",
+]
+
+[[package]]
+name = "clap_derive"
+version = "4.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fddf67631444a3a3e3e5ac51c36a5e01335302de677bd78759eaa90ab1f46644"
+dependencies = [
+ "heck",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.107",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "033f6b7a4acb1f358c742aaca805c939ee73b4c6209ae4318ec7aca81c42e646"
+dependencies = [
+ "os_str_bytes",
+]
+
[[package]]
name = "codespan-reporting"
version = "0.11.1"
@@ -258,6 +305,17 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
+[[package]]
+name = "colored"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b3616f750b84d8f0de8a58bda93e08e2a81ad3f523089b05f1dffecab48c6cbd"
+dependencies = [
+ "atty",
+ "lazy_static",
+ "winapi",
+]
+
[[package]]
name = "const-oid"
version = "0.9.1"
@@ -376,6 +434,16 @@ dependencies = [
"cipher",
]
+[[package]]
+name = "ctrlc"
+version = "3.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbcf33c2a618cbe41ee43ae6e9f2e48368cd9f9db2896f10167d8d762679f639"
+dependencies = [
+ "nix",
+ "windows-sys 0.45.0",
+]
+
[[package]]
name = "curve25519-dalek"
version = "3.2.0"
@@ -385,6 +453,7 @@ dependencies = [
"byteorder",
"digest 0.9.0",
"rand_core 0.5.1",
+ "serde",
"subtle",
"zeroize",
]
@@ -428,7 +497,7 @@ dependencies = [
"proc-macro2",
"quote",
"scratch",
- "syn",
+ "syn 1.0.107",
]
[[package]]
@@ -445,7 +514,7 @@ checksum = "086c685979a698443656e5cf7856c95c642295a38599f12fb1ff76fb28d19892"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.107",
]
[[package]]
@@ -488,7 +557,7 @@ dependencies = [
"proc-macro2",
"proc-macro2-diagnostics",
"quote",
- "syn",
+ "syn 1.0.107",
]
[[package]]
@@ -517,7 +586,16 @@ version = "4.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059"
dependencies = [
- "dirs-sys",
+ "dirs-sys 0.3.7",
+]
+
+[[package]]
+name = "dirs"
+version = "5.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dece029acd3353e3a58ac2e3eb3c8d6c35827a892edc6cc4138ef9c33df46ecd"
+dependencies = [
+ "dirs-sys 0.4.0",
]
[[package]]
@@ -531,6 +609,33 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "dirs-sys"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04414300db88f70d74c5ff54e50f9e1d1737d9a5b90f53fcf2e95ca2a9ab554b"
+dependencies = [
+ "libc",
+ "redox_users",
+ "windows-sys 0.45.0",
+]
+
+[[package]]
+name = "dnapi-rs"
+version = "0.1.7"
+dependencies = [
+ "base64 0.21.0",
+ "base64-serde",
+ "chrono",
+ "log",
+ "rand",
+ "reqwest",
+ "serde",
+ "serde_json",
+ "trifid-pki",
+ "url",
+]
+
[[package]]
name = "dotenvy"
version = "0.15.6"
@@ -544,6 +649,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3cf420a7ec85d98495b0c34aa4a58ca117f982ffbece111aeb545160148d7010"
dependencies = [
"pkcs8",
+ "serde",
"signature",
]
@@ -557,6 +663,7 @@ dependencies = [
"ed25519",
"rand_core 0.6.4",
"serde",
+ "serde_bytes",
"sha2",
"zeroize",
]
@@ -576,6 +683,27 @@ dependencies = [
"cfg-if",
]
+[[package]]
+name = "errno"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1"
+dependencies = [
+ "errno-dragonfly",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "errno-dragonfly"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
+dependencies = [
+ "cc",
+ "libc",
+]
+
[[package]]
name = "event-listener"
version = "2.5.3"
@@ -611,6 +739,18 @@ dependencies = [
"version_check",
]
+[[package]]
+name = "filetime"
+version = "0.2.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a3de6e8d11b22ff9edc6d916f890800597d60f8b2da1caf2955c274638d6412"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "redox_syscall",
+ "windows-sys 0.45.0",
+]
+
[[package]]
name = "flate2"
version = "1.0.25"
@@ -852,6 +992,12 @@ dependencies = [
"libc",
]
+[[package]]
+name = "hermit-abi"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
+
[[package]]
name = "hex"
version = "0.4.3"
@@ -934,6 +1080,19 @@ dependencies = [
"want",
]
+[[package]]
+name = "hyper-tls"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
+dependencies = [
+ "bytes",
+ "hyper",
+ "native-tls",
+ "tokio",
+ "tokio-native-tls",
+]
+
[[package]]
name = "iana-time-zone"
version = "0.1.53"
@@ -1017,6 +1176,17 @@ dependencies = [
"cfg-if",
]
+[[package]]
+name = "io-lifetimes"
+version = "1.0.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0dd6da19f25979c7270e70fa95ab371ec3b701cd0eefc47667a09785b3c59155"
+dependencies = [
+ "hermit-abi 0.3.1",
+ "libc",
+ "windows-sys 0.45.0",
+]
+
[[package]]
name = "ipnet"
version = "2.7.1"
@@ -1026,6 +1196,18 @@ dependencies = [
"serde",
]
+[[package]]
+name = "is-terminal"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8687c819457e979cc940d09cb16e42a1bf70aa6b60a549de6d3a62a0ee90c69e"
+dependencies = [
+ "hermit-abi 0.3.1",
+ "io-lifetimes",
+ "rustix",
+ "windows-sys 0.45.0",
+]
+
[[package]]
name = "itertools"
version = "0.10.5"
@@ -1077,6 +1259,12 @@ dependencies = [
"cc",
]
+[[package]]
+name = "linux-raw-sys"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4"
+
[[package]]
name = "lock_api"
version = "0.4.9"
@@ -1206,6 +1394,18 @@ dependencies = [
"tempfile",
]
+[[package]]
+name = "nix"
+version = "0.26.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a"
+dependencies = [
+ "bitflags",
+ "cfg-if",
+ "libc",
+ "static_assertions",
+]
+
[[package]]
name = "nom"
version = "7.1.3"
@@ -1216,15 +1416,6 @@ dependencies = [
"minimal-lexical",
]
-[[package]]
-name = "nom8"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8"
-dependencies = [
- "memchr",
-]
-
[[package]]
name = "nu-ansi-term"
version = "0.46.0"
@@ -1275,6 +1466,15 @@ dependencies = [
"libc",
]
+[[package]]
+name = "num_threads"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "once_cell"
version = "1.17.0"
@@ -1310,7 +1510,7 @@ checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.107",
]
[[package]]
@@ -1332,6 +1532,12 @@ dependencies = [
"vcpkg",
]
+[[package]]
+name = "os_str_bytes"
+version = "6.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"
+
[[package]]
name = "overload"
version = "0.1.1"
@@ -1422,7 +1628,7 @@ dependencies = [
"proc-macro2",
"proc-macro2-diagnostics",
"quote",
- "syn",
+ "syn 1.0.107",
]
[[package]]
@@ -1505,10 +1711,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
-name = "proc-macro2"
-version = "1.0.50"
+name = "proc-macro-error"
+version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ef7d57beacfaf2d8aee5937dab7b7f28de3cb8b1828479bb5de2a7106f2bae2"
+checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
+dependencies = [
+ "proc-macro-error-attr",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.107",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro-error-attr"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.52"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224"
dependencies = [
"unicode-ident",
]
@@ -1521,7 +1751,7 @@ checksum = "4bf29726d67464d49fa6224a1d07936a8c08bb3fba727c7493f6cf1616fdaada"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.107",
"version_check",
"yansi",
]
@@ -1543,9 +1773,9 @@ dependencies = [
[[package]]
name = "quote"
-version = "1.0.23"
+version = "1.0.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b"
+checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
dependencies = [
"proc-macro2",
]
@@ -1626,7 +1856,7 @@ checksum = "9f9c0c92af03644e4806106281fe2e068ac5bc0ae74a707266d06ea27bccee5f"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.107",
]
[[package]]
@@ -1654,12 +1884,40 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848"
[[package]]
-name = "remove_dir_all"
-version = "0.5.3"
+name = "reqwest"
+version = "0.11.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
+checksum = "27b71749df584b7f4cac2c426c127a7c785a5106cc98f7a8feb044115f0fa254"
dependencies = [
- "winapi",
+ "base64 0.21.0",
+ "bytes",
+ "encoding_rs",
+ "futures-core",
+ "futures-util",
+ "h2",
+ "http",
+ "http-body",
+ "hyper",
+ "hyper-tls",
+ "ipnet",
+ "js-sys",
+ "log",
+ "mime",
+ "native-tls",
+ "once_cell",
+ "percent-encoding",
+ "pin-project-lite",
+ "serde",
+ "serde_json",
+ "serde_urlencoded",
+ "tokio",
+ "tokio-native-tls",
+ "tower-service",
+ "url",
+ "wasm-bindgen",
+ "wasm-bindgen-futures",
+ "web-sys",
+ "winreg",
]
[[package]]
@@ -1713,7 +1971,7 @@ dependencies = [
"proc-macro2",
"quote",
"rocket_http",
- "syn",
+ "syn 1.0.107",
"unicode-xid",
]
@@ -1744,6 +2002,20 @@ dependencies = [
"uncased",
]
+[[package]]
+name = "rustix"
+version = "0.36.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2fe885c3a125aa45213b68cc1472a49880cb5923dc23f522ad2791b882228778"
+dependencies = [
+ "bitflags",
+ "errno",
+ "io-lifetimes",
+ "libc",
+ "linux-raw-sys",
+ "windows-sys 0.45.0",
+]
+
[[package]]
name = "rustversion"
version = "1.0.11"
@@ -1808,29 +2080,38 @@ dependencies = [
[[package]]
name = "serde"
-version = "1.0.152"
+version = "1.0.159"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
+checksum = "3c04e8343c3daeec41f58990b9d77068df31209f2af111e059e9fe9646693065"
dependencies = [
"serde_derive",
]
[[package]]
-name = "serde_derive"
-version = "1.0.152"
+name = "serde_bytes"
+version = "0.11.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
+checksum = "416bda436f9aab92e02c8e10d49a15ddd339cea90b6e340fe51ed97abb548294"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.159"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c614d17805b093df4b147b51339e7e44bf05ef59fba1e45d83500bcfb4d8585"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 2.0.11",
]
[[package]]
name = "serde_json"
-version = "1.0.91"
+version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883"
+checksum = "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744"
dependencies = [
"itoa",
"ryu",
@@ -1846,6 +2127,31 @@ dependencies = [
"serde",
]
+[[package]]
+name = "serde_urlencoded"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"
+dependencies = [
+ "form_urlencoded",
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "serde_yaml"
+version = "0.9.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f82e6c8c047aa50a7328632d067bcae6ef38772a79e28daf32f735e0e4f3dd10"
+dependencies = [
+ "indexmap",
+ "itoa",
+ "ryu",
+ "serde",
+ "unsafe-libyaml",
+]
+
[[package]]
name = "sha1"
version = "0.10.5"
@@ -1892,6 +2198,19 @@ version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fe458c98333f9c8152221191a77e2a44e8325d0193484af2e9421a53019e57d"
+[[package]]
+name = "simple_logger"
+version = "4.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e78beb34673091ccf96a8816fce8bfd30d1292c7621ca2bcb5f2ba0fae4f558d"
+dependencies = [
+ "atty",
+ "colored",
+ "log",
+ "time 0.3.17",
+ "windows-sys 0.42.0",
+]
+
[[package]]
name = "slab"
version = "0.4.7"
@@ -1968,7 +2287,7 @@ dependencies = [
"bytes",
"crc",
"crossbeam-queue",
- "dirs",
+ "dirs 4.0.0",
"dotenvy",
"either",
"event-listener",
@@ -2019,7 +2338,7 @@ dependencies = [
"sha2",
"sqlx-core",
"sqlx-rt",
- "syn",
+ "syn 1.0.107",
"url",
]
@@ -2053,6 +2372,12 @@ dependencies = [
"loom",
]
+[[package]]
+name = "static_assertions"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
+
[[package]]
name = "stringprep"
version = "0.1.2"
@@ -2063,6 +2388,12 @@ dependencies = [
"unicode-normalization",
]
+[[package]]
+name = "strsim"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
+
[[package]]
name = "subtle"
version = "2.4.1"
@@ -2080,6 +2411,17 @@ dependencies = [
"unicode-ident",
]
+[[package]]
+name = "syn"
+version = "2.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "21e3787bb71465627110e7d87ed4faaa36c1f61042ee67badb9e2ef173accc40"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
[[package]]
name = "synstructure"
version = "0.12.6"
@@ -2088,22 +2430,32 @@ checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.107",
"unicode-xid",
]
[[package]]
-name = "tempfile"
-version = "3.3.0"
+name = "tar"
+version = "0.4.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4"
+checksum = "4b55807c0344e1e6c04d7c965f5289c39a8d94ae23ed5c0b57aabac549f871c6"
+dependencies = [
+ "filetime",
+ "libc",
+ "xattr",
+]
+
+[[package]]
+name = "tempfile"
+version = "3.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95"
dependencies = [
"cfg-if",
"fastrand",
- "libc",
"redox_syscall",
- "remove_dir_all",
- "winapi",
+ "rustix",
+ "windows-sys 0.42.0",
]
[[package]]
@@ -2118,6 +2470,30 @@ dependencies = [
[[package]]
name = "tfclient"
version = "0.1.0"
+dependencies = [
+ "base64 0.21.0",
+ "base64-serde",
+ "chrono",
+ "clap",
+ "ctrlc",
+ "dirs 5.0.0",
+ "dnapi-rs",
+ "flate2",
+ "hex",
+ "ipnet",
+ "log",
+ "reqwest",
+ "serde",
+ "serde_json",
+ "serde_yaml",
+ "sha2",
+ "simple_logger",
+ "tar",
+ "tempfile",
+ "toml 0.7.3",
+ "trifid-pki",
+ "url",
+]
[[package]]
name = "thiserror"
@@ -2136,7 +2512,7 @@ checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.107",
]
[[package]]
@@ -2166,6 +2542,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376"
dependencies = [
"itoa",
+ "libc",
+ "num_threads",
"serde",
"time-core",
"time-macros",
@@ -2229,7 +2607,7 @@ checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.107",
]
[[package]]
@@ -2278,9 +2656,9 @@ dependencies = [
[[package]]
name = "toml"
-version = "0.7.1"
+version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "772c1426ab886e7362aedf4abc9c0d1348a979517efedfc25862944d10137af0"
+checksum = "b403acf6f2bb0859c93c7f0d967cb4a75a7ac552100f9322faf64dc047669b21"
dependencies = [
"serde",
"serde_spanned",
@@ -2299,15 +2677,15 @@ dependencies = [
[[package]]
name = "toml_edit"
-version = "0.19.1"
+version = "0.19.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90a238ee2e6ede22fb95350acc78e21dc40da00bb66c0334bde83de4ed89424e"
+checksum = "dc18466501acd8ac6a3f615dd29a3438f8ca6bb3b19537138b3106e575621274"
dependencies = [
"indexmap",
- "nom8",
"serde",
"serde_spanned",
"toml_datetime",
+ "winnow",
]
[[package]]
@@ -2355,7 +2733,7 @@ checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.107",
]
[[package]]
@@ -2415,7 +2793,7 @@ dependencies = [
"sha2",
"sqlx",
"tokio",
- "toml 0.7.1",
+ "toml 0.7.3",
"totp-rs",
"trifid-pki",
"url",
@@ -2425,7 +2803,7 @@ dependencies = [
[[package]]
name = "trifid-pki"
-version = "0.1.4"
+version = "0.1.9"
dependencies = [
"ed25519-dalek",
"hex",
@@ -2434,6 +2812,7 @@ dependencies = [
"quick-protobuf",
"rand",
"rand_core 0.6.4",
+ "serde",
"sha2",
"x25519-dalek",
]
@@ -2524,6 +2903,12 @@ dependencies = [
"subtle",
]
+[[package]]
+name = "unsafe-libyaml"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ad2024452afd3874bf539695e04af6732ba06517424dbf958fdb16a01f3bef6c"
+
[[package]]
name = "url"
version = "2.3.1"
@@ -2561,7 +2946,7 @@ checksum = "c1b300a878652a387d2a0de915bdae8f1a548f0c6d45e072fe2688794b656cc9"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.107",
]
[[package]]
@@ -2631,10 +3016,22 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.107",
"wasm-bindgen-shared",
]
+[[package]]
+name = "wasm-bindgen-futures"
+version = "0.4.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "wasm-bindgen",
+ "web-sys",
+]
+
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.84"
@@ -2653,7 +3050,7 @@ checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.107",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -2839,6 +3236,24 @@ version = "0.42.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd"
+[[package]]
+name = "winnow"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23d020b441f92996c80d94ae9166e8501e59c7bb56121189dc9eab3bd8216966"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "winreg"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d"
+dependencies = [
+ "winapi",
+]
+
[[package]]
name = "x25519-dalek"
version = "2.0.0-pre.1"
@@ -2847,9 +3262,19 @@ checksum = "e5da623d8af10a62342bcbbb230e33e58a63255a58012f8653c578e54bab48df"
dependencies = [
"curve25519-dalek 3.2.0",
"rand_core 0.6.4",
+ "serde",
"zeroize",
]
+[[package]]
+name = "xattr"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d1526bbe5aaeb5eb06885f4d987bcdfa5e23187055de9b83fe00156a821fabc"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "yansi"
version = "0.5.1"
@@ -2873,6 +3298,6 @@ checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.107",
"synstructure",
]
diff --git a/Cargo.toml b/Cargo.toml
index 1ad3cfa..31bce56 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -2,5 +2,6 @@
members = [
"trifid-api",
"tfclient",
- "trifid-pki"
+ "trifid-pki",
+ "dnapi-rs"
]
\ No newline at end of file
diff --git a/api/v2/enroll.txt b/api/v2/enroll.txt
index bc3a41e..afead04 100644
--- a/api/v2/enroll.txt
+++ b/api/v2/enroll.txt
@@ -7,6 +7,9 @@ Connection: close
{"code":"xM22QsIzd4F0nLDTbh86RCSYwelfU_Hshqt-7u4yy_Y","dhPubkey":"LS0tLS1CRUdJTiBORUJVTEEgWDI1NTE5IFBVQkxJQyBLRVktLS0tLQpqZW9aaDZZYUNNSHZKK04zWGRlQ1hCbHo3dm5saTBjL1NlQ1hVR3lYbEIwPQotLS0tLUVORCBORUJVTEEgWDI1NTE5IFBVQkxJQyBLRVktLS0tLQo=","edPubkey":"LS0tLS1CRUdJTiBORUJVTEEgRUQyNTUxOSBQVUJMSUMgS0VZLS0tLS0KWHE0RG9mUGJoQzBubjc4VEhRWUxhNC83V1Ixei9iU1kzSm9pRzNRZ1VMcz0KLS0tLS1FTkQgTkVCVUxBIEVEMjU1MTkgUFVCTElDIEtFWS0tLS0tCg==","timestamp":"2023-02-01T13:24:56.380006369-05:00"}
+2023-02-01T13:24:56.380006369-05:00
+%Y-%m-%dT%H:%M:%S.%f-%:z
+
HTTP/2 200 OK
Cache-Control: no-store
Content-Security-Policy: default-src 'none'
diff --git a/dnapi b/dnapi
deleted file mode 160000
index 6f56f05..0000000
--- a/dnapi
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 6f56f055f9912755979e27942a91efcf794a82dc
diff --git a/dnapi-rs/Cargo.toml b/dnapi-rs/Cargo.toml
new file mode 100644
index 0000000..7a0bebc
--- /dev/null
+++ b/dnapi-rs/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+name = "dnapi-rs"
+version = "0.1.7"
+edition = "2021"
+description = "A rust client for the Defined Networking API"
+license = "AGPL-3.0-or-later"
+documentation = "https://docs.rs/dnapi-rs"
+homepage = "https://git.e3t.cc/~core/trifid"
+repository = "https://git.e3t.cc/~core/trifid"
+
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+serde = { version = "1.0.159", features = ["derive"] }
+base64-serde = "0.7.0"
+log = "0.4.17"
+reqwest = { version = "0.11.16", features = ["blocking", "json"] }
+url = "2.3.1"
+base64 = "0.21.0"
+serde_json = "1.0.95"
+trifid-pki = { version = "0.1.6", path = "../trifid-pki", features = ["serde_derive"] }
+rand = "0.8.5"
+chrono = "0.4.24"
\ No newline at end of file
diff --git a/dnapi-rs/README.md b/dnapi-rs/README.md
new file mode 100644
index 0000000..854582d
--- /dev/null
+++ b/dnapi-rs/README.md
@@ -0,0 +1,4 @@
+# dnapi-rs
+**dnapi-rs** is a Rust-native crate for interacting with the Defined Networking client API. It is a direct port of `dnapi`, an officially maintained API client by Defined Networking.
+
+This crate is maintained as a part of the trifid project. Check out the other crates in [the git repository](https://git.e3t.cc/~core/trifid).
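+
+## Example
+
+A minimal sketch of enrolling a host with the blocking client. The API base URL, User-Agent string, and enrollment code below are placeholders, not real values.
+
+```rust
+use dnapi_rs::client_blocking::Client;
+use url::Url;
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Build a client with a User-Agent string and the API base URL (placeholder).
+    let client = Client::new("dnapi-rs-example/0.1".to_string(), Url::parse("https://api.defined.net")?)?;
+
+    // Exchange a one-time enrollment code (placeholder) for a Nebula config,
+    // a Nebula private key PEM, DNClient credentials and organization metadata.
+    let (config, dh_privkey_pem, creds, meta) = client.enroll("YOUR-ENROLLMENT-CODE")?;
+
+    println!("enrolled host {} in {} ({} bytes of config)", creds.host_id, meta.organization_name, config.len());
+    let _ = dh_privkey_pem; // persist this alongside the config before running Nebula
+    Ok(())
+}
+```
+
+The async client in `client_async` exposes the same methods as `async` functions.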
\ No newline at end of file
diff --git a/dnapi-rs/src/client_async.rs b/dnapi-rs/src/client_async.rs
new file mode 100644
index 0000000..3badec2
--- /dev/null
+++ b/dnapi-rs/src/client_async.rs
@@ -0,0 +1,221 @@
+//! Client structs to handle communication with the Defined Networking API. This is the async client API - if you want blocking calls, enable the blocking (default) feature instead.
+
+use std::error::Error;
+use chrono::Local;
+use log::{debug, error};
+use reqwest::StatusCode;
+use url::Url;
+use trifid_pki::cert::serialize_ed25519_public;
+use trifid_pki::ed25519_dalek::{Signature, Signer, SigningKey, Verifier};
+use crate::credentials::{Credentials, ed25519_public_keys_from_pem};
+use crate::crypto::{new_keys, nonce};
+use crate::message::{CHECK_FOR_UPDATE, CheckForUpdateResponseWrapper, DO_UPDATE, DoUpdateRequest, DoUpdateResponse, ENDPOINT_V1, ENROLL_ENDPOINT, EnrollRequest, EnrollResponse, RequestV1, RequestWrapper, SignedResponseWrapper};
+use serde::{Serialize, Deserialize};
+use base64::Engine;
+
+/// A type alias to abstract return types
+pub type NebulaConfig = Vec<u8>;
+
+/// A type alias to abstract DH private keys
+pub type DHPrivateKeyPEM = Vec<u8>;
+
+/// A combination of persistent data and HTTP client used for communicating with the API.
+pub struct Client {
+ http_client: reqwest::Client,
+ server_url: Url
+}
+
+#[derive(Serialize, Deserialize, Clone)]
+/// A struct containing organization metadata returned as a result of enrollment
+pub struct EnrollMeta {
+ /// The server organization ID this node is now a member of
+ pub organization_id: String,
+ /// The server organization name this node is now a member of
+ pub organization_name: String
+}
+
+impl Client {
+ /// Create a new `Client` configured with the given User-Agent and API base.
+ /// # Errors
+ /// This function will return an error if the reqwest Client could not be created.
+    pub fn new(user_agent: String, api_base: Url) -> Result<Self, Box<dyn Error>> {
+ let client = reqwest::Client::builder().user_agent(user_agent).build()?;
+ Ok(Self {
+ http_client: client,
+ server_url: api_base
+ })
+ }
+
+ /// Issues an enrollment request against the REST API using the given enrollment code, passing along a
+ /// locally generated DH X25519 Nebula key to be signed by the CA, and an Ed25519 key for future API
+ /// authentication. On success it returns the Nebula config generated by the server, a Nebula private key PEM,
+ /// credentials to be used for future DN API requests, and an object containing organization information.
+ /// # Errors
+ /// This function will return an error in any of the following situations:
+ /// - the `server_url` is invalid
+ /// - the HTTP request fails
+ /// - the HTTP response is missing X-Request-ID
+ /// - X-Request-ID isn't valid UTF-8
+ /// - the server returns an error
+ /// - the server returns invalid JSON
+ /// - the `trusted_keys` field is invalid
+    pub async fn enroll(&self, code: &str) -> Result<(NebulaConfig, DHPrivateKeyPEM, Credentials, EnrollMeta), Box<dyn Error>> {
+ debug!("making enrollment request to API {{server: {}, code: {}}}", self.server_url, code);
+
+ let (dh_pubkey_pem, dh_privkey_pem, ed_pubkey, ed_privkey) = new_keys();
+
+ let req_json = serde_json::to_string(&EnrollRequest {
+ code: code.to_string(),
+ dh_pubkey: dh_pubkey_pem,
+ ed_pubkey: serialize_ed25519_public(ed_pubkey.as_bytes()),
+ timestamp: Local::now().format("%Y-%m-%dT%H:%M:%S.%f%:z").to_string(),
+ })?;
+
+ let resp = self.http_client.post(self.server_url.join(ENROLL_ENDPOINT)?).body(req_json).send().await?;
+
+ let req_id = resp.headers().get("X-Request-ID").ok_or("Response missing X-Request-ID")?.to_str()?;
+ debug!("enrollment request complete {{req_id: {}}}", req_id);
+
+ let resp: EnrollResponse = resp.json().await?;
+
+ let r = match resp {
+ EnrollResponse::Success { data } => data,
+ EnrollResponse::Error { errors } => {
+ error!("unexpected error during enrollment: {}", errors[0].message);
+ return Err(errors[0].message.clone().into());
+ }
+ };
+
+ let meta = EnrollMeta {
+ organization_id: r.organization.id,
+ organization_name: r.organization.name,
+ };
+
+ let trusted_keys = ed25519_public_keys_from_pem(&r.trusted_keys)?;
+
+ let creds = Credentials {
+ host_id: r.host_id,
+ ed_privkey,
+ counter: r.counter,
+ trusted_keys,
+ };
+
+ Ok((r.config, dh_privkey_pem, creds, meta))
+ }
+
+ /// Send a signed message to the `DNClient` API to learn if there is a new configuration available.
+ /// # Errors
+ /// This function returns an error if the dnclient request fails, or the server returns invalid data.
+    pub async fn check_for_update(&self, creds: &Credentials) -> Result<bool, Box<dyn Error>> {
+ let body = self.post_dnclient(CHECK_FOR_UPDATE, &[], &creds.host_id, creds.counter, &creds.ed_privkey).await?;
+
+ let result: CheckForUpdateResponseWrapper = serde_json::from_slice(&body)?;
+
+ Ok(result.data.update_available)
+ }
+
+ /// Send a signed message to the `DNClient` API to fetch the new configuration update. During this call a new
+ /// DH X25519 keypair is generated for the new Nebula certificate as well as a new Ed25519 keypair for `DNClient` API
+    /// communication. On success it returns the new config, a Nebula private key PEM to be inserted into the config,
+    /// and new `DNClient` API credentials.
+ /// # Errors
+ /// This function returns an error in any of the following scenarios:
+ /// - if the message could not be serialized
+ /// - if the request fails
+ /// - if the response could not be deserialized
+ /// - if the signature is invalid
+ /// - if the keys are invalid
+    pub async fn do_update(&self, creds: &Credentials) -> Result<(NebulaConfig, DHPrivateKeyPEM, Credentials), Box<dyn Error>> {
+ let (dh_pubkey_pem, dh_privkey_pem, ed_pubkey, ed_privkey) = new_keys();
+
+ let update_keys = DoUpdateRequest {
+ ed_pubkey_pem: serialize_ed25519_public(ed_pubkey.as_bytes()),
+ dh_pubkey_pem,
+ nonce: nonce().to_vec(),
+ };
+
+ let update_keys_blob = serde_json::to_vec(&update_keys)?;
+
+ let resp = self.post_dnclient(DO_UPDATE, &update_keys_blob, &creds.host_id, creds.counter, &creds.ed_privkey).await?;
+
+ let result_wrapper: SignedResponseWrapper = serde_json::from_slice(&resp)?;
+
+ let mut valid = false;
+
+ for ca_pubkey in &creds.trusted_keys {
+ if ca_pubkey.verify(&result_wrapper.data.message, &Signature::from_slice(&result_wrapper.data.signature)?).is_ok() {
+ valid = true;
+ break;
+ }
+ }
+
+ if !valid {
+ return Err("Failed to verify signed API result".into())
+ }
+
+ let result: DoUpdateResponse = serde_json::from_slice(&result_wrapper.data.message)?;
+
+ if result.nonce != update_keys.nonce {
+ error!("nonce mismatch between request {:x?} and response {:x?}", result.nonce, update_keys.nonce);
+ return Err("nonce mismatch between request and response".into())
+ }
+
+ let trusted_keys = ed25519_public_keys_from_pem(&result.trusted_keys)?;
+
+ let new_creds = Credentials {
+ host_id: creds.host_id.clone(),
+ ed_privkey,
+ counter: result.counter,
+ trusted_keys,
+ };
+
+ Ok((result.config, dh_privkey_pem, new_creds))
+ }
+
+ /// Wraps and signs the given `req_type` and value, and then makes the API call.
+ /// On success, returns the response body.
+ /// # Errors
+ /// This function will return an error if:
+    /// - serialization in any step fails
+    /// - the `server_url` is invalid
+    /// - the request could not be sent
+    pub async fn post_dnclient(&self, req_type: &str, value: &[u8], host_id: &str, counter: u32, ed_privkey: &SigningKey) -> Result<Vec<u8>, Box<dyn Error>> {
+ let encoded_msg = serde_json::to_string(&RequestWrapper {
+ message_type: req_type.to_string(),
+ value: value.to_vec(),
+ timestamp: Local::now().format("%Y-%m-%dT%H:%M:%S.%f%:z").to_string(),
+ })?;
+ let encoded_msg_bytes = encoded_msg.into_bytes();
+ let b64_msg = base64::engine::general_purpose::STANDARD.encode(encoded_msg_bytes);
+ let b64_msg_bytes = b64_msg.as_bytes();
+ let signature = ed_privkey.sign(b64_msg_bytes).to_vec();
+
+ ed_privkey.verify(b64_msg_bytes, &Signature::from_slice(&signature)?)?;
+ debug!("signature valid via clientside check");
+
+ let body = RequestV1 {
+ version: 1,
+ host_id: host_id.to_string(),
+ counter,
+ message: b64_msg,
+ signature,
+ };
+
+ let post_body = serde_json::to_string(&body)?;
+
+ let resp = self.http_client.post(self.server_url.join(ENDPOINT_V1)?).body(post_body).send().await?;
+
+ match resp.status() {
+ StatusCode::OK => {
+ Ok(resp.bytes().await?.to_vec())
+ },
+ StatusCode::FORBIDDEN => {
+ Err("Forbidden".into())
+ },
+ _ => {
+ error!("dnclient endpoint returned bad status code {}", resp.status());
+ Err("dnclient endpoint returned error".into())
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/dnapi-rs/src/client_blocking.rs b/dnapi-rs/src/client_blocking.rs
new file mode 100644
index 0000000..bd932e9
--- /dev/null
+++ b/dnapi-rs/src/client_blocking.rs
@@ -0,0 +1,231 @@
+//! Client structs to handle communication with the Defined Networking API. This is the blocking client API - if you want async calls, disable default features and enable the async feature instead.
+
+use std::error::Error;
+use base64::Engine;
+use chrono::Local;
+use log::{debug, error, trace};
+use reqwest::StatusCode;
+use url::Url;
+use trifid_pki::cert::serialize_ed25519_public;
+use trifid_pki::ed25519_dalek::{Signature, Signer, SigningKey, Verifier};
+use crate::credentials::{Credentials, ed25519_public_keys_from_pem};
+use crate::crypto::{new_keys, nonce};
+use crate::message::{CHECK_FOR_UPDATE, CheckForUpdateResponseWrapper, DO_UPDATE, DoUpdateRequest, DoUpdateResponse, ENDPOINT_V1, ENROLL_ENDPOINT, EnrollRequest, EnrollResponse, RequestV1, RequestWrapper, SignedResponseWrapper};
+use serde::{Serialize, Deserialize};
+
+/// A type alias to abstract return types
+pub type NebulaConfig = Vec<u8>;
+
+/// A type alias to abstract DH private keys
+pub type DHPrivateKeyPEM = Vec<u8>;
+
+/// A combination of persistent data and HTTP client used for communicating with the API.
+pub struct Client {
+ http_client: reqwest::blocking::Client,
+ server_url: Url
+}
+
+#[derive(Serialize, Deserialize, Clone)]
+/// A struct containing organization metadata returned as a result of enrollment
+pub struct EnrollMeta {
+ /// The server organization ID this node is now a member of
+ pub organization_id: String,
+ /// The server organization name this node is now a member of
+ pub organization_name: String
+}
+
+impl Client {
+ /// Create a new `Client` configured with the given User-Agent and API base.
+ /// # Errors
+ /// This function will return an error if the reqwest Client could not be created.
+    pub fn new(user_agent: String, api_base: Url) -> Result<Self, Box<dyn Error>> {
+ let client = reqwest::blocking::Client::builder().user_agent(user_agent).build()?;
+ Ok(Self {
+ http_client: client,
+ server_url: api_base
+ })
+ }
+
+ /// Issues an enrollment request against the REST API using the given enrollment code, passing along a
+ /// locally generated DH X25519 Nebula key to be signed by the CA, and an Ed25519 key for future API
+ /// authentication. On success it returns the Nebula config generated by the server, a Nebula private key PEM,
+ /// credentials to be used for future DN API requests, and an object containing organization information.
+ /// # Errors
+ /// This function will return an error in any of the following situations:
+ /// - the `server_url` is invalid
+ /// - the HTTP request fails
+ /// - the HTTP response is missing X-Request-ID
+ /// - X-Request-ID isn't valid UTF-8
+ /// - the server returns an error
+ /// - the server returns invalid JSON
+ /// - the `trusted_keys` field is invalid
+    pub fn enroll(&self, code: &str) -> Result<(NebulaConfig, DHPrivateKeyPEM, Credentials, EnrollMeta), Box<dyn Error>> {
+ debug!("making enrollment request to API {{server: {}, code: {}}}", self.server_url, code);
+
+ let (dh_pubkey_pem, dh_privkey_pem, ed_pubkey, ed_privkey) = new_keys();
+
+ let req_json = serde_json::to_string(&EnrollRequest {
+ code: code.to_string(),
+ dh_pubkey: dh_pubkey_pem,
+ ed_pubkey: serialize_ed25519_public(ed_pubkey.as_bytes()),
+ timestamp: Local::now().format("%Y-%m-%dT%H:%M:%S.%f%:z").to_string(),
+ })?;
+
+ let resp = self.http_client.post(self.server_url.join(ENROLL_ENDPOINT)?).body(req_json).send()?;
+
+ let req_id = resp.headers().get("X-Request-ID").ok_or("Response missing X-Request-ID")?.to_str()?;
+ debug!("enrollment request complete {{req_id: {}}}", req_id);
+
+ let resp: EnrollResponse = resp.json()?;
+
+ let r = match resp {
+ EnrollResponse::Success { data } => data,
+ EnrollResponse::Error { errors } => {
+ error!("unexpected error during enrollment: {}", errors[0].message);
+ return Err(errors[0].message.clone().into());
+ }
+ };
+
+ let meta = EnrollMeta {
+ organization_id: r.organization.id,
+ organization_name: r.organization.name,
+ };
+
+ debug!("parsing public keys");
+
+
+ let trusted_keys = ed25519_public_keys_from_pem(&r.trusted_keys)?;
+
+ let creds = Credentials {
+ host_id: r.host_id,
+ ed_privkey,
+ counter: r.counter,
+ trusted_keys,
+ };
+
+ Ok((r.config, dh_privkey_pem, creds, meta))
+ }
+
+ /// Send a signed message to the `DNClient` API to learn if there is a new configuration available.
+ /// # Errors
+ /// This function returns an error if the dnclient request fails, or the server returns invalid data.
+    pub fn check_for_update(&self, creds: &Credentials) -> Result<bool, Box<dyn Error>> {
+ let body = self.post_dnclient(CHECK_FOR_UPDATE, &[], &creds.host_id, creds.counter, &creds.ed_privkey)?;
+
+ let result: CheckForUpdateResponseWrapper = serde_json::from_slice(&body)?;
+
+ Ok(result.data.update_available)
+ }
+
+ /// Send a signed message to the `DNClient` API to fetch the new configuration update. During this call a new
+ /// DH X25519 keypair is generated for the new Nebula certificate as well as a new Ed25519 keypair for `DNClient` API
+    /// communication. On success it returns the new config, a Nebula private key PEM to be inserted into the config,
+    /// and new `DNClient` API credentials.
+ /// # Errors
+ /// This function returns an error in any of the following scenarios:
+ /// - if the message could not be serialized
+ /// - if the request fails
+ /// - if the response could not be deserialized
+ /// - if the signature is invalid
+ /// - if the keys are invalid
+    pub fn do_update(&self, creds: &Credentials) -> Result<(NebulaConfig, DHPrivateKeyPEM, Credentials), Box<dyn Error>> {
+ let (dh_pubkey_pem, dh_privkey_pem, ed_pubkey, ed_privkey) = new_keys();
+
+ let update_keys = DoUpdateRequest {
+ ed_pubkey_pem: serialize_ed25519_public(ed_pubkey.as_bytes()),
+ dh_pubkey_pem,
+ nonce: nonce().to_vec(),
+ };
+
+ let update_keys_blob = serde_json::to_vec(&update_keys)?;
+
+ let resp = self.post_dnclient(DO_UPDATE, &update_keys_blob, &creds.host_id, creds.counter, &creds.ed_privkey)?;
+
+ let result_wrapper: SignedResponseWrapper = serde_json::from_slice(&resp)?;
+
+ let mut valid = false;
+
+ for ca_pubkey in &creds.trusted_keys {
+ if ca_pubkey.verify(&result_wrapper.data.message, &Signature::from_slice(&result_wrapper.data.signature)?).is_ok() {
+ valid = true;
+ break;
+ }
+ }
+
+ if !valid {
+ return Err("Failed to verify signed API result".into())
+ }
+
+ let result: DoUpdateResponse = serde_json::from_slice(&result_wrapper.data.message)?;
+
+ if result.nonce != update_keys.nonce {
+ error!("nonce mismatch between request {:x?} and response {:x?}", result.nonce, update_keys.nonce);
+ return Err("nonce mismatch between request and response".into())
+ }
+
+ if result.counter <= creds.counter {
+ error!("counter in request {} should be less than counter in response {}", creds.counter, result.counter);
+ return Err("received older config than what we already had".into())
+ }
+
+ let trusted_keys = ed25519_public_keys_from_pem(&result.trusted_keys)?;
+
+ let new_creds = Credentials {
+ host_id: creds.host_id.clone(),
+ ed_privkey,
+ counter: result.counter,
+ trusted_keys,
+ };
+
+ Ok((result.config, dh_privkey_pem, new_creds))
+ }
+
+ /// Wraps and signs the given `req_type` and value, and then makes the API call.
+ /// On success, returns the response body.
+ /// # Errors
+ /// This function will return an error if:
+    /// - serialization in any step fails
+    /// - the `server_url` is invalid
+    /// - the request could not be sent
+    pub fn post_dnclient(&self, req_type: &str, value: &[u8], host_id: &str, counter: u32, ed_privkey: &SigningKey) -> Result<Vec<u8>, Box<dyn Error>> {
+ let encoded_msg = serde_json::to_string(&RequestWrapper {
+ message_type: req_type.to_string(),
+ value: value.to_vec(),
+ timestamp: Local::now().format("%Y-%m-%dT%H:%M:%S.%f%:z").to_string(),
+ })?;
+ let encoded_msg_bytes = encoded_msg.into_bytes();
+ let b64_msg = base64::engine::general_purpose::STANDARD.encode(encoded_msg_bytes);
+ let b64_msg_bytes = b64_msg.as_bytes();
+ let signature = ed_privkey.sign(b64_msg_bytes).to_vec();
+
+ ed_privkey.verify(b64_msg_bytes, &Signature::from_slice(&signature)?)?;
+ debug!("signature valid via clientside check");
+
+ let body = RequestV1 {
+ version: 1,
+ host_id: host_id.to_string(),
+ counter,
+ message: b64_msg,
+ signature,
+ };
+
+ let post_body = serde_json::to_string(&body)?;
+
+ trace!("sending dnclient request {}", post_body);
+
+ let resp = self.http_client.post(self.server_url.join(ENDPOINT_V1)?).body(post_body).send()?;
+
+ match resp.status() {
+ StatusCode::OK => {
+ Ok(resp.bytes()?.to_vec())
+ },
+ StatusCode::FORBIDDEN => {
+ Err("Forbidden".into())
+ },
+ _ => {
+ error!("dnclient endpoint returned bad status code {}", resp.status());
+ Err("dnclient endpoint returned error".into())
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/dnapi-rs/src/credentials.rs b/dnapi-rs/src/credentials.rs
new file mode 100644
index 0000000..01a0b3d
--- /dev/null
+++ b/dnapi-rs/src/credentials.rs
@@ -0,0 +1,45 @@
+//! Contains the `Credentials` struct, which holds all keys, IDs, organization data, and other identity- and security-related state that is persisted by a `Client`.
+
+use std::error::Error;
+use trifid_pki::cert::{deserialize_ed25519_public_many, serialize_ed25519_public};
+use trifid_pki::ed25519_dalek::{SigningKey, VerifyingKey};
+use serde::{Serialize, Deserialize};
+
+#[derive(Serialize, Deserialize, Clone)]
+/// Contains information necessary to make requests against the `DNClient` API.
+pub struct Credentials {
+ /// The assigned Host ID that this client represents
+ pub host_id: String,
+ /// The ed25519 private key used to sign requests against the API
+ pub ed_privkey: SigningKey,
+    /// The counter used in other API requests. Its exact purpose is unclear, but the original client persists it and it is required for API calls.
+ pub counter: u32,
+ /// The set of trusted ed25519 keys that may be used by the API to sign API responses.
+    pub trusted_keys: Vec<VerifyingKey>
+}
+
+/// Converts an array of `VerifyingKey`s to a single bundle of PEM-encoded keys
+pub fn ed25519_public_keys_to_pem(keys: &[VerifyingKey]) -> Vec<u8> {
+ let mut res = vec![];
+
+ for key in keys {
+ res.append(&mut serialize_ed25519_public(&key.to_bytes()));
+ }
+
+ res
+}
+
+/// Converts a set of PEM-encoded ed25519 public keys into an array of `VerifyingKey`s.
+/// # Errors
+/// This function will return an error if the PEM could not be decoded, or if any of the encoded keys are invalid.
+pub fn ed25519_public_keys_from_pem(pem: &[u8]) -> Result<Vec<VerifyingKey>, Box<dyn Error>> {
+ let pems = deserialize_ed25519_public_many(pem)?;
+ let mut keys = vec![];
+
+ #[allow(clippy::unwrap_used)]
+ for pem in pems {
+ keys.push(VerifyingKey::from_bytes(&pem.try_into().unwrap_or_else(|_| unreachable!()))?);
+ }
+
+ Ok(keys)
+}
\ No newline at end of file
diff --git a/dnapi-rs/src/crypto.rs b/dnapi-rs/src/crypto.rs
new file mode 100644
index 0000000..d5f87d9
--- /dev/null
+++ b/dnapi-rs/src/crypto.rs
@@ -0,0 +1,41 @@
+//! Functions for generating keys and nonces for use in API calls
+
+use rand::Rng;
+use rand::rngs::OsRng;
+use trifid_pki::cert::{serialize_x25519_private, serialize_x25519_public};
+use trifid_pki::ed25519_dalek::{SigningKey, VerifyingKey};
+use trifid_pki::x25519_dalek::{PublicKey, StaticSecret};
+
+/// Generate a new random set of Nebula (Diffie-Hellman) and Ed25519 (API calls) keys for use in your client
+pub fn new_keys() -> (Vec<u8>, Vec<u8>, VerifyingKey, SigningKey) {
+ let (dh_pub, dh_priv) = new_nebula_keypair();
+ let (ed_pub, ed_priv) = new_ed25519_keypair();
+ (dh_pub, dh_priv, ed_pub, ed_priv)
+}
+
+/// Generate a new PEM-encoded Nebula keypair
+pub fn new_nebula_keypair() -> (Vec<u8>, Vec<u8>) {
+ let (pub_key, priv_key) = new_x25519_keypair();
+ let pub_key_encoded = serialize_x25519_public(&pub_key);
+ let priv_key_encoded = serialize_x25519_private(&priv_key);
+ (pub_key_encoded, priv_key_encoded)
+}
+
+/// Generate a new 32-byte X25519 keypair
+pub fn new_x25519_keypair() -> ([u8; 32], [u8; 32]) {
+ let priv_key = StaticSecret::new(OsRng);
+ let pub_key = PublicKey::from(&priv_key);
+ (pub_key.to_bytes(), priv_key.to_bytes())
+}
+
+/// Generate a new random Ed25519 signing keypair for signing API calls
+pub fn new_ed25519_keypair() -> (VerifyingKey, SigningKey) {
+ let secret = SigningKey::generate(&mut OsRng);
+ let public = secret.verifying_key();
+ (public, secret)
+}
+
+/// Generates a 16-byte random nonce for use in API calls
+pub fn nonce() -> [u8; 16] {
+ rand::thread_rng().gen()
+}
\ No newline at end of file
diff --git a/dnapi-rs/src/lib.rs b/dnapi-rs/src/lib.rs
new file mode 100644
index 0000000..b9b63d8
--- /dev/null
+++ b/dnapi-rs/src/lib.rs
@@ -0,0 +1,24 @@
+//! # dnapi-rs
+//! **dnapi-rs** is a Rust-native crate for interacting with the Defined Networking client API. It is a direct port of `dnapi`, an officially maintained API client by Defined Networking.
+//!
+//! This crate is maintained as a part of the trifid project. Check out the other crates in [the git repository](https://git.e3t.cc/~core/trifid).
+
+#![warn(clippy::pedantic)]
+#![warn(clippy::nursery)]
+#![deny(clippy::unwrap_used)]
+#![deny(clippy::expect_used)]
+#![deny(missing_docs)]
+#![deny(clippy::missing_errors_doc)]
+#![deny(clippy::missing_panics_doc)]
+#![deny(clippy::missing_safety_doc)]
+#![allow(clippy::must_use_candidate)]
+#![allow(clippy::too_many_lines)]
+#![allow(clippy::module_name_repetitions)]
+
+pub mod message;
+
+pub mod client_blocking;
+pub mod client_async;
+
+pub mod credentials;
+pub mod crypto;
\ No newline at end of file
diff --git a/dnapi-rs/src/message.rs b/dnapi-rs/src/message.rs
new file mode 100644
index 0000000..d1360ed
--- /dev/null
+++ b/dnapi-rs/src/message.rs
@@ -0,0 +1,201 @@
+//! Models for interacting with the Defined Networking API.
+
+use base64_serde::base64_serde_type;
+use serde::{Serialize, Deserialize};
+
+/// The version 1 `DNClient` API endpoint
+pub const ENDPOINT_V1: &str = "/v1/dnclient";
+
+/// The `CheckForUpdate` message type
+pub const CHECK_FOR_UPDATE: &str = "CheckForUpdate";
+/// The `DoUpdate` message type
+pub const DO_UPDATE: &str = "DoUpdate";
+
+base64_serde_type!(Base64Standard, base64::engine::general_purpose::STANDARD);
+
+#[derive(Serialize, Deserialize)]
+/// `RequestV1` is the version 1 `DNClient` request message.
+pub struct RequestV1 {
+ /// Version is always 1
+ pub version: i32,
+ #[serde(rename = "hostID")]
+ /// The Host ID of this dnclient instance
+ pub host_id: String,
+ /// The counter last returned by the server
+ pub counter: u32,
+ /// A base64-encoded message. This must be previously base64-encoded, as the signature is signed over the base64-encoded data.
+ pub message: String,
+ #[serde(with = "Base64Standard")]
+ /// An ed25519 signature over the `message`, which can be verified with the host's previously enrolled ed25519 public key
+    pub signature: Vec<u8>
+}
+
+#[derive(Serialize, Deserialize)]
+/// `RequestWrapper` wraps a `DNClient` request message. It consists of a
+/// type and value, with the type string indicating how to interpret the value blob.
+pub struct RequestWrapper {
+ #[serde(rename = "type")]
+ /// The type of the message. Used to determine how `value` is encoded
+ pub message_type: String,
+ #[serde(with = "Base64Standard")]
+ /// A base64-encoded arbitrary message, the type of which is stated in `message_type`
+    pub value: Vec<u8>,
+ /// The timestamp of when this message was sent. Follows the format `%Y-%m-%dT%H:%M:%S.%f%:z`, or:
+    /// `<4-digit year>-<2-digit month>-<2-digit day>T<2-digit hour>:<2-digit minute>:<2-digit second>.<nanoseconds><UTC offset>`
+    /// For example:
+    /// `2023-03-29T09:56:42.380006369-04:00`
+    /// would represent `March 29th, 2023, 09:56:42.380006369, UTC-4`
+ pub timestamp: String
+}
+
+#[derive(Serialize, Deserialize)]
+/// `SignedResponseWrapper` contains a response message and a signature to validate inside `data`.
+pub struct SignedResponseWrapper {
+ /// The response data contained in this message
+ pub data: SignedResponse
+}
+
+#[derive(Serialize, Deserialize)]
+/// `SignedResponse` contains a response message and a signature to validate.
+pub struct SignedResponse {
+ /// The API version - always 1
+ pub version: i32,
+ #[serde(with = "Base64Standard")]
+ /// The Base64-encoded message signed inside this message
+    pub message: Vec<u8>,
+ #[serde(with = "Base64Standard")]
+ /// The ed25519 signature over the `message`
+    pub signature: Vec<u8>
+}
+
+#[derive(Serialize, Deserialize)]
+/// `CheckForUpdateResponseWrapper` contains a response to `CheckForUpdate` inside `data`.
+pub struct CheckForUpdateResponseWrapper {
+ /// The response data contained in this message
+ pub data: CheckForUpdateResponse
+}
+
+#[derive(Serialize, Deserialize)]
+/// `CheckForUpdateResponse` is the response generated for a `CheckForUpdate` request.
+pub struct CheckForUpdateResponse {
+ #[serde(rename = "updateAvailable")]
+ /// Set to true if a config update is available
+ pub update_available: bool
+}
+
+#[derive(Serialize, Deserialize)]
+/// `DoUpdateRequest` is the request sent for a `DoUpdate` request.
+pub struct DoUpdateRequest {
+ #[serde(rename = "edPubkeyPEM")]
+ #[serde(with = "Base64Standard")]
+ /// The new ed25519 public key that should be used for future API requests
+    pub ed_pubkey_pem: Vec<u8>,
+ #[serde(rename = "dhPubkeyPEM")]
+ #[serde(with = "Base64Standard")]
+ /// The new ECDH public key that the Nebula certificate should be signed for
+    pub dh_pubkey_pem: Vec<u8>,
+ #[serde(with = "Base64Standard")]
+ /// A randomized value used to uniquely identify this request.
+ /// The original client uses a randomized, 16-byte value here, which dnapi-rs replicates
+    pub nonce: Vec<u8>
+}
+
+#[derive(Serialize, Deserialize)]
+/// A server response to a `DoUpdateRequest`, with the updated config and key information
+pub struct DoUpdateResponse {
+ #[serde(with = "Base64Standard")]
+    /// The base64-encoded Nebula config. It does **NOT** contain a private key, which must be inserted explicitly before Nebula can be run
+    pub config: Vec<u8>,
+    /// The new config counter. Its exact purpose is unclear, but the original client keeps track of it and it is used in later API calls
+ pub counter: u32,
+ #[serde(with = "Base64Standard")]
+ /// The same base64-encoded nonce that was sent in the `DoUpdateRequest`.
+    pub nonce: Vec<u8>,
+ #[serde(rename = "trustedKeys")]
+ #[serde(with = "Base64Standard")]
+ /// A new set of trusted ed25519 keys that can be used by the server to sign messages.
+    pub trusted_keys: Vec<u8>
+}
+
+/// The REST enrollment endpoint
+pub const ENROLL_ENDPOINT: &str = "/v2/enroll";
+
+#[derive(Serialize, Deserialize)]
+/// `EnrollRequest` is issued to the `ENROLL_ENDPOINT` to enroll this `dnclient` with a dnapi organization
+pub struct EnrollRequest {
+ /// The enrollment code given by the API server.
+ pub code: String,
+ #[serde(rename = "dhPubkey")]
+ #[serde(with = "Base64Standard")]
+ /// The ECDH public-key that should be used to sign the Nebula certificate given to this node.
+    pub dh_pubkey: Vec<u8>,
+ #[serde(rename = "edPubkey")]
+ #[serde(with = "Base64Standard")]
+ /// The Ed25519 public-key that this node will use to sign messages sent to the API.
+    pub ed_pubkey: Vec<u8>,
+ /// The timestamp of when this request was sent. Follows the format `%Y-%m-%dT%H:%M:%S.%f%:z`, or:
+    /// `<4-digit year>-<2-digit month>-<2-digit day>T<2-digit hour>:<2-digit minute>:<2-digit second>.<nanoseconds><UTC offset>`
+    /// For example:
+    /// `2023-03-29T09:56:42.380006369-04:00`
+    /// would represent `March 29th, 2023, 09:56:42.380006369, UTC-4`
+ pub timestamp: String
+}
+
+
+#[derive(Serialize, Deserialize)]
+#[serde(untagged)]
+/// The response to an `EnrollRequest`
+pub enum EnrollResponse {
+ /// A successful enrollment, with a `data` field pointing to an `EnrollResponseData`
+ Success {
+ /// The response data from this response
+ data: EnrollResponseData
+ },
+ /// An unsuccessful enrollment, with an `errors` field pointing to an array of `APIError`s.
+ Error {
+        /// A list of `APIError`s that happened while trying to enroll. `APIErrors` is a type alias to `Vec<APIError>`
+ errors: APIErrors
+ }
+}
+
+#[derive(Serialize, Deserialize)]
+/// The data included in an successful enrollment.
+pub struct EnrollResponseData {
+ #[serde(with = "Base64Standard")]
+    /// The base64-encoded Nebula config. It does **NOT** contain a private key, which must be inserted explicitly before Nebula can be run
+    pub config: Vec<u8>,
+ #[serde(rename = "hostID")]
+ /// The server-side Host ID that this node now has.
+ pub host_id: String,
+    /// The new config counter. Its exact purpose is unclear, but the original client keeps track of it and it is used in later API calls
+ pub counter: u32,
+ #[serde(rename = "trustedKeys")]
+ #[serde(with = "Base64Standard")]
+ /// A new set of trusted ed25519 keys that can be used by the server to sign messages.
+    pub trusted_keys: Vec<u8>,
+ /// The organization data that this node is now a part of
+ pub organization: EnrollResponseDataOrg
+}
+
+#[derive(Serialize, Deserialize)]
+/// The organization data that this node is now a part of
+pub struct EnrollResponseDataOrg {
+ /// The organization ID that this node is now a part of
+ pub id: String,
+ /// The name of the organization that this node is now a part of
+ pub name: String
+}
+
+#[derive(Serialize, Deserialize)]
+/// `APIError` represents a single error returned in an API error response.
+pub struct APIError {
+ /// The error code
+ pub code: String,
+ /// The human-readable error message
+ pub message: String,
+ /// An optional path to where the error occured
+    pub path: Option<String>
+}
+
+/// A type alias to an array of `APIError`s. Kept for parity with dnapi.
+pub type APIErrors = Vec<APIError>;
\ No newline at end of file
diff --git a/tfclient/BUILDING.md b/tfclient/BUILDING.md
new file mode 100644
index 0000000..410347c
--- /dev/null
+++ b/tfclient/BUILDING.md
@@ -0,0 +1,33 @@
+tfclient has a more involved build process than the rest of trifid because it embeds the distribution `nebula` and `nebula-cert` binaries.
+
+## Clearing the Nebula binary cache
+
+Compiling tfclient occasionally takes longer than usual because the build system downloads the latest version of Nebula.
+Once this has been done, the generated code is cached in the target directory. If you want to force a redownload (e.g. if Nebula has been updated), you can:
+
+- run `cargo clean`, or
+- delete `target/debug/build/tfclient-[HASH]/out/noredownload`, or
+- set the environment variable `TFBUILD_FORCE_REDOWNLOAD=1`
+
+The third option is best, as it will not significantly increase compile times *or* require you to find the build hash.
+
+## Set a GitHub API key
+
+Since the build process makes calls to the GitHub API, you may get rate limited.
+To increase your rate limit, set `GH_API_KEY` to a GitHub API token.
+
+## Unsupported architecture :(
+
+If you're compiling on (or for) an architecture that tfclient currently does not support, you will get the following message:
+
+```text
+[*] Compiling for target: SOME-TARGET-TRIPLET-HERE
+This architecture is not supported yet :(
+Nebula has a limited set of architectures it is able to function on.
+tfclient can only be compiled on these architectures.
+See https://github.com/slackhq/nebula/releases for a list of supported architectures.
+Is your system supported by Nebula? Shoot a message to the mailing list. Include the target triplet (above) in your response, as well as a link to the functioning Nebula binary. We will happily add your machine to the list!
+```
+
+The `tfclient` build system checks your target triplet to determine whether your architecture is supported.
+If this is a false alarm, and a build of nebula is available and fully functional on your machine, please shoot a message to the mailing list with your target triplet and a link to the nebula build that works on your machine. We will update the build script and the repo!
diff --git a/tfclient/Cargo.toml b/tfclient/Cargo.toml
index 04938fe..e58450c 100644
--- a/tfclient/Cargo.toml
+++ b/tfclient/Cargo.toml
@@ -1,8 +1,41 @@
[package]
name = "tfclient"
-version = "0.1.0"
+version = "0.1.1"
edition = "2021"
+description = "An open-source reimplementation of a Defined Networking-compatible client"
+license = "GPL-3.0-or-later"
+documentation = "https://man.e3t.cc/~core/trifid-docs"
+homepage = "https://man.e3t.cc/~core/trifid-docs"
+repository = "https://git.e3t.cc/~core/trifid"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
+clap = { version = "4.1.10", features = ["derive"] }
+trifid-pki = { version = "0.1.8", path = "../trifid-pki", features = ["serde_derive"] }
+dirs = "5.0.0"
+log = "0.4.17"
+simple_logger = "4.1.0"
+sha2 = "0.10.6"
+hex = "0.4.3"
+url = "2.3.1"
+toml = "0.7.3"
+serde = { version = "1.0.158", features = ["derive"] }
+serde_json = "1.0.94"
+ctrlc = "3.2.5"
+reqwest = { version = "0.11.16", features = ["blocking"] }
+base64 = "0.21.0"
+chrono = "0.4.24"
+ipnet = "2.7.1"
+base64-serde = "0.7.0"
+dnapi-rs = { version = "0.1.7", path = "../dnapi-rs" }
+serde_yaml = "0.9.19"
+
+[build-dependencies]
+serde = { version = "1.0.157", features = ["derive"] }
+reqwest = { version = "0.11.14", features = ["blocking", "json"] }
+flate2 = "1.0.25"
+tar = "0.4.38"
+hex = "0.4.3"
+tempfile = "3.4.0"
+sha2 = "0.10.6"
\ No newline at end of file
diff --git a/tfclient/README.md b/tfclient/README.md
new file mode 100644
index 0000000..6848163
--- /dev/null
+++ b/tfclient/README.md
@@ -0,0 +1,5 @@
+# tfclient
+
+tfclient is an open-source Rust client for the Defined Networking Management protocol. It embeds a Nebula binary for running the actual config, and uses [dnapi-rs](https://docs.rs/dnapi-rs) for making API calls against the Defined Networking API.
+
+tfclient is part of the trifid project. Check out the other crates [here!](https://git.e3t.cc/~core/trifid).
\ No newline at end of file
diff --git a/tfclient/build.rs b/tfclient/build.rs
new file mode 100644
index 0000000..b1138e6
--- /dev/null
+++ b/tfclient/build.rs
@@ -0,0 +1,178 @@
+use std::fs;
+use std::fs::{File, remove_file};
+use std::io::{Read, Write};
+use std::os::unix::fs::PermissionsExt;
+use std::path::Path;
+use std::process::{Command, Output};
+use flate2::read::GzDecoder;
+use reqwest::blocking::Response;
+use reqwest::header::HeaderMap;
+use tar::Archive;
+
+
+#[derive(serde::Deserialize, Debug)]
+struct GithubRelease {
+ name: String,
+ assets: Vec<GithubReleaseAsset>
+}
+
+#[derive(serde::Deserialize, Debug)]
+struct GithubUser {}
+
+#[derive(serde::Deserialize, Debug)]
+struct GithubReleaseAsset {
+ browser_download_url: String,
+ name: String,
+ size: i64
+}
+
+fn main() {
+ if Path::new(&format!("{}/{}", std::env::var("OUT_DIR").unwrap(), "noredownload")).exists() && std::env::var("TFBUILD_FORCE_REDOWNLOAD").is_err() {
+ println!("noredownload exists and TFBUILD_FORCE_REDOWNLOAD is not set. Not redoing build process.");
+ return;
+ }
+
+ println!("[*] Fetching nebula releaseinfo...");
+
+ let mut headers = HeaderMap::new();
+ let mut has_api_key = false;
+
+ if let Ok(api_key) = std::env::var("GH_API_KEY") {
+ headers.insert("Authorization", format!("Bearer {}", api_key).parse().unwrap());
+ has_api_key = true;
+ }
+
+ let client = reqwest::blocking::Client::builder().user_agent("curl/7.57.1").default_headers(headers).build().unwrap();
+
+ let resp: Response = client.get("https://api.github.com/repos/slackhq/nebula/releases/latest").send().unwrap();
+
+ if resp.headers().get("X-Ratelimit-Remaining").unwrap().to_str().unwrap() == "0" {
+ println!("You've been ratelimited from the GitHub API. Wait a while (1 hour)");
+ if !has_api_key {
+ println!("You can also set a GitHub API key with the environment variable GH_API_KEY, which will increase your ratelimit ( a lot )");
+ }
+ panic!("Ratelimited");
+ }
+
+
+ let release: GithubRelease = resp.json().unwrap();
+
+ println!("[*] Fetching target triplet...");
+ let target = std::env::var("TARGET").unwrap();
+
+ println!("[*] Compiling for target {}", target);
+
+ let target_file = match target.as_str() {
+ "x86_64-apple-darwin" | "aarch64-apple-darwin" => "nebula-darwin",
+ "x86_64-unknown-freebsd" => "nebula-freebsd-amd64",
+ "x86_64-unknown-linux-gnu" => "nebula-linux-amd64",
+ "armv5te-unknown-linux-gnueabi" => "nebula-linux-arm-5",
+ "arm-unknown-linux-gnueabi" | "arm-unknown-linux-gnueabihf" => "nebula-linux-arm-6",
+ "armv7-unknown-linux-gnueabihf" | "armv7-unknown-linux-gnueabi" => "nebula-linux-arm-7",
+ "aarch64-unknown-linux-gnu" => "nebula-linux-arm64",
+ "x86_64-pc-windows-msvc" => "nebula-windows-amd64",
+ "aarch64-pc-windows-msvc" => "nebula-windows-arm64",
+ _ => {
+ println!("This architecture is not supported yet :(");
+ println!("Nebula has a limited set of architectures it is able to function on.");
+ println!("tfclient can only be compiled on these architectures.");
+ println!("See https://github.com/slackhq/nebula/releases for a list of supported architectures");
+ println!("Is your system supported by Nebula? Shoot a message to the mailing list. Include the target triplet (above) in your response, as well as a link to the functioning Nebula binary. We will happily add your machine to the list!");
+ panic!("Unsupported architecture");
+ }
+ };
+
+ println!("[*] Embedding {} {}", target_file, release.name);
+
+ let download = release.assets.iter().find(|r| r.name == format!("{}.tar.gz", target_file)).expect("That architecture isn't available :(");
+
+ println!("[*] Downloading {}.tar.gz ({}, {} bytes) from {}", target_file, target, download.size, download.browser_download_url);
+
+ let response = reqwest::blocking::get(&download.browser_download_url).unwrap();
+ let content = response.bytes().unwrap().to_vec();
+ let bytes = content.as_slice();
+
+ let tar = GzDecoder::new(bytes);
+ let mut archive = Archive::new(tar);
+ let entries = archive.entries().unwrap();
+
+ let mut nebula_bin = vec![];
+ let mut nebula_cert_bin = vec![];
+ let mut shasum = vec![];
+
+ for entry in entries {
+ let mut entry = entry.unwrap();
+ if entry.path().unwrap() == Path::new("nebula") || entry.path().unwrap() == Path::new("nebula.exe") {
+ nebula_bin.reserve(entry.size() as usize);
+ entry.read_to_end(&mut nebula_bin).unwrap();
+ } else if entry.path().unwrap() == Path::new("nebula-cert") || entry.path().unwrap() == Path::new("nebula-cert.exe") {
+ nebula_cert_bin.reserve(entry.size() as usize);
+ entry.read_to_end(&mut nebula_cert_bin).unwrap();
+ } else if entry.path().unwrap() == Path::new("SHASUM256.txt") {
+ shasum.reserve(entry.size() as usize);
+ entry.read_to_end(&mut shasum).unwrap();
+ }
+ }
+
+ if nebula_bin.is_empty() {
+ panic!("[x] Release did not contain nebula binary");
+ }
+ if nebula_cert_bin.is_empty() {
+ panic!("[x] Release did not contain nebula_cert binary");
+ }
+
+ let mut nebula_file = File::create(format!("{}/nebula.bin", std::env::var("OUT_DIR").unwrap())).unwrap();
+ nebula_file.write_all(&nebula_bin).unwrap();
+
+ codegen_version(&nebula_bin, "nebula.bin", "NEBULA");
+
+ let mut nebula_cert_file = File::create(format!("{}/nebula_cert.bin", std::env::var("OUT_DIR").unwrap())).unwrap();
+ nebula_cert_file.write_all(&nebula_cert_bin).unwrap();
+
+ codegen_version(&nebula_cert_bin, "nebula_cert.bin", "NEBULA_CERT");
+
+ // Indicate to cargo and ourselves that we have already downloaded and codegenned
+ File::create(format!("{}/{}", std::env::var("OUT_DIR").unwrap(), "noredownload")).unwrap();
+ println!("cargo:rerun-if-changed=build.rs");
+}
+
+fn codegen_version(bin: &[u8], fp: &str, name: &str) {
+ // get version
+ let output = execim(bin, &vec!["-version"]);
+ let stdout = output.stdout;
+ let stdout_str = String::from_utf8(stdout).unwrap();
+ if !stdout_str.starts_with("Version: ") {
+ panic!("Binary did not have expected version output. Unable to get version info.");
+ }
+ let mut version = stdout_str.split(' ').collect::<Vec<&str>>()[1].to_string();
+ version.pop();
+
+ let code = format!("// This code was automatically @generated by build.rs. It should not be modified.\npub const {}_BIN: &[u8] = include_bytes!(concat!(env!(\"OUT_DIR\"), \"/{}\"));\npub const {}_VERSION: &str = \"{}\";", name, fp, name, version);
+
+ let mut file = File::create(format!("{}/{}.rs", std::env::var("OUT_DIR").unwrap(), fp)).unwrap();
+ file.write_all(code.as_bytes()).unwrap();
+}
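+
+// For reference, the generated file (e.g. nebula.bin.rs) comes out roughly like the
+// following; the version string is an illustrative value:
+//
+// // This code was automatically @generated by build.rs. It should not be modified.
+// pub const NEBULA_BIN: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/nebula.bin"));
+// pub const NEBULA_VERSION: &str = "1.7.2";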
+
+#[cfg(not(unix))]
+fn execim(buf: &[u8], args: &Vec<&str>) -> Output {
+ let mut file = File::create("tmpexec.bin").unwrap();
+ file.write_all(buf).unwrap();
+ std::mem::drop(file);
+ let output = Command::new("./tmpexec.bin").args(args).output().unwrap();
+ remove_file("./tmpexec.bin").unwrap();
+ output
+}
+
+#[cfg(unix)]
+fn execim(buf: &[u8], args: &Vec<&str>) -> Output {
+ let mut file = File::create("tmpexec.bin").unwrap();
+ file.write_all(buf).unwrap();
+ let metadata = file.metadata().unwrap();
+ let mut permissions = metadata.permissions();
+ permissions.set_mode(0o0755);
+ fs::set_permissions("./tmpexec.bin", permissions).unwrap();
+ std::mem::drop(file);
+ let output = Command::new("./tmpexec.bin").args(args).output().unwrap();
+ remove_file("./tmpexec.bin").unwrap();
+ output
+}
\ No newline at end of file
diff --git a/tfclient/src/apiworker.rs b/tfclient/src/apiworker.rs
new file mode 100644
index 0000000..4246e93
--- /dev/null
+++ b/tfclient/src/apiworker.rs
@@ -0,0 +1,218 @@
+use std::fs;
+use std::sync::mpsc::{Receiver, TryRecvError};
+
+
+use log::{error, info, warn};
+use url::Url;
+use dnapi_rs::client_blocking::Client;
+
+
+
+
+use crate::config::{load_cdata, save_cdata, TFClientConfig};
+use crate::daemon::ThreadMessageSender;
+use crate::dirs::get_nebulaconfig_file;
+use crate::nebulaworker::NebulaWorkerMessage;
+
+pub enum APIWorkerMessage {
+ Shutdown,
+ Enroll { code: String },
+ Timer
+}
+
+pub fn apiworker_main(_config: TFClientConfig, instance: String, url: String, tx: ThreadMessageSender, rx: Receiver<APIWorkerMessage>) {
+ let server = Url::parse(&url).unwrap();
+
+ let client = Client::new(format!("tfclient/{}", env!("CARGO_PKG_VERSION")), server).unwrap();
+
+ loop {
+ match rx.try_recv() {
+ Ok(msg) => {
+ match msg {
+ APIWorkerMessage::Shutdown => {
+ info!("recv on command socket: shutdown, stopping");
+ break;
+ },
+ APIWorkerMessage::Timer => {
+ info!("updating config");
+ let mut cdata = match load_cdata(&instance) {
+ Ok(c) => c,
+ Err(e) => {
+ error!("error in api worker thread: {}", e);
+ error!("APIWorker exiting with error");
+ return;
+ }
+ };
+ if cdata.creds.is_none() {
+ info!("not enrolled, cannot perform config update");
+ match save_cdata(&instance, cdata) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error saving cdata: {}", e);
+ error!("APIWorker exiting with error");
+ return;
+ }
+ }
+ continue;
+ }
+ let creds = cdata.clone().creds.unwrap_or_else(|| unreachable!());
+
+ info!("checking for update");
+ let update_available = match client.check_for_update(&creds) {
+ Ok(ua) => ua,
+ Err(e) => {
+ error!("error checking for config update: {}", e);
+ match save_cdata(&instance, cdata) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error saving cdata: {}", e);
+ error!("APIWorker exiting with error");
+ return;
+ }
+ }
+ continue;
+ }
+ };
+
+ if !update_available {
+ match save_cdata(&instance, cdata) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error saving cdata: {}", e);
+ error!("APIWorker exiting with error");
+ return;
+ }
+ }
+ info!("no config update available");
+ continue;
+ }
+
+ info!("updated configuration is avaliable");
+ info!("updating configuration");
+
+ let (config, dh_privkey, creds) = match client.do_update(&creds) {
+ Ok(d) => d,
+ Err(e) => {
+ error!("error requesting updating config: {}", e);
+ match save_cdata(&instance, cdata) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error saving cdata: {}", e);
+ error!("APIWorker exiting with error");
+ return;
+ }
+ }
+ continue;
+ }
+ };
+
+ cdata.creds = Some(creds);
+ cdata.dh_privkey = Some(dh_privkey.try_into().expect("32 != 32"));
+
+ match fs::write(get_nebulaconfig_file(&instance).expect("Unable to determine nebula config file location"), config) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("unable to save nebula config: {}", e);
+ match save_cdata(&instance, cdata) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error saving cdata: {}", e);
+ error!("APIWorker exiting with error");
+ return;
+ }
+ }
+ continue;
+ }
+ }
+
+ match save_cdata(&instance, cdata) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error saving cdata: {}", e);
+ error!("APIWorker exiting with error");
+ return;
+ }
+ }
+
+ info!("configuration updated successfully!");
+ info!("sending signal to nebula thread to reload config");
+
+ match tx.nebula_thread.send(NebulaWorkerMessage::ConfigUpdated) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("unable to tell nebula thread to update config: {}", e);
+ error!("APIWorker exiting with error");
+ return;
+ }
+ }
+ },
+ APIWorkerMessage::Enroll { code } => {
+ info!("recv on command socket: enroll {}", code);
+ let mut cdata = match load_cdata(&instance) {
+ Ok(c) => c,
+ Err(e) => {
+ error!("error in api worker thread: {}", e);
+ error!("APIWorker exiting with error");
+ return;
+ }
+ };
+ if cdata.creds.is_some() {
+ warn!("enrollment failed: already enrolled");
+ continue;
+ }
+
+ let (config, dh_privkey, creds, meta) = match client.enroll(&code) {
+ Ok(resp) => resp,
+ Err(e) => {
+ error!("error with enrollment request: {}", e);
+ continue;
+ }
+ };
+
+ match fs::write(get_nebulaconfig_file(&instance).expect("Unable to determine nebula config file location"), config) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("unable to save nebula config: {}", e);
+ continue;
+ }
+ }
+
+ cdata.creds = Some(creds);
+ cdata.dh_privkey = Some(dh_privkey.try_into().expect("32 != 32"));
+ cdata.meta = Some(meta);
+
+ // Save vardata
+ match save_cdata(&instance, cdata) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error saving cdata: {}", e);
+ error!("APIWorker exiting with error");
+ return;
+ }
+ }
+
+ info!("Configuration updated. Sending signal to Nebula worker thread");
+
+ match tx.nebula_thread.send(NebulaWorkerMessage::ConfigUpdated) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("unable to tell nebula thread to update config: {}", e);
+ error!("APIWorker exiting with error");
+ return;
+ }
+ }
+ }
+ }
+ },
+ Err(e) => {
+ match e {
+ TryRecvError::Empty => {}
+ TryRecvError::Disconnected => {
+ error!("apiworker command socket disconnected, shutting down to prevent orphaning");
+ break;
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/tfclient/src/config.rs b/tfclient/src/config.rs
new file mode 100644
index 0000000..517a09b
--- /dev/null
+++ b/tfclient/src/config.rs
@@ -0,0 +1,534 @@
+use std::collections::HashMap;
+use std::error::Error;
+use std::fs;
+use std::net::{Ipv4Addr, SocketAddrV4};
+use ipnet::{IpNet, Ipv4Net};
+
+
+use log::{debug, info};
+use serde::{Deserialize, Serialize};
+use dnapi_rs::client_blocking::EnrollMeta;
+use dnapi_rs::credentials::Credentials;
+
+use crate::dirs::{get_cdata_dir, get_cdata_file, get_config_dir, get_config_file};
+
+pub const DEFAULT_PORT: u16 = 8157;
+fn default_port() -> u16 { DEFAULT_PORT }
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct TFClientConfig {
+ #[serde(default = "default_port")]
+ pub listen_port: u16
+}
+
+#[derive(Serialize, Deserialize, Clone)]
+pub struct TFClientData {
+ pub dh_privkey: Option<Vec<u8>>,
+ pub creds: Option<Credentials>,
+ pub meta: Option<EnrollMeta>
+}
+
+pub fn create_config(instance: &str) -> Result<(), Box<dyn Error>> {
+ info!("Creating config directory...");
+ fs::create_dir_all(get_config_dir(instance).ok_or("Unable to load config dir")?)?;
+ info!("Copying default config file to config directory...");
+ let config = TFClientConfig {
+ listen_port: DEFAULT_PORT
+ };
+ let config_str = toml::to_string(&config)?;
+ fs::write(get_config_file(instance).ok_or("Unable to load config dir")?, config_str)?;
+ Ok(())
+}
+
+pub fn load_config(instance: &str) -> Result<TFClientConfig, Box<dyn Error>> {
+ info!("Loading config...");
+ let config_file = get_config_file(instance).ok_or("Unable to load config dir")?;
+
+ if !config_file.exists() {
+ create_config(instance)?;
+ }
+
+ debug!("opening {}", config_file.as_path().display());
+ let config_str = fs::read_to_string(config_file)?;
+ debug!("parsing config file");
+ let config: TFClientConfig = toml::from_str(&config_str)?;
+ info!("Loaded config successfully");
+ Ok(config)
+}
+
+pub fn create_cdata(instance: &str) -> Result<(), Box<dyn Error>> {
+ info!("Creating data directory...");
+ fs::create_dir_all(get_cdata_dir(instance).ok_or("Unable to load data dir")?)?;
+ info!("Copying default data file to config directory...");
+ let config = TFClientData { dh_privkey: None, creds: None, meta: None };
+ let config_str = toml::to_string(&config)?;
+ fs::write(get_cdata_file(instance).ok_or("Unable to load data dir")?, config_str)?;
+ Ok(())
+}
+
+pub fn load_cdata(instance: &str) -> Result<TFClientData, Box<dyn Error>> {
+ info!("Loading cdata...");
+ let config_file = get_cdata_file(instance).ok_or("Unable to load cdata dir")?;
+
+ if !config_file.exists() {
+ create_cdata(instance)?;
+ }
+
+ debug!("opening {}", config_file.as_path().display());
+ let config_str = fs::read_to_string(config_file)?;
+ debug!("parsing cdata file");
+ let config: TFClientData = toml::from_str(&config_str)?;
+ info!("Loaded cdata successfully");
+ Ok(config)
+}
+
+pub fn save_cdata(instance: &str, data: TFClientData) -> Result<(), Box<dyn Error>> {
+ info!("Saving cdata...");
+ let config_file = get_cdata_file(instance).ok_or("Unable to load cdata dir")?;
+
+ if !config_file.exists() {
+ create_cdata(instance)?;
+ }
+
+ debug!("serializing cdata file");
+ let config: String = toml::to_string(&data)?;
+ debug!("writing to {}", config_file.as_path().display());
+ fs::write(config_file, config)?;
+ info!("Saved cdata successfully");
+ Ok(())
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfig {
+ pub pki: NebulaConfigPki,
+ #[serde(default = "empty_hashmap")]
+ #[serde(skip_serializing_if = "is_empty_hashmap")]
+ pub static_host_map: HashMap<Ipv4Addr, Vec<SocketAddrV4>>,
+ #[serde(skip_serializing_if = "is_none")]
+ pub lighthouse: Option<NebulaConfigLighthouse>,
+ #[serde(skip_serializing_if = "is_none")]
+ pub listen: Option<NebulaConfigListen>,
+ #[serde(skip_serializing_if = "is_none")]
+ pub punchy: Option<NebulaConfigPunchy>,
+ #[serde(default = "cipher_aes")]
+ #[serde(skip_serializing_if = "is_cipher_aes")]
+ pub cipher: NebulaConfigCipher,
+ #[serde(default = "empty_vec")]
+ #[serde(skip_serializing_if = "is_empty_vec")]
+ pub preferred_ranges: Vec<IpNet>,
+ #[serde(skip_serializing_if = "is_none")]
+ pub relay: Option<NebulaConfigRelay>,
+ #[serde(skip_serializing_if = "is_none")]
+ pub tun: Option<NebulaConfigTun>,
+ #[serde(skip_serializing_if = "is_none")]
+ pub logging: Option<NebulaConfigLogging>,
+ #[serde(skip_serializing_if = "is_none")]
+ pub sshd: Option<NebulaConfigSshd>,
+
+ #[serde(skip_serializing_if = "is_none")]
+ pub firewall: Option<NebulaConfigFirewall>,
+
+ #[serde(default = "u64_1")]
+ #[serde(skip_serializing_if = "is_u64_1")]
+ pub routines: u64,
+
+ #[serde(default = "none")]
+ #[serde(skip_serializing_if = "is_none")]
+ pub stats: Option<NebulaConfigStats>,
+
+ #[serde(default = "none")]
+ #[serde(skip_serializing_if = "is_none")]
+ pub local_range: Option<Ipv4Net>
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigPki {
+ pub ca: String,
+ pub cert: String,
+ #[serde(default = "none")]
+ #[serde(skip_serializing_if = "is_none")]
+ pub key: Option<String>,
+ #[serde(default = "empty_vec")]
+ #[serde(skip_serializing_if = "is_empty_vec")]
+ pub blocklist: Vec<String>,
+ #[serde(default = "bool_false")]
+ #[serde(skip_serializing_if = "is_bool_false")]
+ pub disconnect_invalid: bool
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigLighthouse {
+ #[serde(default = "bool_false")]
+ #[serde(skip_serializing_if = "is_bool_false")]
+ pub am_lighthouse: bool,
+ #[serde(default = "bool_false")]
+ #[serde(skip_serializing_if = "is_bool_false")]
+ pub serve_dns: bool,
+ #[serde(skip_serializing_if = "is_none")]
+ pub dns: Option<NebulaConfigLighthouseDns>,
+ #[serde(default = "u32_10")]
+ #[serde(skip_serializing_if = "is_u32_10")]
+ pub interval: u32,
+ #[serde(default = "empty_vec")]
+ #[serde(skip_serializing_if = "is_empty_vec")]
+ pub hosts: Vec<Ipv4Addr>,
+ #[serde(default = "empty_hashmap")]
+ #[serde(skip_serializing_if = "is_empty_hashmap")]
+ pub remote_allow_list: HashMap<Ipv4Net, bool>,
+ #[serde(default = "empty_hashmap")]
+ #[serde(skip_serializing_if = "is_empty_hashmap")]
+ pub local_allow_list: HashMap<Ipv4Net, bool>, // `interfaces` is not supported
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigLighthouseDns {
+ #[serde(default = "string_empty")]
+ #[serde(skip_serializing_if = "is_string_empty")]
+ pub host: String,
+ #[serde(default = "u16_53")]
+ #[serde(skip_serializing_if = "is_u16_53")]
+ pub port: u16
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigListen {
+ #[serde(default = "string_empty")]
+ #[serde(skip_serializing_if = "is_string_empty")]
+ pub host: String,
+ #[serde(default = "u16_0")]
+ #[serde(skip_serializing_if = "is_u16_0")]
+ pub port: u16,
+ #[serde(default = "u32_64")]
+ #[serde(skip_serializing_if = "is_u32_64")]
+ pub batch: u32,
+ #[serde(skip_serializing_if = "is_none")]
+ pub read_buffer: Option<u32>,
+ #[serde(skip_serializing_if = "is_none")]
+ pub write_buffer: Option<u32>
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigPunchy {
+ #[serde(default = "bool_false")]
+ #[serde(skip_serializing_if = "is_bool_false")]
+ pub punch: bool,
+ #[serde(default = "bool_false")]
+ #[serde(skip_serializing_if = "is_bool_false")]
+ pub respond: bool,
+ #[serde(default = "string_1s")]
+ #[serde(skip_serializing_if = "is_string_1s")]
+ pub delay: String
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum NebulaConfigCipher {
+ #[serde(rename = "aes")]
+ Aes,
+ #[serde(rename = "chachapoly")]
+ ChaChaPoly
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigRelay {
+ #[serde(default = "empty_vec")]
+ #[serde(skip_serializing_if = "is_empty_vec")]
+ pub relays: Vec<Ipv4Addr>,
+ #[serde(default = "bool_false")]
+ #[serde(skip_serializing_if = "is_bool_false")]
+ pub am_relay: bool,
+ #[serde(default = "bool_true")]
+ #[serde(skip_serializing_if = "is_bool_true")]
+ pub use_relays: bool
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigTun {
+ #[serde(default = "bool_false")]
+ #[serde(skip_serializing_if = "is_bool_false")]
+ pub disabled: bool,
+ #[serde(skip_serializing_if = "is_none")]
+ pub dev: Option<String>,
+ #[serde(default = "bool_false")]
+ #[serde(skip_serializing_if = "is_bool_false")]
+ pub drop_local_broadcast: bool,
+ #[serde(default = "bool_false")]
+ #[serde(skip_serializing_if = "is_bool_false")]
+ pub drop_multicast: bool,
+ #[serde(default = "u64_500")]
+ #[serde(skip_serializing_if = "is_u64_500")]
+ pub tx_queue: u64,
+ #[serde(default = "u64_1300")]
+ #[serde(skip_serializing_if = "is_u64_1300")]
+ pub mtu: u64,
+ #[serde(default = "empty_vec")]
+ #[serde(skip_serializing_if = "is_empty_vec")]
+ pub routes: Vec<NebulaConfigTunRouteOverride>,
+ #[serde(default = "empty_vec")]
+ #[serde(skip_serializing_if = "is_empty_vec")]
+ pub unsafe_routes: Vec<NebulaConfigTunUnsafeRoute>
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigTunRouteOverride {
+ pub mtu: u64,
+ pub route: Ipv4Net
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigTunUnsafeRoute {
+ pub route: Ipv4Net,
+ pub via: Ipv4Addr,
+ #[serde(default = "u64_1300")]
+ #[serde(skip_serializing_if = "is_u64_1300")]
+ pub mtu: u64,
+ #[serde(default = "i64_100")]
+ #[serde(skip_serializing_if = "is_i64_100")]
+ pub metric: i64
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigLogging {
+ #[serde(default = "loglevel_info")]
+ #[serde(skip_serializing_if = "is_loglevel_info")]
+ pub level: NebulaConfigLoggingLevel,
+ #[serde(default = "format_text")]
+ #[serde(skip_serializing_if = "is_format_text")]
+ pub format: NebulaConfigLoggingFormat,
+ #[serde(default = "bool_false")]
+ #[serde(skip_serializing_if = "is_bool_false")]
+ pub disable_timestamp: bool,
+ #[serde(default = "timestamp")]
+ #[serde(skip_serializing_if = "is_timestamp")]
+ pub timestamp_format: String
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum NebulaConfigLoggingLevel {
+ #[serde(rename = "panic")]
+ Panic,
+ #[serde(rename = "fatal")]
+ Fatal,
+ #[serde(rename = "error")]
+ Error,
+ #[serde(rename = "warning")]
+ Warning,
+ #[serde(rename = "info")]
+ Info,
+ #[serde(rename = "debug")]
+ Debug
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum NebulaConfigLoggingFormat {
+ #[serde(rename = "json")]
+ Json,
+ #[serde(rename = "text")]
+ Text
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigSshd {
+ #[serde(default = "bool_false")]
+ #[serde(skip_serializing_if = "is_bool_false")]
+ pub enabled: bool,
+ pub listen: SocketAddrV4,
+ pub host_key: String,
+ #[serde(default = "empty_vec")]
+ #[serde(skip_serializing_if = "is_empty_vec")]
+ pub authorized_users: Vec<NebulaConfigSshdAuthorizedUser>
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigSshdAuthorizedUser {
+ pub user: String,
+ #[serde(default = "empty_vec")]
+ #[serde(skip_serializing_if = "is_empty_vec")]
+ pub keys: Vec<String>
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+#[serde(tag = "type")]
+pub enum NebulaConfigStats {
+ #[serde(rename = "graphite")]
+ Graphite(NebulaConfigStatsGraphite),
+ #[serde(rename = "prometheus")]
+ Prometheus(NebulaConfigStatsPrometheus)
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigStatsGraphite {
+ #[serde(default = "string_nebula")]
+ #[serde(skip_serializing_if = "is_string_nebula")]
+ pub prefix: String,
+ #[serde(default = "protocol_tcp")]
+ #[serde(skip_serializing_if = "is_protocol_tcp")]
+ pub protocol: NebulaConfigStatsGraphiteProtocol,
+ pub host: SocketAddrV4,
+ pub interval: String,
+ #[serde(default = "bool_false")]
+ #[serde(skip_serializing_if = "is_bool_false")]
+ pub message_metrics: bool,
+ #[serde(default = "bool_false")]
+ #[serde(skip_serializing_if = "is_bool_false")]
+ pub lighthouse_metrics: bool
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum NebulaConfigStatsGraphiteProtocol {
+ #[serde(rename = "tcp")]
+ Tcp,
+ #[serde(rename = "udp")]
+ Udp
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigStatsPrometheus {
+ pub listen: String,
+ pub path: String,
+ #[serde(default = "string_nebula")]
+ #[serde(skip_serializing_if = "is_string_nebula")]
+ pub namespace: String,
+ #[serde(default = "string_nebula")]
+ #[serde(skip_serializing_if = "is_string_nebula")]
+ pub subsystem: String,
+ pub interval: String,
+ #[serde(default = "bool_false")]
+ #[serde(skip_serializing_if = "is_bool_false")]
+ pub message_metrics: bool,
+ #[serde(default = "bool_false")]
+ #[serde(skip_serializing_if = "is_bool_false")]
+ pub lighthouse_metrics: bool
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigFirewall {
+ #[serde(default = "none")]
+ #[serde(skip_serializing_if = "is_none")]
+ pub conntrack: Option<NebulaConfigFirewallConntrack>,
+
+ #[serde(default = "none")]
+ #[serde(skip_serializing_if = "is_none")]
+ pub inbound: Option<Vec<NebulaConfigFirewallRule>>,
+
+ #[serde(default = "none")]
+ #[serde(skip_serializing_if = "is_none")]
+ pub outbound: Option<Vec<NebulaConfigFirewallRule>>,
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigFirewallConntrack {
+ #[serde(default = "string_12m")]
+ #[serde(skip_serializing_if = "is_string_12m")]
+ pub tcp_timeout: String,
+ #[serde(default = "string_3m")]
+ #[serde(skip_serializing_if = "is_string_3m")]
+ pub udp_timeout: String,
+ #[serde(default = "string_10m")]
+ #[serde(skip_serializing_if = "is_string_10m")]
+ pub default_timeout: String
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct NebulaConfigFirewallRule {
+ #[serde(default = "none")]
+ #[serde(skip_serializing_if = "is_none")]
+ pub port: Option<String>,
+ #[serde(default = "none")]
+ #[serde(skip_serializing_if = "is_none")]
+ pub proto: Option<String>,
+ #[serde(default = "none")]
+ #[serde(skip_serializing_if = "is_none")]
+ pub ca_name: Option<String>,
+ #[serde(default = "none")]
+ #[serde(skip_serializing_if = "is_none")]
+ pub ca_sha: Option<String>,
+ #[serde(default = "none")]
+ #[serde(skip_serializing_if = "is_none")]
+ pub host: Option<String>,
+ #[serde(default = "none")]
+ #[serde(skip_serializing_if = "is_none")]
+ pub group: Option<String>,
+ #[serde(default = "none")]
+ #[serde(skip_serializing_if = "is_none")]
+ pub groups: Option<Vec<String>>,
+ #[serde(default = "none")]
+ #[serde(skip_serializing_if = "is_none")]
+ pub cidr: Option<Ipv4Net>
+}
+
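+// Illustrative sketch (not shipped code): the defaults below let a minimal Nebula
+// YAML round-trip through these structs, e.g.:
+//
+// fn _defaults_example() -> Result<(), Box<dyn Error>> {
+//     let yaml = "pki:\n  ca: /etc/nebula/ca.crt\n  cert: /etc/nebula/host.crt\n";
+//     let cfg: NebulaConfig = serde_yaml::from_str(yaml)?;
+//     assert!(matches!(cfg.cipher, NebulaConfigCipher::Aes)); // default applied
+//     assert_eq!(cfg.routines, 1);                            // default applied
+//     // defaulted/empty fields are skipped again on serialization
+//     let roundtrip = serde_yaml::to_string(&cfg)?;
+//     assert!(roundtrip.contains("pki"));
+//     Ok(())
+// }
+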
+// Default values for serde
+
+fn string_12m() -> String { "12m".to_string() }
+fn is_string_12m(s: &str) -> bool { s == "12m" }
+
+fn string_3m() -> String { "3m".to_string() }
+fn is_string_3m(s: &str) -> bool { s == "3m" }
+
+fn string_10m() -> String { "10m".to_string() }
+fn is_string_10m(s: &str) -> bool { s == "10m" }
+
+fn empty_vec<T>() -> Vec<T> { vec![] }
+fn is_empty_vec<T>(v: &Vec<T>) -> bool { v.is_empty() }
+
+fn empty_hashmap<A, B>() -> HashMap<A, B> { HashMap::new() }
+fn is_empty_hashmap<A, B>(h: &HashMap<A, B>) -> bool { h.is_empty() }
+
+fn bool_false() -> bool { false }
+fn is_bool_false(b: &bool) -> bool { !*b }
+
+fn bool_true() -> bool { true }
+fn is_bool_true(b: &bool) -> bool { *b }
+
+fn u16_53() -> u16 { 53 }
+fn is_u16_53(u: &u16) -> bool { *u == 53 }
+
+fn u32_10() -> u32 { 10 }
+fn is_u32_10(u: &u32) -> bool { *u == 10 }
+
+fn ipv4_0000() -> Ipv4Addr { Ipv4Addr::new(0, 0, 0, 0) }
+fn is_ipv4_0000(i: &Ipv4Addr) -> bool { *i == ipv4_0000() }
+
+fn u16_0() -> u16 { 0 }
+fn is_u16_0(u: &u16) -> bool { *u == 0 }
+
+fn u32_64() -> u32 { 64 }
+fn is_u32_64(u: &u32) -> bool { *u == 64 }
+
+fn string_1s() -> String { "1s".to_string() }
+fn is_string_1s(s: &str) -> bool { s == "1s" }
+
+fn cipher_aes() -> NebulaConfigCipher { NebulaConfigCipher::Aes }
+fn is_cipher_aes(c: &NebulaConfigCipher) -> bool { matches!(c, NebulaConfigCipher::Aes) }
+
+fn u64_500() -> u64 { 500 }
+fn is_u64_500(u: &u64) -> bool { *u == 500 }
+
+fn u64_1300() -> u64 { 1300 }
+fn is_u64_1300(u: &u64) -> bool { *u == 1300 }
+
+fn i64_100() -> i64 { 100 }
+fn is_i64_100(i: &i64) -> bool { *i == 100 }
+
+fn loglevel_info() -> NebulaConfigLoggingLevel { NebulaConfigLoggingLevel::Info }
+fn is_loglevel_info(l: &NebulaConfigLoggingLevel) -> bool { matches!(l, NebulaConfigLoggingLevel::Info) }
+
+fn format_text() -> NebulaConfigLoggingFormat { NebulaConfigLoggingFormat::Text }
+fn is_format_text(f: &NebulaConfigLoggingFormat) -> bool { matches!(f, NebulaConfigLoggingFormat::Text) }
+
+fn timestamp() -> String { "2006-01-02T15:04:05Z07:00".to_string() }
+fn is_timestamp(s: &str) -> bool { s == "2006-01-02T15:04:05Z07:00" }
+
+fn u64_1() -> u64 { 1 }
+fn is_u64_1(u: &u64) -> bool { *u == 1 }
+
+fn string_nebula() -> String { "nebula".to_string() }
+fn is_string_nebula(s: &str) -> bool { s == "nebula" }
+
+fn string_empty() -> String { String::new() }
+fn is_string_empty(s: &str) -> bool { s == "" }
+
+fn protocol_tcp() -> NebulaConfigStatsGraphiteProtocol { NebulaConfigStatsGraphiteProtocol::Tcp }
+fn is_protocol_tcp(p: &NebulaConfigStatsGraphiteProtocol) -> bool { matches!(p, NebulaConfigStatsGraphiteProtocol::Tcp) }
+
+fn none<T>() -> Option<T> { None }
+fn is_none<T>(o: &Option<T>) -> bool { o.is_none() }
\ No newline at end of file
diff --git a/tfclient/src/daemon.rs b/tfclient/src/daemon.rs
new file mode 100644
index 0000000..6f59054
--- /dev/null
+++ b/tfclient/src/daemon.rs
@@ -0,0 +1,158 @@
+use std::sync::mpsc;
+use std::sync::mpsc::Sender;
+use std::thread;
+use log::{error, info};
+
+use crate::apiworker::{apiworker_main, APIWorkerMessage};
+use crate::config::load_config;
+
+use crate::nebulaworker::{nebulaworker_main, NebulaWorkerMessage};
+use crate::socketworker::{socketworker_main, SocketWorkerMessage};
+use crate::timerworker::{timer_main, TimerWorkerMessage};
+use crate::util::check_server_url;
+
+pub fn daemon_main(name: String, server: String) {
+ // Validate the `server`
+ check_server_url(&server);
+
+ info!("Loading config...");
+ let config = match load_config(&name) {
+ Ok(cfg) => cfg,
+ Err(e) => {
+ error!("Error loading configuration: {}", e);
+ std::process::exit(1);
+ }
+ };
+
+ info!("Creating transmitter");
+
+ let (tx_api, rx_api) = mpsc::channel::<APIWorkerMessage>();
+ let (tx_socket, rx_socket) = mpsc::channel::<SocketWorkerMessage>();
+ let (tx_nebula, rx_nebula) = mpsc::channel::<NebulaWorkerMessage>();
+ let (tx_timer, rx_timer) = mpsc::channel::<TimerWorkerMessage>();
+
+ let transmitter = ThreadMessageSender {
+ socket_thread: tx_socket,
+ api_thread: tx_api,
+ nebula_thread: tx_nebula,
+ timer_thread: tx_timer,
+ };
+
+ let mainthread_transmitter = transmitter.clone();
+
+ info!("Setting signal trap...");
+
+ match ctrlc::set_handler(move || {
+ info!("Ctrl-C detected. Stopping threads...");
+ match mainthread_transmitter.nebula_thread.send(NebulaWorkerMessage::Shutdown) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error sending shutdown message to nebula worker thread: {}", e);
+ }
+ }
+ match mainthread_transmitter.api_thread.send(APIWorkerMessage::Shutdown) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error sending shutdown message to api worker thread: {}", e);
+ }
+ }
+ match mainthread_transmitter.socket_thread.send(SocketWorkerMessage::Shutdown) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error sending shutdown message to socket worker thread: {}", e);
+ }
+ }
+ match mainthread_transmitter.timer_thread.send(TimerWorkerMessage::Shutdown) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error sending shutdown message to timer worker thread: {}", e);
+ }
+ }
+ }) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Unable to set sigtrap: {}", e);
+ std::process::exit(1);
+ }
+ }
+
+ info!("Starting API thread...");
+
+ let config_api = config.clone();
+ let transmitter_api = transmitter.clone();
+ let name_api = name.clone();
+ let server_api = server.clone();
+ let api_thread = thread::spawn(move || {
+ apiworker_main(config_api, name_api, server_api, transmitter_api, rx_api);
+ });
+
+ info!("Starting Nebula thread...");
+ let config_nebula = config.clone();
+ let transmitter_nebula = transmitter.clone();
+ let name_nebula = name.clone();
+ let nebula_thread = thread::spawn(move || {
+ nebulaworker_main(config_nebula, name_nebula, transmitter_nebula, rx_nebula);
+ });
+
+ info!("Starting timer thread...");
+ let timer_transmitter = transmitter.clone();
+ let timer_thread = thread::spawn(move || {
+ timer_main(timer_transmitter, rx_timer);
+ });
+
+ info!("Starting socket worker thread...");
+ let name_socket = name.clone();
+ let socket_thread = thread::spawn(move || {
+ socketworker_main(config, name_socket, transmitter, rx_socket);
+ });
+
+ info!("Waiting for socket thread to exit...");
+ match socket_thread.join() {
+ Ok(_) => (),
+ Err(_) => {
+ error!("Error waiting for socket thread to exit.");
+ std::process::exit(1);
+ }
+ }
+ info!("Socket thread exited");
+
+ info!("Waiting for API thread to exit...");
+ match api_thread.join() {
+ Ok(_) => (),
+ Err(_) => {
+ error!("Error waiting for api thread to exit.");
+ std::process::exit(1);
+ }
+ }
+ info!("API thread exited");
+
+ info!("Waiting for timer thread to exit...");
+ match timer_thread.join() {
+ Ok(_) => (),
+ Err(_) => {
+ error!("Error waiting for timer thread to exit.");
+ std::process::exit(1);
+ }
+ }
+ info!("Timer thread exited");
+
+ info!("Waiting for Nebula thread to exit...");
+ match nebula_thread.join() {
+ Ok(_) => (),
+ Err(_) => {
+ error!("Error waiting for nebula thread to exit.");
+ std::process::exit(1);
+ }
+ }
+ info!("Nebula thread exited");
+
+ info!("All threads exited");
+}
+
+#[derive(Clone)]
+pub struct ThreadMessageSender {
+ pub socket_thread: Sender<SocketWorkerMessage>,
+ pub api_thread: Sender<APIWorkerMessage>,
+ pub nebula_thread: Sender<NebulaWorkerMessage>,
+ pub timer_thread: Sender<TimerWorkerMessage>
+}
\ No newline at end of file
diff --git a/tfclient/src/dirs.rs b/tfclient/src/dirs.rs
new file mode 100644
index 0000000..0010aa1
--- /dev/null
+++ b/tfclient/src/dirs.rs
@@ -0,0 +1,25 @@
+use std::path::PathBuf;
+
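+// For orientation (an illustration; actual paths come from the `dirs` crate and
+// differ per platform), on a typical Linux system these resolve to:
+//
+//   get_data_dir()                    -> ~/.local/share/tfclient/
+//   get_config_file("tfclient")       -> ~/.config/tfclient/tfclient/tfclient.toml
+//   get_cdata_file("tfclient")        -> ~/.config/tfclient_data/tfclient/tfclient.toml
+//   get_nebulaconfig_file("tfclient") -> ~/.config/tfclient_data/tfclient/nebula.sk_embedded.yml
+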
+pub fn get_data_dir() -> Option<PathBuf> {
+ dirs::data_dir().map(|f| f.join("tfclient/"))
+}
+
+pub fn get_config_dir(instance: &str) -> Option<PathBuf> {
+ dirs::config_dir().map(|f| f.join("tfclient/").join(format!("{}/", instance)))
+}
+
+pub fn get_config_file(instance: &str) -> Option<PathBuf> {
+ get_config_dir(instance).map(|f| f.join("tfclient.toml"))
+}
+
+pub fn get_cdata_dir(instance: &str) -> Option<PathBuf> {
+ dirs::config_dir().map(|f| f.join("tfclient_data/").join(format!("{}/", instance)))
+}
+
+pub fn get_cdata_file(instance: &str) -> Option<PathBuf> {
+ get_cdata_dir(instance).map(|f| f.join("tfclient.toml"))
+}
+
+pub fn get_nebulaconfig_file(instance: &str) -> Option<PathBuf> {
+ get_cdata_dir(instance).map(|f| f.join("nebula.sk_embedded.yml"))
+}
\ No newline at end of file
diff --git a/tfclient/src/embedded_nebula.rs b/tfclient/src/embedded_nebula.rs
new file mode 100644
index 0000000..76b6ffc
--- /dev/null
+++ b/tfclient/src/embedded_nebula.rs
@@ -0,0 +1,104 @@
+use std::error::Error;
+use std::fs;
+use std::fs::File;
+use std::io::Write;
+use std::os::unix::fs::PermissionsExt;
+use std::path::PathBuf;
+use std::process::{Child, Command};
+use log::debug;
+use crate::dirs::get_data_dir;
+use crate::util::sha256;
+
+pub fn extract_embedded_nebula() -> Result<PathBuf, Box<dyn Error>> {
+ let data_dir = get_data_dir().ok_or("Unable to get platform-specific data dir")?;
+ if !data_dir.exists() {
+ fs::create_dir_all(&data_dir)?;
+ debug!("Created data directory {}", data_dir.as_path().display());
+ }
+
+ let bin_dir = data_dir.join("cache/");
+ let hash_dir = bin_dir.join(format!("{}/", sha256(crate::nebula_bin::NEBULA_BIN)));
+
+ if !hash_dir.exists() {
+ fs::create_dir_all(&hash_dir)?;
+ debug!("Created directory {}", hash_dir.as_path().display());
+ }
+
+ let executable_postfix = if cfg!(windows) { ".exe" } else { "" };
+ let executable_name = format!("nebula-{}{}", crate::nebula_bin::NEBULA_VERSION, executable_postfix);
+
+ let file_path = hash_dir.join(executable_name);
+
+ if file_path.exists() {
+ // Already extracted
+ return Ok(file_path);
+ }
+ let mut file = File::create(&file_path)?;
+ file.write_all(crate::nebula_bin::NEBULA_BIN)?;
+
+ debug!("Extracted nebula to {}", file_path.as_path().display());
+
+ Ok(file_path)
+}
+
+pub fn extract_embedded_nebula_cert() -> Result<PathBuf, Box<dyn Error>> {
+ let data_dir = get_data_dir().ok_or("Unable to get platform-specific data dir")?;
+ if !data_dir.exists() {
+ fs::create_dir_all(&data_dir)?;
+ debug!("Created data directory {}", data_dir.as_path().display());
+ }
+
+ let bin_dir = data_dir.join("cache/");
+ let hash_dir = bin_dir.join(format!("{}/", sha256(crate::nebula_cert_bin::NEBULA_CERT_BIN)));
+
+ if !hash_dir.exists() {
+ fs::create_dir_all(&hash_dir)?;
+ debug!("Created directory {}", hash_dir.as_path().display());
+ }
+
+ let executable_postfix = if cfg!(windows) { ".exe" } else { "" };
+ let executable_name = format!("nebula-cert-{}{}", crate::nebula_cert_bin::NEBULA_CERT_VERSION, executable_postfix);
+
+ let file_path = hash_dir.join(executable_name);
+
+ if file_path.exists() {
+ // Already extracted
+ return Ok(file_path);
+ }
+
+ let mut file = File::create(&file_path)?;
+ file.write_all(crate::nebula_cert_bin::NEBULA_CERT_BIN)?;
+
+ debug!("Extracted nebula-cert to {}", file_path.as_path().display());
+
+ Ok(file_path)
+}
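+
+// Illustrative cache layout (the version string is an example): extracted binaries
+// land under the platform data dir, keyed by the SHA-256 of the embedded bytes, e.g.
+//   ~/.local/share/tfclient/cache/<sha256-of-nebula>/nebula-1.7.2
+//   ~/.local/share/tfclient/cache/<sha256-of-nebula-cert>/nebula-cert-1.7.2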
+
+#[cfg(unix)]
+pub fn _setup_permissions(path: &PathBuf) -> Result<(), Box<dyn Error>> {
+ let meta = path.metadata()?;
+ let mut perms = meta.permissions();
+ perms.set_mode(0o0755);
+ debug!("Setting permissions on {} to 755", path.as_path().display());
+ fs::set_permissions(path, perms)?;
+ Ok(())
+}
+
+#[cfg(windows)]
+pub fn _setup_permissions(_path: &PathBuf) -> Result<(), Box<dyn Error>> {
+ Ok(())
+}
+
+pub fn run_embedded_nebula(args: &[String]) -> Result<Child, Box<dyn Error>> {
+ let path = extract_embedded_nebula()?;
+ debug!("Running {} with args {:?}", path.as_path().display(), args);
+ _setup_permissions(&path)?;
+ Ok(Command::new(path).args(args).spawn()?)
+}
+
+pub fn run_embedded_nebula_cert(args: &[String]) -> Result<Child, Box<dyn Error>> {
+ let path = extract_embedded_nebula_cert()?;
+ debug!("Running {} with args {:?}", path.as_path().display(), args);
+ _setup_permissions(&path)?;
+ Ok(Command::new(path).args(args).spawn()?)
+}
\ No newline at end of file
diff --git a/tfclient/src/main.rs b/tfclient/src/main.rs
index 82c783f..4c0adfe 100644
--- a/tfclient/src/main.rs
+++ b/tfclient/src/main.rs
@@ -14,6 +14,243 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
-fn main() {
- println!("Hello, world!");
+pub mod embedded_nebula;
+pub mod dirs;
+pub mod util;
+pub mod nebulaworker;
+pub mod daemon;
+pub mod config;
+pub mod service;
+pub mod apiworker;
+pub mod socketworker;
+pub mod socketclient;
+pub mod timerworker;
+
+pub mod nebula_bin {
+ include!(concat!(env!("OUT_DIR"), "/nebula.bin.rs"));
}
+pub mod nebula_cert_bin {
+ include!(concat!(env!("OUT_DIR"), "/nebula_cert.bin.rs"));
+}
+
+
+use std::fs;
+
+use clap::{Parser, ArgAction, Subcommand};
+use log::{error, info};
+use simple_logger::SimpleLogger;
+use crate::config::load_config;
+use crate::dirs::get_data_dir;
+use crate::embedded_nebula::{run_embedded_nebula, run_embedded_nebula_cert};
+use crate::service::entry::{cli_install, cli_start, cli_stop, cli_uninstall};
+
+#[derive(Parser)]
+#[command(author = "c0repwn3r", version, about, long_about = None)]
+#[clap(disable_version_flag = true)]
+struct Cli {
+ #[arg(short = 'v', long = "version", action = ArgAction::SetTrue)]
+ #[clap(global = true)]
+ /// Print the tfclient version, as well as the trifid-pki version and the version of the embedded nebula and nebula-cert binaries
+ version: bool,
+
+ #[command(subcommand)]
+ subcommand: Commands
+}
+
+#[derive(Subcommand)]
+enum Commands {
+ /// Run the `nebula` binary. This is useful if you want to do debugging with tfclient's internal nebula.
+ RunNebula {
+ /// Arguments to pass to the `nebula` binary
+ #[clap(trailing_var_arg=true, allow_hyphen_values=true)]
+ args: Vec<String>
+ },
+ /// Run the `nebula-cert` binary. This is useful if you want to mess with certificates. Note: tfclient does not actually use nebula-cert for certificate operations, and instead uses trifid-pki internally
+ RunNebulaCert {
+ /// Arguments to pass to the `nebula-cert` binary
+ #[clap(trailing_var_arg=true, allow_hyphen_values=true)]
+ args: Vec<String>
+ },
+ /// Clear any cached data that tfclient may have added
+ ClearCache {},
+
+ /// Install the tfclient system service
+ Install {
+ #[clap(short, long, default_value = "tfclient")]
+ /// Optional service name used to run multiple tfclient instances. Specify the same name on all other cli sub-commands (start, uninstall, etc.) to refer to this installed instance
+ name: String,
+ #[clap(short, long)]
+ /// Server to use for API calls.
+ server: String
+ },
+
+ /// Uninstall the tfclient system service
+ Uninstall {
+ #[clap(short, long, default_value = "tfclient")]
+ /// Service name specified on install
+ name: String
+ },
+
+ /// Start the tfclient system service
+ Start {
+ #[clap(short, long, default_value = "tfclient")]
+ /// Service name specified on install
+ name: String
+ },
+
+ /// Stop the tfclient system service
+ Stop {
+ #[clap(short, long, default_value = "tfclient")]
+ /// Service name specified on start
+ name: String
+ },
+
+ /// Run the tfclient daemon in the foreground
+ Run {
+ #[clap(short, long, default_value = "tfclient")]
+ /// Service name specified on install
+ name: String,
+ #[clap(short, long)]
+ /// Server to use for API calls.
+ server: String
+ },
+
+ /// Enroll this host using a trifid-api enrollment code
+ Enroll {
+ #[clap(short, long, default_value = "tfclient")]
+ /// Service name specified on install
+ name: String,
+ #[clap(short, long)]
+ /// Enrollment code used to enroll this node
+ code: String,
+ }
+}
+
+fn main() {
+ SimpleLogger::new().init().unwrap();
+
+ let args = Cli::parse();
+
+ if args.version {
+ print_version();
+ }
+
+ match args.subcommand {
+ Commands::RunNebula { args } => {
+ match run_embedded_nebula(&args) {
+ Ok(mut c) => {
+ match c.wait() {
+ Ok(stat) => {
+ match stat.code() {
+ Some(code) => {
+ if code != 0 {
+ error!("Nebula process exited with nonzero status code {}", code);
+ }
+ std::process::exit(code);
+ },
+ None => {
+ info!("Nebula process terminated by signal");
+ std::process::exit(0);
+ }
+ }
+ },
+ Err(e) => {
+ error!("Unable to wait for child to exit: {}", e);
+ std::process::exit(1);
+ }
+ }
+ },
+ Err(e) => {
+ error!("Unable to start nebula binary: {}", e);
+ std::process::exit(1);
+ }
+ }
+ },
+ Commands::ClearCache { .. } => {
+ let data_dir = match get_data_dir() {
+ Some(dir) => dir,
+ None => {
+ error!("Unable to get platform-specific data dir");
+ std::process::exit(1);
+ }
+ };
+ match fs::remove_dir_all(&data_dir) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Unable to delete data dir: {}", e);
+ std::process::exit(1);
+ }
+ }
+ info!("Removed data dir {}", data_dir.as_path().display());
+
+ info!("Removed all cached data.");
+ std::process::exit(0);
+ },
+ Commands::RunNebulaCert { args } => {
+ match run_embedded_nebula_cert(&args) {
+ Ok(mut c) => {
+ match c.wait() {
+ Ok(stat) => {
+ match stat.code() {
+ Some(code) => {
+ if code != 0 {
+ error!("nebula-cert process exited with nonzero status code {}", code);
+ }
+ std::process::exit(code);
+ },
+ None => {
+ info!("nebula-cert process terminated by signal");
+ std::process::exit(0);
+ }
+ }
+ },
+ Err(e) => {
+ error!("Unable to wait for child to exit: {}", e);
+ std::process::exit(1);
+ }
+ }
+ },
+ Err(e) => {
+ error!("Unable to start nebula-cert binary: {}", e);
+ std::process::exit(1);
+ }
+ }
+ }
+ Commands::Install { server, name } => {
+ cli_install(&name, &server);
+ }
+ Commands::Uninstall { name } => {
+ cli_uninstall(&name);
+ }
+ Commands::Start { name } => {
+ cli_start(&name);
+ }
+ Commands::Stop { name } => {
+ cli_stop(&name);
+ }
+ Commands::Run { name, server } => {
+ daemon::daemon_main(name, server);
+ }
+ Commands::Enroll { name, code } => {
+ info!("Loading config...");
+ let config = match load_config(&name) {
+ Ok(cfg) => cfg,
+ Err(e) => {
+ error!("Error loading configuration: {}", e);
+ std::process::exit(1);
+ }
+ };
+ match socketclient::enroll(&code, &config) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error sending enrollment request: {}", e);
+ std::process::exit(1);
+ }
+ };
+ }
+ }
+}
+
+fn print_version() {
+ println!("tfclient v{} linked to trifid-pki v{}, embedding nebula v{} and nebula-cert v{}", env!("CARGO_PKG_VERSION"), trifid_pki::TRIFID_PKI_VERSION, crate::nebula_bin::NEBULA_VERSION, crate::nebula_cert_bin::NEBULA_CERT_VERSION);
+}
\ No newline at end of file
diff --git a/tfclient/src/nebulaworker.rs b/tfclient/src/nebulaworker.rs
new file mode 100644
index 0000000..6678670
--- /dev/null
+++ b/tfclient/src/nebulaworker.rs
@@ -0,0 +1,153 @@
+// Code to handle the nebula worker
+
+use std::error::Error;
+use std::fs;
+use std::sync::mpsc::{Receiver, TryRecvError};
+use std::time::{Duration, SystemTime};
+use log::{debug, error, info};
+use crate::config::{load_cdata, NebulaConfig, TFClientConfig};
+use crate::daemon::ThreadMessageSender;
+use crate::dirs::get_nebulaconfig_file;
+use crate::embedded_nebula::run_embedded_nebula;
+
+pub enum NebulaWorkerMessage {
+ Shutdown,
+ ConfigUpdated
+}
+
+fn insert_private_key(instance: &str) -> Result<(), Box<dyn Error>> {
+ if !get_nebulaconfig_file(instance).ok_or("Could not get config file location")?.exists() {
+ return Ok(()); // can't insert a private key into a file that doesn't exist - but we can gracefully handle nebula crashing, whereas we cannot gracefully handle this fn failing
+ }
+ let cdata = load_cdata(instance)?;
+ let key = cdata.dh_privkey.ok_or("Missing private key")?;
+
+ let config_str = fs::read_to_string(get_nebulaconfig_file(instance).ok_or("Could not get config file location")?)?;
+ let mut config: NebulaConfig = serde_yaml::from_str(&config_str)?;
+
+ config.pki.key = Some(String::from_utf8(key)?);
+
+ debug!("inserted private key into config: {:?}", config);
+
+ let config_str = serde_yaml::to_string(&config)?;
+ fs::write(get_nebulaconfig_file(instance).ok_or("Could not get config file location")?, config_str)?;
+
+ Ok(())
+}
+
+pub fn nebulaworker_main(_config: TFClientConfig, instance: String, _transmitter: ThreadMessageSender, rx: Receiver<NebulaWorkerMessage>) {
+ let _cdata = match load_cdata(&instance) {
+ Ok(data) => data,
+ Err(e) => {
+ error!("unable to load cdata: {}", e);
+ error!("nebula thread exiting with error");
+ return;
+ }
+ };
+
+ info!("fixing config...");
+ match insert_private_key(&instance) {
+ Ok(_) => {
+ info!("config fixed (private-key embedded)");
+ },
+ Err(e) => {
+ error!("unable to fix config: {}", e);
+ error!("nebula thread exiting with error");
+ return;
+ }
+ }
+ info!("starting nebula child...");
+ let mut child = match run_embedded_nebula(&["-config".to_string(), get_nebulaconfig_file(&instance).unwrap().to_str().unwrap().to_string()]) {
+ Ok(c) => c,
+ Err(e) => {
+ error!("unable to start embedded nebula binary: {}", e);
+ error!("nebula thread exiting with error");
+ return;
+ }
+ };
+ info!("nebula process started");
+
+ let mut last_restart_time = SystemTime::now();
+
+ // dont need to save it, because we do not, in any circumstance, write to it
+ loop {
+ if let Ok(e) = child.try_wait() {
+ if e.is_some() && SystemTime::now() > last_restart_time + Duration::from_secs(5) {
+ info!("nebula process has exited, restarting");
+ child = match run_embedded_nebula(&["-config".to_string(), get_nebulaconfig_file(&instance).unwrap().to_str().unwrap().to_string()]) {
+ Ok(c) => c,
+ Err(e) => {
+ error!("unable to start embedded nebula binary: {}", e);
+ error!("nebula thread exiting with error");
+ return;
+ }
+ };
+ info!("nebula process started");
+ last_restart_time = SystemTime::now();
+ }
+ }
+ match rx.try_recv() {
+ Ok(msg) => {
+ match msg {
+ NebulaWorkerMessage::Shutdown => {
+ info!("recv on command socket: shutdown, stopping");
+ info!("shutting down nebula binary");
+ match child.kill() {
+ Ok(_) => {
+ debug!("nebula process exited");
+ },
+ Err(e) => {
+ error!("nebula process already exited: {}", e);
+ }
+ }
+ info!("nebula shut down");
+ break;
+ },
+ NebulaWorkerMessage::ConfigUpdated => {
+ info!("our configuration has been updated - restarting");
+ debug!("killing existing process");
+ match child.kill() {
+ Ok(_) => {
+ debug!("nebula process exited");
+ },
+ Err(e) => {
+ error!("nebula process already exited: {}", e);
+ }
+ }
+ debug!("fixing config...");
+ match insert_private_key(&instance) {
+ Ok(_) => {
+ debug!("config fixed (private-key embedded)");
+ },
+ Err(e) => {
+ error!("unable to fix config: {}", e);
+ error!("nebula thread exiting with error");
+ return;
+ }
+ }
+ debug!("restarting nebula process");
+ child = match run_embedded_nebula(&["-config".to_string(), get_nebulaconfig_file(&instance).unwrap().to_str().unwrap().to_string()]) {
+ Ok(c) => c,
+ Err(e) => {
+ error!("unable to start embedded nebula binary: {}", e);
+ error!("nebula thread exiting with error");
+ return;
+ }
+ };
+ last_restart_time = SystemTime::now();
+ debug!("nebula process restarted");
+ }
+ }
+ },
+ Err(e) => {
+ match e {
+ TryRecvError::Empty => {}
+ TryRecvError::Disconnected => {
+ error!("nebulaworker command socket disconnected, shutting down to prevent orphaning");
+ break;
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/tfclient/src/service/codegen/mod.rs b/tfclient/src/service/codegen/mod.rs
new file mode 100644
index 0000000..a570ad9
--- /dev/null
+++ b/tfclient/src/service/codegen/mod.rs
@@ -0,0 +1,9 @@
+pub mod systemd;
+
+use std::error::Error;
+use std::path::PathBuf;
+
+pub trait ServiceFileGenerator {
+ fn create_service_files(bin_path: PathBuf, name: &str, server: &str) -> Result<(), Box<dyn Error>>;
+ fn delete_service_files(name: &str) -> Result<(), Box<dyn Error>>;
+}
\ No newline at end of file
diff --git a/tfclient/src/service/codegen/systemd.rs b/tfclient/src/service/codegen/systemd.rs
new file mode 100644
index 0000000..b05db5d
--- /dev/null
+++ b/tfclient/src/service/codegen/systemd.rs
@@ -0,0 +1,48 @@
+use std::error::Error;
+use std::path::PathBuf;
+use log::debug;
+use crate::service::codegen::ServiceFileGenerator;
+use std::fmt::Write;
+use std::fs;
+
+pub struct SystemDServiceFileGenerator {}
+impl ServiceFileGenerator for SystemDServiceFileGenerator {
+ fn create_service_files(bin_path: PathBuf, name: &str, server: &str) -> Result<(), Box<dyn Error>> {
+ debug!("Generating a unit file...");
+
+ let mut unit_file = String::new();
+ writeln!(unit_file, "[Unit]")?;
+ writeln!(unit_file, "Description=A client for Defined Networking compatible overlay networks (instance {})", name)?;
+ writeln!(unit_file, "Wants=basic.target network-online.target")?;
+ writeln!(unit_file, "After=basic.target network.target network-online.target")?;
+ writeln!(unit_file)?;
+ writeln!(unit_file, "[Service]")?;
+ writeln!(unit_file, "SyslogIdentifier=tfclient-{}", name)?;
+ writeln!(unit_file, "ExecStart={} run --server {} --name {}", bin_path.as_path().display(), server, name)?;
+ writeln!(unit_file, "Restart=always")?;
+ writeln!(unit_file)?;
+ writeln!(unit_file, "[Install]")?;
+ writeln!(unit_file, "WantedBy=multi-user.target")?;
+
+ fs::write(format!("/usr/lib/systemd/system/{}.service", SystemDServiceFileGenerator::get_service_file_name(name)), unit_file)?;
+
+ debug!("Installed unit file");
+
+ Ok(())
+ }
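+
+ // For reference, an illustrative rendering of the unit generated above for
+ // `name = "tfclient"` and a hypothetical binary path and server URL, written to
+ // /usr/lib/systemd/system/tfclient_i-tfclient.service:
+ //
+ // [Unit]
+ // Description=A client for Defined Networking compatible overlay networks (instance tfclient)
+ // Wants=basic.target network-online.target
+ // After=basic.target network.target network-online.target
+ //
+ // [Service]
+ // SyslogIdentifier=tfclient-tfclient
+ // ExecStart=/usr/local/bin/tfclient run --server https://example.com/ --name tfclient
+ // Restart=always
+ //
+ // [Install]
+ // WantedBy=multi-user.target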
+
+ fn delete_service_files(name: &str) -> Result<(), Box<dyn Error>> {
+ debug!("Deleting unit file...");
+
+ fs::remove_file(format!("/usr/lib/systemd/system/{}.service", SystemDServiceFileGenerator::get_service_file_name(name)))?;
+
+ debug!("Removed unit file");
+
+ Ok(())
+ }
+}
+impl SystemDServiceFileGenerator {
+ pub fn get_service_file_name(name: &str) -> String {
+ format!("tfclient_i-{}", name)
+ }
+}
\ No newline at end of file
diff --git a/tfclient/src/service/detect.rs b/tfclient/src/service/detect.rs
new file mode 100644
index 0000000..42a76a4
--- /dev/null
+++ b/tfclient/src/service/detect.rs
@@ -0,0 +1,29 @@
+use std::path::Path;
+use log::info;
+use crate::service::macos::OSXServiceManager;
+use crate::service::runit::RunitServiceManager;
+use crate::service::ServiceManager;
+use crate::service::systemd::SystemDServiceManager;
+use crate::service::windows::WindowsServiceManager;
+
+pub fn detect_service() -> Option<Box<dyn ServiceManager>> {
+ if cfg!(windows) {
+ return Some(Box::new(WindowsServiceManager {}));
+ }
+ if cfg!(target_os = "macos") {
+ return Some(Box::new(OSXServiceManager {}));
+ }
+ detect_unix_service_manager()
+}
+
+pub fn detect_unix_service_manager() -> Option<Box<dyn ServiceManager>> {
+ if Path::new("/etc/runit/1").exists() {
+ info!("Detected Runit service supervision (confidence: 100%, /etc/runit/1 exists)");
+ return Some(Box::new(RunitServiceManager {}))
+ }
+ if Path::new("/var/lib/systemd").exists() {
+ info!("Detected SystemD service supervision (confidence: 100%, /var/lib/systemd exists)");
+ return Some(Box::new(SystemDServiceManager {}));
+ }
+ None
+}
\ No newline at end of file
diff --git a/tfclient/src/service/entry.rs b/tfclient/src/service/entry.rs
new file mode 100644
index 0000000..beff268
--- /dev/null
+++ b/tfclient/src/service/entry.rs
@@ -0,0 +1,82 @@
+use std::env::current_exe;
+use log::{error, info};
+use crate::service::detect::detect_service;
+use crate::util::check_server_url;
+
+pub fn cli_start(name: &str) {
+ info!("Detecting service manager...");
+ let service = detect_service();
+ if let Some(sm) = service {
+ match sm.start(name) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error starting service: {}", e);
+ std::process::exit(1);
+ }
+ }
+ } else {
+ error!("Unable to determine which service manager to use. Could not start.");
+ std::process::exit(1);
+ }
+}
+
+pub fn cli_stop(name: &str) {
+ info!("Detecting service manager...");
+ let service = detect_service();
+ if let Some(sm) = service {
+ match sm.stop(name) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error starting service: {}", e);
+ std::process::exit(1);
+ }
+ }
+ } else {
+ error!("Unable to determine which service manager to use. Could not stop.");
+ std::process::exit(1);
+ }
+}
+
+pub fn cli_install(name: &str, server: &str) {
+ info!("Checking server url...");
+ check_server_url(server);
+
+ info!("Detecting service manager...");
+ let service = detect_service();
+ if let Some(sm) = service {
+ let current_file = match current_exe() {
+ Ok(e) => e,
+ Err(e) => {
+ error!("Unable to get current binary: {}", e);
+ std::process::exit(1);
+ }
+ };
+ match sm.install(current_file, name, server) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error creating service files: {}", e);
+ std::process::exit(1);
+ }
+ }
+ } else {
+ error!("Unable to determine which service manager to use. Could not install.");
+ std::process::exit(1);
+ }
+}
+
+pub fn cli_uninstall(name: &str) {
+ info!("Detecting service manager...");
+ let service = detect_service();
+ if let Some(sm) = service {
+ match sm.uninstall(name) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error removing service files: {}", e);
+ std::process::exit(1);
+ }
+ }
+ } else {
+ error!("Unable to determine which service manager to use. Could not install.");
+ std::process::exit(1);
+ }
+}
\ No newline at end of file
diff --git a/tfclient/src/service/macos.rs b/tfclient/src/service/macos.rs
new file mode 100644
index 0000000..c1c7c8d
--- /dev/null
+++ b/tfclient/src/service/macos.rs
@@ -0,0 +1,22 @@
+use std::error::Error;
+use std::path::PathBuf;
+use crate::service::ServiceManager;
+
+pub struct OSXServiceManager {}
+impl ServiceManager for OSXServiceManager {
+ fn install(&self, _bin_path: PathBuf, _name: &str, _server_url: &str) -> Result<(), Box<dyn Error>> {
+ todo!()
+ }
+
+ fn uninstall(&self, _name: &str) -> Result<(), Box<dyn Error>> {
+ todo!()
+ }
+
+ fn start(&self, _name: &str) -> Result<(), Box<dyn Error>> {
+ todo!()
+ }
+
+ fn stop(&self, _name: &str) -> Result<(), Box<dyn Error>> {
+ todo!()
+ }
+}
\ No newline at end of file
diff --git a/tfclient/src/service/mod.rs b/tfclient/src/service/mod.rs
new file mode 100644
index 0000000..e00c8fb
--- /dev/null
+++ b/tfclient/src/service/mod.rs
@@ -0,0 +1,17 @@
+use std::error::Error;
+use std::path::PathBuf;
+
+pub mod codegen;
+pub mod systemd;
+pub mod detect;
+pub mod entry;
+pub mod windows;
+pub mod macos;
+pub mod runit;
+
+pub trait ServiceManager {
+ fn install(&self, bin_path: PathBuf, name: &str, server_url: &str) -> Result<(), Box<dyn Error>>;
+ fn uninstall(&self, name: &str) -> Result<(), Box<dyn Error>>;
+ fn start(&self, name: &str) -> Result<(), Box<dyn Error>>;
+ fn stop(&self, name: &str) -> Result<(), Box<dyn Error>>;
+}
\ No newline at end of file
diff --git a/tfclient/src/service/runit.rs b/tfclient/src/service/runit.rs
new file mode 100644
index 0000000..db07e08
--- /dev/null
+++ b/tfclient/src/service/runit.rs
@@ -0,0 +1,22 @@
+use std::error::Error;
+use std::path::PathBuf;
+use crate::service::ServiceManager;
+
+pub struct RunitServiceManager {}
+impl ServiceManager for RunitServiceManager {
+ fn install(&self, _bin_path: PathBuf, _name: &str, _server_url: &str) -> Result<(), Box<dyn Error>> {
+ todo!()
+ }
+
+ fn uninstall(&self, _name: &str) -> Result<(), Box<dyn Error>> {
+ todo!()
+ }
+
+ fn start(&self, _name: &str) -> Result<(), Box<dyn Error>> {
+ todo!()
+ }
+
+ fn stop(&self, _name: &str) -> Result<(), Box<dyn Error>> {
+ todo!()
+ }
+}
\ No newline at end of file
diff --git a/tfclient/src/service/systemd.rs b/tfclient/src/service/systemd.rs
new file mode 100644
index 0000000..05d3cc3
--- /dev/null
+++ b/tfclient/src/service/systemd.rs
@@ -0,0 +1,86 @@
+use std::error::Error;
+use std::path::PathBuf;
+use std::process::Command;
+use log::{error, info};
+use crate::service::codegen::ServiceFileGenerator;
+use crate::service::codegen::systemd::SystemDServiceFileGenerator;
+use crate::service::ServiceManager;
+
+pub struct SystemDServiceManager {}
+impl ServiceManager for SystemDServiceManager {
+ fn install(&self, bin_path: PathBuf, name: &str, server_url: &str) -> Result<(), Box<dyn Error>> {
+ info!("Installing for SystemD");
+
+ SystemDServiceFileGenerator::create_service_files(bin_path, name, server_url)?;
+
+ info!("Enabling the SystemD service");
+
+ let out = Command::new("systemctl").args(["enable", &SystemDServiceFileGenerator::get_service_file_name(name)]).output()?;
+ if !out.status.success() {
+ error!("Error enabling the SystemD service (command exited with non-zero exit code)");
+ error!("stdout:");
+ error!("{}", String::from_utf8(out.stdout)?);
+ error!("stderr:");
+ error!("{}", String::from_utf8(out.stderr)?);
+ return Err("Command exited with non-zero exit code".into());
+ }
+
+ info!("Installation successful");
+
+ Ok(())
+ }
+
+ fn uninstall(&self, name: &str) -> Result<(), Box<dyn Error>> {
+ info!("Uninstalling SystemD service files");
+
+ info!("Disabling the SystemD service");
+
+ let out = Command::new("systemctl").args(["disable", &SystemDServiceFileGenerator::get_service_file_name(name)]).output()?;
+ if !out.status.success() {
+ error!("Error disabling the SystemD service (command exited with non-zero exit code)");
+ error!("stdout:");
+ error!("{}", String::from_utf8(out.stdout)?);
+ error!("stderr:");
+ error!("{}", String::from_utf8(out.stderr)?);
+ return Err("Command exited with non-zero exit code".into());
+ }
+
+ info!("Removing the service files");
+
+ SystemDServiceFileGenerator::delete_service_files(name)?;
+
+ Ok(())
+ }
+
+ fn start(&self, name: &str) -> Result<(), Box<dyn Error>> {
+ info!("Starting the SystemD service");
+
+ let out = Command::new("systemctl").args(["start", &SystemDServiceFileGenerator::get_service_file_name(name)]).output()?;
+ if !out.status.success() {
+ error!("Error starting the SystemD service (command exited with non-zero exit code)");
+ error!("stdout:");
+ error!("{}", String::from_utf8(out.stdout)?);
+ error!("stderr:");
+ error!("{}", String::from_utf8(out.stderr)?);
+ return Err("Command exited with non-zero exit code".into());
+ }
+
+ Ok(())
+ }
+
+ fn stop(&self, name: &str) -> Result<(), Box<dyn Error>> {
+ info!("Stopping the SystemD service");
+
+ let out = Command::new("systemctl").args(["stop", &SystemDServiceFileGenerator::get_service_file_name(name)]).output()?;
+ if !out.status.success() {
+ error!("Error stopping the SystemD service (command exited with non-zero exit code)");
+ error!("stdout:");
+ error!("{}", String::from_utf8(out.stdout)?);
+ error!("stderr:");
+ error!("{}", String::from_utf8(out.stderr)?);
+ return Err("Command exited with non-zero exit code".into());
+ }
+
+ Ok(())
+ }
+}
\ No newline at end of file
diff --git a/tfclient/src/service/windows.rs b/tfclient/src/service/windows.rs
new file mode 100644
index 0000000..8006a06
--- /dev/null
+++ b/tfclient/src/service/windows.rs
@@ -0,0 +1,22 @@
+use std::error::Error;
+use std::path::PathBuf;
+use crate::service::ServiceManager;
+
+pub struct WindowsServiceManager {}
+impl ServiceManager for WindowsServiceManager {
+ fn install(&self, _bin_path: PathBuf, _name: &str, _server_url: &str) -> Result<(), Box<dyn Error>> {
+ todo!()
+ }
+
+ fn uninstall(&self, _name: &str) -> Result<(), Box<dyn Error>> {
+ todo!()
+ }
+
+ fn start(&self, _name: &str) -> Result<(), Box<dyn Error>> {
+ todo!()
+ }
+
+ fn stop(&self, _name: &str) -> Result<(), Box<dyn Error>> {
+ todo!()
+ }
+}
\ No newline at end of file
diff --git a/tfclient/src/socketclient.rs b/tfclient/src/socketclient.rs
new file mode 100644
index 0000000..62e0449
--- /dev/null
+++ b/tfclient/src/socketclient.rs
@@ -0,0 +1,58 @@
+use std::error::Error;
+use std::io::{BufRead, BufReader, Write};
+use std::net::{IpAddr, SocketAddr, TcpStream};
+use log::{error, info};
+use crate::config::TFClientConfig;
+use crate::socketworker::{ctob, DisconnectReason, JSON_API_VERSION, JsonMessage};
+
+pub fn enroll(code: &str, config: &TFClientConfig) -> Result<(), Box<dyn Error>> {
+ info!("Connecting to local command socket...");
+ let mut stream = TcpStream::connect(SocketAddr::new(IpAddr::from([127, 0, 0, 1]), config.listen_port))?;
+ let stream2 = stream.try_clone()?;
+ let mut reader = BufReader::new(&stream2);
+
+ info!("Sending Hello...");
+ stream.write_all(&ctob(JsonMessage::Hello {
+ version: JSON_API_VERSION,
+ }))?;
+ info!("Waiting for hello...");
+ let msg = read_msg(&mut reader)?;
+ match msg {
+ JsonMessage::Hello { .. } => {
+ info!("Server sent hello, connection established")
+ }
+ JsonMessage::Goodbye { reason } => {
+ error!("Disconnected by server. Reason: {:?}", reason);
+ return Err("Disconnected by server".into());
+ }
+ _ => {
+ error!("Server returned unexpected message: {:?}", msg);
+ error!("Sending goodbye and exiting");
+ stream.write_all(&ctob(JsonMessage::Goodbye {
+ reason: DisconnectReason::UnexpectedMessageType,
+ }))?;
+ return Err("Unexpected message type by server".into());
+ }
+ }
+
+ info!("Sending enroll request...");
+ stream.write_all(&ctob(JsonMessage::Enroll {
+ code: code.to_string(),
+ }))?;
+
+ info!("Sending disconnect...");
+ stream.write_all(&ctob(JsonMessage::Goodbye {
+ reason: DisconnectReason::Done,
+ }))?;
+
+ info!("Sent enroll request to tfclient daemon. Check logs to see if the enrollment was successful.");
+
+ Ok(())
+}
+
+fn read_msg(reader: &mut BufReader<&TcpStream>) -> Result<JsonMessage, Box<dyn Error>> {
+ let mut str = String::new();
+ reader.read_line(&mut str)?;
+ let msg: JsonMessage = serde_json::from_str(&str)?;
+ Ok(msg)
+}
\ No newline at end of file
diff --git a/tfclient/src/socketworker.rs b/tfclient/src/socketworker.rs
new file mode 100644
index 0000000..a87ec21
--- /dev/null
+++ b/tfclient/src/socketworker.rs
@@ -0,0 +1,305 @@
+// Code to handle the nebula worker
+
+use std::error::Error;
+use std::{io, thread};
+use std::io::{BufRead, BufReader, BufWriter, Write};
+use std::net::{IpAddr, Shutdown, SocketAddr, TcpListener, TcpStream};
+use std::sync::mpsc::{Receiver, TryRecvError};
+
+use log::{debug, error, info, trace, warn};
+use serde::{Deserialize, Serialize};
+use crate::apiworker::APIWorkerMessage;
+use crate::config::{load_cdata, TFClientConfig};
+use crate::daemon::ThreadMessageSender;
+use crate::nebulaworker::NebulaWorkerMessage;
+use crate::timerworker::TimerWorkerMessage;
+
+pub enum SocketWorkerMessage {
+ Shutdown
+}
+
+pub fn socketworker_main(config: TFClientConfig, instance: String, transmitter: ThreadMessageSender, rx: Receiver<SocketWorkerMessage>) {
+ info!("socketworker_main called, entering realmain");
+ match _main(config, instance, transmitter, rx) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error in socket thread: {}", e);
+ }
+ };
+}
+
+fn _main(config: TFClientConfig, instance: String, transmitter: ThreadMessageSender, rx: Receiver<SocketWorkerMessage>) -> Result<(), Box<dyn Error>> {
+ let listener = TcpListener::bind(SocketAddr::new(IpAddr::from([127, 0, 0, 1]), config.listen_port))?;
+ listener.set_nonblocking(true)?;
+
+ loop {
+ match listener.accept() {
+ Ok(stream) => {
+ let transmitter_clone = transmitter.clone();
+ let config_clone = config.clone();
+ let instance_clone = instance.clone();
+ thread::spawn(|| {
+ match handle_stream(stream, transmitter_clone, config_clone, instance_clone) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error in client thread: {}", e);
+ }
+ }
+ });
+ },
+ Err(e) if e.kind() == io::ErrorKind::WouldBlock => (),
+ Err(e) => { Err(e)?; }
+ }
+
+ match rx.try_recv() {
+ Ok(msg) => {
+ match msg {
+ SocketWorkerMessage::Shutdown => {
+ info!("recv on command socket: shutdown, stopping");
+ break;
+ }
+ }
+ },
+ Err(e) => {
+ match e {
+ TryRecvError::Empty => {}
+ TryRecvError::Disconnected => {
+ error!("socketworker command socket disconnected, shutting down to prevent orphaning");
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ Ok(())
+}
+
+fn handle_stream(stream: (TcpStream, SocketAddr), transmitter: ThreadMessageSender, config: TFClientConfig, instance: String) -> Result<(), io::Error> {
+ info!("Incoming client");
+ match handle_client(stream.0, transmitter, config, instance) {
+ Ok(()) => (),
+ Err(e) if e.kind() == io::ErrorKind::TimedOut => {
+ warn!("Client timed out, connection aborted");
+ },
+ Err(e) if e.kind() == io::ErrorKind::NotConnected => {
+ warn!("Client connection severed");
+ },
+ Err(e) if e.kind() == io::ErrorKind::BrokenPipe => {
+ warn!("Client connection returned error: broken pipe");
+ },
+ Err(e) if e.kind() == io::ErrorKind::ConnectionAborted => {
+ warn!("Client aborted connection");
+ },
+ Err(e) => {
+ error!("Error in client handler: {}", e);
+ return Err(e);
+ }
+ };
+ Ok(())
+}
+
+fn handle_client(stream: TcpStream, transmitter: ThreadMessageSender, config: TFClientConfig, instance: String) -> Result<(), io::Error> {
+ info!("Handling connection from {}", stream.peer_addr()?);
+
+ let mut client = Client {
+ state: ClientState::WaitHello,
+ reader: BufReader::new(&stream),
+ writer: BufWriter::new(&stream),
+ stream: &stream,
+ config,
+ instance,
+ };
+
+ loop {
+ let mut command = String::new();
+ client.reader.read_line(&mut command)?;
+
+ let command: JsonMessage = serde_json::from_str(&command)?;
+
+ trace!("recv {:?} from {}", command, client.stream.peer_addr()?);
+
+ let should_disconnect;
+
+ match client.state {
+ ClientState::WaitHello => {
+ should_disconnect = waithello_handle(&mut client, &transmitter, command)?;
+ }
+ ClientState::SentHello => {
+ should_disconnect = senthello_handle(&mut client, &transmitter, command)?;
+ }
+ }
+
+ if should_disconnect { break; }
+ }
+
+ // Gracefully close the connection
+ stream.shutdown(Shutdown::Both)?;
+
+ Ok(())
+}
+
+struct Client<'a> {
+ state: ClientState,
+ reader: BufReader<&'a TcpStream>,
+ writer: BufWriter<&'a TcpStream>,
+ stream: &'a TcpStream,
+ config: TFClientConfig,
+ instance: String
+}
+
+fn waithello_handle(client: &mut Client, _transmitter: &ThreadMessageSender, command: JsonMessage) -> Result<bool, io::Error> {
+ trace!("state: WaitHello, handling with waithello_handle");
+ let mut should_disconnect = false;
+
+ match command {
+ JsonMessage::Hello { version } => {
+ if version != JSON_API_VERSION {
+ should_disconnect = true;
+ client.stream.write_all(&ctob(JsonMessage::Goodbye {
+ reason: DisconnectReason::UnsupportedVersion {
+ expected: JSON_API_VERSION,
+ got: version
+ }
+ }))?;
+ }
+ client.stream.write_all(&ctob(JsonMessage::Hello {
+ version: JSON_API_VERSION
+ }))?;
+ client.state = ClientState::SentHello;
+ trace!("setting state to SentHello");
+ },
+ JsonMessage::Goodbye { reason } => {
+ info!("Client sent disconnect: {:?}", reason);
+ should_disconnect = true;
+ },
+ _ => {
+ debug!("message type unexpected in WaitHello state");
+ should_disconnect = true;
+ client.stream.write_all(&ctob(JsonMessage::Goodbye {
+ reason: DisconnectReason::UnexpectedMessageType,
+ }))?;
+ }
+ }
+
+ Ok(should_disconnect)
+}
+
+fn senthello_handle(client: &mut Client, transmitter: &ThreadMessageSender, command: JsonMessage) -> Result<bool, io::Error> {
+ trace!("state: SentHello, handling with senthello_handle");
+ let mut should_disconnect = false;
+
+ match command {
+ JsonMessage::Goodbye { reason } => {
+ info!("Client sent disconnect: {:?}", reason);
+ should_disconnect = true;
+ },
+
+ JsonMessage::Shutdown {} => {
+ info!("Requested to shutdown by local control socket. Sending shutdown message to threads");
+ match transmitter.nebula_thread.send(NebulaWorkerMessage::Shutdown) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error sending shutdown message to nebula worker thread: {}", e);
+ }
+ }
+ match transmitter.api_thread.send(APIWorkerMessage::Shutdown) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error sending shutdown message to api worker thread: {}", e);
+ }
+ }
+ match transmitter.socket_thread.send(SocketWorkerMessage::Shutdown) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error sending shutdown message to socket worker thread: {}", e);
+ }
+ }
+ match transmitter.timer_thread.send(TimerWorkerMessage::Shutdown) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error sending shutdown message to timer worker thread: {}", e);
+ }
+ }
+ },
+
+ JsonMessage::GetHostID {} => {
+ let data = match load_cdata(&client.instance) {
+ Ok(d) => d,
+ Err(e) => {
+ error!("Error loading cdata: {}", e);
+ panic!("{}", e); // TODO: Find a better way of handling this
+ }
+ };
+ client.stream.write_all(&ctob(JsonMessage::HostID {
+ has_id: data.creds.is_some(),
+ id: data.creds.map(|c| c.host_id)
+ }))?;
+ },
+
+ JsonMessage::Enroll { code } => {
+ info!("Client sent enroll with code {}", code);
+ info!("Sending enroll request to apiworker");
+ transmitter.api_thread.send(APIWorkerMessage::Enroll { code }).unwrap();
+ }
+
+ _ => {
+ debug!("message type unexpected in SentHello state");
+ should_disconnect = true;
+ client.stream.write_all(&ctob(JsonMessage::Goodbye {
+ reason: DisconnectReason::UnexpectedMessageType,
+ }))?;
+ }
+ }
+
+ Ok(should_disconnect)
+}
+
+pub fn ctob(command: JsonMessage) -> Vec<u8> {
+ let command_str = serde_json::to_string(&command).unwrap() + "\n";
+ command_str.into_bytes()
+}
+
+enum ClientState {
+ WaitHello,
+ SentHello
+}
+
+pub const JSON_API_VERSION: i32 = 1;
+
+#[derive(Serialize, Deserialize, Debug)]
+#[serde(tag = "method")]
+pub enum JsonMessage {
+ #[serde(rename = "hello")]
+ Hello {
+ version: i32
+ },
+ #[serde(rename = "goodbye")]
+ Goodbye {
+ reason: DisconnectReason
+ },
+ #[serde(rename = "shutdown")]
+ Shutdown {},
+ #[serde(rename = "get_host_id")]
+ GetHostID {},
+ #[serde(rename = "host_id")]
+ HostID {
+ has_id: bool,
+ id: Option<String>
+ },
+ #[serde(rename = "enroll")]
+ Enroll {
+ code: String
+ }
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+#[serde(tag = "type")]
+pub enum DisconnectReason {
+ #[serde(rename = "unsupported_version")]
+ UnsupportedVersion { expected: i32, got: i32 },
+ #[serde(rename = "unexpected_message_type")]
+ UnexpectedMessageType,
+ #[serde(rename = "done")]
+ Done
+}
\ No newline at end of file
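For illustration (values are made up): because `JsonMessage` is internally tagged with `method` and `DisconnectReason` with `type`, every message produced by `ctob` is a single newline-terminated JSON object. A sketch of the handshake, an enrollment, and a host-ID reply:

{"method":"hello","version":1}
{"method":"goodbye","reason":{"type":"unsupported_version","expected":1,"got":2}}
{"method":"enroll","code":"ABCD1234"}
{"method":"host_id","has_id":false,"id":null}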
diff --git a/tfclient/src/timerworker.rs b/tfclient/src/timerworker.rs
new file mode 100644
index 0000000..7fabba1
--- /dev/null
+++ b/tfclient/src/timerworker.rs
@@ -0,0 +1,47 @@
+use std::ops::Add;
+use std::sync::mpsc::{Receiver, TryRecvError};
+use std::time::{Duration, SystemTime};
+use log::{error, info};
+use crate::apiworker::APIWorkerMessage;
+use crate::daemon::ThreadMessageSender;
+
+pub enum TimerWorkerMessage {
+ Shutdown
+}
+
+pub fn timer_main(tx: ThreadMessageSender, rx: Receiver<TimerWorkerMessage>) {
+ let mut api_reload_timer = SystemTime::now().add(Duration::from_secs(60));
+
+ loop {
+ match rx.try_recv() {
+ Ok(msg) => {
+ match msg {
+ TimerWorkerMessage::Shutdown => {
+ info!("recv on command socket: shutdown, stopping");
+ break;
+ }
+ }
+ },
+ Err(e) => {
+ match e {
+ TryRecvError::Empty => {}
+ TryRecvError::Disconnected => {
+ error!("timerworker command socket disconnected, shutting down to prevent orphaning");
+ break;
+ }
+ }
+ }
+ }
+
+ if SystemTime::now().gt(&api_reload_timer) {
+ info!("Timer triggered: API_RELOAD_TIMER");
+ api_reload_timer = SystemTime::now().add(Duration::from_secs(60));
+ match tx.api_thread.send(APIWorkerMessage::Timer) {
+ Ok(_) => (),
+ Err(e) => {
+ error!("Error sending timer message to api worker thread: {}", e);
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/tfclient/src/util.rs b/tfclient/src/util.rs
new file mode 100644
index 0000000..eeecaca
--- /dev/null
+++ b/tfclient/src/util.rs
@@ -0,0 +1,29 @@
+use log::{error, warn};
+use sha2::Sha256;
+use sha2::Digest;
+use url::Url;
+
+pub fn sha256(bytes: &[u8]) -> String {
+ let mut hasher = Sha256::new();
+ hasher.update(bytes);
+ let digest = hasher.finalize();
+ hex::encode(digest)
+}
+
+pub fn check_server_url(server: &str) {
+ let api_base = match Url::parse(server) {
+ Ok(u) => u,
+ Err(e) => {
+ error!("Invalid server url `{}`: {}", server, e);
+ std::process::exit(1);
+ }
+ };
+ match api_base.scheme() {
+ "http" => { warn!("HTTP api urls are not reccomended. Please switch to HTTPS if possible.") },
+ "https" => (),
+ _ => {
+ error!("Unsupported protocol `{}` (expected one of http, https)", api_base.scheme());
+ std::process::exit(1);
+ }
+ }
+}
\ No newline at end of file
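A minimal usage sketch for these helpers (illustrative, not part of the patch): `sha256` returns the lowercase hex digest of its input, and `check_server_url` exits the process for anything other than an http or https URL.

// Known SHA-256 test vector for "abc":
assert_eq!(sha256(b"abc"), "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad");
// Accepted, but logs the HTTP warning above (the URL itself is illustrative):
check_server_url("http://tfapi.example.com");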
diff --git a/trifid-pki/Cargo.toml b/trifid-pki/Cargo.toml
index ce4daba..fae5fb2 100644
--- a/trifid-pki/Cargo.toml
+++ b/trifid-pki/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "trifid-pki"
-version = "0.1.4"
+version = "0.1.9"
edition = "2021"
description = "A rust implementation of the Nebula PKI system"
license = "AGPL-3.0-or-later"
@@ -19,4 +19,9 @@ quick-protobuf = "0.8.1"
hex = "0.4.3"
sha2 = "0.10.6"
rand_core = "0.6.4"
-rand = "0.8.5"
\ No newline at end of file
+rand = "0.8.5"
+serde = { version = "1", features = ["derive"], optional = true }
+
+[features]
+default = []
+serde_derive = ["serde", "ipnet/serde", "x25519-dalek/serde", "ed25519-dalek/serde"]
\ No newline at end of file
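Because the new `serde_derive` feature is not in the default set, dependents opt in explicitly. A hypothetical consumer's manifest entry (version taken from the bump above) would look like:

[dependencies]
trifid-pki = { version = "0.1.9", features = ["serde_derive"] }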
diff --git a/trifid-pki/src/ca.rs b/trifid-pki/src/ca.rs
index 66dc20a..d5b3aaf 100644
--- a/trifid-pki/src/ca.rs
+++ b/trifid-pki/src/ca.rs
@@ -7,9 +7,13 @@ use std::time::SystemTime;
use ed25519_dalek::VerifyingKey;
use crate::cert::{deserialize_nebula_certificate_from_pem, NebulaCertificate};
+#[cfg(feature = "serde_derive")]
+use serde::{Serialize, Deserialize};
+
/// A pool of trusted CA certificates, and certificates that should be blocked.
/// This is equivalent to the `pki` section in a typical Nebula config.yml.
-#[derive(Default)]
+#[derive(Default, Clone)]
+#[cfg_attr(feature = "serde_derive", derive(Serialize, Deserialize))]
pub struct NebulaCAPool {
/// The list of CA root certificates that should be trusted.
 pub cas: HashMap<String, NebulaCertificate>,
@@ -102,8 +106,9 @@ impl NebulaCAPool {
}
}
-#[derive(Debug)]
/// A list of errors that can happen when working with a CA Pool
+#[derive(Debug)]
+#[cfg_attr(feature = "serde_derive", derive(Serialize, Deserialize))]
pub enum CaPoolError {
/// Tried to add a non-CA cert to the CA pool
NotACA,
diff --git a/trifid-pki/src/cert.rs b/trifid-pki/src/cert.rs
index e328307..60b43fd 100644
--- a/trifid-pki/src/cert.rs
+++ b/trifid-pki/src/cert.rs
@@ -15,6 +15,9 @@ use crate::ca::NebulaCAPool;
use crate::cert_codec::{RawNebulaCertificate, RawNebulaCertificateDetails};
use sha2::Digest;
+#[cfg(feature = "serde_derive")]
+use serde::{Serialize, Deserialize};
+
/// The length, in bytes, of public keys
pub const PUBLIC_KEY_LENGTH: i32 = 32;
@@ -31,6 +34,7 @@ pub const ED25519_PUBLIC_KEY_BANNER: &str = "NEBULA ED25519 PUBLIC KEY";
/// A Nebula PKI certificate
#[derive(Debug, Clone)]
+#[cfg_attr(feature = "serde_derive", derive(Serialize, Deserialize))]
pub struct NebulaCertificate {
/// The signed data of this certificate
pub details: NebulaCertificateDetails,
@@ -40,6 +44,7 @@ pub struct NebulaCertificate {
/// The signed details contained in a Nebula PKI certificate
#[derive(Debug, Clone)]
+#[cfg_attr(feature = "serde_derive", derive(Serialize, Deserialize))]
pub struct NebulaCertificateDetails {
/// The name of the identity this certificate was issued for
pub name: String,
@@ -63,6 +68,7 @@ pub struct NebulaCertificateDetails {
/// A list of errors that can occur parsing certificates
#[derive(Debug)]
+#[cfg_attr(feature = "serde_derive", derive(Serialize, Deserialize))]
pub enum CertificateError {
/// Attempted to deserialize a certificate from an empty byte array
EmptyByteArray,
@@ -186,6 +192,7 @@ pub fn deserialize_nebula_certificate(bytes: &[u8]) -> Result<NebulaCertificate, Box<dyn Error>> {
+pub fn deserialize_ed25519_public_many(bytes: &[u8]) -> Result<Vec<Vec<u8>>, Box<dyn Error>> {
+ let mut keys = vec![];
+ let pems = pem::parse_many(bytes)?;
+
+ for pem in pems {
+ if pem.tag != ED25519_PUBLIC_KEY_BANNER {
+ return Err(KeyError::WrongPemTag.into())
+ }
+ if pem.contents.len() != 32 {
+ return Err(KeyError::Not32Bytes.into())
+ }
+ keys.push(pem.contents);
+ }
+
+ Ok(keys)
+}
+
impl NebulaCertificate {
/// Sign a nebula certificate with the provided private key
/// # Errors
@@ -520,6 +547,7 @@ impl NebulaCertificate {
/// A list of possible errors that can happen validating a certificate
#[derive(Eq, PartialEq, Debug)]
+#[cfg_attr(feature = "serde_derive", derive(Serialize, Deserialize))]
pub enum CertificateValidity {
/// There are no issues with this certificate
Ok,
diff --git a/trifid-pki/src/lib.rs b/trifid-pki/src/lib.rs
index 7323013..4c67fe0 100644
--- a/trifid-pki/src/lib.rs
+++ b/trifid-pki/src/lib.rs
@@ -57,4 +57,7 @@ pub mod cert;
pub(crate) mod cert_codec;
#[cfg(test)]
#[macro_use]
-pub mod test;
\ No newline at end of file
+pub mod test;
+
+/// Get the compiled version of trifid-pki.
+pub const TRIFID_PKI_VERSION: &str = env!("CARGO_PKG_VERSION");
\ No newline at end of file
diff --git a/trifid-pki/src/test.rs b/trifid-pki/src/test.rs
index 55f465e..2a409c5 100644
--- a/trifid-pki/src/test.rs
+++ b/trifid-pki/src/test.rs
@@ -6,7 +6,7 @@ use std::net::Ipv4Addr;
use std::ops::{Add, Sub};
use std::time::{Duration, SystemTime, SystemTimeError, UNIX_EPOCH};
use ipnet::Ipv4Net;
-use crate::cert::{CertificateValidity, deserialize_ed25519_private, deserialize_ed25519_public, deserialize_nebula_certificate, deserialize_nebula_certificate_from_pem, deserialize_x25519_private, deserialize_x25519_public, NebulaCertificate, NebulaCertificateDetails, serialize_ed25519_private, serialize_ed25519_public, serialize_x25519_private, serialize_x25519_public};
+use crate::cert::{CertificateValidity, deserialize_ed25519_private, deserialize_ed25519_public, deserialize_ed25519_public_many, deserialize_nebula_certificate, deserialize_nebula_certificate_from_pem, deserialize_x25519_private, deserialize_x25519_public, NebulaCertificate, NebulaCertificateDetails, serialize_ed25519_private, serialize_ed25519_public, serialize_x25519_private, serialize_x25519_public};
use std::str::FromStr;
use ed25519_dalek::{SigningKey, VerifyingKey};
use quick_protobuf::{MessageWrite, Writer};
@@ -296,10 +296,21 @@ fn x25519_serialization() {
#[test]
fn ed25519_serialization() {
let bytes = [0u8; 64];
+ let bytes2 = [0u8; 32];
assert_eq!(deserialize_ed25519_private(&serialize_ed25519_private(&bytes)).unwrap(), bytes);
assert!(deserialize_ed25519_private(&[0u8; 32]).is_err());
- assert_eq!(deserialize_ed25519_public(&serialize_ed25519_public(&bytes)).unwrap(), bytes);
- assert!(deserialize_ed25519_public(&[0u8; 32]).is_err());
+ assert_eq!(deserialize_ed25519_public(&serialize_ed25519_public(&bytes2)).unwrap(), bytes2);
+ assert!(deserialize_ed25519_public(&[0u8; 64]).is_err());
+
+ let mut bytes = vec![];
+ bytes.append(&mut serialize_ed25519_public(&[0u8; 32]));
+ bytes.append(&mut serialize_ed25519_public(&[1u8; 32]));
+ let deser = deserialize_ed25519_public_many(&bytes).unwrap();
+ assert_eq!(deser[0], [0u8; 32]);
+ assert_eq!(deser[1], [1u8; 32]);
+
+ bytes.append(&mut serialize_ed25519_public(&[1u8; 33]));
+ deserialize_ed25519_public_many(&bytes).unwrap_err();
}
#[test]
@@ -637,11 +648,11 @@ bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
#[test]
fn test_deserialize_ed25519_public() {
- let priv_key = b"-----BEGIN NEBULA ED25519 PUBLIC KEY-----
-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
+ let pub_key = b"-----BEGIN NEBULA ED25519 PUBLIC KEY-----
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA ED25519 PUBLIC KEY-----";
let short_key = b"-----BEGIN NEBULA ED25519 PUBLIC KEY-----
-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA ED25519 PUBLIC KEY-----";
let invalid_banner = b"-----BEGIN NOT A NEBULA PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
@@ -650,7 +661,7 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-END NEBULA ED25519 PUBLIC KEY-----";
- deserialize_ed25519_public(priv_key).unwrap();
+ deserialize_ed25519_public(pub_key).unwrap();
deserialize_ed25519_public(short_key).unwrap_err();