initial bot stuff
This commit is contained in:
commit
ba0a87ea08
|
@ -0,0 +1 @@
|
|||
/target
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,14 @@
|
|||
[package]
name = "discord_bot"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
# Discord API client/framework.
serenity = "0.11"
# Async runtime required by serenity.
tokio = { version = "1", features = ["full"] }
# (De)serialization framework backing the config structs.
serde = { version = "1", features = ["derive"] }
# Logging facade; simple_logger is a minimal backend for it.
log = "0.4"
simple_logger = "4.2"
# Parses the TOML configuration file.
toml = "0.7"
|
@ -0,0 +1,101 @@
|
|||
use std::{env, fs};
|
||||
use std::net::Ipv4Addr;
|
||||
use std::path::Path;
|
||||
use log::{Level, LevelFilter};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Top-level bot configuration, deserialized from the TOML file passed on the
/// command line (see `load_config`).
#[derive(Serialize, Deserialize)]
pub struct Config {
    /// Discord token and command prefix.
    pub bot: BotConfig,
    /// Permission groups gating privileged commands.
    pub permissions: PermissionsGroups,
    /// Relay channels and webhooks.
    pub relay: RelayConfig,
    /// Logging settings.
    pub logging: LoggingConfig
}
|
||||
|
||||
/// Core bot settings.
#[derive(Serialize, Deserialize)]
pub struct BotConfig {
    /// Discord bot token used to log the client in.
    pub discord_token: String,
    /// Chat prefix that marks a message as a command.
    pub prefix: String
}
|
||||
|
||||
/// Settings for relaying messages between Discord and an external service.
#[derive(Serialize, Deserialize)]
pub struct RelayConfig {
    /// Relay endpoint address — NOTE(review): not consumed in the code shown
    /// here; presumably the relay API host. Confirm against the relay client.
    pub ip: Ipv4Addr,
    /// Relay endpoint port — NOTE(review): not consumed in the code shown here.
    pub port: u16,
    /// Discord channel IDs whose messages are relayed/echoed.
    pub channels: Vec<u64>,
    /// Discord webhook URLs messages are forwarded to.
    pub webhooks: Vec<String>
}
|
||||
|
||||
/// The named permission groups the bot recognizes.
#[derive(Serialize, Deserialize)]
pub struct PermissionsGroups {
    /// `owner` group — NOTE(review): not checked anywhere in the code shown
    /// here; presumably reserved for owner-only commands.
    pub owner: PermissionGroup,
    /// `admin` group; required by the admin commands (relay_channel,
    /// reload_config, webhook_test, botstatus).
    pub admin: PermissionGroup
}
|
||||
|
||||
/// Membership of a single permission group. A user qualifies by direct user ID
/// membership or by holding one of the listed roles.
#[derive(Serialize, Deserialize)]
pub struct PermissionGroup {
    /// Discord user IDs that are members directly.
    pub user_members: Vec<u64>,
    /// Discord role IDs whose holders are members (checked via `has_role`).
    pub group_members: Vec<u64>
}
|
||||
|
||||
/// Logging settings — NOTE(review): not consumed in the code shown here;
/// presumably wired to `simple_logger` elsewhere or still to be hooked up.
#[derive(Serialize, Deserialize)]
pub struct LoggingConfig {
    /// Whether logging is enabled at all.
    pub enabled: bool,
    /// Maximum verbosity to emit.
    pub level: LogLevel
}
|
||||
|
||||
/// Config-file log level; serialized in snake_case (e.g. `level = "debug"`).
/// Converts into the `log` crate's `Level` via the `From` impl below.
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum LogLevel {
    Debug,
    Error,
    Info,
    Trace,
    Warn
}
|
||||
|
||||
impl From<LogLevel> for Level {
|
||||
fn from(value: LogLevel) -> Self {
|
||||
match value {
|
||||
LogLevel::Debug => Level::Debug,
|
||||
LogLevel::Error => Level::Error,
|
||||
LogLevel::Info => Level::Info,
|
||||
LogLevel::Trace => Level::Trace,
|
||||
LogLevel::Warn => Level::Warn
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn load_config() -> Config {
|
||||
let mut args = env::args();
|
||||
|
||||
if args.len() != 2 {
|
||||
eprintln!("usage: discord_bot <config path>");
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
let config_path = args.nth(1).unwrap();
|
||||
if !Path::new(&config_path).exists() {
|
||||
eprintln!("err: config file does not exist");
|
||||
eprintln!("usage: discord_bot <config path>");
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
let config_str = match fs::read_to_string(config_path) {
|
||||
Ok(r) => r,
|
||||
Err(e) => {
|
||||
eprintln!("error loading config: {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
};
|
||||
|
||||
let config: Config = match toml::from_str(&config_str) {
|
||||
Ok(r) => r,
|
||||
Err(e) => {
|
||||
eprintln!("error parsing config: {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
};
|
||||
|
||||
config
|
||||
}
|
|
@ -0,0 +1,279 @@
|
|||
use std::collections::HashMap;
|
||||
use std::{env, fs};
|
||||
use std::net::Ipv4Addr;
|
||||
use std::ops::DerefMut;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use serenity::async_trait;
|
||||
use serenity::builder::ParseValue;
|
||||
use serenity::prelude::*;
|
||||
use serenity::model::channel::Message;
|
||||
use serenity::framework::standard::macros::{command, group, hook};
|
||||
use serenity::framework::standard::{StandardFramework, CommandResult, Args};
|
||||
use serenity::http::Http;
|
||||
use serenity::model::prelude::Webhook;
|
||||
use crate::config::{Config, load_config};
|
||||
use crate::util::ping_channel;
|
||||
|
||||
pub mod config;
|
||||
pub mod util;
|
||||
|
||||
/// Command group registering every chat command the bot exposes.
/// The `#[group]` macro generates the `GENERAL_GROUP` static used in `main`.
#[group]
#[commands(ping, relay_channel, reload_config, webhook_test, botstatus)]
struct General;
|
||||
|
||||
/// Serenity event handler. All gateway events currently fall through to the
/// default (no-op) trait implementations; message handling happens via the
/// framework's hooks and commands instead.
struct Handler;

#[async_trait]
impl EventHandler for Handler {}
|
||||
|
||||
/// TypeMap key under which the shared, runtime-reloadable configuration is
/// stored in the client's data map (inserted in `main`, read by every command).
struct ConfigLock;

impl TypeMapKey for ConfigLock {
    // Arc so handlers can clone the handle cheaply; RwLock so `reload_config`
    // can swap the config while readers exist.
    type Value = Arc<RwLock<Config>>;
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let config = load_config();
|
||||
|
||||
let framework = StandardFramework::new()
|
||||
.configure(|c| c.prefix(config.bot.prefix.clone())) // set the bot's prefix to "~"
|
||||
.group(&GENERAL_GROUP)
|
||||
.normal_message(normal_message);
|
||||
|
||||
// Login with a bot token from the environment
|
||||
let intents = GatewayIntents::non_privileged() | GatewayIntents::MESSAGE_CONTENT;
|
||||
let mut client = Client::builder(config.bot.discord_token.clone(), intents)
|
||||
.event_handler(Handler)
|
||||
.framework(framework)
|
||||
.await
|
||||
.expect("Error creating client");
|
||||
|
||||
{
|
||||
// Open the data lock in write mode, so keys can be inserted to it.
|
||||
let mut data = client.data.write().await;
|
||||
|
||||
data.insert::<ConfigLock>(Arc::new(RwLock::new(config)));
|
||||
}
|
||||
|
||||
// start listening for events by starting a single shard
|
||||
if let Err(why) = client.start().await {
|
||||
println!("An error occurred while running the client: {:?}", why);
|
||||
}
|
||||
}
|
||||
|
||||
#[hook]
|
||||
async fn normal_message(ctx: &Context, msg: &Message) {
|
||||
let config = {
|
||||
// While data is a RwLock, it's recommended that you always open the lock as read.
|
||||
// This is mainly done to avoid Deadlocks for having a possible writer waiting for multiple
|
||||
// readers to close.
|
||||
let data_read = ctx.data.read().await;
|
||||
|
||||
// Since the CommandCounter Value is wrapped in an Arc, cloning will not duplicate the
|
||||
// data, instead the reference is cloned.
|
||||
// We wrap every value on in an Arc, as to keep the data lock open for the least time possible,
|
||||
// to again, avoid deadlocking it.
|
||||
data_read.get::<ConfigLock>().expect("Expected ConfigLock in TypeMap.").clone()
|
||||
};
|
||||
|
||||
if !config.read().await.relay.channels.contains(msg.channel_id.as_u64()) { return; }
|
||||
|
||||
msg.reply(ctx, "TEST: msg echo ".to_string() + &msg.content).await.unwrap();
|
||||
}
|
||||
|
||||
/// Replies with a pong message.
///
/// TODO: the latency value is a hard-coded placeholder ("DOTHISLATER") and
/// still needs to be measured against the relay server for real.
#[command]
async fn ping(ctx: &Context, msg: &Message) -> CommandResult {
    msg.reply(ctx, "Pong! Current ping to the LiveOverflow server is DOTHISLATER ms.").await?;

    Ok(())
}
|
||||
|
||||
#[command]
|
||||
async fn relay_channel(ctx: &Context, msg: &Message) -> CommandResult {
|
||||
let config = {
|
||||
// While data is a RwLock, it's recommended that you always open the lock as read.
|
||||
// This is mainly done to avoid Deadlocks for having a possible writer waiting for multiple
|
||||
// readers to close.
|
||||
let data_read = ctx.data.read().await;
|
||||
|
||||
// Since the CommandCounter Value is wrapped in an Arc, cloning will not duplicate the
|
||||
// data, instead the reference is cloned.
|
||||
// We wrap every value on in an Arc, as to keep the data lock open for the least time possible,
|
||||
// to again, avoid deadlocking it.
|
||||
data_read.get::<ConfigLock>().expect("Expected ConfigLock in TypeMap.").clone()
|
||||
};
|
||||
|
||||
// Needs group: `admin`
|
||||
let is_user_in_group = {
|
||||
config.read().await.permissions.admin.user_members.contains(msg.author.id.as_u64())
|
||||
};
|
||||
|
||||
let is_role_in_group = {
|
||||
let mut has_role = false;
|
||||
for role in &config.read().await.permissions.admin.group_members {
|
||||
if msg.author.has_role(ctx, msg.guild_id.unwrap(), *role).await? {
|
||||
has_role = true;
|
||||
}
|
||||
}
|
||||
has_role
|
||||
};
|
||||
|
||||
if !is_role_in_group && !is_user_in_group {
|
||||
msg.reply(ctx, "Sorry, you need the `admin` permission group to view this information").await?;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
msg.reply(ctx, format!("Configured to relay messages to the following channels: {}", config.read().await.relay.channels.iter().map(|u| ping_channel(*u)).collect::<Vec<_>>().join(" "))).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[command]
|
||||
async fn reload_config(ctx: &Context, msg: &Message) -> CommandResult {
|
||||
let config = {
|
||||
// While data is a RwLock, it's recommended that you always open the lock as read.
|
||||
// This is mainly done to avoid Deadlocks for having a possible writer waiting for multiple
|
||||
// readers to close.
|
||||
let data_read = ctx.data.read().await;
|
||||
|
||||
// Since the CommandCounter Value is wrapped in an Arc, cloning will not duplicate the
|
||||
// data, instead the reference is cloned.
|
||||
// We wrap every value on in an Arc, as to keep the data lock open for the least time possible,
|
||||
// to again, avoid deadlocking it.
|
||||
data_read.get::<ConfigLock>().expect("Expected ConfigLock in TypeMap.").clone()
|
||||
};
|
||||
|
||||
// Needs group: `admin`
|
||||
let is_user_in_group = {
|
||||
config.read().await.permissions.admin.user_members.contains(msg.author.id.as_u64())
|
||||
};
|
||||
|
||||
let is_role_in_group = {
|
||||
let mut has_role = false;
|
||||
for role in &config.read().await.permissions.admin.group_members {
|
||||
if msg.author.has_role(ctx, msg.guild_id.unwrap(), *role).await? {
|
||||
has_role = true;
|
||||
}
|
||||
}
|
||||
has_role
|
||||
};
|
||||
|
||||
if !is_role_in_group && !is_user_in_group {
|
||||
msg.reply(ctx, "Sorry, you need the `admin` permission group to view this information").await?;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
msg.reply(ctx, "Reloading bot configuration...").await?;
|
||||
|
||||
{
|
||||
let _ = std::mem::replace(config.write().await.deref_mut(), load_config());
|
||||
}
|
||||
|
||||
msg.reply(ctx, "Bot configuration reloaded.").await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[command]
|
||||
async fn webhook_test(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
|
||||
let config = {
|
||||
// While data is a RwLock, it's recommended that you always open the lock as read.
|
||||
// This is mainly done to avoid Deadlocks for having a possible writer waiting for multiple
|
||||
// readers to close.
|
||||
let data_read = ctx.data.read().await;
|
||||
|
||||
// Since the CommandCounter Value is wrapped in an Arc, cloning will not duplicate the
|
||||
// data, instead the reference is cloned.
|
||||
// We wrap every value on in an Arc, as to keep the data lock open for the least time possible,
|
||||
// to again, avoid deadlocking it.
|
||||
data_read.get::<ConfigLock>().expect("Expected ConfigLock in TypeMap.").clone()
|
||||
};
|
||||
|
||||
// Needs group: `admin`
|
||||
let is_user_in_group = {
|
||||
config.read().await.permissions.admin.user_members.contains(msg.author.id.as_u64())
|
||||
};
|
||||
|
||||
let is_role_in_group = {
|
||||
let mut has_role = false;
|
||||
for role in &config.read().await.permissions.admin.group_members {
|
||||
if msg.author.has_role(ctx, msg.guild_id.unwrap(), *role).await? {
|
||||
has_role = true;
|
||||
}
|
||||
}
|
||||
has_role
|
||||
};
|
||||
|
||||
if !is_role_in_group && !is_user_in_group {
|
||||
msg.reply(ctx, "Sorry, you need the `admin` permission group to view this information").await?;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
for webhook in &config.read().await.relay.webhooks {
|
||||
let http = Http::new("");
|
||||
let wh = Webhook::from_url(&http, webhook).await?;
|
||||
|
||||
if args.is_empty() {
|
||||
wh
|
||||
.execute(&http, false, |w| w.content("This is a test message sent by ssi_core_bot").username("Webhook test"))
|
||||
.await
|
||||
.expect("Could not execute webhook.");
|
||||
} else {
|
||||
let player: String = args.single()?;
|
||||
|
||||
wh
|
||||
.execute(&http, false, |w| {
|
||||
w.content(format!("<{}> this is a test message sent by \"{}\" on the server", player, player)).username(&player).allowed_mentions(|am| am.parse(ParseValue::Users).parse(ParseValue::Roles)).avatar_url(&format!("https://mc-heads.net/head/{}", player))
|
||||
})
|
||||
.await
|
||||
.expect("Could not execute webhook.");
|
||||
}
|
||||
}
|
||||
|
||||
msg.reply(ctx, "Webhook tests sent.").await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[command]
|
||||
async fn botstatus(ctx: &Context, msg: &Message) -> CommandResult {
|
||||
let config = {
|
||||
// While data is a RwLock, it's recommended that you always open the lock as read.
|
||||
// This is mainly done to avoid Deadlocks for having a possible writer waiting for multiple
|
||||
// readers to close.
|
||||
let data_read = ctx.data.read().await;
|
||||
|
||||
// Since the CommandCounter Value is wrapped in an Arc, cloning will not duplicate the
|
||||
// data, instead the reference is cloned.
|
||||
// We wrap every value on in an Arc, as to keep the data lock open for the least time possible,
|
||||
// to again, avoid deadlocking it.
|
||||
data_read.get::<ConfigLock>().expect("Expected ConfigLock in TypeMap.").clone()
|
||||
};
|
||||
|
||||
// Needs group: `admin`
|
||||
let is_user_in_group = {
|
||||
config.read().await.permissions.admin.user_members.contains(msg.author.id.as_u64())
|
||||
};
|
||||
|
||||
let is_role_in_group = {
|
||||
let mut has_role = false;
|
||||
for role in &config.read().await.permissions.admin.group_members {
|
||||
if msg.author.has_role(ctx, msg.guild_id.unwrap(), *role).await? {
|
||||
has_role = true;
|
||||
}
|
||||
}
|
||||
has_role
|
||||
};
|
||||
|
||||
if !is_role_in_group && !is_user_in_group {
|
||||
msg.reply(ctx, "Sorry, you need the `admin` permission group to view this information").await?;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
msg.reply(ctx, "Status: :thumbsup: Connected to Relay API. :thumbsdown: Relay bot offline").await?;
|
||||
|
||||
Ok(())
|
||||
}
|
|
@ -0,0 +1,12 @@
|
|||
use serenity::model::user::User;
|
||||
use crate::config::Config;
|
||||
|
||||
/// Builds the Discord mention ("ping") string for a user ID, e.g. `<@1234>`.
pub fn ping_user(id: u64) -> String {
    let mut out = String::from("<@");
    out.push_str(&id.to_string());
    out.push('>');
    out
}
|
||||
/// Builds the Discord mention ("ping") string for a role ID, e.g. `<@&1234>`.
pub fn ping_role(id: u64) -> String {
    ["<@&", id.to_string().as_str(), ">"].concat()
}
|
||||
/// Builds the Discord mention ("ping") string for a channel ID, e.g. `<#1234>`.
pub fn ping_channel(id: u64) -> String {
    format!("<#{id}>")
}
|
Loading…
Reference in New Issue