Initial commit: Rust YpDaemon

This commit is contained in:
Torsten Schulz (local)
2025-11-21 23:05:34 +01:00
commit d0ec363f09
21 changed files with 8067 additions and 0 deletions

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
/target

1364
Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

17
Cargo.toml Normal file
View File

@@ -0,0 +1,17 @@
[package]
name = "YpDaemon"
version = "0.1.0"
edition = "2024"
[dependencies]
rand = "0.8"
postgres = "0.19"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1.48", features = ["rt-multi-thread", "macros", "net", "sync", "time"] }
tokio-tungstenite = "0.23"
futures-util = "0.3"
ctrlc = "3"
tokio-rustls = "0.25"
rustls-pemfile = "2"
libsystemd = "0.7"

40
src/config.rs Normal file
View File

@@ -0,0 +1,40 @@
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;
/// Simple key/value configuration store loaded from a `KEY=VALUE` file.
#[derive(Debug, Clone)]
pub struct Config {
    values: HashMap<String, String>,
}

impl Config {
    /// Parses a configuration file.
    ///
    /// Blank lines and lines starting with `#` are skipped; every other line
    /// is split at the first `=` and both key and value are trimmed. Lines
    /// without `=` are silently ignored.
    pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, Box<dyn std::error::Error>> {
        let reader = BufReader::new(File::open(path)?);
        let mut values = HashMap::new();
        for raw in reader.lines() {
            let raw = raw?;
            let trimmed = raw.trim_start();
            if trimmed.is_empty() || trimmed.starts_with('#') {
                continue;
            }
            if let Some((key, value)) = raw.split_once('=') {
                values.insert(key.trim().to_owned(), value.trim().to_owned());
            }
        }
        Ok(Self { values })
    }

    /// Looks up `key`, returning an error message when it is absent.
    pub fn get(&self, key: &str) -> Result<String, String> {
        match self.values.get(key) {
            Some(value) => Ok(value.clone()),
            None => Err(format!("Konfigurationsschlüssel nicht gefunden: {key}")),
        }
    }
}

207
src/db/connection.rs Normal file
View File

@@ -0,0 +1,207 @@
use postgres::{Client, NoTls};
use postgres::Error as PgError;
use std::collections::HashMap;
use std::fmt;
use std::sync::{Arc, Condvar, Mutex};
use std::time::Duration;
pub type Row = HashMap<String, String>;
pub type Rows = Vec<Row>;
/// Error type of the database layer; wraps a plain message string.
#[derive(Debug)]
pub struct DbError {
    message: String,
}

impl DbError {
    /// Creates an error from anything convertible into a `String`.
    pub fn new(message: impl Into<String>) -> Self {
        let message = message.into();
        Self { message }
    }
}

impl fmt::Display for DbError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.message)
    }
}

impl std::error::Error for DbError {}

impl From<PgError> for DbError {
    fn from(err: PgError) -> Self {
        Self::new(format!("Postgres-Fehler: {err}"))
    }
}
/// One Postgres client plus the SQL strings registered for it by name.
struct Database {
    client: Client,
    // Name -> SQL.
    // NOTE(review): `prepare` only caches the SQL text here; it does NOT
    // create a server-side prepared statement — confirm that is intended.
    prepared: HashMap<String, String>,
}

impl Database {
    /// Opens a new (non-TLS) connection using the given connection string.
    fn connect(conn_str: &str) -> Result<Self, DbError> {
        let client = Client::connect(conn_str, NoTls)?;
        Ok(Self {
            client,
            prepared: HashMap::new(),
        })
    }

    /// Probes the connection with `SELECT 1`; `false` means it is unusable.
    fn is_valid(&mut self) -> bool {
        self.client
            .simple_query("SELECT 1")
            .map(|_| true)
            .unwrap_or(false)
    }

    /// Runs a parameterless query and returns all rows as string maps.
    #[allow(dead_code)]
    fn query(&mut self, sql: &str) -> Result<Rows, DbError> {
        let rows = self.client.query(sql, &[])?;
        Ok(rows.into_iter().map(Self::row_to_map).collect())
    }

    /// Registers `sql` under `name` for later use with [`Self::execute`].
    fn prepare(&mut self, name: &str, sql: &str) -> Result<(), DbError> {
        self.prepared.insert(name.to_string(), sql.to_string());
        Ok(())
    }

    /// Executes the SQL previously registered under `name` with `params`.
    /// Fails with an "Unbekanntes Statement" error for unregistered names.
    fn execute(
        &mut self,
        name: &str,
        params: &[&(dyn postgres::types::ToSql + Sync)],
    ) -> Result<Rows, DbError> {
        let sql = self
            .prepared
            .get(name)
            .ok_or_else(|| DbError::new(format!("Unbekanntes Statement: {name}")))?;
        let rows = self.client.query(sql.as_str(), params)?;
        Ok(rows.into_iter().map(Self::row_to_map).collect())
    }

    /// Converts a row into a column-name -> string map; NULL becomes "".
    // NOTE(review): `row.get::<_, Option<String>>` panics for columns whose
    // Postgres type does not convert to TEXT — verify all queried columns
    // are text-compatible or casted in SQL.
    fn row_to_map(row: postgres::Row) -> Row {
        let mut map = HashMap::with_capacity(row.len());
        for (idx, col) in row.columns().iter().enumerate() {
            let name = col.name().to_string();
            let value: Option<String> = row.get(idx);
            map.insert(name, value.unwrap_or_default());
        }
        map
    }
}
/// Shared state of the connection pool: the idle connections plus a condition
/// variable used to wake waiters when a connection is returned.
struct InnerPool {
    connections: Mutex<Vec<Database>>,
    available: Condvar,
    conn_str: String,
}

impl InnerPool {
    /// Eagerly opens `size` connections; fails if any cannot be established.
    fn new(conn_str: String, size: usize) -> Result<Self, DbError> {
        let mut connections = Vec::with_capacity(size);
        for _ in 0..size {
            connections.push(Database::connect(&conn_str)?);
        }
        Ok(Self {
            connections: Mutex::new(connections),
            available: Condvar::new(),
            conn_str,
        })
    }

    /// Takes a connection out of the pool, blocking until one is available.
    ///
    /// Stale connections are detected via a `SELECT 1` probe and replaced by a
    /// freshly opened one. All network I/O (validation, reconnect, retry
    /// back-off) happens with the pool mutex *released*: the previous version
    /// held the lock across `is_valid`, `Database::connect` and even the
    /// 100 ms `thread::sleep`, which blocked every other pool user behind a
    /// single slow or failing reconnect.
    fn get(&self) -> Result<Database, DbError> {
        loop {
            // Phase 1: grab an idle connection under the lock, waiting if the
            // pool is currently empty.
            let mut db = {
                let mut guard = self.connections.lock().unwrap();
                loop {
                    match guard.pop() {
                        Some(db) => break db,
                        None => guard = self.available.wait(guard).unwrap(),
                    }
                }
            };
            // Phase 2: validate outside the lock.
            if db.is_valid() {
                return Ok(db);
            }
            // Connection is dead: drop it and try to open a replacement,
            // still without holding the lock.
            match Database::connect(&self.conn_str) {
                Ok(new_db) => return Ok(new_db),
                Err(err) => {
                    eprintln!("[ConnectionPool] Fehler beim Neuaufbau der Verbindung: {err}");
                    // Brief pause, then retry with the next pooled connection.
                    std::thread::sleep(Duration::from_millis(100));
                }
            }
        }
    }

    /// Returns a connection to the pool and wakes one waiting thread.
    fn put_back(&self, db: Database) {
        let mut guard = self.connections.lock().unwrap();
        guard.push(db);
        self.available.notify_one();
    }
}
/// Cloneable handle to a fixed-size pool of Postgres connections.
#[derive(Clone)]
pub struct ConnectionPool {
    inner: Arc<InnerPool>,
}

impl ConnectionPool {
    /// Builds a pool with `size` eagerly opened connections.
    pub fn new(conn_str: String, size: usize) -> Result<Self, DbError> {
        InnerPool::new(conn_str, size).map(|inner| Self {
            inner: Arc::new(inner),
        })
    }

    /// Borrows a connection; it returns to the pool when the guard is dropped.
    pub fn get(&self) -> Result<DbConnection, DbError> {
        let db = self.inner.get()?;
        Ok(DbConnection {
            inner: Arc::clone(&self.inner),
            db: Some(db),
        })
    }
}
/// RAII guard around a pooled `Database`; gives the connection back on drop.
pub struct DbConnection {
    inner: Arc<InnerPool>,
    db: Option<Database>,
}

impl DbConnection {
    /// Runs a raw, parameterless SQL query.
    #[allow(dead_code)]
    pub fn query(&mut self, sql: &str) -> Result<Rows, DbError> {
        self.database_mut().query(sql)
    }

    /// Registers `sql` under `name` for later execution.
    pub fn prepare(&mut self, name: &str, sql: &str) -> Result<(), DbError> {
        self.database_mut().prepare(name, sql)
    }

    /// Executes the statement registered under `name` with `params`.
    pub fn execute(
        &mut self,
        name: &str,
        params: &[&(dyn postgres::types::ToSql + Sync)],
    ) -> Result<Rows, DbError> {
        self.database_mut().execute(name, params)
    }

    /// Access to the wrapped connection; the `Option` is only ever `None`
    /// after `drop` has run, so unwrapping here is an invariant check.
    fn database_mut(&mut self) -> &mut Database {
        self.db
            .as_mut()
            .expect("DbConnection ohne aktive Database verwendet")
    }
}

impl Drop for DbConnection {
    /// Hands the connection back to the pool exactly once.
    fn drop(&mut self) {
        let Some(db) = self.db.take() else { return };
        self.inner.put_back(db);
    }
}

5
src/db/mod.rs Normal file
View File

@@ -0,0 +1,5 @@
mod connection;
pub use connection::{ConnectionPool, DbConnection, DbError, Row, Rows};

186
src/main.rs Normal file
View File

@@ -0,0 +1,186 @@
mod config;
mod db;
mod message_broker;
mod worker;
mod websocket_server;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::time::Duration;
use config::Config;
use libsystemd::daemon::{self, NotifyState};
use message_broker::MessageBroker;
use websocket_server::WebSocketServer;
use worker::{
CharacterCreationWorker, ConnectionPool, DirectorWorker, HouseWorker, PoliticsWorker,
ProduceWorker, StockageManager, UndergroundWorker, UserCharacterWorker,
ValueRecalculationWorker, Worker,
};
static KEEP_RUNNING: AtomicBool = AtomicBool::new(true);
/// Entry point: runs the daemon and maps any error to exit code 1.
fn main() {
    match run_daemon() {
        Ok(()) => {}
        Err(err) => {
            eprintln!("Fehler im Daemon: {err}");
            std::process::exit(1);
        }
    }
}
/// Wires up and runs the whole daemon: configuration, DB pool, message
/// broker, WebSocket server and background workers.
///
/// Blocks in `run_main_loop` until a termination signal arrives, then shuts
/// everything down in a controlled order via `shutdown_system`.
fn run_daemon() -> Result<(), Box<dyn std::error::Error>> {
    install_signal_handler()?;
    let config = load_config()?;
    let pool = create_connection_pool(&config)?;
    let websocket_config = load_websocket_config(&config)?;
    let broker = MessageBroker::new();
    let mut websocket_server = WebSocketServer::new(
        websocket_config.port,
        pool.clone(),
        broker.clone(),
        websocket_config.ssl_enabled,
        websocket_config.cert_path,
        websocket_config.key_path,
    );
    let mut workers = create_workers(pool, broker.clone());
    websocket_server.set_workers(&workers);
    start_system(&mut websocket_server, &mut workers, &broker);
    // systemd: report that the service is now "ready".
    let _ = daemon::notify(false, &[NotifyState::Ready]);
    run_main_loop();
    shutdown_system(&mut websocket_server, &mut workers, &broker);
    Ok(())
}
/// Installs a SIGINT/SIGTERM handler (e.g. Ctrl+C) that clears the global
/// `KEEP_RUNNING` flag so the main loop terminates.
fn install_signal_handler() -> Result<(), Box<dyn std::error::Error>> {
    ctrlc::set_handler(|| KEEP_RUNNING.store(false, Ordering::SeqCst))?;
    Ok(())
}
/// Subset of the daemon configuration relevant to the WebSocket server.
struct WebSocketConfig {
    port: u16,                 // TCP port to listen on
    ssl_enabled: bool,         // whether TLS should be used
    cert_path: Option<String>, // PEM certificate path; only set when TLS is on
    key_path: Option<String>,  // PEM private-key path; only set when TLS is on
}
/// Loads the daemon configuration from its fixed path.
/// (The path could be made configurable later.)
fn load_config() -> Result<Config, Box<dyn std::error::Error>> {
    Config::from_file("/etc/yourpart/daemon.conf")
}
/// Builds the Postgres connection pool from the `DB_*` configuration keys.
fn create_connection_pool(config: &Config) -> Result<ConnectionPool, Box<dyn std::error::Error>> {
    let host = config.get("DB_HOST")?;
    let port = config.get("DB_PORT")?;
    let name = config.get("DB_NAME")?;
    let user = config.get("DB_USER")?;
    let password = config.get("DB_PASSWORD")?;
    let conn_str =
        format!("host={host} port={port} dbname={name} user={user} password={password}");
    // Pool size mirrors the C++ implementation.
    db::ConnectionPool::new(conn_str, 10).map_err(Into::into)
}
/// Reads the WebSocket-related configuration keys.
///
/// `WEBSOCKET_SSL_ENABLED` defaults to "false" when absent; the certificate
/// and key paths are only required (and read) when SSL is enabled.
fn load_websocket_config(config: &Config) -> Result<WebSocketConfig, Box<dyn std::error::Error>> {
    let port: u16 = config.get("WEBSOCKET_PORT")?.parse()?;
    let ssl_enabled = config
        .get("WEBSOCKET_SSL_ENABLED")
        .unwrap_or_else(|_| String::from("false"))
        == "true";
    let (cert_path, key_path) = if ssl_enabled {
        (
            Some(config.get("WEBSOCKET_SSL_CERT_PATH")?),
            Some(config.get("WEBSOCKET_SSL_KEY_PATH")?),
        )
    } else {
        (None, None)
    };
    Ok(WebSocketConfig {
        port,
        ssl_enabled,
        cert_path,
        key_path,
    })
}
/// Instantiates all background workers in their fixed order; each receives
/// its own clone of the pool and broker handles.
fn create_workers(pool: ConnectionPool, broker: MessageBroker) -> Vec<Box<dyn Worker>> {
    let mut workers: Vec<Box<dyn Worker>> = Vec::with_capacity(9);
    workers.push(Box::new(CharacterCreationWorker::new(pool.clone(), broker.clone())));
    workers.push(Box::new(ProduceWorker::new(pool.clone(), broker.clone())));
    workers.push(Box::new(StockageManager::new(pool.clone(), broker.clone())));
    workers.push(Box::new(DirectorWorker::new(pool.clone(), broker.clone())));
    workers.push(Box::new(ValueRecalculationWorker::new(pool.clone(), broker.clone())));
    workers.push(Box::new(UserCharacterWorker::new(pool.clone(), broker.clone())));
    workers.push(Box::new(HouseWorker::new(pool.clone(), broker.clone())));
    workers.push(Box::new(PoliticsWorker::new(pool.clone(), broker.clone())));
    workers.push(Box::new(UndergroundWorker::new(pool, broker)));
    workers
}
/// Boots the message broker, the WebSocket server and every worker
/// (main thread plus watchdog for each).
fn start_system(
    websocket_server: &mut WebSocketServer,
    workers: &mut [Box<dyn Worker>],
    broker: &MessageBroker,
) {
    broker.start();
    websocket_server.run();
    workers.iter_mut().for_each(|worker| {
        worker.start_worker_thread();
        worker.enable_watchdog();
    });
}
/// Tears the system down in a controlled order: workers first, then the
/// WebSocket server, then the broker.
fn shutdown_system(
    websocket_server: &mut WebSocketServer,
    workers: &mut [Box<dyn Worker>],
    broker: &MessageBroker,
) {
    // systemd: announce that we are now shutting down in a controlled way.
    let _ = daemon::notify(false, &[NotifyState::Stopping]);
    // 1) Stop the workers — they poll their `running_worker` flag regularly
    //    and therefore exit promptly even with long work intervals.
    for worker in workers {
        worker.stop_worker_thread();
    }
    // 2) Stop the WebSocket server (shuts the Tokio runtime down).
    websocket_server.stop();
    // 3) MessageBroker hook — currently still a stub, but placed centrally
    //    here for a future internal queue/thread implementation.
    broker.stop();
}
/// Blocks until the signal handler clears `KEEP_RUNNING`, polling every 100 ms.
fn run_main_loop() {
    loop {
        if !KEEP_RUNNING.load(Ordering::Relaxed) {
            break;
        }
        thread::sleep(Duration::from_millis(100));
    }
}

115
src/message_broker.rs Normal file
View File

@@ -0,0 +1,115 @@
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{mpsc, Arc, Mutex};
use std::thread;
use std::time::Duration;
// Platzhalter-Implementierung, angelehnt an die C++-Version.
// Später können wir hier auf Kanäle und ggf. async (Tokio) umstellen.
/// Shared-ownership subscriber callback.
type Callback = Arc<dyn Fn(String) + Send + Sync + 'static>;

/// Cloneable pub/sub broker: `publish` enqueues a message and a background
/// thread (spawned by `start`) fans it out to every subscriber.
#[derive(Clone)]
pub struct MessageBroker {
    inner: Arc<Inner>,
}

struct Inner {
    subscribers: Mutex<Vec<Callback>>,
    sender: mpsc::Sender<String>,
    // The receiver is moved into the dispatch thread on the first `start`.
    receiver: Mutex<Option<mpsc::Receiver<String>>>,
    running: AtomicBool,
    started: AtomicBool,
}

impl MessageBroker {
    /// Creates a broker; the dispatch thread is not started yet.
    pub fn new() -> Self {
        let (sender, receiver) = mpsc::channel::<String>();
        let inner = Inner {
            subscribers: Mutex::new(Vec::new()),
            sender,
            receiver: Mutex::new(Some(receiver)),
            running: AtomicBool::new(true),
            started: AtomicBool::new(false),
        };
        Self {
            inner: Arc::new(inner),
        }
    }

    /// Enqueues a message for asynchronous delivery to all subscribers.
    /// A send failure (receiver already gone because the broker is shutting
    /// down) is silently ignored.
    pub fn publish(&self, message: String) {
        let _ = self.inner.sender.send(message);
    }

    /// Registers a callback invoked for every published message.
    pub fn subscribe<F>(&self, f: F)
    where
        F: Fn(String) + Send + Sync + 'static,
    {
        self.inner.subscribers.lock().unwrap().push(Arc::new(f));
    }

    /// Starts the dispatch thread. Idempotent: only the first call spawns.
    pub fn start(&self) {
        if self.inner.started.swap(true, Ordering::SeqCst) {
            return;
        }
        let inner = Arc::clone(&self.inner);
        let receiver = inner.receiver.lock().unwrap().take();
        let Some(rx) = receiver else { return };
        thread::spawn(move || loop {
            // While running: poll with a 100 ms timeout. After `stop`: keep
            // draining with a short 50 ms timeout so queued messages are
            // still delivered, then exit on the first idle period.
            let active = inner.running.load(Ordering::Relaxed);
            let wait = if active {
                Duration::from_millis(100)
            } else {
                Duration::from_millis(50)
            };
            match rx.recv_timeout(wait) {
                Ok(msg) => dispatch_to_subscribers(&inner, &msg),
                Err(mpsc::RecvTimeoutError::Timeout) => {
                    if !active {
                        break;
                    }
                }
                Err(mpsc::RecvTimeoutError::Disconnected) => break,
            }
        });
    }

    /// Asks the dispatch thread to finish once the queue has been drained.
    pub fn stop(&self) {
        self.inner.running.store(false, Ordering::SeqCst);
    }
}

/// Invokes every registered subscriber with a copy of `message`. The
/// subscriber list is cloned first so callbacks run without the lock held.
fn dispatch_to_subscribers(inner: &Inner, message: &str) {
    let callbacks = {
        let guard = inner.subscribers.lock().unwrap();
        guard.clone()
    };
    for callback in callbacks {
        callback(message.to_owned());
    }
}

463
src/websocket_server.rs Normal file
View File

@@ -0,0 +1,463 @@
use crate::db::ConnectionPool;
use crate::message_broker::MessageBroker;
use crate::worker::Worker;
use futures_util::{FutureExt, SinkExt, StreamExt};
use serde::Deserialize;
use serde_json::Value as Json;
use std::collections::HashMap;
use std::fs::File;
use std::io::BufReader;
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::TcpListener;
use tokio::runtime::{Builder, Runtime};
use tokio::sync::{broadcast, mpsc, Mutex};
use tokio_rustls::rustls::{self, ServerConfig};
use tokio_rustls::TlsAcceptor;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::accept_async;
use rustls_pemfile::{certs, pkcs8_private_keys, rsa_private_keys};
use rustls::pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs1KeyDer, PrivatePkcs8KeyDer};
/// Simple WebSocket server based on Tokio + tokio-tungstenite.
///
/// Supports:
/// - the `setUserId` client event (`{"event":"setUserId","data":{"userId":"..."}}`)
/// - forwarding broker messages carrying a `user_id` field to matching connections
/// - broadcasting messages without a `user_id` to all connections
pub struct WebSocketServer {
    port: u16,             // TCP port to bind
    pool: ConnectionPool,  // DB pool; currently only passed through to the accept loop
    broker: MessageBroker, // source of outbound messages
    use_ssl: bool,
    cert_path: Option<String>,
    key_path: Option<String>,
    // NOTE(review): raw pointers into the worker boxes set via `set_workers`.
    // They dangle if the worker boxes are dropped while the server lives, and
    // they are never dereferenced anywhere in this file — consider removing
    // them or switching to `Arc<dyn Worker>`.
    workers: Vec<*const dyn Worker>,
    running: Arc<AtomicBool>,  // shared with the accept loop as a stop flag
    runtime: Option<Runtime>,  // owned Tokio runtime; present while running
}
/// Simple registry providing connection statistics for `getConnections`.
#[derive(Default)]
struct ConnectionRegistry {
    total: usize,                  // all currently open connections
    unauthenticated: usize,        // connections that have not sent setUserId yet
    by_user: HashMap<String, usize>, // user id -> number of open connections
}
/// Builds a TLS acceptor from PEM certificate-chain and private-key files.
///
/// The key file is first parsed as PKCS#8; if that yields no key, it is
/// re-opened and parsed as a PKCS#1 (RSA) key. The first key found is used.
fn create_tls_acceptor(
    cert_path: Option<&str>,
    key_path: Option<&str>,
) -> Result<TlsAcceptor, Box<dyn std::error::Error>> {
    let cert_path = cert_path.ok_or("SSL aktiviert, aber kein Zertifikatspfad gesetzt")?;
    let key_path = key_path.ok_or("SSL aktiviert, aber kein Key-Pfad gesetzt")?;

    let mut cert_reader = BufReader::new(File::open(cert_path)?);
    let cert_chain = certs(&mut cert_reader)
        .collect::<Result<Vec<CertificateDer<'static>>, _>>()?;
    if cert_chain.is_empty() {
        return Err("Zertifikatsdatei enthält keine Zertifikate".into());
    }

    let mut key_reader = BufReader::new(File::open(key_path)?);
    let mut keys: Vec<PrivateKeyDer<'static>> = pkcs8_private_keys(&mut key_reader)
        .map(|res: Result<PrivatePkcs8KeyDer<'static>, _>| res.map(PrivateKeyDer::Pkcs8))
        .collect::<Result<_, _>>()?;
    if keys.is_empty() {
        // "Rewind" by re-opening the file and try RSA (PKCS#1) keys instead.
        let mut key_reader = BufReader::new(File::open(key_path)?);
        keys = rsa_private_keys(&mut key_reader)
            .map(|res: Result<PrivatePkcs1KeyDer<'static>, _>| res.map(PrivateKeyDer::Pkcs1))
            .collect::<Result<_, _>>()?;
    }
    let private_key = match keys.into_iter().next() {
        Some(key) => key,
        None => {
            return Err("Key-Datei enthält keinen privaten Schlüssel (PKCS8 oder RSA)".into())
        }
    };

    let config = ServerConfig::builder()
        .with_no_client_auth()
        .with_single_cert(cert_chain, private_key)?;
    Ok(TlsAcceptor::from(Arc::new(config)))
}
impl WebSocketServer {
    /// Creates a server; nothing is bound or spawned until `run` is called.
    pub fn new(
        port: u16,
        pool: ConnectionPool,
        broker: MessageBroker,
        use_ssl: bool,
        cert_path: Option<String>,
        key_path: Option<String>,
    ) -> Self {
        Self {
            port,
            pool,
            broker,
            use_ssl,
            cert_path,
            key_path,
            workers: Vec::new(),
            running: Arc::new(AtomicBool::new(false)),
            runtime: None,
        }
    }

    /// Stores raw pointers to the given workers.
    // NOTE(review): the pointers are never read anywhere in this file and
    // dangle if the worker boxes are dropped first — consider removing this.
    pub fn set_workers(&mut self, workers: &[Box<dyn Worker>]) {
        self.workers.clear();
        for w in workers {
            self.workers.push(&**w as *const dyn Worker);
        }
    }

    /// Starts the accept loop on its own Tokio runtime. Idempotent: a second
    /// call while running only logs a warning.
    pub fn run(&mut self) {
        if self.running.swap(true, Ordering::SeqCst) {
            eprintln!("[WebSocketServer] Läuft bereits.");
            return;
        }
        if self.use_ssl {
            println!(
                "Starte WebSocket-Server auf Port {} mit SSL (cert: {:?}, key: {:?})",
                self.port, self.cert_path, self.key_path
            );
        } else {
            println!("Starte WebSocket-Server auf Port {} (ohne SSL)", self.port);
        }
        let addr = format!("0.0.0.0:{}", self.port);
        let running_flag = self.running.clone();
        let broker = self.broker.clone();
        // Registry shared by all connections (serves `getConnections`).
        let registry = Arc::new(Mutex::new(ConnectionRegistry::default()));
        // Broadcast channel carrying broker messages to every connection.
        let (tx, _) = broadcast::channel::<String>(1024);
        let tx_clone = tx.clone();
        // Broker subscription: every published message enters the broadcast channel.
        broker.subscribe(move |msg: String| {
            let _ = tx_clone.send(msg);
        });
        // Load the optional TLS acceptor when SSL is enabled. On failure the
        // server deliberately falls back to plain TCP (logged below).
        let tls_acceptor = if self.use_ssl {
            match create_tls_acceptor(
                self.cert_path.as_deref(),
                self.key_path.as_deref(),
            ) {
                Ok(acc) => Some(acc),
                Err(err) => {
                    eprintln!(
                        "[WebSocketServer] TLS-Initialisierung fehlgeschlagen, starte ohne SSL: {err}"
                    );
                    None
                }
            }
        } else {
            None
        };
        let rt = Builder::new_multi_thread()
            .enable_all()
            .build()
            .expect("Tokio Runtime konnte nicht erstellt werden");
        rt.spawn(run_accept_loop(
            addr,
            running_flag,
            tx,
            self.pool.clone(),
            registry,
            tls_acceptor,
        ));
        self.runtime = Some(rt);
    }

    /// Stops the server by shutting the runtime down in the background; open
    /// connection tasks are dropped with it.
    pub fn stop(&mut self) {
        if !self.running.swap(false, Ordering::SeqCst) {
            return;
        }
        println!("WebSocket-Server wird gestoppt.");
        if let Some(rt) = self.runtime.take() {
            rt.shutdown_background();
        }
    }
}
/// Envelope for client -> server JSON messages:
/// `{"event": "...", "data": {...}}`. Both fields fall back to their
/// defaults (empty string / JSON null) when absent.
#[derive(Debug, Deserialize)]
struct IncomingMessage {
    #[serde(default)]
    event: String,
    #[serde(default)]
    data: Json,
}
/// Accept loop: binds `addr` and spawns one task per incoming connection,
/// performing the optional TLS handshake first.
///
/// `tx` carries broker messages; each connection subscribes its own receiver.
/// On a bind failure the `running` flag is cleared and the loop exits.
/// `_pool` is currently unused — presumably reserved for future handlers.
async fn run_accept_loop(
    addr: String,
    running: Arc<AtomicBool>,
    tx: broadcast::Sender<String>,
    _pool: ConnectionPool,
    registry: Arc<Mutex<ConnectionRegistry>>,
    tls_acceptor: Option<TlsAcceptor>,
) {
    let listener = match TcpListener::bind(&addr).await {
        Ok(l) => l,
        Err(e) => {
            eprintln!("[WebSocketServer] Fehler beim Binden an {}: {}", addr, e);
            running.store(false, Ordering::SeqCst);
            return;
        }
    };
    println!("[WebSocketServer] Lauscht auf {}", addr);
    while running.load(Ordering::SeqCst) {
        let (stream, peer) = match listener.accept().await {
            Ok(v) => v,
            Err(e) => {
                eprintln!("[WebSocketServer] accept() fehlgeschlagen: {}", e);
                continue;
            }
        };
        let peer_addr = peer;
        let rx = tx.subscribe();
        let registry_clone = registry.clone();
        let tls_acceptor_clone = tls_acceptor.clone();
        tokio::spawn(async move {
            if let Some(acc) = tls_acceptor_clone {
                match acc.accept(stream).await {
                    Ok(tls_stream) => {
                        handle_connection(tls_stream, peer_addr, rx, registry_clone).await
                    }
                    Err(err) => {
                        eprintln!(
                            "[WebSocketServer] TLS-Handshake fehlgeschlagen ({peer_addr}): {err}"
                        );
                    }
                }
            } else {
                handle_connection(stream, peer_addr, rx, registry_clone).await;
            }
        });
    }
}
/// Handles one accepted (possibly TLS-wrapped) connection: performs the
/// WebSocket handshake, tracks the connection in `registry`, then pumps
/// client messages and broker messages until either side closes.
async fn handle_connection<S>(
    stream: S,
    peer_addr: SocketAddr,
    mut broker_rx: broadcast::Receiver<String>,
    registry: Arc<Mutex<ConnectionRegistry>>,
) where
    S: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
    let ws_stream = match accept_async(stream).await {
        Ok(ws) => ws,
        Err(e) => {
            eprintln!("[WebSocketServer] WebSocket-Handshake fehlgeschlagen ({peer_addr}): {e}");
            return;
        }
    };
    println!("[WebSocketServer] Neue Verbindung von {}", peer_addr);
    let (mut ws_sender, mut ws_receiver) = ws_stream.split();
    // Channel for replies addressed directly to this client (e.g. getConnections).
    let (client_tx, mut client_rx) = mpsc::channel::<String>(32);
    // Count the new connection in the registry (initially unauthenticated).
    {
        let mut reg = registry.lock().await;
        reg.total += 1;
        reg.unauthenticated += 1;
    }
    // user_id of this connection (filled in by setUserId).
    let user_id = Arc::new(tokio::sync::Mutex::new(Option::<String>::None));
    let user_id_for_incoming = user_id.clone();
    let user_id_for_broker = user_id.clone();
    let registry_for_incoming = registry.clone();
    let client_tx_incoming = client_tx.clone();
    // Half 1: messages arriving from the client.
    let incoming = async move {
        while let Some(msg) = ws_receiver.next().await {
            match msg {
                Ok(Message::Text(txt)) => {
                    if let Ok(parsed) = serde_json::from_str::<IncomingMessage>(&txt) {
                        match parsed.event.as_str() {
                            "setUserId" => {
                                if let Some(uid) =
                                    parsed.data.get("userId").and_then(|v| v.as_str())
                                {
                                    {
                                        // Move this connection from
                                        // "unauthenticated" to the user bucket.
                                        let mut reg = registry_for_incoming.lock().await;
                                        if reg.unauthenticated > 0 {
                                            reg.unauthenticated -= 1;
                                        }
                                        *reg.by_user.entry(uid.to_string()).or_insert(0) += 1;
                                    }
                                    let mut guard = user_id_for_incoming.lock().await;
                                    *guard = Some(uid.to_string());
                                    println!(
                                        "[WebSocketServer] User-ID gesetzt für {}: {}",
                                        peer_addr, uid
                                    );
                                }
                            }
                            "getConnections" => {
                                // Return a snapshot of the current connection counts.
                                let snapshot = {
                                    let reg = registry_for_incoming.lock().await;
                                    serde_json::json!({
                                        "event": "getConnectionsResponse",
                                        "total": reg.total,
                                        "unauthenticated": reg.unauthenticated,
                                        "users": reg.by_user,
                                    })
                                    .to_string()
                                };
                                let _ = client_tx_incoming.send(snapshot).await;
                            }
                            _ => {
                                // Unknown events are ignored.
                            }
                        }
                    }
                }
                Ok(Message::Ping(_)) => {
                    // Pings are currently ignored; dedicated ping/pong
                    // handling could be added here later.
                }
                Ok(Message::Close(_)) => break,
                Err(e) => {
                    eprintln!("[WebSocketServer] Fehler bei Nachricht von {peer_addr}: {e}");
                    break;
                }
                _ => {}
            }
        }
    };
    // Half 2: broker messages and direct replies going out to the client.
    let outgoing = async move {
        loop {
            tokio::select! {
                // Messages from the MessageBroker.
                broker_msg = broker_rx.recv() => {
                    let msg = match broker_msg {
                        Ok(m) => m,
                        Err(_) => break,
                    };
                    // Once a user_id is set: JSON messages are only forwarded
                    // when their "user_id" field matches. NOTE(review): JSON
                    // messages WITHOUT a user_id field are dropped for
                    // authenticated connections (matches_user stays false),
                    // while non-JSON payloads always pass — confirm this is
                    // the intended broadcast semantics.
                    let target_user = {
                        let guard = user_id_for_broker.lock().await;
                        guard.clone()
                    };
                    if let Some(uid) = target_user.clone() {
                        if let Ok(json) = serde_json::from_str::<Json>(&msg) {
                            let matches_user = json
                                .get("user_id")
                                .and_then(|v| {
                                    if let Some(s) = v.as_str() {
                                        Some(s.to_string())
                                    } else if let Some(n) = v.as_i64() {
                                        Some(n.to_string())
                                    } else {
                                        None
                                    }
                                })
                                .map(|v| v == uid)
                                .unwrap_or(false);
                            if !matches_user {
                                continue;
                            }
                        }
                    }
                    if let Err(e) = ws_sender.send(Message::Text(msg)).await {
                        eprintln!(
                            "[WebSocketServer] Fehler beim Senden an {}: {}",
                            peer_addr, e
                        );
                        break;
                    }
                }
                // Replies produced by this connection itself (e.g. getConnections).
                client_msg = client_rx.recv() => {
                    match client_msg {
                        Some(msg) => {
                            if let Err(e) = ws_sender.send(Message::Text(msg)).await {
                                eprintln!(
                                    "[WebSocketServer] Fehler beim Senden an {}: {}",
                                    peer_addr, e
                                );
                                break;
                            }
                        }
                        None => {
                            // Channel was closed.
                            break;
                        }
                    }
                }
            }
        }
    };
    // Run both halves until the first one finishes (close or error).
    futures_util::future::select(incoming.boxed(), outgoing.boxed()).await;
    // Remove the connection from the registry.
    let final_uid = {
        let guard = user_id.lock().await;
        guard.clone()
    };
    {
        let mut reg = registry.lock().await;
        if reg.total > 0 {
            reg.total -= 1;
        }
        if let Some(uid) = final_uid {
            if let Some(count) = reg.by_user.get_mut(&uid) {
                if *count > 0 {
                    *count -= 1;
                }
                if *count == 0 {
                    reg.by_user.remove(&uid);
                }
            }
        } else if reg.unauthenticated > 0 {
            reg.unauthenticated -= 1;
        }
    }
    println!("[WebSocketServer] Verbindung geschlossen: {}", peer_addr);
}

151
src/worker/base.rs Normal file
View File

@@ -0,0 +1,151 @@
use crate::db::{ConnectionPool, DbError};
use crate::message_broker::MessageBroker;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
/// Lifecycle interface implemented by every background worker.
pub trait Worker: Send {
    /// Spawns the worker's main loop on its own thread.
    fn start_worker_thread(&mut self);
    /// Signals the loop to stop and joins the thread.
    fn stop_worker_thread(&mut self);
    /// Starts the watchdog that periodically logs the current step.
    fn enable_watchdog(&mut self);
}
/// State shared between a worker's control handle and its threads.
pub(crate) struct WorkerState {
    pub(crate) running_worker: AtomicBool,   // main loop keeps going while true
    pub(crate) running_watchdog: AtomicBool, // watchdog keeps going while true
    pub(crate) current_step: Mutex<String>,  // human-readable progress marker
}

impl WorkerState {
    /// Fresh state: both threads stopped, step set to `"<name>: idle"`.
    pub(crate) fn new(name: &str) -> Self {
        let running_worker = AtomicBool::new(false);
        let running_watchdog = AtomicBool::new(false);
        let current_step = Mutex::new(format!("{name}: idle"));
        Self {
            running_worker,
            running_watchdog,
            current_step,
        }
    }
}
/// Shared implementation backbone for all workers: owns the two threads,
/// the DB pool handle and the broker handle.
pub struct BaseWorker {
    pub name: String,          // worker name used in log output
    pub pool: ConnectionPool,  // shared DB connection pool
    pub broker: MessageBroker, // pub/sub handle for notifications
    pub(crate) state: Arc<WorkerState>,
    worker_thread: Option<thread::JoinHandle<()>>,   // main loop thread
    watchdog_thread: Option<thread::JoinHandle<()>>, // watchdog thread
}
impl BaseWorker {
    /// Creates a worker shell with its own shared state; no threads run yet.
    pub fn new(name: &str, pool: ConnectionPool, broker: MessageBroker) -> Self {
        Self {
            name: name.to_string(),
            pool,
            broker,
            state: Arc::new(WorkerState::new(name)),
            worker_thread: None,
            watchdog_thread: None,
        }
    }

    /// Records the step currently being executed (reported by the watchdog).
    pub fn set_current_step<S: Into<String>>(&self, step: S) {
        if let Ok(mut guard) = self.state.current_step.lock() {
            *guard = step.into();
        }
    }

    /// Spawns the worker thread running `loop_fn`. Idempotent: a second call
    /// while the worker is running is ignored with a warning. `loop_fn` is
    /// expected to poll `state.running_worker` and return once it is false.
    pub(crate) fn start_worker_with_loop<F>(&mut self, loop_fn: F)
    where
        F: Fn(Arc<WorkerState>) + Send + 'static,
    {
        if self.state.running_worker.swap(true, Ordering::SeqCst) {
            eprintln!("[{}] Worker thread already running, skipping start.", self.name);
            return;
        }
        let state = Arc::clone(&self.state);
        self.worker_thread = Some(thread::spawn(move || {
            loop_fn(state);
        }));
    }

    /// Signals the worker loop to stop and joins its thread.
    pub(crate) fn stop_worker(&mut self) {
        self.state.running_worker.store(false, Ordering::Relaxed);
        if let Some(handle) = self.worker_thread.take() {
            let _ = handle.join();
        }
    }

    /// Spawns a watchdog thread that logs the current step every ~10 seconds.
    /// Idempotent like `start_worker_with_loop`.
    pub(crate) fn start_watchdog(&mut self) {
        if self.state.running_watchdog.swap(true, Ordering::SeqCst) {
            eprintln!("[{}] Watchdog already enabled, skipping.", self.name);
            return;
        }
        let state = Arc::clone(&self.state);
        let name = self.name.clone();
        self.watchdog_thread = Some(thread::spawn(move || {
            'outer: while state.running_watchdog.load(Ordering::Relaxed) {
                // Sleep in 1-second slices so `stop_watchdog` can join within
                // ~1 s. (The previous version slept 10 s in one piece, making
                // shutdown block for up to 10 s per worker.)
                for _ in 0..10 {
                    thread::sleep(Duration::from_secs(1));
                    if !state.running_watchdog.load(Ordering::Relaxed) {
                        break 'outer;
                    }
                }
                let step = state.current_step.lock().unwrap().clone();
                eprintln!("[{name}] Watchdog: current step = {step}");
            }
        }));
    }

    /// Signals the watchdog to stop and joins its thread.
    pub(crate) fn stop_watchdog(&mut self) {
        self.state.running_watchdog.store(false, Ordering::Relaxed);
        if let Some(handle) = self.watchdog_thread.take() {
            let _ = handle.join();
        }
    }

    /// Whether the worker loop is (still) supposed to run.
    pub(crate) fn is_running(&self) -> bool {
        self.state.running_worker.load(Ordering::Relaxed)
    }
}
const QUERY_UPDATE_MONEY: &str = r#"
SELECT falukant_data.update_money($1, $2, $3);
"#;
impl BaseWorker {
/// Aktualisiert das Geld eines Falukant-Users über die DB-Funktion `falukant_data.update_money`.
/// `action` entspricht dem Log-/Aktions-Tag (z.B. "credit pay rate", "debitor_prism").
pub fn change_falukant_user_money(
&self,
falukant_user_id: i32,
money_change: f64,
action: &str,
) -> Result<(), DbError> {
use postgres::types::ToSql;
let mut conn = self
.pool
.get()
.map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
conn.prepare("update_money", QUERY_UPDATE_MONEY)?;
let p1: &(dyn ToSql + Sync) = &falukant_user_id;
let p2: &(dyn ToSql + Sync) = &money_change;
let p3: &(dyn ToSql + Sync) = &action;
conn.execute("update_money", &[p1, p2, p3])?;
Ok(())
}
}

View File

@@ -0,0 +1,619 @@
use crate::db::{ConnectionPool, DbError, Rows};
use crate::message_broker::MessageBroker;
use rand::distributions::{Distribution, Uniform};
use rand::rngs::StdRng;
use rand::{thread_rng, Rng, SeedableRng};
use std::collections::{HashMap, HashSet};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use super::base::{BaseWorker, Worker, WorkerState};
/// Creates the daily batch of NPC characters and, via a dedicated thread,
/// removes NPCs that die of old age.
pub struct CharacterCreationWorker {
    pub(crate) base: BaseWorker,
    rng: StdRng,        // RNG for per-region character counts
    dist: Uniform<i32>, // characters per (nobility, gender) combination: 2..=3
    // presumably keyed by gender string ("male"/"female") -> first-name ids;
    // confirm against load_names
    first_name_cache: HashMap<String, HashSet<i32>>,
    last_name_cache: HashSet<i32>, // last-name ids
    death_check_running: Arc<AtomicBool>, // keeps the death-monitor thread alive
    death_thread: Option<thread::JoinHandle<()>>,
}
// SQL-Queries analog zur C++-Implementierung
const QUERY_IS_PREVIOUS_DAY_CHARACTER_CREATED: &str = r#"
SELECT created_at
FROM falukant_data."character"
WHERE user_id IS NULL
AND created_at::date = CURRENT_DATE
ORDER BY created_at DESC
LIMIT 1;
"#;
const QUERY_GET_TOWN_REGION_IDS: &str = r#"
SELECT fdr.id
FROM falukant_data.region fdr
JOIN falukant_type.region ftr ON fdr.region_type_id = ftr.id
WHERE ftr.label_tr = 'city';
"#;
const QUERY_LOAD_FIRST_NAMES: &str = r#"
SELECT id, gender
FROM falukant_predefine.firstname;
"#;
const QUERY_LOAD_LAST_NAMES: &str = r#"
SELECT id
FROM falukant_predefine.lastname;
"#;
const QUERY_INSERT_CHARACTER: &str = r#"
INSERT INTO falukant_data.character(
user_id,
region_id,
first_name,
last_name,
birthdate,
gender,
created_at,
updated_at,
title_of_nobility
) VALUES (
NULL,
$1,
$2,
$3,
NOW(),
$4,
NOW(),
NOW(),
$5
);
"#;
const QUERY_GET_ELIGIBLE_NPC_FOR_DEATH: &str = r#"
WITH aged AS (
SELECT
c.id,
(current_date - c.birthdate::date) AS age,
c.user_id
FROM
falukant_data.character c
WHERE
c.user_id IS NULL
AND (current_date - c.birthdate::date) > 60
),
always_sel AS (
SELECT *
FROM aged
WHERE age > 85
),
random_sel AS (
SELECT *
FROM aged
WHERE age <= 85
ORDER BY random()
LIMIT 10
)
SELECT *
FROM always_sel
UNION ALL
SELECT *
FROM random_sel;
"#;
const QUERY_DELETE_DIRECTOR: &str = r#"
DELETE FROM falukant_data.director
WHERE director_character_id = $1
RETURNING employer_user_id;
"#;
const QUERY_DELETE_RELATIONSHIP: &str = r#"
WITH deleted AS (
DELETE FROM falukant_data.relationship
WHERE character1_id = $1
OR character2_id = $1
RETURNING
CASE
WHEN character1_id = $1 THEN character2_id
ELSE character1_id
END AS related_character_id,
relationship_type_id
)
SELECT
c.user_id AS related_user_id
FROM deleted d
JOIN falukant_data.character c
ON c.id = d.related_character_id;
"#;
const QUERY_DELETE_CHILD_RELATION: &str = r#"
WITH deleted AS (
DELETE FROM falukant_data.child_relation
WHERE child_character_id = $1
RETURNING
father_character_id,
mother_character_id
)
SELECT
cf.user_id AS father_user_id,
cm.user_id AS mother_user_id
FROM deleted d
JOIN falukant_data.character cf
ON cf.id = d.father_character_id
JOIN falukant_data.character cm
ON cm.id = d.mother_character_id;
"#;
const QUERY_INSERT_NOTIFICATION: &str = r#"
INSERT INTO falukant_log.notification (
user_id,
tr,
shown,
created_at,
updated_at
) VALUES ($1, 'director_death', FALSE, NOW(), NOW());
"#;
const QUERY_MARK_CHARACTER_DECEASED: &str = r#"
DELETE FROM falukant_data.character
WHERE id = $1;
"#;
impl CharacterCreationWorker {
/// Public constructor: also starts the hourly NPC death-monitor thread.
pub fn new(pool: ConnectionPool, broker: MessageBroker) -> Self {
    Self::new_internal(pool, broker, true)
}
/// Internal constructor that optionally starts the NPC death monitor.
///
/// The monitor thread runs `monitor_character_deaths` roughly once per hour,
/// sleeping in 1-second slices so it can stop quickly once
/// `death_check_running` is cleared.
// NOTE(review): nothing visible in this chunk clears `death_check_running`
// or joins `death_thread` — confirm a Drop/stop implementation exists
// elsewhere in the file.
fn new_internal(pool: ConnectionPool, broker: MessageBroker, start_death_thread: bool) -> Self {
    let base = BaseWorker::new("CharacterCreationWorker", pool.clone(), broker.clone());
    let rng = StdRng::from_entropy();
    let dist = Uniform::from(2..=3);
    let death_check_running = Arc::new(AtomicBool::new(start_death_thread));
    let death_thread = if start_death_thread {
        let death_flag = Arc::clone(&death_check_running);
        let pool_clone = pool;
        let broker_clone = broker;
        Some(thread::spawn(move || {
            while death_flag.load(Ordering::Relaxed) {
                if let Err(err) =
                    CharacterCreationWorker::monitor_character_deaths(&pool_clone, &broker_clone)
                {
                    eprintln!(
                        "[CharacterCreationWorker] Fehler beim Überprüfen von NPC-Todesfällen: {err}"
                    );
                }
                // Wait one hour, aborting early once death_flag becomes false.
                for _ in 0..3600 {
                    if !death_flag.load(Ordering::Relaxed) {
                        break;
                    }
                    thread::sleep(Duration::from_secs(1));
                }
            }
        }))
    } else {
        None
    };
    Self {
        base,
        rng,
        dist,
        first_name_cache: HashMap::new(),
        last_name_cache: HashSet::new(),
        death_check_running,
        death_thread,
    }
}
/// Variante ohne separaten Todes-Monitor-Thread wird nur in der Worker-Loop benutzt.
fn new_for_loop(pool: ConnectionPool, broker: MessageBroker) -> Self {
Self::new_internal(pool, broker, false)
}
fn is_today_character_created(&self) -> bool {
match self.fetch_today_characters() {
Ok(rows) => !rows.is_empty(),
Err(err) => {
eprintln!(
"[CharacterCreationWorker] Fehler in is_today_character_created: {err}"
);
false
}
}
}
fn fetch_today_characters(&self) -> Result<Rows, crate::db::DbError> {
const STMT_NAME: &str = "is_previous_day_character_created";
let mut conn = self
.base
.pool
.get()
.map_err(|e| crate::db::DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
conn.prepare(STMT_NAME, QUERY_IS_PREVIOUS_DAY_CHARACTER_CREATED)?;
conn.execute(STMT_NAME, &[])
}
fn create_characters_for_today(&mut self) {
self.load_names();
if self.first_name_cache.is_empty() || self.last_name_cache.is_empty() {
eprintln!(
"[CharacterCreationWorker] Fehler: Namen konnten nicht geladen werden (Stub-Implementierung)."
);
return;
}
let town_ids = self.get_town_region_ids();
for region_id in town_ids {
self.create_characters_for_region(region_id);
}
}
fn create_characters_for_region(&mut self, region_id: i32) {
let nobility_stands = [1, 2, 3];
let genders = ["male", "female"];
for &nobility in &nobility_stands {
for &gender in &genders {
let num_chars = self.rng.sample(self.dist);
for _ in 0..num_chars {
self.create_character(region_id, gender, nobility);
}
}
}
}
fn create_character(&mut self, region_id: i32, gender: &str, title_of_nobility: i32) {
let first_set = self
.first_name_cache
.get(gender)
.cloned()
.unwrap_or_default();
let first_name_id = Self::get_random_from_set(&first_set);
if first_name_id == -1 {
eprintln!("[CharacterCreationWorker] Fehler: Kein passender Vorname gefunden.");
return;
}
let last_name_id = Self::get_random_from_set(&self.last_name_cache);
if last_name_id == -1 {
eprintln!("[CharacterCreationWorker] Fehler: Kein passender Nachname gefunden.");
return;
}
if let Err(err) = Self::insert_character(
&self.base.pool,
region_id,
first_name_id,
last_name_id,
gender,
title_of_nobility,
) {
eprintln!("[CharacterCreationWorker] Fehler in createCharacter: {err}");
}
}
fn get_town_region_ids(&self) -> Vec<i32> {
match self.load_town_region_ids() {
Ok(rows) => rows
.into_iter()
.filter_map(|row| row.get("id")?.parse::<i32>().ok())
.collect(),
Err(err) => {
eprintln!(
"[CharacterCreationWorker] Fehler in getTownRegionIds: {err}"
);
Vec::new()
}
}
}
fn load_town_region_ids(&self) -> Result<Rows, crate::db::DbError> {
const STMT_NAME: &str = "get_town_region_ids";
let mut conn = self
.base
.pool
.get()
.map_err(|e| crate::db::DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
conn.prepare(STMT_NAME, QUERY_GET_TOWN_REGION_IDS)?;
conn.execute(STMT_NAME, &[])
}
fn load_names(&mut self) {
if self.first_name_cache.is_empty() || self.last_name_cache.is_empty() {
if let Err(err) = self.load_first_and_last_names() {
eprintln!("[CharacterCreationWorker] Fehler in loadNames: {err}");
}
}
}
fn load_first_and_last_names(&mut self) -> Result<(), crate::db::DbError> {
let mut conn = self
.base
.pool
.get()
.map_err(|e| crate::db::DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
// Vornamen
conn.prepare("load_first_names", QUERY_LOAD_FIRST_NAMES)?;
let first_rows = conn.execute("load_first_names", &[])?;
for row in first_rows {
let id = match row.get("id").and_then(|v| v.parse::<i32>().ok()) {
Some(id) => id,
None => continue,
};
let gender = row.get("gender").cloned().unwrap_or_default();
self.first_name_cache.entry(gender).or_default().insert(id);
}
// Nachnamen
conn.prepare("load_last_names", QUERY_LOAD_LAST_NAMES)?;
let last_rows = conn.execute("load_last_names", &[])?;
for row in last_rows {
if let Some(id) = row.get("id").and_then(|v| v.parse::<i32>().ok()) {
self.last_name_cache.insert(id);
}
}
Ok(())
}
fn get_random_from_set(set: &HashSet<i32>) -> i32 {
if set.is_empty() {
return -1;
}
let mut rng = thread_rng();
let idx = rng.gen_range(0..set.len());
*set.iter().nth(idx).unwrap_or(&-1)
}
fn run_iteration(&mut self, state: &WorkerState) {
self.base
.set_current_step("Check if previous day character was created");
if !self.is_today_character_created() {
self.base
.set_current_step("Create characters for today");
self.create_characters_for_today();
}
self.sleep_one_minute(state);
}
fn sleep_one_minute(&self, state: &WorkerState) {
self.base
.set_current_step("Sleep for 60 seconds");
for _ in 0..60 {
if !state.running_worker.load(Ordering::Relaxed) {
break;
}
thread::sleep(Duration::from_secs(1));
}
self.base.set_current_step("Loop done");
}
}
impl Worker for CharacterCreationWorker {
    /// Spawns the background loop; the loop instance is a monitor-less clone of
    /// this worker so only one death-monitor thread ever exists.
    fn start_worker_thread(&mut self) {
        let pool_handle = self.base.pool.clone();
        let broker_handle = self.base.broker.clone();
        let loop_body = move |state: Arc<WorkerState>| {
            let mut loop_worker =
                CharacterCreationWorker::new_for_loop(pool_handle.clone(), broker_handle.clone());
            loop {
                if !state.running_worker.load(Ordering::Relaxed) {
                    break;
                }
                loop_worker.run_iteration(&state);
            }
        };
        self.base.start_worker_with_loop(loop_body);
    }

    /// Requests the worker loop to stop.
    fn stop_worker_thread(&mut self) {
        self.base.stop_worker();
    }

    /// Activates the base worker's watchdog.
    fn enable_watchdog(&mut self) {
        self.base.start_watchdog();
    }
}
impl Drop for CharacterCreationWorker {
    /// Signals the death-monitor thread to stop and waits for it to finish.
    fn drop(&mut self) {
        self.death_check_running.store(false, Ordering::Relaxed);
        match self.death_thread.take() {
            Some(join_handle) => {
                // Join errors (panicked thread) are deliberately ignored on drop.
                let _ = join_handle.join();
            }
            None => {}
        }
    }
}
// Additional logic: monitor and process NPC deaths.
impl CharacterCreationWorker {
    /// Inserts a newly generated NPC character into the database.
    fn insert_character(
        pool: &ConnectionPool,
        region_id: i32,
        first_name_id: i32,
        last_name_id: i32,
        gender: &str,
        title_of_nobility: i32,
    ) -> Result<(), DbError> {
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("insert_character", QUERY_INSERT_CHARACTER)?;
        conn.execute(
            "insert_character",
            &[
                &region_id,
                &first_name_id,
                &last_name_id,
                &gender,
                &title_of_nobility,
            ],
        )?;
        Ok(())
    }

    /// Fetches all death-eligible NPCs and lets each one die with an
    /// age-dependent probability. A failure while handling a single NPC is
    /// logged and does not abort processing of the remaining rows.
    fn monitor_character_deaths(
        pool: &ConnectionPool,
        broker: &MessageBroker,
    ) -> Result<(), DbError> {
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare(
            "get_eligible_npc_for_death",
            QUERY_GET_ELIGIBLE_NPC_FOR_DEATH,
        )?;
        let rows = conn.execute("get_eligible_npc_for_death", &[])?;
        for row in rows {
            let character_id = row
                .get("id")
                .and_then(|v| v.parse::<i32>().ok())
                .unwrap_or(-1);
            let age = row
                .get("age")
                .and_then(|v| v.parse::<i32>().ok())
                .unwrap_or(0);
            if character_id > 0 && Self::calculate_death_probability(age) {
                if let Err(err) = Self::handle_character_death(pool, broker, character_id) {
                    eprintln!(
                        "[CharacterCreationWorker] Fehler beim Bearbeiten des NPC-Todes (id={character_id}): {err}"
                    );
                }
            }
        }
        Ok(())
    }

    /// Rolls whether a character of the given age dies now: impossible below
    /// 60, then 1% at age 60 plus one percentage point per additional year.
    fn calculate_death_probability(age: i32) -> bool {
        if age < 60 {
            return false;
        }
        let base_probability = 0.01_f64;
        let increase_per_year = 0.01_f64;
        let death_probability =
            base_probability + increase_per_year * (age.saturating_sub(60) as f64);
        let mut rng = thread_rng();
        let dist = Uniform::from(0.0..1.0);
        let roll: f64 = dist.sample(&mut rng);
        roll < death_probability
    }

    /// Removes every trace of a dead NPC (director post, relationships, child
    /// relations), notifies all affected users and finally deletes the character.
    fn handle_character_death(
        pool: &ConnectionPool,
        broker: &MessageBroker,
        character_id: i32,
    ) -> Result<(), DbError> {
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        // 1) Delete director post and notify the employing user.
        conn.prepare("delete_director", QUERY_DELETE_DIRECTOR)?;
        let dir_result = conn.execute("delete_director", &[&character_id])?;
        if let Some(row) = dir_result.get(0) {
            if let Some(user_id) = row
                .get("employer_user_id")
                .and_then(|v| v.parse::<i32>().ok())
            {
                Self::notify_user(pool, broker, user_id, "director_death")?;
            }
        }
        // 2) Delete relationships and notify the affected users.
        conn.prepare("delete_relationship", QUERY_DELETE_RELATIONSHIP)?;
        let rel_result = conn.execute("delete_relationship", &[&character_id])?;
        for row in rel_result {
            if let Some(related_user_id) = row
                .get("related_user_id")
                .and_then(|v| v.parse::<i32>().ok())
            {
                Self::notify_user(pool, broker, related_user_id, "relationship_death")?;
            }
        }
        // 3) Delete child relations and notify both parents.
        conn.prepare("delete_child_relation", QUERY_DELETE_CHILD_RELATION)?;
        let child_result = conn.execute("delete_child_relation", &[&character_id])?;
        for row in child_result {
            if let Some(father_user_id) = row
                .get("father_user_id")
                .and_then(|v| v.parse::<i32>().ok())
            {
                Self::notify_user(pool, broker, father_user_id, "child_death")?;
            }
            if let Some(mother_user_id) = row
                .get("mother_user_id")
                .and_then(|v| v.parse::<i32>().ok())
            {
                Self::notify_user(pool, broker, mother_user_id, "child_death")?;
            }
        }
        // 4) Remove the character itself.
        Self::mark_character_as_deceased(pool, character_id)?;
        Ok(())
    }

    /// Writes a notification row for the user and publishes both the generic
    /// status-update event and the event-specific message on the broker.
    fn notify_user(
        pool: &ConnectionPool,
        broker: &MessageBroker,
        user_id: i32,
        event_type: &str,
    ) -> Result<(), DbError> {
        // BUGFIX: the shared QUERY_INSERT_NOTIFICATION hard-codes 'director_death'
        // as the `tr` value, so relationship_death/child_death notifications were
        // mislabeled in falukant_log.notification. Use a parameterized variant
        // (own statement name to avoid clashing with the old prepared statement)
        // so the stored row matches the event_type that is broadcast below.
        const QUERY_INSERT_NOTIFICATION_TYPED: &str = r#"
            INSERT INTO falukant_log.notification (
                user_id,
                tr,
                shown,
                created_at,
                updated_at
            ) VALUES ($1, $2, FALSE, NOW(), NOW());
        "#;
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("insert_notification_typed", QUERY_INSERT_NOTIFICATION_TYPED)?;
        conn.execute("insert_notification_typed", &[&user_id, &event_type])?;
        // falukantUpdateStatus
        let update_message =
            format!(r#"{{"event":"falukantUpdateStatus","user_id":{}}}"#, user_id);
        broker.publish(update_message);
        // original event-specific notification
        let message =
            format!(r#"{{"event":"{event_type}","user_id":{}}}"#, user_id);
        broker.publish(message);
        Ok(())
    }

    /// Final cleanup step for a death.
    // NOTE(review): despite the name this DELETEs the character row (see
    // QUERY_MARK_CHARACTER_DECEASED) rather than setting a deceased flag.
    fn mark_character_as_deceased(
        pool: &ConnectionPool,
        character_id: i32,
    ) -> Result<(), DbError> {
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("mark_character_deceased", QUERY_MARK_CHARACTER_DECEASED)?;
        conn.execute("mark_character_deceased", &[&character_id])?;
        Ok(())
    }
}

584
src/worker/director.rs Normal file
View File

@@ -0,0 +1,584 @@
use crate::db::{DbConnection, DbError, Row};
use crate::message_broker::MessageBroker;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::{Duration, Instant};
use crate::db::ConnectionPool;
use super::base::{BaseWorker, Worker, WorkerState};
// A director and the permission flags controlling which tasks it may perform.
#[derive(Debug, Clone)]
struct Director {
    id: i32,
    may_produce: bool,
    may_sell: bool,
    may_start_transport: bool,
}

// Snapshot of the most profitable production option for one director's branch,
// as returned by QUERY_GET_BEST_PRODUCTION.
#[derive(Debug, Clone)]
struct ProductionPlan {
    falukant_user_id: i32,
    money: i32,
    certificate: i32,
    branch_id: i32,
    product_id: i32,
    stock_size: i32,        // total stock capacity of the branch
    used_in_stock: i32,     // capacity already occupied by inventory
    running_productions: i32,
}

// One sellable inventory row joined with product/branch/user context.
#[derive(Debug, Clone)]
struct InventoryItem {
    id: i32,
    product_id: i32,
    quantity: i32,
    quality: i32,       // 0-100, scales the sell price between 60% and 100%
    sell_cost: f64,
    user_id: i32,
    region_id: i32,
    branch_id: i32,
}

// A director whose salary is due, with the paying employer and the amount.
#[derive(Debug, Clone)]
struct SalaryItem {
    id: i32,
    employer_user_id: i32,
    income: i32,
}

// Background worker that lets hired directors produce, sell and draw salary.
pub struct DirectorWorker {
    base: BaseWorker,
    last_run: Option<Instant>,  // throttles the heavy task batch to once per minute
}
// SQL queries (ported 1:1 from director_worker.h).

// Selects all directors together with their permission flags and branch,
// restricted to "working hours" (server time between 08:00 and 17:00).
const QUERY_GET_DIRECTORS: &str = r#"
    SELECT
        d.may_produce,
        d.may_sell,
        d.may_start_transport,
        b.id AS branch_id,
        fu.id AS falukantUserId,
        d.id
    FROM falukant_data.director d
    JOIN falukant_data.falukant_user fu
      ON fu.id = d.employer_user_id
    JOIN falukant_data.character c
      ON c.id = d.director_character_id
    JOIN falukant_data.branch b
      ON b.region_id = c.region_id
     AND b.falukant_user_id = fu.id
    WHERE current_time BETWEEN '08:00:00' AND '17:00:00';
"#;

// Ranks the products producible in the director's branch by a worth heuristic
// (regional worth, character + director knowledge, production time) and
// returns the single best option plus stock/production occupancy figures.
const QUERY_GET_BEST_PRODUCTION: &str = r#"
    SELECT
        fdu.id falukant_user_id,
        fdu.money,
        fdu.certificate,
        ftp.id product_id,
        ftp.label_tr,
        (
            SELECT SUM(quantity)
            FROM falukant_data.stock fds
            WHERE fds.branch_id = fdb.id
        ) AS stock_size,
        COALESCE((
            SELECT SUM(COALESCE(fdi.quantity, 0))
            FROM falukant_data.stock fds
            JOIN falukant_data.inventory fdi
              ON fdi.stock_id = fds.id
            WHERE fds.branch_id = fdb.id
        ), 0) AS used_in_stock,
        (ftp.sell_cost * (fdtpw.worth_percent + (fdk_character.knowledge * 2 + fdk_director.knowledge) / 3) / 100 - 6 * ftp.category)
            / (300.0 * ftp.production_time) AS worth,
        fdb.id AS branch_id,
        (
            SELECT COUNT(id)
            FROM falukant_data.production
            WHERE branch_id = fdb.id
        ) AS running_productions,
        COALESCE((
            SELECT SUM(COALESCE(fdp.quantity, 0)) quantity
            FROM falukant_data.production fdp
            WHERE fdp.branch_id = fdb.id
        ), 0) AS running_productions_quantity
    FROM falukant_data.director fdd
    JOIN falukant_data.character fdc
      ON fdc.id = fdd.director_character_id
    JOIN falukant_data.falukant_user fdu
      ON fdd.employer_user_id = fdu.id
    JOIN falukant_data.character user_character
      ON user_character.user_id = fdu.id
    JOIN falukant_data.branch fdb
      ON fdb.falukant_user_id = fdu.id
     AND fdb.region_id = fdc.region_id
    JOIN falukant_data.town_product_worth fdtpw
      ON fdtpw.region_id = fdb.region_id
    JOIN falukant_data.knowledge fdk_character
      ON fdk_character.product_id = fdtpw.product_id
     AND fdk_character.character_id = user_character.id
    JOIN falukant_data.knowledge fdk_director
      ON fdk_director.product_id = fdtpw.product_id
     AND fdk_director.character_id = fdd.director_character_id
    JOIN falukant_type.product ftp
      ON ftp.id = fdtpw.product_id
     AND ftp.category <= fdu.certificate
    WHERE fdd.id = $1
    ORDER BY worth DESC
    LIMIT 1;
"#;

// Starts one production batch in a branch.
const QUERY_INSERT_PRODUCTION: &str = r#"
    INSERT INTO falukant_data.production (branch_id, product_id, quantity)
    VALUES ($1, $2, $3);
"#;

// All inventory rows a director is allowed to sell, joined with price and owner.
const QUERY_GET_INVENTORY: &str = r#"
    SELECT
        i.id,
        i.product_id,
        i.quantity,
        i.quality,
        p.sell_cost,
        fu.id AS user_id,
        b.region_id,
        b.id AS branch_id
    FROM falukant_data.inventory i
    JOIN falukant_data.stock s
      ON s.id = i.stock_id
    JOIN falukant_data.branch b
      ON b.id = s.branch_id
    JOIN falukant_data.falukant_user fu
      ON fu.id = b.falukant_user_id
    JOIN falukant_data.director d
      ON d.employer_user_id = fu.id
    JOIN falukant_type.product p
      ON p.id = i.product_id
    WHERE d.id = $1;
"#;

// Removes a sold (or empty) inventory row.
const QUERY_REMOVE_INVENTORY: &str = r#"
    DELETE FROM falukant_data.inventory
    WHERE id = $1;
"#;

// Upserts into the sell log: quantities accumulate per (region, product, seller).
const QUERY_ADD_SELL_LOG: &str = r#"
    INSERT INTO falukant_log.sell (region_id, product_id, quantity, seller_id)
    VALUES ($1, $2, $3, $4)
    ON CONFLICT (region_id, product_id, seller_id)
    DO UPDATE
    SET quantity = falukant_log.sell.quantity + EXCLUDED.quantity;
"#;

// Directors whose last salary payout happened before today.
const QUERY_GET_SALARY_TO_PAY: &str = r#"
    SELECT d.id, d.employer_user_id, d.income
    FROM falukant_data.director d
    WHERE DATE(d.last_salary_payout) < DATE(NOW());
"#;

// Marks a director's salary as paid out now.
const QUERY_SET_SALARY_PAYED: &str = r#"
    UPDATE falukant_data.director
    SET last_salary_payout = NOW()
    WHERE id = $1;
"#;

// Recomputes each director's satisfaction from income vs. nobility/knowledge
// and returns the employer of every director whose value actually changed.
const QUERY_UPDATE_SATISFACTION: &str = r#"
    WITH new_sats AS (
        SELECT
            d.id,
            ROUND(
                d.income::numeric
                /
                (
                    c.title_of_nobility
                    * POWER(1.231, AVG(k.knowledge) / 1.5)
                )
                * 100
            ) AS new_satisfaction
        FROM falukant_data.director d
        JOIN falukant_data.knowledge k
          ON d.director_character_id = k.character_id
        JOIN falukant_data.character c
          ON c.id = d.director_character_id
        GROUP BY d.id, c.title_of_nobility, d.income
    )
    UPDATE falukant_data.director dir
    SET satisfaction = ns.new_satisfaction
    FROM new_sats ns
    WHERE dir.id = ns.id
      AND dir.satisfaction IS DISTINCT FROM ns.new_satisfaction
    RETURNING dir.employer_user_id;
"#;
impl DirectorWorker {
    /// Creates the worker; the heavy task batch has not run yet (last_run = None).
    pub fn new(pool: ConnectionPool, broker: MessageBroker) -> Self {
        Self {
            base: BaseWorker::new("DirectorWorker", pool, broker),
            last_run: None,
        }
    }

    /// One loop pass: runs the full task batch at most once per minute, then
    /// sleeps one second so shutdown requests are noticed quickly.
    fn run_iteration(&mut self, state: &WorkerState) {
        self.base.set_current_step("DirectorWorker iteration");
        let now = Instant::now();
        let should_run = match self.last_run {
            None => true,
            Some(last) => now.saturating_duration_since(last) >= Duration::from_secs(60),
        };
        if should_run {
            if let Err(err) = self.perform_all_tasks() {
                eprintln!("[DirectorWorker] Fehler beim Ausführen der Aufgabe: {err}");
            }
            self.last_run = Some(now);
        }
        std::thread::sleep(Duration::from_secs(1));
        // NOTE(review): this trailing check has no effect — the function returns
        // right after it either way; the caller's loop re-checks the flag.
        if !state.running_worker.load(Ordering::Relaxed) {
            return;
        }
    }

    /// Runs the three task groups in order; aborts at the first failing group.
    fn perform_all_tasks(&mut self) -> Result<(), DbError> {
        self.perform_task()?;
        self.pay_salary()?;
        self.calculate_satisfaction()?;
        Ok(())
    }

    /// Fetches all active directors and dispatches their permitted actions.
    fn perform_task(&mut self) -> Result<(), DbError> {
        self.base
            .set_current_step("Get director actions from DB");
        let mut conn = self
            .base
            .pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("get_directors", QUERY_GET_DIRECTORS)?;
        let directors_rows = conn.execute("get_directors", &[])?;
        let directors: Vec<Director> = directors_rows
            .into_iter()
            .filter_map(Self::map_row_to_director)
            .collect();
        for director in directors {
            if director.may_produce {
                self.start_productions(&director)?;
            }
            if director.may_start_transport {
                self.start_transports_stub(&director);
            }
            if director.may_sell {
                self.start_sellings(&director)?;
            }
        }
        Ok(())
    }

    /// Maps one result row to a Director; flags accept both "t" and "true"
    /// spellings of Postgres booleans and default to false when absent.
    fn map_row_to_director(row: Row) -> Option<Director> {
        Some(Director {
            id: row.get("id")?.parse().ok()?,
            may_produce: row.get("may_produce").map(|v| v == "t" || v == "true").unwrap_or(false),
            may_sell: row.get("may_sell").map(|v| v == "t" || v == "true").unwrap_or(false),
            may_start_transport: row
                .get("may_start_transport")
                .map(|v| v == "t" || v == "true")
                .unwrap_or(false),
        })
    }

    /// Determines the best production option for the director's branch and
    /// queues production batches for it (if any option exists).
    fn start_productions(&mut self, director: &Director) -> Result<(), DbError> {
        self.base
            .set_current_step("DirectorWorker: start_productions");
        let mut conn = self
            .base
            .pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("get_to_produce", QUERY_GET_BEST_PRODUCTION)?;
        let rows = conn.execute("get_to_produce", &[&director.id])?;
        if rows.is_empty() {
            return Ok(());
        }
        let plan = match Self::map_row_to_production_plan(&rows[0]) {
            Some(p) => p,
            None => return Ok(()),
        };
        self.create_production_batches(&mut conn, &plan)?;
        Ok(())
    }

    /// Maps one result row to a ProductionPlan; any missing/unparsable column
    /// drops the whole plan (returns None).
    fn map_row_to_production_plan(row: &Row) -> Option<ProductionPlan> {
        Some(ProductionPlan {
            falukant_user_id: row.get("falukant_user_id")?.parse().ok()?,
            money: row.get("money")?.parse().ok()?,
            certificate: row.get("certificate")?.parse().ok()?,
            branch_id: row.get("branch_id")?.parse().ok()?,
            product_id: row.get("product_id")?.parse().ok()?,
            stock_size: row.get("stock_size")?.parse().ok()?,
            used_in_stock: row.get("used_in_stock")?.parse().ok()?,
            running_productions: row.get("running_productions")?.parse().ok()?,
        })
    }

    /// Charges the user for the production cost and inserts the production in
    /// batches of at most 100 pieces. Caps: max 2 concurrent productions, max
    /// 300 pieces, limited by free stock capacity and the user's money.
    fn create_production_batches(
        &mut self,
        conn: &mut DbConnection,
        plan: &ProductionPlan,
    ) -> Result<(), DbError> {
        let running = plan.running_productions;
        if running >= 2 {
            return Ok(());
        }
        let free_capacity =
            plan.stock_size - plan.used_in_stock - plan.running_productions;
        // Piece cost scales with the user's certificate level.
        let one_piece_cost = plan.certificate * 6;
        let max_money_production = if one_piece_cost > 0 {
            plan.money / one_piece_cost
        } else {
            0
        };
        let to_produce = free_capacity
            .min(max_money_production)
            .min(300)
            .max(0);
        if to_produce < 1 {
            return Ok(());
        }
        let production_cost = to_produce * one_piece_cost;
        // Money change failures are logged but do not cancel the production.
        if let Err(err) = self.base.change_falukant_user_money(
            plan.falukant_user_id,
            -(production_cost as f64),
            "director starts production",
        ) {
            eprintln!(
                "[DirectorWorker] Fehler bei change_falukant_user_money: {err}"
            );
        }
        conn.prepare("insert_production", QUERY_INSERT_PRODUCTION)?;
        let mut remaining = to_produce;
        while remaining > 0 {
            let batch = remaining.min(100);
            conn.execute(
                "insert_production",
                &[&plan.branch_id, &plan.product_id, &batch],
            )?;
            remaining -= batch;
        }
        let message = format!(
            r#"{{"event":"production_started","branch_id":{}}}"#,
            plan.branch_id
        );
        self.base.broker.publish(message);
        Ok(())
    }

    // TODO: port the transport logic from the C++ code when needed.
    fn start_transports_stub(&self, _director: &Director) {
    }

    /// Sells every inventory item the director has access to.
    fn start_sellings(&mut self, director: &Director) -> Result<(), DbError> {
        self.base
            .set_current_step("DirectorWorker: start_sellings");
        let mut conn = self
            .base
            .pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("get_to_sell", QUERY_GET_INVENTORY)?;
        let rows = conn.execute("get_to_sell", &[&director.id])?;
        let mut items: Vec<InventoryItem> =
            rows.into_iter().filter_map(Self::map_row_to_inventory_item).collect();
        // Prepared once here; sell_single_inventory_item executes them per item.
        conn.prepare("remove_inventory", QUERY_REMOVE_INVENTORY)?;
        conn.prepare("add_sell_log", QUERY_ADD_SELL_LOG)?;
        for item in items.drain(..) {
            self.sell_single_inventory_item(&mut conn, &item)?;
        }
        Ok(())
    }

    /// Maps one result row to an InventoryItem; any missing/unparsable column
    /// drops the item (returns None).
    fn map_row_to_inventory_item(row: Row) -> Option<InventoryItem> {
        Some(InventoryItem {
            id: row.get("id")?.parse().ok()?,
            product_id: row.get("product_id")?.parse().ok()?,
            quantity: row.get("quantity")?.parse().ok()?,
            quality: row.get("quality")?.parse().ok()?,
            sell_cost: row.get("sell_cost")?.parse().ok()?,
            user_id: row.get("user_id")?.parse().ok()?,
            region_id: row.get("region_id")?.parse().ok()?,
            branch_id: row.get("branch_id")?.parse().ok()?,
        })
    }

    /// Sells one inventory row: credits the owner (price scales from 60% to
    /// 100% of sell_cost with quality), logs the sale, removes the row and
    /// publishes a "selled_items" event. Empty rows are just removed.
    fn sell_single_inventory_item(
        &mut self,
        conn: &mut DbConnection,
        item: &InventoryItem,
    ) -> Result<(), DbError> {
        if item.quantity <= 0 {
            conn.execute("remove_inventory", &[&item.id])?;
            return Ok(());
        }
        let min_price = item.sell_cost * 0.6;
        let piece_sell_price =
            min_price + (item.sell_cost - min_price) * (item.quality as f64 / 100.0);
        let sell_price = piece_sell_price * item.quantity as f64;
        if let Err(err) = self.base.change_falukant_user_money(
            item.user_id,
            sell_price,
            "sell products",
        ) {
            eprintln!(
                "[DirectorWorker] Fehler bei change_falukant_user_money (sell products): {err}"
            );
        }
        conn.execute(
            "add_sell_log",
            &[
                &item.region_id,
                &item.product_id,
                &item.quantity,
                &item.user_id,
            ],
        )?;
        conn.execute("remove_inventory", &[&item.id])?;
        let message = format!(
            r#"{{"event":"selled_items","branch_id":{}}}"#,
            item.branch_id
        );
        self.base.broker.publish(message);
        Ok(())
    }

    /// Pays out all due director salaries (debits the employer), marks them as
    /// paid and publishes a status-update event per employer.
    fn pay_salary(&mut self) -> Result<(), DbError> {
        self.base.set_current_step("DirectorWorker: pay_salary");
        let mut conn = self
            .base
            .pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("get_salary_to_pay", QUERY_GET_SALARY_TO_PAY)?;
        conn.prepare("set_salary_payed", QUERY_SET_SALARY_PAYED)?;
        let rows = conn.execute("get_salary_to_pay", &[])?;
        let salaries: Vec<SalaryItem> =
            rows.into_iter().filter_map(Self::map_row_to_salary_item).collect();
        for item in salaries {
            if let Err(err) = self.base.change_falukant_user_money(
                item.employer_user_id,
                -(item.income as f64),
                "director payed out",
            ) {
                eprintln!(
                    "[DirectorWorker] Fehler bei change_falukant_user_money (director payed out): {err}"
                );
            }
            conn.execute("set_salary_payed", &[&item.id])?;
            let message =
                format!(r#"{{"event":"falukantUpdateStatus","user_id":{}}}"#, item.employer_user_id);
            self.base.broker.publish(message);
        }
        Ok(())
    }

    /// Maps one result row to a SalaryItem; any missing/unparsable column
    /// drops the item (returns None).
    fn map_row_to_salary_item(row: Row) -> Option<SalaryItem> {
        Some(SalaryItem {
            id: row.get("id")?.parse().ok()?,
            employer_user_id: row.get("employer_user_id")?.parse().ok()?,
            income: row.get("income")?.parse().ok()?,
        })
    }

    /// Recomputes director satisfaction in the DB and publishes a
    /// "directorchanged" event for every employer whose director changed.
    fn calculate_satisfaction(&mut self) -> Result<(), DbError> {
        self.base
            .set_current_step("DirectorWorker: calculate_satisfaction");
        let mut conn = self
            .base
            .pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("update_satisfaction", QUERY_UPDATE_SATISFACTION)?;
        let rows = conn.execute("update_satisfaction", &[])?;
        for row in rows {
            if let Some(employer_id) = row
                .get("employer_user_id")
                .and_then(|v| v.parse::<i32>().ok())
            {
                let message = format!(
                    r#"{{"event":"directorchanged","user_id":{}}}"#,
                    employer_id
                );
                self.base.broker.publish(message);
            }
        }
        Ok(())
    }
}
impl Worker for DirectorWorker {
    /// Spawns the background loop; the loop owns its own worker instance.
    fn start_worker_thread(&mut self) {
        let pool_handle = self.base.pool.clone();
        let broker_handle = self.base.broker.clone();
        let loop_body = move |state: Arc<WorkerState>| {
            let mut loop_worker = DirectorWorker::new(pool_handle.clone(), broker_handle.clone());
            loop {
                if !state.running_worker.load(Ordering::Relaxed) {
                    break;
                }
                loop_worker.run_iteration(&state);
            }
        };
        self.base.start_worker_with_loop(loop_body);
    }

    /// Requests the worker loop to stop.
    fn stop_worker_thread(&mut self) {
        self.base.stop_worker();
    }

    /// Activates the base worker's watchdog.
    fn enable_watchdog(&mut self) {
        self.base.start_watchdog();
    }
}

157
src/worker/house.rs Normal file
View File

@@ -0,0 +1,157 @@
use crate::db::{ConnectionPool, DbError};
use crate::message_broker::MessageBroker;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::{Duration, Instant};
use super::base::{BaseWorker, Worker, WorkerState};
// Background worker that spawns new buyable houses and degrades house states.
pub struct HouseWorker {
    base: BaseWorker,
}

// SQL queries, analogous to `houseworker.h`.

// Randomly selects house types (0.01% chance per type per run) for which a new
// buyable house should appear; the 'under_bridge' type is excluded.
const QUERY_GET_NEW_HOUSE_DATA: &str = r#"
    SELECT
        h.id AS house_id
    FROM
        falukant_type.house AS h
    WHERE
        random() < 0.0001
        AND label_tr <> 'under_bridge';
"#;

// Puts a new house of the given type on the market.
const QUERY_ADD_NEW_BUYABLE_HOUSE: &str = r#"
    INSERT INTO falukant_data.buyable_house (house_type_id)
    VALUES ($1);
"#;

// Degrades each condition of every buyable house by a random 0-3 points.
// NOTE(review): `(3 + 0 * id)` equals 3; the `0 * id` term looks like a
// leftover from the original source — confirm before simplifying.
const QUERY_UPDATE_BUYABLE_HOUSE_STATE: &str = r#"
    UPDATE falukant_data.buyable_house
    SET roof_condition = ROUND(roof_condition - random() * (3 + 0 * id)),
        floor_condition = ROUND(floor_condition - random() * (3 + 0 * id)),
        wall_condition = ROUND(wall_condition - random() * (3 + 0 * id)),
        window_condition = ROUND(window_condition - random() * (3 + 0 * id));
"#;

// Same degradation for user-owned houses, except those under a bridge.
const QUERY_UPDATE_USER_HOUSE_STATE: &str = r#"
    UPDATE falukant_data.user_house
    SET roof_condition = ROUND(roof_condition - random() * (3 + 0 * id)),
        floor_condition = ROUND(floor_condition - random() * (3 + 0 * id)),
        wall_condition = ROUND(wall_condition - random() * (3 + 0 * id)),
        window_condition = ROUND(window_condition - random() * (3 + 0 * id))
    WHERE house_type_id NOT IN (
        SELECT id
        FROM falukant_type.house h
        WHERE h.label_tr = 'under_bridge'
    );
"#;
impl HouseWorker {
    /// Creates the worker around the shared pool and broker.
    pub fn new(pool: ConnectionPool, broker: MessageBroker) -> Self {
        Self {
            base: BaseWorker::new("HouseWorker", pool, broker),
        }
    }

    /// Main loop: hourly task (spawn new houses) and daily task (degrade house
    /// states), polling the shutdown flag once per second. Both tasks also run
    /// immediately on the first pass (last_* starts as None).
    fn run_loop(pool: ConnectionPool, _broker: MessageBroker, state: Arc<WorkerState>) {
        let mut last_hourly_run: Option<Instant> = None;
        let mut last_daily_run: Option<Instant> = None;
        while state.running_worker.load(Ordering::Relaxed) {
            let now = Instant::now();
            // Hourly task: create new buyable houses.
            let should_run_hourly = match last_hourly_run {
                None => true,
                Some(last) => now.saturating_duration_since(last) >= Duration::from_secs(3600),
            };
            if should_run_hourly {
                if let Err(err) = Self::perform_task_inner(&pool) {
                    eprintln!("[HouseWorker] Fehler in performTask: {err}");
                }
                last_hourly_run = Some(now);
            }
            // Daily task: degrade house conditions.
            let should_run_daily = match last_daily_run {
                None => true,
                Some(last) => now.saturating_duration_since(last) >= Duration::from_secs(24 * 3600),
            };
            if should_run_daily {
                if let Err(err) = Self::perform_house_state_change_inner(&pool) {
                    eprintln!("[HouseWorker] Fehler in performHouseStateChange: {err}");
                }
                last_daily_run = Some(now);
            }
            std::thread::sleep(Duration::from_secs(1));
        }
    }

    /// Randomly picks house types and puts one new buyable house on the market
    /// for each picked type.
    fn perform_task_inner(pool: &ConnectionPool) -> Result<(), DbError> {
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("[HouseWorker] DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("get_new_house_data", QUERY_GET_NEW_HOUSE_DATA)?;
        let rows = conn.execute("get_new_house_data", &[])?;
        conn.prepare("add_new_buyable_house", QUERY_ADD_NEW_BUYABLE_HOUSE)?;
        for row in rows {
            if let Some(house_id) = row
                .get("house_id")
                .and_then(|v| v.parse::<i32>().ok())
            {
                conn.execute("add_new_buyable_house", &[&house_id])?;
            }
        }
        Ok(())
    }

    /// Degrades the conditions of all buyable and user-owned houses.
    fn perform_house_state_change_inner(pool: &ConnectionPool) -> Result<(), DbError> {
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("[HouseWorker] DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare(
            "update_buyable_house_state",
            QUERY_UPDATE_BUYABLE_HOUSE_STATE,
        )?;
        conn.prepare(
            "update_user_house_state",
            QUERY_UPDATE_USER_HOUSE_STATE,
        )?;
        conn.execute("update_buyable_house_state", &[])?;
        conn.execute("update_user_house_state", &[])?;
        Ok(())
    }
}
impl Worker for HouseWorker {
    /// Hands the hourly/daily loop over to the base worker infrastructure.
    fn start_worker_thread(&mut self) {
        let pool_handle = self.base.pool.clone();
        let broker_handle = self.base.broker.clone();
        let loop_body = move |state: Arc<WorkerState>| {
            Self::run_loop(pool_handle.clone(), broker_handle.clone(), state)
        };
        self.base.start_worker_with_loop(loop_body);
    }

    /// Requests the worker loop to stop.
    fn stop_worker_thread(&mut self) {
        self.base.stop_worker();
    }

    /// Activates the base worker's watchdog.
    fn enable_watchdog(&mut self) {
        self.base.start_watchdog();
    }
}

23
src/worker/mod.rs Normal file
View File

@@ -0,0 +1,23 @@
// Worker submodules: each hosts one background worker implementation.
mod base;
mod character_creation;
mod director;
mod stockage_manager;
mod house;
mod produce;
mod politics;
mod underground;
mod value_recalculation;
mod user_character;

// Public API of the worker layer: the common Worker trait, the shared
// connection pool alias and all concrete worker types.
pub use base::Worker;
pub use crate::db::ConnectionPool;
pub use character_creation::CharacterCreationWorker;
pub use director::DirectorWorker;
pub use stockage_manager::StockageManager;
pub use house::HouseWorker;
pub use produce::ProduceWorker;
pub use politics::PoliticsWorker;
pub use underground::UndergroundWorker;
pub use value_recalculation::ValueRecalculationWorker;
pub use user_character::UserCharacterWorker;
732
src/worker/politics.rs Normal file
View File

@@ -0,0 +1,732 @@
use crate::db::{ConnectionPool, DbError, Row};
use crate::message_broker::MessageBroker;
use std::collections::HashSet;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::{Duration, Instant};
use super::base::{BaseWorker, Worker, WorkerState};
// Background worker that schedules elections and fills political offices.
pub struct PoliticsWorker {
    base: BaseWorker,
}

// Seat statistics for one region: how many office seats exist vs. are filled.
#[derive(Debug, Clone)]
struct OfficeCounts {
    region_id: i32,
    required: i32,
    occupied: i32,
}

// A scheduled election and the number of posts it must fill.
#[derive(Debug, Clone)]
struct Election {
    election_id: i32,
    region_id: i32,
    posts_to_fill: i32,
}

// One occupied political office: which character holds which office where.
#[derive(Debug, Clone)]
struct Office {
    office_id: i32,
    office_type_id: i32,
    character_id: i32,
    region_id: i32,
}
// --- SQL constants (ported 1:1 from politics_worker.h) -----------------------

// Aggregates, per region, how many office seats are required by the office
// types applicable to that region type versus how many are currently occupied.
const QUERY_COUNT_OFFICES_PER_REGION: &str = r#"
    WITH
    seats_per_region AS (
        SELECT
            pot.id AS office_type_id,
            rt.id AS region_id,
            pot.seats_per_region AS seats_total
        FROM falukant_type.political_office_type AS pot
        JOIN falukant_type.region AS rt
          ON pot.region_type = rt.label_tr
    ),
    occupied AS (
        SELECT
            po.office_type_id,
            po.region_id,
            COUNT(*) AS occupied_count
        FROM falukant_data.political_office AS po
        GROUP BY po.office_type_id, po.region_id
    ),
    combined AS (
        SELECT
            spr.region_id,
            spr.seats_total AS required_count,
            COALESCE(o.occupied_count, 0) AS occupied_count
        FROM seats_per_region AS spr
        LEFT JOIN occupied AS o
          ON spr.office_type_id = o.office_type_id
         AND spr.region_id = o.region_id
    )
    SELECT
        region_id,
        SUM(required_count) AS required_count,
        SUM(occupied_count) AS occupied_count
    FROM combined
    GROUP BY region_id;
"#;

// Removes offices whose term expires today and, for every resulting gap that
// has no election scheduled for today yet, inserts a new election; returns the
// newly created elections. Note: the DELETE is a side effect of this query.
const QUERY_SELECT_NEEDED_ELECTIONS: &str = r#"
    WITH
    target_date AS (
        SELECT NOW()::date AS election_date
    ),
    expired_today AS (
        DELETE FROM falukant_data.political_office AS po
        USING falukant_type.political_office_type AS pot
        WHERE po.office_type_id = pot.id
          AND (po.created_at + (pot.term_length * INTERVAL '1 day'))::date
              = (SELECT election_date FROM target_date)
        RETURNING
            pot.id AS office_type_id,
            po.region_id AS region_id
    ),
    gaps_per_region AS (
        SELECT
            office_type_id,
            region_id,
            COUNT(*) AS gaps
        FROM expired_today
        GROUP BY office_type_id, region_id
    ),
    to_schedule AS (
        SELECT
            g.office_type_id,
            g.region_id,
            g.gaps,
            td.election_date
        FROM gaps_per_region AS g
        CROSS JOIN target_date AS td
        WHERE NOT EXISTS (
            SELECT 1
            FROM falukant_data.election AS e
            WHERE e.office_type_id = g.office_type_id
              AND e.region_id = g.region_id
              AND e.date::date = td.election_date
        )
    ),
    new_elections AS (
        INSERT INTO falukant_data.election
            (office_type_id, date, posts_to_fill, created_at, updated_at, region_id)
        SELECT
            ts.office_type_id,
            ts.election_date,
            ts.gaps,
            NOW(),
            NOW(),
            ts.region_id
        FROM to_schedule AS ts
        RETURNING
            id AS election_id,
            region_id,
            posts_to_fill
    )
    SELECT
        ne.election_id,
        ne.region_id,
        ne.posts_to_fill
    FROM new_elections AS ne
    ORDER BY ne.region_id, ne.election_id;
"#;

// Registers random NPC candidates for an election ($1): adults (21+ game days),
// non-"noncivil" nobility, living anywhere in the region subtree of $2;
// twice as many candidates as posts to fill ($3).
const QUERY_INSERT_CANDIDATES: &str = r#"
    INSERT INTO falukant_data.candidate
        (election_id, character_id, created_at, updated_at)
    SELECT
        $1 AS election_id,
        sub.id AS character_id,
        NOW() AS created_at,
        NOW() AS updated_at
    FROM (
        WITH RECURSIVE region_tree AS (
            SELECT r.id
            FROM falukant_data.region AS r
            WHERE r.id = $2
            UNION ALL
            SELECT r2.id
            FROM falukant_data.region AS r2
            JOIN region_tree AS rt
              ON r2.parent_id = rt.id
        )
        SELECT ch.id
        FROM falukant_data.character AS ch
        JOIN region_tree AS rt2
          ON ch.region_id = rt2.id
        WHERE ch.user_id IS NULL
          AND ch.birthdate <= NOW() - INTERVAL '21 days'
          AND ch.title_of_nobility IN (
              SELECT id
              FROM falukant_type.title
              WHERE label_tr != 'noncivil'
          )
        ORDER BY RANDOM()
        LIMIT ($3 * 2)
    ) AS sub(id);
"#;
const QUERY_PROCESS_EXPIRED_AND_FILL: &str = r#"
WITH
expired_offices AS (
DELETE FROM falukant_data.political_office AS po
USING falukant_type.political_office_type AS pot
WHERE po.office_type_id = pot.id
AND (po.created_at + (pot.term_length * INTERVAL '1 day')) <= NOW()
RETURNING
pot.id AS office_type_id,
po.region_id AS region_id
),
distinct_types AS (
SELECT DISTINCT office_type_id, region_id FROM expired_offices
),
votes_per_candidate AS (
SELECT
dt.office_type_id,
dt.region_id,
c.character_id,
COUNT(v.id) AS vote_count
FROM distinct_types AS dt
JOIN falukant_data.election AS e
ON e.office_type_id = dt.office_type_id
JOIN falukant_data.vote AS v
ON v.election_id = e.id
JOIN falukant_data.candidate AS c
ON c.election_id = e.id
AND c.id = v.candidate_id
WHERE e.date >= (NOW() - INTERVAL '30 days')
GROUP BY dt.office_type_id, dt.region_id, c.character_id
),
ranked_winners AS (
SELECT
vpc.office_type_id,
vpc.region_id,
vpc.character_id,
ROW_NUMBER() OVER (
PARTITION BY vpc.office_type_id, vpc.region_id
ORDER BY vpc.vote_count DESC
) AS rn
FROM votes_per_candidate AS vpc
),
selected_winners AS (
SELECT
rw.office_type_id,
rw.region_id,
rw.character_id
FROM ranked_winners AS rw
JOIN falukant_type.political_office_type AS pot
ON pot.id = rw.office_type_id
WHERE rw.rn <= pot.seats_per_region
),
insert_winners AS (
INSERT INTO falukant_data.political_office
(office_type_id, character_id, created_at, updated_at, region_id)
SELECT
sw.office_type_id,
sw.character_id,
NOW(),
NOW(),
sw.region_id
FROM selected_winners AS sw
RETURNING id AS new_office_id, office_type_id, character_id, region_id
),
count_inserted AS (
SELECT
office_type_id,
region_id,
COUNT(*) AS inserted_count
FROM insert_winners
GROUP BY office_type_id, region_id
),
needed_to_fill AS (
SELECT
dt.office_type_id,
dt.region_id,
(pot.seats_per_region - COALESCE(ci.inserted_count, 0)) AS gaps
FROM distinct_types AS dt
JOIN falukant_type.political_office_type AS pot
ON pot.id = dt.office_type_id
LEFT JOIN count_inserted AS ci
ON ci.office_type_id = dt.office_type_id
AND ci.region_id = dt.region_id
WHERE (pot.seats_per_region - COALESCE(ci.inserted_count, 0)) > 0
),
random_candidates AS (
SELECT
rtf.office_type_id,
rtf.region_id,
ch.id AS character_id,
ROW_NUMBER() OVER (
PARTITION BY rtf.office_type_id, rtf.region_id
ORDER BY RANDOM()
) AS rn
FROM needed_to_fill AS rtf
JOIN falukant_data.character AS ch
ON ch.region_id = rtf.region_id
AND ch.user_id IS NULL
AND ch.birthdate <= NOW() - INTERVAL '21 days'
AND ch.title_of_nobility IN (
SELECT id FROM falukant_type.title WHERE label_tr != 'noncivil'
)
AND NOT EXISTS (
SELECT 1
FROM falukant_data.political_office AS po2
JOIN falukant_type.political_office_type AS pot2
ON pot2.id = po2.office_type_id
WHERE po2.character_id = ch.id
AND (po2.created_at + (pot2.term_length * INTERVAL '1 day')) >
NOW() + INTERVAL '2 days'
)
),
insert_random AS (
INSERT INTO falukant_data.political_office
(office_type_id, character_id, created_at, updated_at, region_id)
SELECT
rc.office_type_id,
rc.character_id,
NOW(),
NOW(),
rc.region_id
FROM random_candidates AS rc
JOIN needed_to_fill AS rtf
ON rtf.office_type_id = rc.office_type_id
AND rtf.region_id = rc.region_id
WHERE rc.rn <= rtf.gaps
RETURNING id AS new_office_id, office_type_id, character_id, region_id
)
SELECT
new_office_id AS office_id,
office_type_id,
character_id,
region_id
FROM insert_winners
UNION ALL
SELECT
new_office_id AS office_id,
office_type_id,
character_id,
region_id
FROM insert_random;
"#;
const QUERY_USERS_IN_CITIES_OF_REGIONS: &str = r#"
WITH RECURSIVE region_tree AS (
SELECT id
FROM falukant_data.region
WHERE id = $1
UNION ALL
SELECT r2.id
FROM falukant_data.region AS r2
JOIN region_tree AS rt
ON r2.parent_id = rt.id
)
SELECT DISTINCT ch.user_id
FROM falukant_data.character AS ch
JOIN region_tree AS rt2
ON ch.region_id = rt2.id
WHERE ch.user_id IS NOT NULL;
"#;
const QUERY_NOTIFY_OFFICE_EXPIRATION: &str = r#"
INSERT INTO falukant_log.notification
(user_id, tr, created_at, updated_at)
SELECT
po.character_id,
'notify_office_expiring',
NOW(),
NOW()
FROM falukant_data.political_office AS po
JOIN falukant_type.political_office_type AS pot
ON po.office_type_id = pot.id
WHERE (po.created_at + (pot.term_length * INTERVAL '1 day'))
BETWEEN (NOW() + INTERVAL '2 days')
AND (NOW() + INTERVAL '2 days' + INTERVAL '1 second');
"#;
const QUERY_NOTIFY_ELECTION_CREATED: &str = r#"
INSERT INTO falukant_log.notification
(user_id, tr, created_at, updated_at)
VALUES
($1, 'notify_election_created', NOW(), NOW());
"#;
const QUERY_NOTIFY_OFFICE_FILLED: &str = r#"
INSERT INTO falukant_log.notification
(user_id, tr, created_at, updated_at)
VALUES
($1, 'notify_office_filled', NOW(), NOW());
"#;
const QUERY_GET_USERS_WITH_EXPIRING_OFFICES: &str = r#"
SELECT DISTINCT ch.user_id
FROM falukant_data.political_office AS po
JOIN falukant_type.political_office_type AS pot
ON po.office_type_id = pot.id
JOIN falukant_data.character AS ch
ON po.character_id = ch.id
WHERE ch.user_id IS NOT NULL
AND (po.created_at + (pot.term_length * INTERVAL '1 day'))
BETWEEN (NOW() + INTERVAL '2 days')
AND (NOW() + INTERVAL '2 days' + INTERVAL '1 second');
"#;
const QUERY_GET_USERS_IN_REGIONS_WITH_ELECTIONS: &str = r#"
SELECT DISTINCT ch.user_id
FROM falukant_data.election AS e
JOIN falukant_data.character AS ch
ON ch.region_id = e.region_id
WHERE ch.user_id IS NOT NULL
AND e.date >= NOW() - INTERVAL '1 day';
"#;
const QUERY_GET_USERS_WITH_FILLED_OFFICES: &str = r#"
SELECT DISTINCT ch.user_id
FROM falukant_data.political_office AS po
JOIN falukant_data.character AS ch
ON po.character_id = ch.id
WHERE ch.user_id IS NOT NULL
AND po.created_at >= NOW() - INTERVAL '1 minute';
"#;
const QUERY_PROCESS_ELECTIONS: &str = r#"
SELECT office_id, office_type_id, character_id, region_id
FROM falukant_data.process_elections();
"#;
impl PoliticsWorker {
    /// Creates a new politics worker on top of the shared connection pool
    /// and message broker.
    pub fn new(pool: ConnectionPool, broker: MessageBroker) -> Self {
        Self {
            base: BaseWorker::new("PoliticsWorker", pool, broker),
        }
    }

    /// Worker loop: runs the daily politics task once every 24 hours and
    /// polls the shutdown flag once per second in between.
    fn run_loop(pool: ConnectionPool, broker: MessageBroker, state: Arc<WorkerState>) {
        let mut last_execution: Option<Instant> = None;
        while state.running_worker.load(Ordering::Relaxed) {
            let now = Instant::now();
            let should_run = match last_execution {
                None => true,
                Some(prev) => now.saturating_duration_since(prev) >= Duration::from_secs(24 * 3600),
            };
            if should_run {
                if let Err(err) = Self::perform_daily_politics_task(&pool, &broker) {
                    eprintln!("[PoliticsWorker] Fehler bei performDailyPoliticsTask: {err}");
                }
                last_execution = Some(now);
            }
            // Roughly matches the 5-second loop of the C++ code, sliced into
            // 1-second sleeps so a shutdown request is noticed quickly.
            for _ in 0..5 {
                if !state.running_worker.load(Ordering::Relaxed) {
                    break;
                }
                std::thread::sleep(Duration::from_secs(1));
            }
        }
    }

    /// One complete daily pass: expiration notifications, processing of
    /// expired offices, election scheduling with candidates, and election
    /// evaluation. Any DB error aborts the remaining steps of this pass.
    fn perform_daily_politics_task(
        pool: &ConnectionPool,
        broker: &MessageBroker,
    ) -> Result<(), DbError> {
        // 1) Optional: evaluate positions (currently only logging/structure)
        let _ = Self::evaluate_political_positions(pool)?;
        // 2) Notify about offices that expire soon
        Self::notify_office_expirations(pool, broker)?;
        // 3) Process expired offices and fill the freed seats
        let new_offices_direct = Self::process_expired_offices_and_fill(pool)?;
        if !new_offices_direct.is_empty() {
            Self::notify_office_filled(pool, broker, &new_offices_direct)?;
        }
        // 4) Schedule new elections and register candidates
        let elections = Self::schedule_elections(pool)?;
        if !elections.is_empty() {
            Self::insert_candidates_for_elections(pool, &elections)?;
            // Notify users in the affected regions
            let region_ids: HashSet<i32> =
                elections.iter().map(|e| e.region_id).collect();
            let user_ids =
                Self::get_user_ids_in_cities_of_regions(pool, &region_ids)?;
            Self::notify_election_created(pool, broker, &user_ids)?;
        }
        // 5) Evaluate elections and report newly filled offices
        let new_offices_from_elections = Self::process_elections(pool)?;
        if !new_offices_from_elections.is_empty() {
            Self::notify_office_filled(pool, broker, &new_offices_from_elections)?;
        }
        Ok(())
    }

    /// Loads the required vs. occupied seat counts per region. Rows whose
    /// region_id does not parse are skipped.
    fn evaluate_political_positions(
        pool: &ConnectionPool,
    ) -> Result<Vec<OfficeCounts>, DbError> {
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare(
            "count_offices_per_region",
            QUERY_COUNT_OFFICES_PER_REGION,
        )?;
        let rows = conn.execute("count_offices_per_region", &[])?;
        let mut result = Vec::with_capacity(rows.len());
        for row in rows {
            let region_id = parse_i32(&row, "region_id", -1);
            let required = parse_i32(&row, "required_count", 0);
            let occupied = parse_i32(&row, "occupied_count", 0);
            if region_id >= 0 {
                result.push(OfficeCounts {
                    region_id,
                    required,
                    occupied,
                });
            }
        }
        Ok(result)
    }

    /// Expires today's offices and schedules the elections needed to refill
    /// them (see QUERY_SELECT_NEEDED_ELECTIONS). Returns the new elections.
    fn schedule_elections(pool: &ConnectionPool) -> Result<Vec<Election>, DbError> {
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("select_needed_elections", QUERY_SELECT_NEEDED_ELECTIONS)?;
        let rows = conn.execute("select_needed_elections", &[])?;
        let mut elections = Vec::with_capacity(rows.len());
        for row in rows {
            let election_id = parse_i32(&row, "election_id", -1);
            let region_id = parse_i32(&row, "region_id", -1);
            let posts_to_fill = parse_i32(&row, "posts_to_fill", 0);
            if election_id >= 0 && region_id >= 0 {
                elections.push(Election {
                    election_id,
                    region_id,
                    posts_to_fill,
                });
            }
        }
        Ok(elections)
    }

    /// Registers random NPC candidates for each of the given elections.
    fn insert_candidates_for_elections(
        pool: &ConnectionPool,
        elections: &[Election],
    ) -> Result<(), DbError> {
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("insert_candidates", QUERY_INSERT_CANDIDATES)?;
        for e in elections {
            conn.execute(
                "insert_candidates",
                &[&e.election_id, &e.region_id, &e.posts_to_fill],
            )?;
        }
        Ok(())
    }

    /// Deletes expired offices and refills the seats in one statement;
    /// returns the newly inserted offices.
    fn process_expired_offices_and_fill(
        pool: &ConnectionPool,
    ) -> Result<Vec<Office>, DbError> {
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("process_expired_and_fill", QUERY_PROCESS_EXPIRED_AND_FILL)?;
        let rows = conn.execute("process_expired_and_fill", &[])?;
        Ok(rows
            .into_iter()
            .filter_map(map_row_to_office)
            .collect())
    }

    /// Collects the user ids of all characters inside the subtrees of the
    /// given regions (one query per region).
    /// NOTE(review): DISTINCT is applied per query only, so a user present
    /// in several of the requested subtrees appears multiple times in the
    /// result — confirm duplicate notifications are acceptable.
    fn get_user_ids_in_cities_of_regions(
        pool: &ConnectionPool,
        region_ids: &HashSet<i32>,
    ) -> Result<Vec<i32>, DbError> {
        if region_ids.is_empty() {
            return Ok(Vec::new());
        }
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("get_users_in_cities", QUERY_USERS_IN_CITIES_OF_REGIONS)?;
        let mut user_ids = Vec::new();
        for rid in region_ids {
            let rows = conn.execute("get_users_in_cities", &[rid])?;
            for row in rows {
                if let Some(uid) = row.get("user_id").and_then(|v| v.parse::<i32>().ok()) {
                    user_ids.push(uid);
                }
            }
        }
        Ok(user_ids)
    }

    /// Writes expiring-office notifications and pushes a status-update event
    /// for every affected user over the broker.
    fn notify_office_expirations(
        pool: &ConnectionPool,
        broker: &MessageBroker,
    ) -> Result<(), DbError> {
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("notify_office_expiration", QUERY_NOTIFY_OFFICE_EXPIRATION)?;
        conn.execute("notify_office_expiration", &[])?;
        conn.prepare(
            "get_users_with_expiring_offices",
            QUERY_GET_USERS_WITH_EXPIRING_OFFICES,
        )?;
        let rows = conn.execute("get_users_with_expiring_offices", &[])?;
        for row in rows {
            if let Some(user_id) = row.get("user_id").and_then(|v| v.parse::<i32>().ok()) {
                let msg =
                    format!(r#"{{"event":"falukantUpdateStatus","user_id":{}}}"#, user_id);
                broker.publish(msg);
            }
        }
        Ok(())
    }

    /// Inserts election-created notifications for the given users and pushes
    /// a status-update event for all users in regions with recent elections.
    fn notify_election_created(
        pool: &ConnectionPool,
        broker: &MessageBroker,
        user_ids: &[i32],
    ) -> Result<(), DbError> {
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("notify_election_created", QUERY_NOTIFY_ELECTION_CREATED)?;
        for uid in user_ids {
            conn.execute("notify_election_created", &[uid])?;
        }
        conn.prepare(
            "get_users_in_regions_with_elections",
            QUERY_GET_USERS_IN_REGIONS_WITH_ELECTIONS,
        )?;
        let rows = conn.execute("get_users_in_regions_with_elections", &[])?;
        for row in rows {
            if let Some(user_id) = row.get("user_id").and_then(|v| v.parse::<i32>().ok()) {
                let msg =
                    format!(r#"{{"event":"falukantUpdateStatus","user_id":{}}}"#, user_id);
                broker.publish(msg);
            }
        }
        Ok(())
    }

    /// Inserts office-filled notifications and pushes status updates for
    /// users whose characters just received an office.
    /// NOTE(review): office.character_id is passed as the query's user_id
    /// parameter — verify this mapping (see QUERY_NOTIFY_OFFICE_FILLED).
    fn notify_office_filled(
        pool: &ConnectionPool,
        broker: &MessageBroker,
        new_offices: &[Office],
    ) -> Result<(), DbError> {
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("notify_office_filled", QUERY_NOTIFY_OFFICE_FILLED)?;
        for office in new_offices {
            conn.execute("notify_office_filled", &[&office.character_id])?;
        }
        conn.prepare(
            "get_users_with_filled_offices",
            QUERY_GET_USERS_WITH_FILLED_OFFICES,
        )?;
        let rows = conn.execute("get_users_with_filled_offices", &[])?;
        for row in rows {
            if let Some(user_id) = row.get("user_id").and_then(|v| v.parse::<i32>().ok()) {
                let msg =
                    format!(r#"{{"event":"falukantUpdateStatus","user_id":{}}}"#, user_id);
                broker.publish(msg);
            }
        }
        Ok(())
    }

    /// Runs the stored election-evaluation function and returns the offices
    /// it filled.
    fn process_elections(pool: &ConnectionPool) -> Result<Vec<Office>, DbError> {
        let mut conn = pool
            .get()
            .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("process_elections", QUERY_PROCESS_ELECTIONS)?;
        let rows = conn.execute("process_elections", &[])?;
        Ok(rows
            .into_iter()
            .filter_map(map_row_to_office)
            .collect())
    }
}
impl Worker for PoliticsWorker {
    /// Spawns the background thread that drives the daily politics loop.
    fn start_worker_thread(&mut self) {
        let pool = self.base.pool.clone();
        let broker = self.base.broker.clone();
        let loop_body = move |state: Arc<WorkerState>| {
            PoliticsWorker::run_loop(pool.clone(), broker.clone(), state)
        };
        self.base.start_worker_with_loop(loop_body);
    }

    /// Signals the loop to stop and joins the worker thread.
    fn stop_worker_thread(&mut self) {
        self.base.stop_worker();
    }

    /// Activates watchdog supervision for this worker.
    fn enable_watchdog(&mut self) {
        self.base.start_watchdog();
    }
}
/// Reads `key` from `row` and parses it as an `i32`; yields `default` when
/// the key is missing or the value is not a valid integer.
fn parse_i32(row: &Row, key: &str, default: i32) -> i32 {
    match row.get(key) {
        Some(value) => value.parse::<i32>().unwrap_or(default),
        None => default,
    }
}
/// Converts a result row into an `Office`; returns `None` when any of the
/// four id columns is absent or does not parse as an integer.
fn map_row_to_office(row: Row) -> Option<Office> {
    let office_id = row.get("office_id")?.parse().ok()?;
    let office_type_id = row.get("office_type_id")?.parse().ok()?;
    let character_id = row.get("character_id")?.parse().ok()?;
    let region_id = row.get("region_id")?.parse().ok()?;
    Some(Office {
        office_id,
        office_type_id,
        character_id,
        region_id,
    })
}

495
src/worker/produce.rs Normal file
View File

@@ -0,0 +1,495 @@
use crate::db::{Row, Rows};
use crate::message_broker::MessageBroker;
use std::cmp::min;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::{Duration, Instant};
use crate::db::ConnectionPool;
use super::base::{BaseWorker, Worker, WorkerState};
/// A finished production as loaded from the database.
#[derive(Debug, Clone)]
struct FinishedProduction {
    production_id: i32, // falukant_data.production.id
    branch_id: i32,     // producing branch
    product_id: i32,    // produced product type
    quantity: i32,      // produced amount
    quality: i32,       // derived from character (and director) knowledge
    user_id: i32,       // owner of the branch
    region_id: i32,     // region of the branch
}
/// A stock (warehouse) together with its capacity usage.
#[derive(Debug, Clone)]
struct StockInfo {
    stock_id: i32,       // falukant_data.stock.id
    total_capacity: i32, // maximum storable quantity
    filled: i32,         // currently stored quantity (sum over inventory)
}
// SQL queries, analogous to the C++ implementation.

// Finds productions whose production time has elapsed. Quality is the
// owner's product knowledge, blended 2:1 with the director's knowledge when
// a director exists.
// NOTE(review): the join on falukant_data.stock effectively restricts the
// result to branches that have at least one stock and relies on DISTINCT to
// collapse the duplicates it produces — confirm that is intended.
const QUERY_GET_FINISHED_PRODUCTIONS: &str = r#"
SELECT DISTINCT
p.id AS production_id,
p.branch_id,
p.product_id,
p.quantity,
p.start_timestamp,
pr.production_time,
k.character_id,
CASE
WHEN k2.id IS NOT NULL
THEN (k.knowledge * 2 + k2.knowledge) / 3
ELSE k.knowledge
END AS quality,
br.region_id,
br.falukant_user_id AS user_id
FROM falukant_data.production p
JOIN falukant_type.product pr ON p.product_id = pr.id
JOIN falukant_data.branch br ON p.branch_id = br.id
JOIN falukant_data.character c ON c.user_id = br.falukant_user_id
JOIN falukant_data.knowledge k ON p.product_id = k.product_id AND k.character_id = c.id
JOIN falukant_data.stock s ON s.branch_id = br.id
LEFT JOIN falukant_data.director d ON d.employer_user_id = c.user_id
LEFT JOIN falukant_data.knowledge k2
ON k2.character_id = d.director_character_id
AND k2.product_id = p.product_id
WHERE p.start_timestamp + INTERVAL '1 minute' * pr.production_time <= NOW()
ORDER BY p.start_timestamp;
"#;
// Lists the stocks of branch $1 with total capacity and the quantity
// currently stored in them, largest capacity first.
const QUERY_GET_AVAILABLE_STOCKS: &str = r#"
SELECT
stock.id,
stock.quantity AS total_capacity,
(
SELECT COALESCE(SUM(inventory.quantity), 0)
FROM falukant_data.inventory
WHERE inventory.stock_id = stock.id
) AS filled,
stock.branch_id
FROM falukant_data.stock stock
JOIN falukant_data.branch branch
ON stock.branch_id = branch.id
WHERE branch.id = $1
ORDER BY total_capacity DESC;
"#;
// Removes a production row after its output has been handled.
const QUERY_DELETE_PRODUCTION: &str = r#"
DELETE FROM falukant_data.production
WHERE id = $1;
"#;
// Stores produced goods ($3 units of product $2, quality $4) in stock $1.
const QUERY_INSERT_INVENTORY: &str = r#"
INSERT INTO falukant_data.inventory (
stock_id,
product_id,
quantity,
quality,
produced_at
) VALUES ($1, $2, $3, $4, NOW());
"#;
// Daily production-log upsert keyed on (producer, product, region, date);
// repeated productions on the same day accumulate the quantity.
const QUERY_INSERT_UPDATE_PRODUCTION_LOG: &str = r#"
INSERT INTO falukant_log.production (
region_id,
product_id,
quantity,
producer_id,
production_date
) VALUES ($1, $2, $3, $4, CURRENT_DATE)
ON CONFLICT (producer_id, product_id, region_id, production_date)
DO UPDATE
SET quantity = falukant_log.production.quantity + EXCLUDED.quantity;
"#;
// Inserts an unseen notification for user $1; $2 carries a JSON payload
// string describing the overproduction.
const QUERY_ADD_OVERPRODUCTION_NOTIFICATION: &str = r#"
INSERT INTO falukant_log.notification (
user_id,
tr,
shown,
created_at,
updated_at
) VALUES ($1, $2, FALSE, NOW(), NOW());
"#;
/// Worker that finishes productions and moves their output into branch stocks.
pub struct ProduceWorker {
    base: BaseWorker,
    // Timestamp of the previous iteration; enforces the minimum interval
    // between iterations (see time_until_next_iteration).
    last_iteration: Option<Instant>,
}
impl ProduceWorker {
    /// Creates a new produce worker on top of the shared pool and broker.
    pub fn new(pool: ConnectionPool, broker: MessageBroker) -> Self {
        Self {
            base: BaseWorker::new("ProduceWorker", pool, broker),
            last_iteration: None,
        }
    }

    /// One loop iteration: throttle to the minimum interval, then process
    /// all finished productions. Checks the shutdown flag before and after
    /// sleeping. The current step name is published for the watchdog.
    fn run_iteration(&mut self, state: &WorkerState) {
        self.base
            .set_current_step("Check runningWorker Variable");
        if !state.running_worker.load(Ordering::Relaxed) {
            return;
        }
        let sleep_duration = self.time_until_next_iteration();
        self.sleep_with_shutdown_check(sleep_duration, state);
        if !state.running_worker.load(Ordering::Relaxed) {
            return;
        }
        self.base.set_current_step("Process Productions");
        self.process_productions();
        self.base.set_current_step("Signal Activity");
        // TODO: Later mirror signalActivity() from the C++ base class.
        self.base.set_current_step("Loop Done");
    }

    /// Returns how long to sleep so iterations start at least ~200 ms apart.
    /// NOTE(review): last_iteration is updated *before* the caller performs
    /// the returned sleep, so the measured interval includes that pending
    /// sleep — confirm this matches the C++ timing behavior.
    fn time_until_next_iteration(&mut self) -> Duration {
        const MIN_INTERVAL_MS: u64 = 200;
        let now = Instant::now();
        match self.last_iteration {
            None => {
                self.last_iteration = Some(now);
                Duration::from_millis(0)
            }
            Some(last) => {
                let elapsed = now.saturating_duration_since(last);
                if elapsed >= Duration::from_millis(MIN_INTERVAL_MS) {
                    self.last_iteration = Some(now);
                    Duration::from_millis(0)
                } else {
                    let remaining = Duration::from_millis(MIN_INTERVAL_MS) - elapsed;
                    self.last_iteration = Some(now);
                    remaining
                }
            }
        }
    }

    /// Sleeps for `duration` in 10 ms slices, aborting early when the
    /// shutdown flag is cleared.
    fn sleep_with_shutdown_check(&self, duration: Duration, state: &WorkerState) {
        const SLICE_MS: u64 = 10;
        let total_ms = duration.as_millis() as u64;
        let mut slept = 0;
        while slept < total_ms {
            if !state.running_worker.load(Ordering::Relaxed) {
                break;
            }
            let remaining = total_ms - slept;
            let slice = min(remaining, SLICE_MS);
            std::thread::sleep(Duration::from_millis(slice));
            slept += slice;
        }
    }

    /// Fetches all finished productions and handles them one by one. A fetch
    /// error is logged and treated as "nothing to do".
    fn process_productions(&mut self) {
        self.base
            .set_current_step("Fetch Finished Productions");
        let finished_productions = match self.get_finished_productions() {
            Ok(rows) => rows,
            Err(err) => {
                eprintln!("[ProduceWorker] Fehler in getFinishedProductions: {err}");
                Vec::new()
            }
        };
        self.base
            .set_current_step("Process Finished Productions");
        for production in finished_productions {
            self.handle_finished_production(&production);
        }
    }

    /// Loads finished productions and maps them into structs; rows with
    /// missing/unparsable columns are silently dropped.
    fn get_finished_productions(&self) -> Result<Vec<FinishedProduction>, crate::db::DbError> {
        let rows = self.load_finished_productions()?;
        Ok(rows
            .into_iter()
            .filter_map(Self::map_row_to_finished_production)
            .collect())
    }

    /// Stores a finished production's output; on success deletes the
    /// production row and records it in the production log.
    fn handle_finished_production(&mut self, production: &FinishedProduction) {
        let FinishedProduction {
            branch_id,
            product_id,
            quantity,
            quality,
            user_id,
            region_id,
            production_id,
        } = *production;
        if self.add_to_inventory(branch_id, product_id, quantity, quality, user_id) {
            self.delete_production(production_id);
            self.add_production_to_log(region_id, user_id, product_id, quantity);
        }
    }

    /// Distributes `quantity` units across the branch's stocks (largest
    /// capacity first). Returns false only when an inventory insert fails.
    /// NOTE(review): when capacity runs out the surplus is dropped with an
    /// overproduction notification, yet true is still returned, so the
    /// caller deletes the production and logs the FULL quantity — confirm
    /// this matches the intended (C++) semantics.
    fn add_to_inventory(
        &mut self,
        branch_id: i32,
        product_id: i32,
        quantity: i32,
        quality: i32,
        user_id: i32,
    ) -> bool {
        let mut remaining_quantity = quantity;
        let stocks = match self.get_available_stocks(branch_id) {
            Ok(rows) => rows,
            Err(err) => {
                eprintln!("[ProduceWorker] Fehler in getAvailableStocks: {err}");
                Vec::new()
            }
        };
        for stock in stocks {
            if remaining_quantity <= 0 {
                break;
            }
            let free_capacity = stock.total_capacity - stock.filled;
            if free_capacity <= 0 {
                continue;
            }
            let to_store = min(remaining_quantity, free_capacity);
            if !self.store_in_stock(stock.stock_id, product_id, to_store, quality) {
                return false;
            }
            remaining_quantity -= to_store;
        }
        if remaining_quantity == 0 {
            self.send_production_ready_event(user_id, product_id, quantity, quality, branch_id);
            true
        } else {
            self.handle_overproduction(user_id, remaining_quantity);
            true
        }
    }

    /// Loads a branch's stocks with capacity information.
    fn get_available_stocks(&self, branch_id: i32) -> Result<Vec<StockInfo>, crate::db::DbError> {
        let rows = self.load_available_stocks(branch_id)?;
        Ok(rows
            .into_iter()
            .filter_map(Self::map_row_to_stock_info)
            .collect())
    }

    /// Inserts goods into a single stock; logs and returns false on error.
    fn store_in_stock(
        &self,
        stock_id: i32,
        product_id: i32,
        quantity: i32,
        quality: i32,
    ) -> bool {
        if let Err(err) = self.insert_inventory(stock_id, product_id, quantity, quality) {
            eprintln!("[ProduceWorker] Fehler in storeInStock: {err}");
            return false;
        }
        true
    }

    /// Best-effort deletion of a handled production row (errors only logged).
    fn delete_production(&self, production_id: i32) {
        if let Err(err) = self.remove_production(production_id) {
            eprintln!("[ProduceWorker] Fehler beim Löschen der Produktion: {err}");
        }
    }

    /// Best-effort upsert into the daily production log (errors only logged).
    fn add_production_to_log(
        &self,
        region_id: i32,
        user_id: i32,
        product_id: i32,
        quantity: i32,
    ) {
        if let Err(err) = self.insert_or_update_production_log(region_id, user_id, product_id, quantity) {
            eprintln!("[ProduceWorker] Fehler in addProductionToLog: {err}");
        }
    }

    /// Publishes a production_ready event for the user over the broker.
    fn send_production_ready_event(
        &self,
        user_id: i32,
        product_id: i32,
        quantity: i32,
        quality: i32,
        branch_id: i32,
    ) {
        // Build the JSON payload by hand to avoid extra dependencies.
        let message = format!(
            r#"{{"event":"production_ready","user_id":{user_id},"product_id":{product_id},"quantity":{quantity},"quality":{quality},"branch_id":{branch_id}}}"#
        );
        self.base.broker.publish(message);
    }

    /// Records an overproduction notification and pushes a status update;
    /// notification failures are only logged.
    fn handle_overproduction(&self, user_id: i32, remaining_quantity: i32) {
        if let Err(err) = self.insert_overproduction_notification(user_id, remaining_quantity) {
            eprintln!(
                "[ProduceWorker] Fehler beim Schreiben der Overproduction-Notification: {err}"
            );
        }
        let update_status =
            format!(r#"{{"event":"falukantUpdateStatus","user_id":{user_id}}}"#);
        self.base.broker.publish(update_status);
    }

    /// Raw DB access: all productions whose production time has elapsed.
    fn load_finished_productions(&self) -> Result<Rows, crate::db::DbError> {
        let mut conn = self
            .base
            .pool
            .get()
            .map_err(|e| crate::db::DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("get_finished_productions", QUERY_GET_FINISHED_PRODUCTIONS)?;
        conn.execute("get_finished_productions", &[])
    }

    /// Raw DB access: stocks of the given branch.
    fn load_available_stocks(&self, branch_id: i32) -> Result<Rows, crate::db::DbError> {
        let mut conn = self
            .base
            .pool
            .get()
            .map_err(|e| crate::db::DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("get_stocks", QUERY_GET_AVAILABLE_STOCKS)?;
        conn.execute("get_stocks", &[&branch_id])
    }

    /// Raw DB access: inserts an inventory row.
    fn insert_inventory(
        &self,
        stock_id: i32,
        product_id: i32,
        quantity: i32,
        quality: i32,
    ) -> Result<(), crate::db::DbError> {
        let mut conn = self
            .base
            .pool
            .get()
            .map_err(|e| crate::db::DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("insert_inventory", QUERY_INSERT_INVENTORY)?;
        conn.execute("insert_inventory", &[&stock_id, &product_id, &quantity, &quality])?;
        Ok(())
    }

    /// Raw DB access: deletes a production row by id.
    fn remove_production(&self, production_id: i32) -> Result<(), crate::db::DbError> {
        let mut conn = self
            .base
            .pool
            .get()
            .map_err(|e| crate::db::DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare("delete_production", QUERY_DELETE_PRODUCTION)?;
        conn.execute("delete_production", &[&production_id])?;
        Ok(())
    }

    /// Raw DB access: upserts the daily production log
    /// (parameter order: region, product, quantity, producer/user).
    fn insert_or_update_production_log(
        &self,
        region_id: i32,
        user_id: i32,
        product_id: i32,
        quantity: i32,
    ) -> Result<(), crate::db::DbError> {
        let mut conn = self
            .base
            .pool
            .get()
            .map_err(|e| crate::db::DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare(
            "insert_update_production_log",
            QUERY_INSERT_UPDATE_PRODUCTION_LOG,
        )?;
        conn.execute(
            "insert_update_production_log",
            &[&region_id, &product_id, &quantity, &user_id],
        )?;
        Ok(())
    }

    /// Raw DB access: stores an overproduction notification whose payload is
    /// a hand-built JSON string.
    fn insert_overproduction_notification(
        &self,
        user_id: i32,
        remaining_quantity: i32,
    ) -> Result<(), crate::db::DbError> {
        let mut conn = self
            .base
            .pool
            .get()
            .map_err(|e| crate::db::DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
        conn.prepare(
            "add_overproduction_notification",
            QUERY_ADD_OVERPRODUCTION_NOTIFICATION,
        )?;
        let notification = format!(
            r#"{{"tr":"production.overproduction","value":{}}}"#,
            remaining_quantity
        );
        conn.execute(
            "add_overproduction_notification",
            &[&user_id, &notification],
        )?;
        Ok(())
    }

    /// Maps a result row to FinishedProduction; None on any missing or
    /// unparsable column.
    fn map_row_to_finished_production(row: Row) -> Option<FinishedProduction> {
        Some(FinishedProduction {
            production_id: row.get("production_id")?.parse().ok()?,
            branch_id: row.get("branch_id")?.parse().ok()?,
            product_id: row.get("product_id")?.parse().ok()?,
            quantity: row.get("quantity")?.parse().ok()?,
            quality: row.get("quality")?.parse().ok()?,
            user_id: row.get("user_id")?.parse().ok()?,
            region_id: row.get("region_id")?.parse().ok()?,
        })
    }

    /// Maps a result row to StockInfo; None on any missing or unparsable
    /// column.
    fn map_row_to_stock_info(row: Row) -> Option<StockInfo> {
        Some(StockInfo {
            stock_id: row.get("id")?.parse().ok()?,
            total_capacity: row.get("total_capacity")?.parse().ok()?,
            filled: row.get("filled")?.parse().ok()?,
        })
    }
}
impl Worker for ProduceWorker {
    /// Spawns the background thread; a fresh `ProduceWorker` instance drives
    /// the iteration loop until shutdown is requested.
    fn start_worker_thread(&mut self) {
        let pool = self.base.pool.clone();
        let broker = self.base.broker.clone();
        let loop_body = move |state: Arc<WorkerState>| {
            let mut worker = ProduceWorker::new(pool.clone(), broker.clone());
            loop {
                if !state.running_worker.load(Ordering::Relaxed) {
                    break;
                }
                worker.run_iteration(&state);
            }
        };
        self.base.start_worker_with_loop(loop_body);
    }

    /// Requests shutdown and waits for the worker thread to finish.
    fn stop_worker_thread(&mut self) {
        self.base.stop_worker();
    }

    /// Activates watchdog supervision for this worker.
    fn enable_watchdog(&mut self) {
        self.base.start_watchdog();
    }
}

49
src/worker/simple.rs Normal file
View File

@@ -0,0 +1,49 @@
use crate::message_broker::MessageBroker;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use crate::db::ConnectionPool;
use super::base::{BaseWorker, Worker, WorkerState};
/// Generates a minimal placeholder worker type with the standard `Worker`
/// wiring (start/stop/watchdog) until the real logic is ported.
macro_rules! define_simple_worker {
    ($name:ident) => {
        pub struct $name {
            base: BaseWorker,
        }
        impl $name {
            /// Creates the worker on top of the shared pool and broker.
            pub fn new(pool: ConnectionPool, broker: MessageBroker) -> Self {
                Self {
                    base: BaseWorker::new(stringify!($name), pool, broker),
                }
            }
        }
        impl Worker for $name {
            fn start_worker_thread(&mut self) {
                self.base
                    .start_worker_with_loop(|state: Arc<WorkerState>| {
                        // Simple placeholder loop until the real logic is ported.
                        while state.running_worker.load(Ordering::Relaxed) {
                            if let Ok(mut step) = state.current_step.lock() {
                                *step = format!("{}: idle", stringify!($name));
                            }
                            // Sleep in 1-second slices instead of one solid
                            // 5-second sleep so stop_worker_thread() is not
                            // blocked for up to 5 seconds on shutdown.
                            for _ in 0..5 {
                                if !state.running_worker.load(Ordering::Relaxed) {
                                    break;
                                }
                                thread::sleep(Duration::from_secs(1));
                            }
                        }
                    });
            }
            fn stop_worker_thread(&mut self) {
                self.base.stop_worker();
            }
            fn enable_watchdog(&mut self) {
                self.base.start_watchdog();
            }
        }
    };
}

View File

@@ -0,0 +1,203 @@
use crate::db::{ConnectionPool, DbError};
use crate::message_broker::MessageBroker;
use rand::distributions::{Distribution, Uniform};
use rand::rngs::StdRng;
use rand::SeedableRng;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::{Duration, Instant};
use super::base::{BaseWorker, Worker, WorkerState};
/// Worker that randomly replenishes buyable stock in city regions and cleans
/// up sold-out entries.
pub struct StockageManager {
    base: BaseWorker,
}
// SQL queries, analogous to `stockagemanager.h`.

// Ids of all regions whose region type is 'city'.
const QUERY_GET_TOWNS: &str = r#"
SELECT fdr.id
FROM falukant_data.region fdr
JOIN falukant_type.region ftr
ON ftr.id = fdr.region_type_id
WHERE ftr.label_tr = 'city';
"#;
// Inserts a random subset of stock types as buyable stock for region $1;
// quantities scale with the number of branches in the region, and the
// number of stock types picked is random but at least one.
const QUERY_INSERT_STOCK: &str = r#"
INSERT INTO falukant_data.buyable_stock (region_id, stock_type_id, quantity)
SELECT
$1 AS region_id,
s.id AS stock_type_id,
GREATEST(1, ROUND(RANDOM() * 5 * COUNT(br.id))) AS quantity
FROM falukant_data.branch AS br
CROSS JOIN falukant_type.stock AS s
WHERE br.region_id = $1
GROUP BY s.id
ORDER BY RANDOM()
LIMIT GREATEST(
ROUND(RANDOM() * (SELECT COUNT(id) FROM falukant_type.stock)),
1
);
"#;
// Removes buyable stock entries that are sold out (or negative).
const QUERY_CLEANUP_STOCK: &str = r#"
DELETE FROM falukant_data.buyable_stock
WHERE quantity <= 0;
"#;
// User ids of all player characters located in region $1.
const QUERY_GET_REGION_USERS: &str = r#"
SELECT c.user_id
FROM falukant_data.character c
WHERE c.region_id = $1
AND c.user_id IS NOT NULL;
"#;
impl StockageManager {
pub fn new(pool: ConnectionPool, broker: MessageBroker) -> Self {
Self {
base: BaseWorker::new("StockageManager", pool, broker),
}
}
fn run_loop(pool: ConnectionPool, broker: MessageBroker, state: Arc<WorkerState>) {
let mut last_add_run: Option<Instant> = None;
let mut last_cleanup_run: Option<Instant> = None;
while state.running_worker.load(Ordering::Relaxed) {
let now = Instant::now();
// Entspricht addLocalStocks: alle 60 Sekunden prüfen & ggf. Stocks hinzufügen
let should_add = match last_add_run {
None => true,
Some(last) => now.saturating_duration_since(last) >= Duration::from_secs(60),
};
if should_add {
if let Err(err) = Self::add_local_stocks(&pool, &broker) {
eprintln!("[StockageManager] Fehler in addLocalStocks: {err}");
}
last_add_run = Some(now);
}
// Cleanup regelmäßig ausführen (z.B. ebenfalls im 60s-Rhythmus)
let should_cleanup = match last_cleanup_run {
None => true,
Some(last) => now.saturating_duration_since(last) >= Duration::from_secs(60),
};
if should_cleanup {
if let Err(err) = Self::cleanup_buyable_stock(&pool) {
eprintln!("[StockageManager] Fehler bei stock cleanup: {err}");
}
last_cleanup_run = Some(now);
}
std::thread::sleep(Duration::from_secs(1));
}
}
fn add_local_stocks(pool: &ConnectionPool, broker: &MessageBroker) -> Result<(), DbError> {
let mut rng = StdRng::from_entropy();
let dist = Uniform::from(0.0..1.0);
let town_ids = Self::get_town_ids(pool)?;
for town_id in town_ids {
// Wahrscheinlichkeit analog: round(dist * 2160) <= 1
let roll: f64 = dist.sample(&mut rng) * 2160.0_f64;
let chance = roll.round();
if chance <= 1.0 {
Self::add_stock_for_town(pool, broker, town_id)?;
}
}
Ok(())
}
fn get_town_ids(pool: &ConnectionPool) -> Result<Vec<i32>, DbError> {
let mut conn = pool
.get()
.map_err(|e| DbError::new(format!("[StockageManager] DB-Verbindung fehlgeschlagen: {e}")))?;
conn.prepare("get_towns", QUERY_GET_TOWNS)?;
let towns = conn.execute("get_towns", &[])?;
let mut ids = Vec::with_capacity(towns.len());
for row in towns {
if let Some(id) = row.get("id").and_then(|v| v.parse::<i32>().ok()) {
ids.push(id);
}
}
Ok(ids)
}
fn add_stock_for_town(
pool: &ConnectionPool,
broker: &MessageBroker,
town_id: i32,
) -> Result<(), DbError> {
let mut conn = pool
.get()
.map_err(|e| DbError::new(format!("[StockageManager] DB-Verbindung fehlgeschlagen: {e}")))?;
conn.prepare("add_stock", QUERY_INSERT_STOCK)?;
conn.execute("add_stock", &[&town_id])?;
// Benachrichtige alle User in der Region
let users = Self::get_region_users(&mut conn, town_id)?;
for user_id in users {
let message = format!(
r#"{{"event":"stock_change","user_id":{},"branch":{}}}"#,
user_id, town_id
);
broker.publish(message);
}
Ok(())
}
fn get_region_users(conn: &mut crate::db::DbConnection, region_id: i32) -> Result<Vec<i32>, DbError> {
conn.prepare("get_region_users", QUERY_GET_REGION_USERS)?;
let rows = conn.execute("get_region_users", &[&region_id])?;
let mut result = Vec::with_capacity(rows.len());
for row in rows {
if let Some(uid) = row.get("user_id").and_then(|v| v.parse::<i32>().ok()) {
result.push(uid);
}
}
Ok(result)
}
/// Runs the stock-cleanup statement (see QUERY_CLEANUP_STOCK).
fn cleanup_buyable_stock(pool: &ConnectionPool) -> Result<(), DbError> {
    let mut db = pool
        .get()
        .map_err(|e| DbError::new(format!("[StockageManager] DB-Verbindung fehlgeschlagen: {e}")))?;
    db.prepare("cleanup_stock", QUERY_CLEANUP_STOCK)?;
    // The returned rows are irrelevant; only the side effect matters.
    db.execute("cleanup_stock", &[]).map(|_| ())
}
}
impl Worker for StockageManager {
    /// Spawns the background thread that drives `run_loop`, handing it
    /// clones of the shared connection pool and message broker.
    fn start_worker_thread(&mut self) {
        let pool = self.base.pool.clone();
        let broker = self.base.broker.clone();
        self.base
            .start_worker_with_loop(move |state: Arc<WorkerState>| {
                StockageManager::run_loop(pool.clone(), broker.clone(), state);
            });
    }

    /// Requests the worker loop to stop (delegates to BaseWorker).
    fn stop_worker_thread(&mut self) {
        self.base.stop_worker();
    }

    /// Turns on the BaseWorker watchdog for this worker.
    fn enable_watchdog(&mut self) {
        self.base.start_watchdog();
    }
}

968
src/worker/underground.rs Normal file
View File

@@ -0,0 +1,968 @@
use crate::db::{ConnectionPool, DbError, Row, Rows};
use crate::message_broker::MessageBroker;
use rand::distributions::{Distribution, Uniform};
use rand::seq::SliceRandom;
use rand::Rng;
use serde_json::json;
use serde_json::Value as Json;
use std::cmp::{max, min};
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::Duration;
use super::base::{BaseWorker, Worker, WorkerState};
/// Background worker that processes pending "underground" (illegal) actions:
/// spying, assassination, sabotage, politician corruption and robbery.
pub struct UndergroundWorker {
    // Shared worker plumbing: name, DB pool, broker, thread/watchdog state.
    base: BaseWorker,
}
/// In-memory snapshot of one `user_house` row's condition columns.
/// Values are expected in 0..=100 (clamped on write in `update_house`).
#[derive(Debug, Clone)]
struct HouseConditions {
    id: i32,      // user_house.id
    roof: i32,    // roof_condition
    floor: i32,   // floor_condition
    wall: i32,    // wall_condition
    windowc: i32, // window_condition ("window" avoided as an identifier)
}
// Query constants (ported 1:1 from the C++ version).
// All underground actions performed by a character, newest first (used by
// the "spyin" action to expose a victim's own activity log).
//
// Fix: this is a Rust *raw* string, so the C++-style `\"` escapes reached
// PostgreSQL verbatim. In a to_char() template, a backslash before a double
// quote emits a literal `"` into the output, producing timestamps like
// `2025-01-01"T"12:00:00"Z"`. Plain quoted literals `"T"` / `"Z"` are correct.
//
// NOTE(review): the join `t.tr = u.underground_type_id` compares a text tag
// with an *_id column — confirm the schema really keys the type table by `tr`
// (the same pattern appears in Q_SELECT_PENDING).
const Q_SELECT_BY_PERFORMER: &str = r#"
SELECT u.id,
       t.tr AS underground_type,
       u.performer_id,
       u.victim_id,
       to_char(u.created_at,'YYYY-MM-DD"T"HH24:MI:SS"Z"') AS created_at,
       COALESCE(u.parameters::text,'{}') AS parameters,
       COALESCE(u.result::text,'null') AS result_text
FROM falukant_data.underground u
JOIN falukant_type.underground t ON t.tr = u.underground_type_id
WHERE u.performer_id = $1
ORDER BY u.created_at DESC;
"#;
// Pending underground actions: no result yet and at least one day old,
// oldest first, capped at 200 per tick.
// NOTE(review): joins the type table via `t.tr = u.underground_type_id`
// (text tag vs. *_id column) — confirm the schema keys the table by `tr`.
const Q_SELECT_PENDING: &str = r#"
SELECT u.id,
       t.tr AS underground_type,
       u.performer_id,
       u.victim_id,
       COALESCE(u.parameters::text,'{}') AS parameters
FROM falukant_data.underground u
JOIN falukant_type.underground t ON t.tr = u.underground_type_id
WHERE u.result IS NULL
  AND u.created_at <= NOW() - INTERVAL '1 day'
ORDER BY u.created_at ASC
LIMIT 200;
"#;
// Stores the JSON outcome of a processed action ($2 is text cast to jsonb).
const Q_UPDATE_RESULT: &str = r#"
UPDATE falukant_data.underground
SET result = $2::jsonb,
    updated_at = NOW()
WHERE id = $1;
"#;
// Character -> owning user id.
const Q_SELECT_CHAR_USER: &str = r#"
SELECT user_id
FROM falukant_data.character
WHERE id = $1;
"#;
// A user's house with its four condition columns.
const Q_SELECT_HOUSE_BY_USER: &str = r#"
SELECT id, roof_condition, floor_condition, wall_condition, window_condition
FROM falukant_data.user_house
WHERE user_id = $1
LIMIT 1;
"#;
// Writes back all four house conditions.
const Q_UPDATE_HOUSE: &str = r#"
UPDATE falukant_data.user_house
SET roof_condition = $2,
    floor_condition = $3,
    wall_condition = $4,
    window_condition = $5
WHERE id = $1;
"#;
// Stock rows of a branch, largest quantity first.
const Q_SELECT_STOCK_BY_BRANCH: &str = r#"
SELECT id, stock_type_id, quantity
FROM falukant_data.stock
WHERE branch_id = $1
ORDER BY quantity DESC;
"#;
// Sets the absolute quantity of a stock row.
const Q_UPDATE_STOCK_QTY: &str = r#"
UPDATE falukant_data.stock
SET quantity = $2
WHERE id = $1;
"#;
// Character health read/write for the assassin action.
const Q_SELECT_CHAR_HEALTH: &str = r#"
SELECT health
FROM falukant_data.character
WHERE id = $1;
"#;
const Q_UPDATE_CHAR_HEALTH: &str = r#"
UPDATE falukant_data.character
SET health = $2,
    updated_at = NOW()
WHERE id = $1;
"#;
// Falukant user (money, main branch region) by plain user id.
const Q_SELECT_FALUKANT_USER: &str = r#"
SELECT id,
       money,
       COALESCE(main_branch_region_id, 0) AS main_branch_region_id
FROM falukant_data.falukant_user
WHERE user_id = $1
LIMIT 1;
"#;
// Query for money changes (local variant of BaseWorker::change_falukant_user_money).
const QUERY_UPDATE_MONEY: &str = r#"
SELECT falukant_data.update_money($1, $2, $3);
"#;
impl UndergroundWorker {
/// Creates the worker on top of the shared connection pool and broker.
pub fn new(pool: ConnectionPool, broker: MessageBroker) -> Self {
    let base = BaseWorker::new("UndergroundWorker", pool, broker);
    Self { base }
}
/// Worker main loop: one `tick` per minute, with the idle time split into
/// 1-second slices so a shutdown request is honored quickly.
fn run_loop(pool: ConnectionPool, broker: MessageBroker, state: Arc<WorkerState>) {
    while state.running_worker.load(Ordering::Relaxed) {
        if let Err(err) = Self::tick(&pool, &broker) {
            eprintln!("[UndergroundWorker] Fehler in tick: {err}");
        }
        // Sleep up to 60 x 1s, re-checking the shutdown flag before each slice.
        let mut remaining = 60;
        while remaining > 0 && state.running_worker.load(Ordering::Relaxed) {
            std::thread::sleep(Duration::from_secs(1));
            remaining -= 1;
        }
    }
}
fn tick(pool: &ConnectionPool, broker: &MessageBroker) -> Result<(), DbError> {
let rows = Self::fetch_pending(pool)?;
for row in rows {
let id = match row.get("id").and_then(|v| v.parse::<i32>().ok()) {
Some(id) => id,
None => continue,
};
match Self::execute_row(pool, &row) {
Ok(res) => {
Self::update_result(pool, id, &res)?;
let event = json!({
"event": "underground_processed",
"id": id,
"type": row.get("underground_type").cloned().unwrap_or_default()
});
broker.publish(event.to_string());
}
Err(err) => {
let error_res = json!({
"status": "error",
"message": err.to_string()
});
let _ = Self::update_result(pool, id, &error_res);
}
}
}
Ok(())
}
/// Loads the batch of unprocessed, due underground actions (max 200).
fn fetch_pending(pool: &ConnectionPool) -> Result<Rows, DbError> {
    let mut db = pool
        .get()
        .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
    db.prepare("ug_select_pending", Q_SELECT_PENDING)?;
    db.execute("ug_select_pending", &[])
}
/// Extracts the task fields from a DB row and dispatches it.
/// Missing/unparsable performer/victim ids fall back to -1, missing
/// parameters to an empty JSON object.
fn execute_row(pool: &ConnectionPool, r: &Row) -> Result<Json, DbError> {
    let performer_id = parse_i32(r, "performer_id", -1);
    let victim_id = parse_i32(r, "victim_id", -1);
    let task_type = r.get("underground_type").cloned().unwrap_or_default();
    let params = r.get("parameters").cloned().unwrap_or_else(|| "{}".into());
    // `Ok(expr?)` was redundant; propagate the Result directly.
    Self::handle_task(pool, &task_type, performer_id, victim_id, &params)
}
/// Dispatches a single underground task to its handler based on its type
/// tag. Unknown types are reported via a JSON result, not an error.
fn handle_task(
    pool: &ConnectionPool,
    task_type: &str,
    performer_id: i32,
    victim_id: i32,
    params_json: &str,
) -> Result<Json, DbError> {
    // Malformed parameter JSON degrades to an empty object.
    let params: Json = serde_json::from_str(params_json).unwrap_or_else(|_| json!({}));
    let result = match task_type {
        "spyin" => Self::spy_in(pool, performer_id, victim_id, &params)?,
        "assassin" => Self::assassin(pool, performer_id, victim_id, &params)?,
        "sabotage" => Self::sabotage(pool, performer_id, victim_id, &params)?,
        "corrupt_politician" => Self::corrupt_politician(performer_id, victim_id, &params),
        "rob" => Self::rob(pool, performer_id, victim_id, &params)?,
        other => json!({ "status": "unknown_type", "type": other }),
    };
    Ok(result)
}
/// "spyin": collects the victim's own underground activity log and returns
/// it (count + entries) as the spy result.
fn spy_in(
    pool: &ConnectionPool,
    performer_id: i32,
    victim_id: i32,
    p: &Json,
) -> Result<Json, DbError> {
    let mut conn = pool
        .get()
        .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
    conn.prepare("ug_select_by_performer", Q_SELECT_BY_PERFORMER)?;
    let rows = conn.execute("ug_select_by_performer", &[&victim_id])?;
    let activities: Vec<Json> = rows
        .into_iter()
        .map(|r| {
            let params: Json = r
                .get("parameters")
                .and_then(|s| serde_json::from_str(s).ok())
                .unwrap_or_else(|| json!({}));
            // A missing or unparsable result column counts as "no result yet".
            let result: Json = r
                .get("result_text")
                .map(|s| serde_json::from_str(s.as_str()).unwrap_or(Json::Null))
                .unwrap_or(Json::Null);
            // Status: "pending" while no result object exists; otherwise the
            // object's own "status" string, or "done" if it has none.
            let status = match &result {
                Json::Object(obj) => match obj.get("status") {
                    Some(Json::String(s)) => s.clone(),
                    _ => "done".to_string(),
                },
                _ => "pending".to_string(),
            };
            json!({
                "id": parse_i32(&r, "id", -1),
                "type": r.get("underground_type").cloned().unwrap_or_default(),
                "performed_by": parse_i32(&r, "performer_id", -1),
                "victim_id": parse_i32(&r, "victim_id", -1),
                "created_at": r.get("created_at").cloned().unwrap_or_default(),
                "parameters": params,
                "result": result,
                "status": status
            })
        })
        .collect();
    Ok(json!({
        "status": "success",
        "action": "spyin",
        "performer_id": performer_id,
        "victim_id": victim_id,
        "details": p,
        "victim_illegal_activity_count": activities.len(),
        "victim_illegal_activities": activities
    }))
}
/// "assassin": rerolls the victim's health to a uniform value in
/// [0, current_health] and reports previous/new health in the result.
fn assassin(
    pool: &ConnectionPool,
    performer_id: i32,
    victim_id: i32,
    p: &Json,
) -> Result<Json, DbError> {
    let mut conn = pool
        .get()
        .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
    conn.prepare("ug_select_char_health", Q_SELECT_CHAR_HEALTH)?;
    conn.prepare("ug_update_char_health", Q_UPDATE_CHAR_HEALTH)?;
    let rows = conn.execute("ug_select_char_health", &[&victim_id])?;
    let Some(victim) = rows.first() else {
        return Ok(json!({
            "status": "error",
            "action": "assassin",
            "performer_id": performer_id,
            "victim_id": victim_id,
            "message": "victim_not_found",
            "details": p
        }));
    };
    let current = parse_i32(victim, "health", 0);
    // Single uniform draw over 0..=current; the max(0) clamp keeps the range
    // non-empty even if the stored health were negative.
    let new_health = Uniform::from(0..=current.max(0)).sample(&mut rand::thread_rng());
    conn.execute("ug_update_char_health", &[&victim_id, &new_health])?;
    Ok(json!({
        "status": "success",
        "action": "assassin",
        "performer_id": performer_id,
        "victim_id": victim_id,
        "details": p,
        "previous_health": current,
        "new_health": new_health,
        "reduced_by": current - new_health
    }))
}
/// "sabotage": dispatches on the `target` parameter to the house- or
/// storage-sabotage handler; anything else yields an `unknown_target` result.
fn sabotage(
    pool: &ConnectionPool,
    performer_id: i32,
    victim_id: i32,
    p: &Json,
) -> Result<Json, DbError> {
    // Match on the borrowed &str directly; the previous owned String copy
    // was an unnecessary allocation.
    match p.get("target").and_then(|v| v.as_str()).unwrap_or("") {
        "house" => Self::sabotage_house(pool, performer_id, victim_id, p),
        "storage" => Self::sabotage_storage(pool, performer_id, victim_id, p),
        _ => Ok(json!({
            "status": "error",
            "action": "sabotage",
            "message": "unknown_target",
            "performer_id": performer_id,
            "victim_id": victim_id,
            "details": p
        })),
    }
}
/// Resolves the owning `user_id` for a character, or -1 when the character
/// does not exist or the column cannot be parsed (the -1 sentinel is what
/// the callers in this module check for).
fn get_user_id_for_character(pool: &ConnectionPool, character_id: i32) -> Result<i32, DbError> {
    let mut conn = pool
        .get()
        .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
    conn.prepare("ug_select_char_user", Q_SELECT_CHAR_USER)?;
    let rows = conn.execute("ug_select_char_user", &[&character_id])?;
    // `.first()` is the idiomatic replacement for `.get(0)`.
    Ok(rows
        .first()
        .and_then(|r| r.get("user_id"))
        .and_then(|v| v.parse::<i32>().ok())
        .unwrap_or(-1))
}
/// Loads the user's house (if any) as a `HouseConditions` snapshot.
fn get_house_by_user(pool: &ConnectionPool, user_id: i32) -> Result<Option<HouseConditions>, DbError> {
    let mut conn = pool
        .get()
        .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
    conn.prepare("ug_select_house_by_user", Q_SELECT_HOUSE_BY_USER)?;
    let rows = conn.execute("ug_select_house_by_user", &[&user_id])?;
    // At most one row (LIMIT 1); map it straight into the struct.
    Ok(rows.first().map(|r| HouseConditions {
        id: parse_i32(r, "id", -1),
        roof: parse_i32(r, "roof_condition", 0),
        floor: parse_i32(r, "floor_condition", 0),
        wall: parse_i32(r, "wall_condition", 0),
        windowc: parse_i32(r, "window_condition", 0),
    }))
}
/// Persists the four house condition values, each clamped to 0..=100.
fn update_house(pool: &ConnectionPool, h: &HouseConditions) -> Result<(), DbError> {
    let mut conn = pool
        .get()
        .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
    conn.prepare("ug_update_house", Q_UPDATE_HOUSE)?;
    let clamped = [
        h.roof.clamp(0, 100),
        h.floor.clamp(0, 100),
        h.wall.clamp(0, 100),
        h.windowc.clamp(0, 100),
    ];
    conn.execute(
        "ug_update_house",
        &[&h.id, &clamped[0], &clamped[1], &clamped[2], &clamped[3]],
    )?;
    Ok(())
}
/// "sabotage" with target "house": randomly worsens a subset of the victim's
/// house condition values (roof/floor/wall/window).
///
/// The params may carry a `conditions` array restricting which fields are
/// eligible; an empty or missing list means all four. Returns an
/// error-status JSON when victim or house cannot be found.
fn sabotage_house(
    pool: &ConnectionPool,
    performer_id: i32,
    victim_id: i32,
    p: &Json,
) -> Result<Json, DbError> {
    // Resolve character -> owning user; -1 is the "not found" sentinel.
    let user_id = Self::get_user_id_for_character(pool, victim_id)?;
    if user_id < 0 {
        return Ok(json!({
            "status": "error",
            "action": "sabotage",
            "target": "house",
            "message": "victim_not_found",
            "performer_id": performer_id,
            "victim_id": victim_id,
            "details": p
        }));
    }
    let mut house = match Self::get_house_by_user(pool, user_id)? {
        Some(h) => h,
        None => {
            return Ok(json!({
                "status": "error",
                "action": "sabotage",
                "target": "house",
                "message": "house_not_found",
                "performer_id": performer_id,
                "victim_id": victim_id,
                "details": p
            }))
        }
    };
    // Allowed fields from the params; empty list = every field is eligible.
    let mut allow: Vec<String> = Vec::new();
    if let Some(conds) = p.get("conditions").and_then(|v| v.as_array()) {
        for s in conds {
            if let Some(name) = s.as_str() {
                allow.push(name.to_string());
            }
        }
    }
    // Work with indices instead of holding &mut references to the struct
    // fields, to avoid borrowing conflicts.
    let all_fields = ["roof_condition", "floor_condition", "wall_condition", "window_condition"];
    let candidate_indices: Vec<usize> = (0..all_fields.len())
        .filter(|&idx| {
            allow.is_empty()
                || allow
                    .iter()
                    .any(|name| name == all_fields[idx])
        })
        .collect();
    if candidate_indices.is_empty() {
        return Ok(json!({
            "status": "error",
            "action": "sabotage",
            "target": "house",
            "message": "no_conditions_selected",
            "performer_id": performer_id,
            "victim_id": victim_id,
            "details": p
        }));
    }
    // Pick a random count (>= 1), then a random subset of that size.
    let k = random_int(1, candidate_indices.len() as i32) as usize;
    let picks = random_indices(candidate_indices.len(), k);
    let mut changed = Vec::new();
    for i in picks {
        let idx = candidate_indices[i];
        let (name, value_ref) = match idx {
            0 => ("roof_condition", &mut house.roof),
            1 => ("floor_condition", &mut house.floor),
            2 => ("wall_condition", &mut house.wall),
            3 => ("window_condition", &mut house.windowc),
            _ => continue,
        };
        if *value_ref > 0 {
            // Reduce by a random amount between 1 and the current value.
            let red = random_int(1, *value_ref);
            *value_ref = (*value_ref - red).clamp(0, 100);
        }
        // NOTE(review): the field is reported as "changed" even when it was
        // already 0 and nothing was reduced — mirrors the C++ original.
        changed.push(name.to_string());
    }
    Self::update_house(pool, &house)?;
    Ok(json!({
        "status": "success",
        "action": "sabotage",
        "target": "house",
        "performer_id": performer_id,
        "victim_id": victim_id,
        "details": p,
        "changed_conditions": changed,
        "new_conditions": {
            "roof_condition": house.roof,
            "floor_condition": house.floor,
            "wall_condition": house.wall,
            "window_condition": house.windowc
        }
    }))
}
/// Returns all stock rows of a branch, largest quantity first.
fn select_stock_by_branch(pool: &ConnectionPool, branch_id: i32) -> Result<Rows, DbError> {
    let mut db = pool
        .get()
        .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
    db.prepare("ug_select_stock_by_branch", Q_SELECT_STOCK_BY_BRANCH)?;
    db.execute("ug_select_stock_by_branch", &[&branch_id])
}
/// Keeps only rows whose `stock_type_id` is in `allowed`; an empty `allowed`
/// slice means "no filter" and yields a copy of all rows. Rows with a
/// missing or unparsable `stock_type_id` are dropped when filtering.
fn filter_by_stock_types(rows: &Rows, allowed: &[i32]) -> Rows {
    if allowed.is_empty() {
        return rows.clone();
    }
    // Iterator idiom: filter first, then clone only the surviving rows.
    rows.iter()
        .filter(|r| {
            r.get("stock_type_id")
                .and_then(|v| v.parse::<i32>().ok())
                .is_some_and(|t| allowed.contains(&t))
        })
        .cloned()
        .collect()
}
/// Sets the absolute quantity of a stock row.
fn update_stock_qty(pool: &ConnectionPool, id: i32, qty: i64) -> Result<(), DbError> {
    use postgres::types::ToSql;
    let mut conn = pool
        .get()
        .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
    conn.prepare("ug_update_stock_qty", Q_UPDATE_STOCK_QTY)?;
    // The id (i32) and quantity (i64) differ in type, so the parameter slice
    // is built from ToSql trait objects.
    let params: [&(dyn ToSql + Sync); 2] = [&id, &qty];
    conn.execute("ug_update_stock_qty", &params)?;
    Ok(())
}
/// "sabotage" with target "storage": destroys a random amount (at most a
/// quarter of the branch total) of stock, spread over randomly ordered rows.
///
/// Params: `branch_id` (required); optional `stock_type_ids` whitelist.
fn sabotage_storage(
    pool: &ConnectionPool,
    performer_id: i32,
    victim_id: i32,
    p: &Json,
) -> Result<Json, DbError> {
    let branch_id = match p.get("branch_id").and_then(|v| v.as_i64()) {
        Some(id) => id as i32,
        None => {
            return Ok(json!({
                "status": "error",
                "action": "sabotage",
                "target": "storage",
                "message": "branch_id_required",
                "performer_id": performer_id,
                "victim_id": victim_id,
                "details": p
            }))
        }
    };
    // Optional whitelist of stock types to hit.
    let mut allowed = Vec::new();
    if let Some(arr) = p.get("stock_type_ids").and_then(|v| v.as_array()) {
        for v in arr {
            if let Some(id) = v.as_i64() {
                allowed.push(id as i32);
            }
        }
    }
    let rows_all = Self::select_stock_by_branch(pool, branch_id)?;
    let mut rows = Self::filter_by_stock_types(&rows_all, &allowed);
    if rows.is_empty() {
        return Ok(json!({
            "status": "success",
            "action": "sabotage",
            "target": "storage",
            "performer_id": performer_id,
            "victim_id": victim_id,
            "details": p,
            "removed_total": 0,
            "affected_rows": []
        }));
    }
    // Total quantity over all matching rows.
    let mut total: i64 = 0;
    for r in &rows {
        if let Some(q) = r.get("quantity").and_then(|v| v.parse::<i64>().ok()) {
            total += q;
        }
    }
    if total <= 0 {
        return Ok(json!({
            "status": "success",
            "action": "sabotage",
            "target": "storage",
            "performer_id": performer_id,
            "victim_id": victim_id,
            "details": p,
            "removed_total": 0,
            "affected_rows": []
        }));
    }
    // At most a quarter may be destroyed; integer division yields 0 for
    // totals below 4, which counts as "nothing to remove".
    let cap = total / 4;
    if cap <= 0 {
        return Ok(json!({
            "status": "success",
            "action": "sabotage",
            "target": "storage",
            "performer_id": performer_id,
            "victim_id": victim_id,
            "details": p,
            "removed_total": 0,
            "affected_rows": []
        }));
    }
    let mut rng = rand::thread_rng();
    let mut to_remove = random_ll(1, cap);
    // Randomize row order so the losses are not biased toward large stocks.
    rows.shuffle(&mut rng);
    let mut affected = Vec::new();
    for r in rows {
        if to_remove == 0 {
            break;
        }
        let id = parse_i32(&r, "id", -1);
        let q = r
            .get("quantity")
            .and_then(|v| v.parse::<i64>().ok())
            .unwrap_or(0);
        if q <= 0 {
            continue;
        }
        // Take a random slice of this row, bounded by what is still owed.
        let take = random_ll(1, min(q, to_remove));
        let newq = q - take;
        Self::update_stock_qty(pool, id, newq)?;
        to_remove -= take;
        let entry = json!({
            "id": id,
            "stock_type_id": parse_i32(&r, "stock_type_id", -1),
            "previous_quantity": q,
            "new_quantity": newq,
            "removed": take
        });
        affected.push(entry);
    }
    // Recompute the actually removed amount from the per-row entries.
    let removed_total: i64 = affected
        .iter()
        .filter_map(|a| a.get("removed").and_then(|v| v.as_i64()))
        .sum();
    Ok(json!({
        "status": "success",
        "action": "sabotage",
        "target": "storage",
        "performer_id": performer_id,
        "victim_id": victim_id,
        "details": p,
        "removed_total": removed_total,
        "affected_rows": affected
    }))
}
/// "corrupt_politician": placeholder — performs no game-state change and
/// simply echoes the request back as a successful result.
fn corrupt_politician(
    performer_id: i32,
    victim_id: i32,
    p: &Json,
) -> Json {
    json!({
        "status": "success",
        "action": "corrupt_politician",
        "performer_id": performer_id,
        "victim_id": victim_id,
        "details": p
    })
}
/// "rob": coin flip between stealing goods from a branch warehouse (up to
/// half of its total stock, spread over random rows) or stealing money
/// (a random rate up to 18% of the victim's cash, min 0.01).
fn rob(
    pool: &ConnectionPool,
    performer_id: i32,
    victim_id: i32,
    p: &Json,
) -> Result<Json, DbError> {
    // Resolve character -> owning user; -1 is the "not found" sentinel.
    let user_id = Self::get_user_id_for_character(pool, victim_id)?;
    if user_id < 0 {
        return Ok(json!({
            "status": "error",
            "action": "rob",
            "message": "victim_not_found",
            "performer_id": performer_id,
            "victim_id": victim_id,
            "details": p
        }));
    }
    let mut conn = pool
        .get()
        .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
    conn.prepare("ug_select_falukant_user", Q_SELECT_FALUKANT_USER)?;
    let fu = conn.execute("ug_select_falukant_user", &[&user_id])?;
    if fu.is_empty() {
        return Ok(json!({
            "status": "error",
            "action": "rob",
            "message": "falukant_user_not_found",
            "performer_id": performer_id,
            "victim_id": victim_id,
            "details": p
        }));
    }
    let falukant_user_id = parse_i32(&fu[0], "id", -1);
    let money = fu[0]
        .get("money")
        .and_then(|v| v.parse::<f64>().ok())
        .unwrap_or(0.0);
    let default_branch = parse_i32(&fu[0], "main_branch_region_id", 0);
    // Coin flip: goods robbery vs. money robbery.
    let steal_goods = random_int(0, 1) == 1;
    if steal_goods {
        // Branch from params, falling back to the victim's main branch.
        let branch_id = p
            .get("branch_id")
            .and_then(|v| v.as_i64())
            .map(|v| v as i32)
            .unwrap_or(default_branch);
        if branch_id <= 0 {
            return Ok(json!({
                "status": "success",
                "action": "rob",
                "mode": "goods",
                "performer_id": performer_id,
                "victim_id": victim_id,
                "details": p,
                "removed_total": 0,
                "affected_rows": []
            }));
        }
        let rows_all = Self::select_stock_by_branch(pool, branch_id)?;
        let mut rows = rows_all;
        if rows.is_empty() {
            return Ok(json!({
                "status": "success",
                "action": "rob",
                "mode": "goods",
                "performer_id": performer_id,
                "victim_id": victim_id,
                "details": p,
                "removed_total": 0,
                "affected_rows": []
            }));
        }
        // Total quantity across all rows of the branch.
        let mut total: i64 = 0;
        for r in &rows {
            if let Some(q) = r.get("quantity").and_then(|v| v.parse::<i64>().ok()) {
                total += q;
            }
        }
        if total <= 0 {
            return Ok(json!({
                "status": "success",
                "action": "rob",
                "mode": "goods",
                "performer_id": performer_id,
                "victim_id": victim_id,
                "details": p,
                "removed_total": 0,
                "affected_rows": []
            }));
        }
        // Steal at most half of the total (at least 1).
        let cap = max(1_i64, total / 2);
        let mut to_remove = random_ll(1, cap);
        let mut rng = rand::thread_rng();
        // Random row order, then peel off random amounts until satisfied.
        rows.shuffle(&mut rng);
        let mut affected = Vec::new();
        for r in rows {
            if to_remove == 0 {
                break;
            }
            let id = parse_i32(&r, "id", -1);
            let q = r
                .get("quantity")
                .and_then(|v| v.parse::<i64>().ok())
                .unwrap_or(0);
            if q <= 0 {
                continue;
            }
            let take = random_ll(1, min(q, to_remove));
            let newq = q - take;
            Self::update_stock_qty(pool, id, newq)?;
            to_remove -= take;
            affected.push(json!({
                "id": id,
                "stock_type_id": parse_i32(&r, "stock_type_id", -1),
                "previous_quantity": q,
                "new_quantity": newq,
                "removed": take
            }));
        }
        let removed: i64 = affected
            .iter()
            .filter_map(|a| a.get("removed").and_then(|v| v.as_i64()))
            .sum();
        Ok(json!({
            "status": "success",
            "action": "rob",
            "mode": "goods",
            "performer_id": performer_id,
            "victim_id": victim_id,
            "details": p,
            "removed_total": removed,
            "affected_rows": affected
        }))
    } else {
        if money <= 0.0 {
            return Ok(json!({
                "status": "success",
                "action": "rob",
                "mode": "money",
                "performer_id": performer_id,
                "victim_id": victim_id,
                "details": p,
                "stolen": 0.0,
                "balance_before": 0.0,
                "balance_after": 0.0
            }));
        }
        // Steal a random rate in [0, 0.18) of the cash, rounded to cents,
        // clamped to [0.01, money].
        let rate = random_double(0.0, 0.18);
        let mut amount = (money * rate * 100.0).round() / 100.0;
        if amount < 0.01 {
            amount = 0.01;
        }
        if amount > money {
            amount = money;
        }
        // NOTE(review): this event JSON is built but never published — see
        // the comment below about broker access.
        let _msg = json!({
            "event": "money_changed",
            "reason": "robbery",
            "delta": -amount,
            "performer_id": performer_id,
            "victim_id": victim_id
        });
        if let Err(err) =
            change_falukant_user_money(pool, falukant_user_id, -amount, "robbery")
        {
            eprintln!(
                "[UndergroundWorker] Fehler bei change_falukant_user_money: {err}"
            );
        }
        // Publish the event manually?
        // (BaseWorker currently only handles the DB change.)
        // Note: we have no direct access to the broker here, so the event
        // is only signaled via the returned JSON.
        let after = ((money - amount) * 100.0).round() / 100.0;
        Ok(json!({
            "status": "success",
            "action": "rob",
            "mode": "money",
            "performer_id": performer_id,
            "victim_id": victim_id,
            "details": p,
            "stolen": amount,
            "rate": rate,
            "balance_before": money,
            "balance_after": after
        }))
    }
}
/// Stores the JSON outcome of an underground action on its row.
fn update_result(pool: &ConnectionPool, id: i32, result: &Json) -> Result<(), DbError> {
    let mut conn = pool
        .get()
        .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
    conn.prepare("ug_update_result", Q_UPDATE_RESULT)?;
    // Serialize once; the statement casts the text to jsonb ($2::jsonb).
    let serialized = result.to_string();
    conn.execute("ug_update_result", &[&id, &serialized])?;
    Ok(())
}
}
impl Worker for UndergroundWorker {
    /// Spawns the background thread that drives `run_loop`, handing it
    /// clones of the shared connection pool and message broker.
    fn start_worker_thread(&mut self) {
        let pool = self.base.pool.clone();
        let broker = self.base.broker.clone();
        self.base
            .start_worker_with_loop(move |state: Arc<WorkerState>| {
                UndergroundWorker::run_loop(pool.clone(), broker.clone(), state);
            });
    }

    /// Requests the worker loop to stop (delegates to BaseWorker).
    fn stop_worker_thread(&mut self) {
        self.base.stop_worker();
    }

    /// Turns on the BaseWorker watchdog for this worker.
    fn enable_watchdog(&mut self) {
        self.base.start_watchdog();
    }
}
// Hilfsfunktionen für Zufall und Parsing
// Helper functions for randomness and parsing.
/// Uniformly random i32 in the inclusive range [lo, hi].
///
/// # Panics
/// Panics if `lo > hi` (empty range); the call sites visible in this file
/// always pass lo <= hi.
fn random_int(lo: i32, hi: i32) -> i32 {
    let mut rng = rand::thread_rng();
    rng.gen_range(lo..=hi)
}
/// Uniformly random i64 in the inclusive range [lo, hi].
///
/// # Panics
/// Panics if `lo > hi` (empty range).
fn random_ll(lo: i64, hi: i64) -> i64 {
    let mut rng = rand::thread_rng();
    rng.gen_range(lo..=hi)
}
/// Returns `k` distinct indices drawn uniformly from 0..n (all of them,
/// shuffled, when k >= n).
fn random_indices(n: usize, k: usize) -> Vec<usize> {
    let mut idx: Vec<usize> = (0..n).collect();
    idx.shuffle(&mut rand::thread_rng());
    // `truncate` is already a no-op when k >= len, so the previous
    // `if k < idx.len()` guard was redundant.
    idx.truncate(k);
    idx
}
/// Uniformly random f64 in the half-open range [lo, hi).
///
/// # Panics
/// Panics if `lo >= hi` (empty range); the only call site in this file
/// passes (0.0, 0.18).
fn random_double(lo: f64, hi: f64) -> f64 {
    let mut rng = rand::thread_rng();
    rng.gen_range(lo..hi)
}
/// Reads `key` from a string-typed DB row and parses it as i32, falling
/// back to `default` when the column is missing or not numeric.
fn parse_i32(row: &Row, key: &str, default: i32) -> i32 {
    row.get(key)
        .and_then(|v| v.parse::<i32>().ok())
        .unwrap_or(default)
}
/// Applies a money delta to a falukant user via the stored procedure
/// `falukant_data.update_money(user, delta, action)`.
fn change_falukant_user_money(
    pool: &ConnectionPool,
    falukant_user_id: i32,
    money_change: f64,
    action: &str,
) -> Result<(), DbError> {
    use postgres::types::ToSql;
    let mut conn = pool
        .get()
        .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
    conn.prepare("ug_update_money", QUERY_UPDATE_MONEY)?;
    // Mixed parameter types (i32, f64, &str) require ToSql trait objects.
    let params: [&(dyn ToSql + Sync); 3] = [&falukant_user_id, &money_change, &action];
    conn.execute("ug_update_money", &params)?;
    Ok(())
}

1179
src/worker/user_character.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,509 @@
use crate::db::{ConnectionPool, DbError, Row};
use crate::message_broker::MessageBroker;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::{Duration, Instant};
use super::base::{BaseWorker, Worker, WorkerState};
/// Periodic worker that recalculates derived game values: product knowledge,
/// regional sell prices, marriages and finished study orders.
pub struct ValueRecalculationWorker {
    // Shared worker plumbing: name, DB pool, broker, thread/watchdog state.
    base: BaseWorker,
}
// Product knowledge / production logs
// Bumps knowledge (capped at 100) for yesterday's production.
// NOTE(review): this statement hard-codes c.user_id = 18 and
// k.product_id = 10 (looks like leftover debug values), and the JOIN on
// falukant_log.production is correlated only by date, not by character or
// product — confirm the intended semantics.
const QUERY_UPDATE_PRODUCT_KNOWLEDGE_USER: &str = r#"
UPDATE falukant_data.knowledge k
SET knowledge = LEAST(100, k.knowledge + 1)
FROM falukant_data.character c
JOIN falukant_log.production p
  ON DATE(p.production_timestamp) = CURRENT_DATE - INTERVAL '1 day'
WHERE c.id = k.character_id
  AND c.user_id = 18
  AND k.product_id = 10;
"#;
// Purges production log rows older than today.
const QUERY_DELETE_OLD_PRODUCTIONS: &str = r#"
DELETE FROM falukant_log.production flp
WHERE DATE(flp.production_timestamp) < CURRENT_DATE;
"#;
// Distinct producers that logged production yesterday (for notifications).
const QUERY_GET_PRODUCERS_LAST_DAY: &str = r#"
SELECT p.producer_id
FROM falukant_log.production p
WHERE DATE(p.production_timestamp) = CURRENT_DATE - INTERVAL '1 day'
GROUP BY producer_id;
"#;
// Regional sell prices
// Moves worth_percent toward demand: -1 if yesterday's quantity was above
// the product's average sell quantity, +1 if below, clamped to 0..=100.
// NOTE(review): the AVG subquery spans all rows of falukant_log.sell for
// the product, not just yesterday — confirm this is intended.
const QUERY_UPDATE_REGION_SELL_PRICE: &str = r#"
UPDATE falukant_data.town_product_worth tpw
SET worth_percent =
    GREATEST(
        0,
        LEAST(
            CASE
                WHEN s.quantity > avg_sells THEN tpw.worth_percent - 1
                WHEN s.quantity < avg_sells THEN tpw.worth_percent + 1
                ELSE tpw.worth_percent
            END,
            100
        )
    )
FROM (
    SELECT region_id,
           product_id,
           quantity,
           (SELECT AVG(quantity)
            FROM falukant_log.sell avs
            WHERE avs.product_id = s.product_id) AS avg_sells
    FROM falukant_log.sell s
    WHERE DATE(s.sell_timestamp) = CURRENT_DATE - INTERVAL '1 day'
) s
WHERE tpw.region_id = s.region_id
  AND tpw.product_id = s.product_id;
"#;
// Purges sell log rows older than today.
const QUERY_DELETE_REGION_SELL_PRICE: &str = r#"
DELETE FROM falukant_log.sell s
WHERE DATE(s.sell_timestamp) < CURRENT_DATE;
"#;
// Regions with sell activity yesterday (for notifications).
const QUERY_GET_SELL_REGIONS: &str = r#"
SELECT s.region_id
FROM falukant_log.sell s
WHERE DATE(s.sell_timestamp) = CURRENT_DATE - INTERVAL '1 day'
GROUP BY region_id;
"#;
// Marriages / relationships
// Promotes "engaged" relationships to "married" one day after a wedding
// party, returning both partners' user ids for notification.
const QUERY_SET_MARRIAGES_BY_PARTY: &str = r#"
WITH updated_relations AS (
    UPDATE falukant_data.relationship AS rel
    SET relationship_type_id = (
        SELECT id
        FROM falukant_type.relationship AS rt
        WHERE rt.tr = 'married'
    )
    WHERE rel.id IN (
        SELECT rel2.id
        FROM falukant_data.party AS p
        JOIN falukant_type.party AS pt
          ON pt.id = p.party_type_id
         AND pt.tr = 'wedding'
        JOIN falukant_data.falukant_user AS fu
          ON fu.id = p.falukant_user_id
        JOIN falukant_data.character AS c
          ON c.user_id = fu.id
        JOIN falukant_data.relationship AS rel2
          ON rel2.character1_id = c.id
          OR rel2.character2_id = c.id
        JOIN falukant_type.relationship AS rt2
          ON rt2.id = rel2.relationship_type_id
         AND rt2.tr = 'engaged'
        WHERE p.created_at <= NOW() - INTERVAL '1 day'
    )
    RETURNING character1_id, character2_id
)
SELECT
    c1.user_id AS character1_user,
    c2.user_id AS character2_user
FROM updated_relations AS ur
JOIN falukant_data.character AS c1
  ON c1.id = ur.character1_id
JOIN falukant_data.character AS c2
  ON c2.id = ur.character2_id;
"#;
// Learning / studying
// Study orders due for execution (created more than one day ago).
const QUERY_GET_STUDYINGS_TO_EXECUTE: &str = r#"
SELECT
    l.id,
    l.associated_falukant_user_id,
    l.associated_learning_character_id,
    l.learn_all_products,
    l.learning_recipient_id,
    l.product_id,
    lr.tr
FROM falukant_data.learning l
JOIN falukant_type.learn_recipient lr
  ON lr.id = l.learning_recipient_id
WHERE l.learning_is_executed = FALSE
  AND l.created_at + INTERVAL '1 day' < NOW();
"#;
// Character(s) belonging to a user.
const QUERY_GET_OWN_CHARACTER_ID: &str = r#"
SELECT id
FROM falukant_data.character c
WHERE c.user_id = $1;
"#;
// Knowledge bumps for one product / all products, capped at 100.
const QUERY_INCREASE_ONE_PRODUCT_KNOWLEDGE: &str = r#"
UPDATE falukant_data.knowledge k
SET knowledge = LEAST(100, k.knowledge + $1)
WHERE k.character_id = $2
  AND k.product_id = $3;
"#;
const QUERY_INCREASE_ALL_PRODUCTS_KNOWLEDGE: &str = r#"
UPDATE falukant_data.knowledge k
SET knowledge = LEAST(100, k.knowledge + $1)
WHERE k.character_id = $2;
"#;
// Marks a study order as executed.
const QUERY_SET_LEARNING_DONE: &str = r#"
UPDATE falukant_data.learning
SET learning_is_executed = TRUE
WHERE id = $1;
"#;
impl ValueRecalculationWorker {
/// Creates the worker on top of the shared connection pool and broker.
pub fn new(pool: ConnectionPool, broker: MessageBroker) -> Self {
    let base = BaseWorker::new("ValueRecalculationWorker", pool, broker);
    Self { base }
}
/// Worker loop: product knowledge and regional sell prices run on a ~24h
/// interval (Instant-based, not wall-clock), marriages and studying on
/// every 60-second pass.
fn run_loop(pool: ConnectionPool, broker: MessageBroker, state: Arc<WorkerState>) {
    // Simple interval logic (daily / every pass) instead of reproducing the
    // exact clock times of the C++ version — functionally similar.
    // NOTE(review): `should_run_interval` is defined elsewhere in this file;
    // presumably "run when None or elapsed >= interval" — confirm there.
    let mut last_product = None;
    let mut last_sell_price = None;
    loop {
        if !state.running_worker.load(Ordering::Relaxed) {
            break;
        }
        let now = Instant::now();
        // Product knowledge once per day.
        if should_run_interval(last_product, now, Duration::from_secs(24 * 3600)) {
            if let Err(err) = Self::calculate_product_knowledge_inner(&pool, &broker) {
                eprintln!("[ValueRecalculationWorker] Fehler in calculateProductKnowledge: {err}");
            }
            last_product = Some(now);
        }
        // Regional sell prices once per day (around noon in the original).
        if should_run_interval(last_sell_price, now, Duration::from_secs(24 * 3600)) {
            if let Err(err) = Self::calculate_regional_sell_price_inner(&pool, &broker) {
                eprintln!("[ValueRecalculationWorker] Fehler in calculateRegionalSellPrice: {err}");
            }
            last_sell_price = Some(now);
        }
        // Marriages & studying on every pass.
        if let Err(err) = Self::calculate_marriages_inner(&pool, &broker) {
            eprintln!("[ValueRecalculationWorker] Fehler in calculateMarriages: {err}");
        }
        if let Err(err) = Self::calculate_studying_inner(&pool, &broker) {
            eprintln!("[ValueRecalculationWorker] Fehler in calculateStudying: {err}");
        }
        // Split the 60-second wait into short slices so a shutdown
        // (running_worker = false) takes effect quickly.
        const SLICE_MS: u64 = 500;
        let total_ms = 60_000;
        let mut slept = 0;
        while slept < total_ms {
            if !state.running_worker.load(Ordering::Relaxed) {
                break;
            }
            let remaining = total_ms - slept;
            let slice = SLICE_MS.min(remaining);
            std::thread::sleep(Duration::from_millis(slice));
            slept += slice;
        }
    }
}
/// Daily job: bumps product knowledge from yesterday's production log,
/// notifies each producer, then purges the consumed log rows.
fn calculate_product_knowledge_inner(
    pool: &ConnectionPool,
    broker: &MessageBroker,
) -> Result<(), DbError> {
    let mut conn = pool
        .get()
        .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
    conn.prepare(
        "update_product_knowledge_user",
        QUERY_UPDATE_PRODUCT_KNOWLEDGE_USER,
    )?;
    conn.execute("update_product_knowledge_user", &[])?;
    conn.prepare("get_producers_last_day", QUERY_GET_PRODUCERS_LAST_DAY)?;
    for row in conn.execute("get_producers_last_day", &[])? {
        if let Some(user_id) = row.get("producer_id").and_then(|v| v.parse::<i32>().ok()) {
            broker.publish(format!(
                r#"{{"event":"price_update","user_id":{}}}"#,
                user_id
            ));
        }
    }
    conn.prepare("delete_old_productions", QUERY_DELETE_OLD_PRODUCTIONS)?;
    conn.execute("delete_old_productions", &[])?;
    Ok(())
}
/// Daily job: nudges each region/product worth_percent toward average
/// sales, notifies affected regions, then purges consumed sell-log rows.
fn calculate_regional_sell_price_inner(
    pool: &ConnectionPool,
    broker: &MessageBroker,
) -> Result<(), DbError> {
    let mut conn = pool
        .get()
        .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
    conn.prepare("update_region_sell_price", QUERY_UPDATE_REGION_SELL_PRICE)?;
    conn.execute("update_region_sell_price", &[])?;
    conn.prepare("get_sell_regions", QUERY_GET_SELL_REGIONS)?;
    for row in conn.execute("get_sell_regions", &[])? {
        if let Some(region_id) = row.get("region_id").and_then(|v| v.parse::<i32>().ok()) {
            broker.publish(format!(
                r#"{{"event":"price_update","region_id":{}}}"#,
                region_id
            ));
        }
    }
    conn.prepare("delete_region_sell_price", QUERY_DELETE_REGION_SELL_PRICE)?;
    conn.execute("delete_region_sell_price", &[])?;
    Ok(())
}
/// Promotes due engagements to marriages (one day after the wedding party)
/// and notifies both partners.
fn calculate_marriages_inner(
    pool: &ConnectionPool,
    broker: &MessageBroker,
) -> Result<(), DbError> {
    let mut conn = pool
        .get()
        .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
    conn.prepare("set_marriages_by_party", QUERY_SET_MARRIAGES_BY_PARTY)?;
    let rows = conn.execute("set_marriages_by_party", &[])?;
    for row in rows {
        // Both partners get the same notification; iterate over the two
        // columns instead of duplicating the publish logic.
        for col in ["character1_user", "character2_user"] {
            if let Some(uid) = row.get(col).and_then(|v| v.parse::<i32>().ok()) {
                broker.publish(format!(
                    r#"{{"event":"relationship_changed","user_id":{}}}"#,
                    uid
                ));
            }
        }
    }
    Ok(())
}
/// Executes every study order that is at least one day old and not yet
/// applied, then marks it as done.
fn calculate_studying_inner(
    pool: &ConnectionPool,
    broker: &MessageBroker,
) -> Result<(), DbError> {
    let mut conn = pool
        .get()
        .map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
    conn.prepare("get_studyings_to_execute", QUERY_GET_STUDYINGS_TO_EXECUTE)?;
    conn.prepare("set_learning_done", QUERY_SET_LEARNING_DONE)?;
    for study in conn.execute("get_studyings_to_execute", &[])? {
        // Dispatch on the recipient type tag; unknown tags are skipped but
        // the order is still marked done below.
        match study.get("tr").map(String::as_str).unwrap_or_default() {
            "self" => Self::calculate_studying_self(pool, broker, &study)?,
            "children" | "director" => {
                Self::calculate_studying_for_associated_character(pool, broker, &study)?
            }
            _ => {}
        }
        if let Some(id) = study.get("id").and_then(|v| v.parse::<i32>().ok()) {
            conn.execute("set_learning_done", &[&id])?;
        }
    }
    Ok(())
}
fn calculate_studying_self(
pool: &ConnectionPool,
broker: &MessageBroker,
entry: &Row,
) -> Result<(), DbError> {
let falukant_user_id = match entry
.get("associated_falukant_user_id")
.and_then(|v| v.parse::<i32>().ok())
{
Some(id) => id,
None => return Ok(()),
};
let (learn_all, product_id) = study_scope(entry);
let character_id = Self::get_own_character_id(pool, falukant_user_id)?;
if let Some(cid) = character_id {
Self::calculate_studying_character(
pool,
broker,
cid,
learn_all,
product_id,
parse_i32(entry, "learning_recipient_id", -1),
)?;
}
Ok(())
}
fn calculate_studying_for_associated_character(
pool: &ConnectionPool,
broker: &MessageBroker,
entry: &Row,
) -> Result<(), DbError> {
let character_id = parse_i32(entry, "associated_learning_character_id", -1);
if character_id < 0 {
return Ok(());
}
let (learn_all, product_id) = study_scope(entry);
let recipient_id = parse_i32(entry, "learning_recipient_id", -1);
Self::calculate_studying_character(
pool,
broker,
character_id,
learn_all,
product_id,
recipient_id,
)
}
fn get_own_character_id(
pool: &ConnectionPool,
falukant_user_id: i32,
) -> Result<Option<i32>, DbError> {
let mut conn = pool
.get()
.map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
conn.prepare("get_own_character_id", QUERY_GET_OWN_CHARACTER_ID)?;
let rows = conn.execute("get_own_character_id", &[&falukant_user_id])?;
Ok(rows
.get(0)
.and_then(|r| r.get("id"))
.and_then(|v| v.parse::<i32>().ok()))
}
fn calculate_studying_character(
pool: &ConnectionPool,
broker: &MessageBroker,
character_id: i32,
learn_all: bool,
product_id: Option<i32>,
falukant_user_id: i32,
) -> Result<(), DbError> {
let mut conn = pool
.get()
.map_err(|e| DbError::new(format!("DB-Verbindung fehlgeschlagen: {e}")))?;
if learn_all {
conn.prepare(
"increase_all_products_knowledge",
QUERY_INCREASE_ALL_PRODUCTS_KNOWLEDGE,
)?;
conn.execute(
"increase_all_products_knowledge",
&[&1_i32, &character_id],
)?;
} else if let Some(pid) = product_id {
conn.prepare(
"increase_one_product_knowledge",
QUERY_INCREASE_ONE_PRODUCT_KNOWLEDGE,
)?;
conn.execute(
"increase_one_product_knowledge",
&[&5_i32, &character_id, &pid],
)?;
}
let message =
format!(r#"{{"event":"knowledge_updated","user_id":{}}}"#, falukant_user_id);
broker.publish(message);
Ok(())
}
}
impl Worker for ValueRecalculationWorker {
    /// Spawns the recalculation loop on the shared worker infrastructure.
    fn start_worker_thread(&mut self) {
        // Clone the shared handles so the loop closure can own them.
        let pool_handle = self.base.pool.clone();
        let broker_handle = self.base.broker.clone();
        let loop_body = move |state: Arc<WorkerState>| {
            ValueRecalculationWorker::run_loop(
                pool_handle.clone(),
                broker_handle.clone(),
                state,
            );
        };
        self.base.start_worker_with_loop(loop_body);
    }

    /// Signals the loop to stop via the shared worker base.
    fn stop_worker_thread(&mut self) {
        self.base.stop_worker();
    }

    /// Activates the base watchdog supervision for this worker.
    fn enable_watchdog(&mut self) {
        self.base.start_watchdog();
    }
}
/// Returns `true` when at least `interval` has elapsed since `last_run`,
/// or when the task has never run at all (`last_run` is `None`).
fn should_run_interval(
    last_run: Option<Instant>,
    now: Instant,
    interval: Duration,
) -> bool {
    // `saturating_duration_since` yields zero if `prev` is after `now`,
    // so a clock anomaly never panics here.
    last_run.map_or(true, |prev| now.saturating_duration_since(prev) >= interval)
}
/// Reads `key` from `row` and parses it as an `i32`, falling back to
/// `default` when the key is missing or the value is not a valid integer.
fn parse_i32(row: &Row, key: &str, default: i32) -> i32 {
    match row.get(key) {
        Some(raw) => raw.parse().unwrap_or(default),
        None => default,
    }
}
/// Determines the scope of a study entry.
///
/// Returns `(false, Some(id))` only when the entry targets a single, valid
/// product and does not request all products; every other combination —
/// explicit all-products flag, missing/empty/unparseable product id — falls
/// back to `(true, None)` (learn everything).
fn study_scope(entry: &Row) -> (bool, Option<i32>) {
    // "t" is Postgres's textual encoding of boolean true.
    let wants_all = matches!(entry.get("learn_all_products"), Some(v) if v == "t");
    if !wants_all {
        // An empty or absent product id fails the parse and falls through.
        if let Some(id) = entry.get("product_id").and_then(|s| s.parse::<i32>().ok()) {
            return (false, Some(id));
        }
    }
    (true, None)
}