Compare commits

..

5 Commits

Author SHA1 Message Date
Torsten Schulz (local)
d74f7b852b Refactor proposal generation in FalukantService to improve character selection logic
- Removed the tracking of used character IDs and streamlined the exclusion of characters already proposed or currently active as directors.
- Enhanced logging for SQL queries and fallback mechanisms to ensure better visibility during character selection.
- Implemented a more efficient approach to gather and process character knowledge for proposal creation, ensuring accurate average calculations.
- Improved error handling to provide clearer feedback when no eligible characters are found.
2026-01-12 08:33:26 +01:00
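A minimal, self-contained sketch of the selection flow this commit describes, using plain arrays instead of the real FalukantService models; the function and field names below are assumptions, not the actual implementation:

```js
// Exclude characters that were already proposed or are currently active directors,
// then compute the average knowledge per remaining candidate.
function selectProposalCandidates(characters, proposedIds, activeDirectorIds) {
  const excluded = new Set([...proposedIds, ...activeDirectorIds]);
  return characters.filter((c) => !excluded.has(c.id));
}

function averageKnowledge(knowledgeRows) {
  if (knowledgeRows.length === 0) return 0; // no knowledge rows yet -> average of 0
  return knowledgeRows.reduce((sum, row) => sum + row.value, 0) / knowledgeRows.length;
}

// Example with made-up data:
const eligible = selectProposalCandidates(
  [{ id: 1 }, { id: 2 }, { id: 3 }],
  [2], // already proposed
  [3]  // currently active as director
);
console.log(eligible.map((c) => c.id));                        // [1]
console.log(averageKnowledge([{ value: 40 }, { value: 60 }])); // 50
```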
Torsten Schulz (local)
92d6b15c3f Enhance proposal generation logic in FalukantService to prevent duplicate character usage
- Introduced a mechanism to track used character IDs, ensuring that previously proposed characters are excluded from future proposals.
- Added error handling and logging for scenarios where no eligible characters are found, improving traceability and user feedback.
- Implemented a fallback to include newer characters if older ones are unavailable, enhancing the robustness of the proposal generation process.
2026-01-12 08:24:00 +01:00
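A rough sketch of the duplicate-prevention and fallback behaviour described above, with hypothetical names and plain data standing in for the service's database queries:

```js
// Track already-used character IDs and fall back to newer characters when no
// older candidate is left; throw if nothing is eligible at all.
function pickNextCharacter(olderCharacters, newerCharacters, usedIds) {
  const used = new Set(usedIds);
  const candidate =
    olderCharacters.find((c) => !used.has(c.id)) ??
    newerCharacters.find((c) => !used.has(c.id)); // fallback to newer characters
  if (!candidate) {
    // Mirrors the commit's error handling when no eligible character exists.
    throw new Error('No eligible character found for proposal generation');
  }
  used.add(candidate.id);
  return { candidate, usedIds: [...used] };
}

// Example: the older pool is exhausted, so a newer character is used.
const result = pickNextCharacter([{ id: 1 }], [{ id: 5 }], [1]);
console.log(result.candidate.id); // 5
```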
Torsten Schulz (local)
91f59062f5 Update BranchView to refresh active tab data on tab change and modify 3D model for female toddler character
- Enhanced the activeTab watcher to refresh data only when the selected branch changes and the tab is switched.
- Introduced a new refreshActiveTab method to load data for the currently active tab, improving data management and user experience.
- Updated the female toddler 3D model file for better integration in the application.
2026-01-12 08:07:50 +01:00
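A sketch of the watcher pattern this commit describes, as it might look in a Vue Options API component; the data fields and loader below are placeholders, not the actual BranchView code:

```js
export default {
  data() {
    return { selectedBranch: null, activeTab: 'overview', tabData: {} };
  },
  watch: {
    // Refresh only when the tab actually changes while a branch is selected.
    activeTab(newTab, oldTab) {
      if (this.selectedBranch && newTab !== oldTab) {
        this.refreshActiveTab();
      }
    },
  },
  methods: {
    // Load data only for the tab that is currently visible.
    async refreshActiveTab() {
      this.tabData[this.activeTab] = await this.loadTabData(this.activeTab);
    },
    async loadTabData(tab) {
      // Placeholder; the real component would call its API service here.
      return { tab, loadedAt: Date.now() };
    },
  },
};
```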
Torsten Schulz (local)
1674086c73 Enhance partner search and gift loading functionality in FalukantService and FamilyView
- Added detailed logging for partner search criteria and results in FalukantService to improve debugging and traceability.
- Refactored partner search logic to use a dynamic where clause for better readability and maintainability.
- Implemented error handling in FamilyView for gift loading, ensuring an empty array is set on failure to load gifts, enhancing user experience.
2026-01-09 14:28:01 +01:00
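A simplified sketch of the two changes described above; the criteria fields and the fetch callback are illustrative assumptions, not the service's real API:

```js
// Build a where clause from only the criteria that are actually set,
// and log it, analogous to the logging this commit adds for partner searches.
function buildPartnerWhere(criteria) {
  const where = {};
  if (criteria.regionId != null) where.regionId = criteria.regionId;
  if (criteria.gender) where.gender = criteria.gender;
  if (criteria.maxAge != null) where.maxAge = criteria.maxAge;
  console.log('[FalukantService] partner search criteria:', where);
  return where;
}

// FamilyView-style gift loading: fall back to an empty list on failure.
async function loadGifts(fetchGifts) {
  try {
    return await fetchGifts();
  } catch (err) {
    console.error('Failed to load gifts:', err);
    return []; // keep the view usable even when the request fails
  }
}

// Example usage:
buildPartnerWhere({ regionId: 4, maxAge: 40 });
loadGifts(async () => { throw new Error('network down'); }).then(console.log); // []
```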
Torsten Schulz (local)
5ddb099f5a Add 3D character model integration and update dependencies
- Introduced a new CharacterModel3D component for rendering 3D character models in OverviewView.
- Updated package.json and package-lock.json to include 'three' library for 3D graphics support.
- Enhanced Vite configuration to allow access to external files and ensure proper handling of GLB/GLTF assets.
- Improved layout and styling in OverviewView for better visualization of character and avatar.
2026-01-09 13:29:32 +01:00
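A minimal three.js sketch of what a component like CharacterModel3D might do to render a GLB model; the scene setup and function name are illustrative assumptions, not the component's actual code:

```js
import * as THREE from 'three';
import { GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader.js';

// Render a GLB/GLTF character model onto an existing <canvas>.
// Vite must treat .glb/.gltf as assets, e.g. assetsInclude: ['**/*.glb', '**/*.gltf'].
export function renderCharacterModel(canvas, modelUrl) {
  const scene = new THREE.Scene();
  const camera = new THREE.PerspectiveCamera(
    45, canvas.clientWidth / canvas.clientHeight, 0.1, 100
  );
  camera.position.set(0, 1.5, 3);

  const renderer = new THREE.WebGLRenderer({ canvas, antialias: true });
  renderer.setSize(canvas.clientWidth, canvas.clientHeight);
  scene.add(new THREE.AmbientLight(0xffffff, 1.0));

  new GLTFLoader().load(modelUrl, (gltf) => scene.add(gltf.scene));

  (function animate() {
    requestAnimationFrame(animate);
    renderer.render(scene, camera);
  })();
}
```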
102 changed files with 2019 additions and 10342 deletions

.gitignore

@@ -17,8 +17,3 @@ frontend/dist
 frontend/dist/*
 frontedtree.txt
 backend/dist/
-build
-build/*
-.vscode
-.vscode/*
-.clang-format


@@ -1,119 +0,0 @@
cmake_minimum_required(VERSION 3.20)
project(YourPartDaemon VERSION 1.0 LANGUAGES CXX)
# C++ Standard and Compiler Settings
set(CMAKE_CXX_STANDARD 23)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
# Use best available GCC for C++23 support (OpenSUSE Tumbleweed)
# Try GCC 15 first (best C++23 support), then GCC 13, then system default
find_program(GCC15_CC gcc-15)
find_program(GCC15_CXX g++-15)
find_program(GCC13_CC gcc-13)
find_program(GCC13_CXX g++-13)
if(GCC15_CC AND GCC15_CXX)
set(CMAKE_C_COMPILER ${GCC15_CC})
set(CMAKE_CXX_COMPILER ${GCC15_CXX})
message(STATUS "Using GCC 15 for best C++23 support")
elseif(GCC13_CC AND GCC13_CXX)
set(CMAKE_C_COMPILER ${GCC13_CC})
set(CMAKE_CXX_COMPILER ${GCC13_CXX})
message(STATUS "Using GCC 13 for C++23 support")
else()
message(STATUS "Using system default compiler")
endif()
# Optimize for GCC 13 with C++23
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flto=auto -O3 -march=native -mtune=native")
set(CMAKE_CXX_FLAGS_DEBUG "-O1 -g -DDEBUG")
set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG -march=native -mtune=native")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -flto")
set(CMAKE_BUILD_TYPE Release)
# Include /usr/local if needed
list(APPEND CMAKE_PREFIX_PATH /usr/local)
# Find libwebsockets via pkg-config
find_package(PkgConfig REQUIRED)
pkg_check_modules(LWS REQUIRED libwebsockets)
# Find other dependencies
find_package(PostgreSQL REQUIRED)
find_package(Threads REQUIRED)
find_package(nlohmann_json CONFIG REQUIRED)
# PostgreSQL C++ libpqxx
find_package(PkgConfig REQUIRED)
pkg_check_modules(LIBPQXX REQUIRED libpqxx)
# Project sources and headers
set(SOURCES
src/main.cpp
src/config.cpp
src/connection_pool.cpp
src/database.cpp
src/character_creation_worker.cpp
src/produce_worker.cpp
src/message_broker.cpp
src/websocket_server.cpp
src/stockagemanager.cpp
src/director_worker.cpp
src/valuerecalculationworker.cpp
src/usercharacterworker.cpp
src/houseworker.cpp
src/politics_worker.cpp
)
set(HEADERS
src/config.h
src/database.h
src/connection_pool.h
src/worker.h
src/character_creation_worker.h
src/produce_worker.h
src/message_broker.h
src/websocket_server.h
src/stockagemanager.h
src/director_worker.h
src/valuerecalculationworker.h
src/usercharacterworker.h
src/houseworker.h
src/politics_worker.h
)
# Define executable target
add_executable(yourpart-daemon ${SOURCES} ${HEADERS}
src/utils.h src/utils.cpp
src/underground_worker.h src/underground_worker.cpp)
# Include directories
target_include_directories(yourpart-daemon PRIVATE
${PostgreSQL_INCLUDE_DIRS}
${LIBPQXX_INCLUDE_DIRS}
${LWS_INCLUDE_DIRS}
)
# Find systemd
find_package(PkgConfig REQUIRED)
pkg_check_modules(SYSTEMD REQUIRED libsystemd)
# Link libraries
target_link_libraries(yourpart-daemon PRIVATE
${PostgreSQL_LIBRARIES}
Threads::Threads
z ssl crypto
${LIBPQXX_LIBRARIES}
${LWS_LIBRARIES}
nlohmann_json::nlohmann_json
${SYSTEMD_LIBRARIES}
)
# Installation rules
install(TARGETS yourpart-daemon DESTINATION /usr/local/bin)
# Install the template as a reference FIRST (needed by the install script)
install(FILES daemon.conf DESTINATION /etc/yourpart/ RENAME daemon.conf.example)
# Smart configuration-file installation
# Uses a CMake script that only adds missing keys without overwriting existing ones
# The script reads the template from /etc/yourpart/daemon.conf.example or the source directory
install(SCRIPT cmake/install-config.cmake)


@@ -1,414 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE QtCreatorProject>
<!-- Written by QtCreator 17.0.0, 2025-08-16T22:07:06. -->
<qtcreator>
<data>
<variable>EnvironmentId</variable>
<value type="QByteArray">{551ef6b3-a39b-43e2-9ee3-ad56e19ff4f4}</value>
</data>
<data>
<variable>ProjectExplorer.Project.ActiveTarget</variable>
<value type="qlonglong">0</value>
</data>
<data>
<variable>ProjectExplorer.Project.EditorSettings</variable>
<valuemap type="QVariantMap">
<value type="bool" key="EditorConfiguration.AutoDetect">true</value>
<value type="bool" key="EditorConfiguration.AutoIndent">true</value>
<value type="bool" key="EditorConfiguration.CamelCaseNavigation">true</value>
<valuemap type="QVariantMap" key="EditorConfiguration.CodeStyle.0">
<value type="QString" key="language">Cpp</value>
<valuemap type="QVariantMap" key="value">
<value type="QByteArray" key="CurrentPreferences">CppGlobal</value>
</valuemap>
</valuemap>
<valuemap type="QVariantMap" key="EditorConfiguration.CodeStyle.1">
<value type="QString" key="language">QmlJS</value>
<valuemap type="QVariantMap" key="value">
<value type="QByteArray" key="CurrentPreferences">QmlJSGlobal</value>
</valuemap>
</valuemap>
<value type="qlonglong" key="EditorConfiguration.CodeStyle.Count">2</value>
<value type="QByteArray" key="EditorConfiguration.Codec">UTF-8</value>
<value type="bool" key="EditorConfiguration.ConstrainTooltips">false</value>
<value type="int" key="EditorConfiguration.IndentSize">4</value>
<value type="bool" key="EditorConfiguration.KeyboardTooltips">false</value>
<value type="int" key="EditorConfiguration.LineEndingBehavior">0</value>
<value type="int" key="EditorConfiguration.MarginColumn">80</value>
<value type="bool" key="EditorConfiguration.MouseHiding">true</value>
<value type="bool" key="EditorConfiguration.MouseNavigation">true</value>
<value type="int" key="EditorConfiguration.PaddingMode">1</value>
<value type="int" key="EditorConfiguration.PreferAfterWhitespaceComments">0</value>
<value type="bool" key="EditorConfiguration.PreferSingleLineComments">false</value>
<value type="bool" key="EditorConfiguration.ScrollWheelZooming">true</value>
<value type="bool" key="EditorConfiguration.ShowMargin">false</value>
<value type="int" key="EditorConfiguration.SmartBackspaceBehavior">2</value>
<value type="bool" key="EditorConfiguration.SmartSelectionChanging">true</value>
<value type="bool" key="EditorConfiguration.SpacesForTabs">true</value>
<value type="int" key="EditorConfiguration.TabKeyBehavior">0</value>
<value type="int" key="EditorConfiguration.TabSize">8</value>
<value type="bool" key="EditorConfiguration.UseGlobal">true</value>
<value type="bool" key="EditorConfiguration.UseIndenter">false</value>
<value type="int" key="EditorConfiguration.Utf8BomBehavior">1</value>
<value type="bool" key="EditorConfiguration.addFinalNewLine">true</value>
<value type="bool" key="EditorConfiguration.cleanIndentation">true</value>
<value type="bool" key="EditorConfiguration.cleanWhitespace">true</value>
<value type="QString" key="EditorConfiguration.ignoreFileTypes">*.md, *.MD, Makefile</value>
<value type="bool" key="EditorConfiguration.inEntireDocument">false</value>
<value type="bool" key="EditorConfiguration.skipTrailingWhitespace">true</value>
<value type="bool" key="EditorConfiguration.tintMarginArea">true</value>
</valuemap>
</data>
<data>
<variable>ProjectExplorer.Project.PluginSettings</variable>
<valuemap type="QVariantMap">
<valuemap type="QVariantMap" key="AutoTest.ActiveFrameworks">
<value type="bool" key="AutoTest.Framework.Boost">true</value>
<value type="bool" key="AutoTest.Framework.CTest">false</value>
<value type="bool" key="AutoTest.Framework.Catch">true</value>
<value type="bool" key="AutoTest.Framework.GTest">true</value>
<value type="bool" key="AutoTest.Framework.QtQuickTest">true</value>
<value type="bool" key="AutoTest.Framework.QtTest">true</value>
</valuemap>
<value type="bool" key="AutoTest.ApplyFilter">false</value>
<valuemap type="QVariantMap" key="AutoTest.CheckStates"/>
<valuelist type="QVariantList" key="AutoTest.PathFilters"/>
<value type="int" key="AutoTest.RunAfterBuild">0</value>
<value type="bool" key="AutoTest.UseGlobal">true</value>
<valuemap type="QVariantMap" key="ClangTools">
<value type="bool" key="ClangTools.AnalyzeOpenFiles">true</value>
<value type="bool" key="ClangTools.BuildBeforeAnalysis">true</value>
<value type="QString" key="ClangTools.DiagnosticConfig">Builtin.DefaultTidyAndClazy</value>
<value type="int" key="ClangTools.ParallelJobs">8</value>
<value type="bool" key="ClangTools.PreferConfigFile">true</value>
<valuelist type="QVariantList" key="ClangTools.SelectedDirs"/>
<valuelist type="QVariantList" key="ClangTools.SelectedFiles"/>
<valuelist type="QVariantList" key="ClangTools.SuppressedDiagnostics"/>
<value type="bool" key="ClangTools.UseGlobalSettings">true</value>
</valuemap>
</valuemap>
</data>
<data>
<variable>ProjectExplorer.Project.Target.0</variable>
<valuemap type="QVariantMap">
<value type="QString" key="DeviceType">Desktop</value>
<value type="bool" key="HasPerBcDcs">true</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName">Importiertes Kit</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Importiertes Kit</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">{78ff90a3-f672-45c2-ad08-343b0923896f}</value>
<value type="qlonglong" key="ProjectExplorer.Target.ActiveBuildConfiguration">0</value>
<value type="qlonglong" key="ProjectExplorer.Target.ActiveDeployConfiguration">0</value>
<value type="qlonglong" key="ProjectExplorer.Target.ActiveRunConfiguration">0</value>
<valuemap type="QVariantMap" key="ProjectExplorer.Target.BuildConfiguration.0">
<value type="QString" key="CMake.Build.Type">Debug</value>
<value type="int" key="CMake.Configure.BaseEnvironment">2</value>
<value type="bool" key="CMake.Configure.ClearSystemEnvironment">false</value>
<valuelist type="QVariantList" key="CMake.Configure.UserEnvironmentChanges"/>
<value type="QString" key="CMake.Initial.Parameters">-DCMAKE_CXX_COMPILER:FILEPATH=%{Compiler:Executable:Cxx}
-DCMAKE_COLOR_DIAGNOSTICS:BOOL=ON
-DCMAKE_C_COMPILER:FILEPATH=%{Compiler:Executable:C}
-DCMAKE_PROJECT_INCLUDE_BEFORE:FILEPATH=%{BuildConfig:BuildDirectory:NativeFilePath}/.qtc/package-manager/auto-setup.cmake
-DCMAKE_PREFIX_PATH:PATH=%{Qt:QT_INSTALL_PREFIX}
-DCMAKE_GENERATOR:STRING=Unix Makefiles
-DCMAKE_BUILD_TYPE:STRING=Release
-DQT_QMAKE_EXECUTABLE:FILEPATH=%{Qt:qmakeExecutable}</value>
<value type="QString" key="ProjectExplorer.BuildConfiguration.BuildDirectory">/home/torsten/Programs/yourpart-daemon/build/</value>
<valuemap type="QVariantMap" key="ProjectExplorer.BuildConfiguration.BuildStepList.0">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildStepList.Step.0">
<value type="QString" key="CMakeProjectManager.MakeStep.BuildPreset"></value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.BuildTargets">
<value type="QString">all</value>
</valuelist>
<value type="bool" key="CMakeProjectManager.MakeStep.ClearSystemEnvironment">false</value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.UserEnvironmentChanges"/>
<value type="bool" key="ProjectExplorer.BuildStep.Enabled">true</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Erstellen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">CMakeProjectManager.MakeStep</value>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.BuildStepList.StepsCount">1</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName">Erstellen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Erstellen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.BuildSteps.Build</value>
</valuemap>
<valuemap type="QVariantMap" key="ProjectExplorer.BuildConfiguration.BuildStepList.1">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildStepList.Step.0">
<value type="QString" key="CMakeProjectManager.MakeStep.BuildPreset"></value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.BuildTargets">
<value type="QString">clean</value>
</valuelist>
<value type="bool" key="CMakeProjectManager.MakeStep.ClearSystemEnvironment">false</value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.UserEnvironmentChanges"/>
<value type="bool" key="ProjectExplorer.BuildStep.Enabled">true</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Erstellen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">CMakeProjectManager.MakeStep</value>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.BuildStepList.StepsCount">1</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName">Bereinigen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Bereinigen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.BuildSteps.Clean</value>
</valuemap>
<value type="int" key="ProjectExplorer.BuildConfiguration.BuildStepListCount">2</value>
<value type="bool" key="ProjectExplorer.BuildConfiguration.ClearSystemEnvironment">false</value>
<valuelist type="QVariantList" key="ProjectExplorer.BuildConfiguration.CustomParsers"/>
<value type="bool" key="ProjectExplorer.BuildConfiguration.ParseStandardOutput">false</value>
<valuelist type="QVariantList" key="ProjectExplorer.BuildConfiguration.UserEnvironmentChanges"/>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Release</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">CMakeProjectManager.CMakeBuildConfiguration</value>
<value type="qlonglong" key="ProjectExplorer.Target.ActiveDeployConfiguration">0</value>
<value type="qlonglong" key="ProjectExplorer.Target.ActiveRunConfiguration">0</value>
<valuemap type="QVariantMap" key="ProjectExplorer.Target.DeployConfiguration.0">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildConfiguration.BuildStepList.0">
<value type="qlonglong" key="ProjectExplorer.BuildStepList.StepsCount">0</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName">Deployment</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Deployment</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.BuildSteps.Deploy</value>
</valuemap>
<value type="int" key="ProjectExplorer.BuildConfiguration.BuildStepListCount">1</value>
<valuemap type="QVariantMap" key="ProjectExplorer.DeployConfiguration.CustomData"/>
<value type="bool" key="ProjectExplorer.DeployConfiguration.CustomDataEnabled">false</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.DefaultDeployConfiguration</value>
</valuemap>
<valuemap type="QVariantMap" key="ProjectExplorer.Target.DeployConfiguration.1">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildConfiguration.BuildStepList.0">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildStepList.Step.0">
<value type="QString" key="CMakeProjectManager.MakeStep.BuildPreset"></value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.BuildTargets">
<value type="QString"></value>
</valuelist>
<value type="bool" key="CMakeProjectManager.MakeStep.ClearSystemEnvironment">false</value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.UserEnvironmentChanges"/>
<value type="bool" key="ProjectExplorer.BuildStep.Enabled">true</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ApplicationManagerPlugin.Deploy.CMakePackageStep</value>
</valuemap>
<valuemap type="QVariantMap" key="ProjectExplorer.BuildStepList.Step.1">
<value type="QString" key="ApplicationManagerPlugin.Deploy.InstallPackageStep.Arguments">install-package --acknowledge</value>
<value type="bool" key="ProjectExplorer.BuildStep.Enabled">true</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Application Manager-Paket installieren</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ApplicationManagerPlugin.Deploy.InstallPackageStep</value>
<valuelist type="QVariantList" key="ProjectExplorer.RunConfiguration.LastDeployedFiles"/>
<valuelist type="QVariantList" key="ProjectExplorer.RunConfiguration.LastDeployedHosts"/>
<valuelist type="QVariantList" key="ProjectExplorer.RunConfiguration.LastDeployedRemotePaths"/>
<valuelist type="QVariantList" key="ProjectExplorer.RunConfiguration.LastDeployedSysroots"/>
<valuelist type="QVariantList" key="RemoteLinux.LastDeployedLocalTimes"/>
<valuelist type="QVariantList" key="RemoteLinux.LastDeployedRemoteTimes"/>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.BuildStepList.StepsCount">2</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName">Deployment</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Deployment</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.BuildSteps.Deploy</value>
</valuemap>
<value type="int" key="ProjectExplorer.BuildConfiguration.BuildStepListCount">1</value>
<valuemap type="QVariantMap" key="ProjectExplorer.DeployConfiguration.CustomData"/>
<value type="bool" key="ProjectExplorer.DeployConfiguration.CustomDataEnabled">false</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ApplicationManagerPlugin.Deploy.Configuration</value>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.Target.DeployConfigurationCount">2</value>
<valuemap type="QVariantMap" key="ProjectExplorer.Target.RunConfiguration.0">
<value type="bool" key="Analyzer.Perf.Settings.UseGlobalSettings">true</value>
<value type="bool" key="Analyzer.QmlProfiler.Settings.UseGlobalSettings">true</value>
<value type="int" key="Analyzer.Valgrind.Callgrind.CostFormat">0</value>
<value type="bool" key="Analyzer.Valgrind.Settings.UseGlobalSettings">true</value>
<valuelist type="QVariantList" key="CustomOutputParsers"/>
<value type="int" key="PE.EnvironmentAspect.Base">2</value>
<valuelist type="QVariantList" key="PE.EnvironmentAspect.Changes"/>
<value type="bool" key="PE.EnvironmentAspect.PrintOnRun">false</value>
<value type="QString" key="PerfRecordArgsId">-e cpu-cycles --call-graph dwarf,4096 -F 250</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">yourpart-daemon</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">CMakeProjectManager.CMakeRunConfiguration.</value>
<value type="QString" key="ProjectExplorer.RunConfiguration.BuildKey">yourpart-daemon</value>
<value type="bool" key="ProjectExplorer.RunConfiguration.Customized">false</value>
<value type="bool" key="RunConfiguration.UseCppDebuggerAuto">true</value>
<value type="bool" key="RunConfiguration.UseLibrarySearchPath">true</value>
<value type="bool" key="RunConfiguration.UseQmlDebuggerAuto">true</value>
<value type="QString" key="RunConfiguration.WorkingDirectory.default">/home/torsten/Programs/yourpart-daemon/build</value>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.Target.RunConfigurationCount">1</value>
</valuemap>
<valuemap type="QVariantMap" key="ProjectExplorer.Target.BuildConfiguration.1">
<value type="QString" key="CMake.Build.Type">Debug</value>
<value type="int" key="CMake.Configure.BaseEnvironment">2</value>
<value type="bool" key="CMake.Configure.ClearSystemEnvironment">false</value>
<valuelist type="QVariantList" key="CMake.Configure.UserEnvironmentChanges"/>
<value type="QString" key="CMake.Initial.Parameters">-DCMAKE_CXX_COMPILER:FILEPATH=%{Compiler:Executable:Cxx}
-DCMAKE_COLOR_DIAGNOSTICS:BOOL=ON
-DCMAKE_C_COMPILER:FILEPATH=%{Compiler:Executable:C}
-DCMAKE_PROJECT_INCLUDE_BEFORE:FILEPATH=%{BuildConfig:BuildDirectory:NativeFilePath}/.qtc/package-manager/auto-setup.cmake
-DCMAKE_PREFIX_PATH:PATH=%{Qt:QT_INSTALL_PREFIX}
-DCMAKE_GENERATOR:STRING=Unix Makefiles
-DCMAKE_BUILD_TYPE:STRING=Debug
-DQT_QMAKE_EXECUTABLE:FILEPATH=%{Qt:qmakeExecutable}</value>
<value type="QString" key="CMake.Source.Directory">/mnt/share/torsten/Programs/yourpart-daemon</value>
<value type="QString" key="ProjectExplorer.BuildConfiguration.BuildDirectory">/home/torsten/Programs/yourpart-daemon/build</value>
<valuemap type="QVariantMap" key="ProjectExplorer.BuildConfiguration.BuildStepList.0">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildStepList.Step.0">
<value type="QString" key="CMakeProjectManager.MakeStep.BuildPreset"></value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.BuildTargets">
<value type="QString">all</value>
</valuelist>
<value type="bool" key="CMakeProjectManager.MakeStep.ClearSystemEnvironment">false</value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.UserEnvironmentChanges"/>
<value type="bool" key="ProjectExplorer.BuildStep.Enabled">true</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">CMakeProjectManager.MakeStep</value>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.BuildStepList.StepsCount">1</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName">Erstellen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Erstellen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.BuildSteps.Build</value>
</valuemap>
<valuemap type="QVariantMap" key="ProjectExplorer.BuildConfiguration.BuildStepList.1">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildStepList.Step.0">
<value type="QString" key="CMakeProjectManager.MakeStep.BuildPreset"></value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.BuildTargets">
<value type="QString">clean</value>
</valuelist>
<value type="bool" key="CMakeProjectManager.MakeStep.ClearSystemEnvironment">false</value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.UserEnvironmentChanges"/>
<value type="bool" key="ProjectExplorer.BuildStep.Enabled">true</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">CMakeProjectManager.MakeStep</value>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.BuildStepList.StepsCount">1</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName">Bereinigen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Bereinigen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.BuildSteps.Clean</value>
</valuemap>
<value type="int" key="ProjectExplorer.BuildConfiguration.BuildStepListCount">2</value>
<value type="bool" key="ProjectExplorer.BuildConfiguration.ClearSystemEnvironment">false</value>
<valuelist type="QVariantList" key="ProjectExplorer.BuildConfiguration.CustomParsers"/>
<value type="bool" key="ProjectExplorer.BuildConfiguration.ParseStandardOutput">false</value>
<valuelist type="QVariantList" key="ProjectExplorer.BuildConfiguration.UserEnvironmentChanges"/>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Debug (importiert)</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">CMakeProjectManager.CMakeBuildConfiguration</value>
<value type="qlonglong" key="ProjectExplorer.Target.ActiveDeployConfiguration">0</value>
<value type="qlonglong" key="ProjectExplorer.Target.ActiveRunConfiguration">-1</value>
<valuemap type="QVariantMap" key="ProjectExplorer.Target.DeployConfiguration.0">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildConfiguration.BuildStepList.0">
<value type="qlonglong" key="ProjectExplorer.BuildStepList.StepsCount">0</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName">Deployment</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Deployment</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.BuildSteps.Deploy</value>
</valuemap>
<value type="int" key="ProjectExplorer.BuildConfiguration.BuildStepListCount">1</value>
<valuemap type="QVariantMap" key="ProjectExplorer.DeployConfiguration.CustomData"/>
<value type="bool" key="ProjectExplorer.DeployConfiguration.CustomDataEnabled">false</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.DefaultDeployConfiguration</value>
</valuemap>
<valuemap type="QVariantMap" key="ProjectExplorer.Target.DeployConfiguration.1">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildConfiguration.BuildStepList.0">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildStepList.Step.0">
<value type="QString" key="CMakeProjectManager.MakeStep.BuildPreset"></value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.BuildTargets">
<value type="QString">install</value>
</valuelist>
<value type="bool" key="CMakeProjectManager.MakeStep.ClearSystemEnvironment">false</value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.UserEnvironmentChanges"/>
<value type="bool" key="ProjectExplorer.BuildStep.Enabled">true</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ApplicationManagerPlugin.Deploy.CMakePackageStep</value>
</valuemap>
<valuemap type="QVariantMap" key="ProjectExplorer.BuildStepList.Step.1">
<value type="QString" key="ApplicationManagerPlugin.Deploy.InstallPackageStep.Arguments">install-package --acknowledge</value>
<value type="bool" key="ProjectExplorer.BuildStep.Enabled">true</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Application Manager-Paket installieren</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ApplicationManagerPlugin.Deploy.InstallPackageStep</value>
<valuelist type="QVariantList" key="ProjectExplorer.RunConfiguration.LastDeployedFiles"/>
<valuelist type="QVariantList" key="ProjectExplorer.RunConfiguration.LastDeployedHosts"/>
<valuelist type="QVariantList" key="ProjectExplorer.RunConfiguration.LastDeployedRemotePaths"/>
<valuelist type="QVariantList" key="ProjectExplorer.RunConfiguration.LastDeployedSysroots"/>
<valuelist type="QVariantList" key="RemoteLinux.LastDeployedLocalTimes"/>
<valuelist type="QVariantList" key="RemoteLinux.LastDeployedRemoteTimes"/>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.BuildStepList.StepsCount">2</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName">Deployment</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Deployment</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.BuildSteps.Deploy</value>
</valuemap>
<value type="int" key="ProjectExplorer.BuildConfiguration.BuildStepListCount">1</value>
<valuemap type="QVariantMap" key="ProjectExplorer.DeployConfiguration.CustomData"/>
<value type="bool" key="ProjectExplorer.DeployConfiguration.CustomDataEnabled">false</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ApplicationManagerPlugin.Deploy.Configuration</value>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.Target.DeployConfigurationCount">2</value>
<value type="qlonglong" key="ProjectExplorer.Target.RunConfigurationCount">0</value>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.Target.BuildConfigurationCount">2</value>
<valuemap type="QVariantMap" key="ProjectExplorer.Target.DeployConfiguration.0">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildConfiguration.BuildStepList.0">
<value type="qlonglong" key="ProjectExplorer.BuildStepList.StepsCount">0</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName">Deployment</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Deployment</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.BuildSteps.Deploy</value>
</valuemap>
<value type="int" key="ProjectExplorer.BuildConfiguration.BuildStepListCount">1</value>
<valuemap type="QVariantMap" key="ProjectExplorer.DeployConfiguration.CustomData"/>
<value type="bool" key="ProjectExplorer.DeployConfiguration.CustomDataEnabled">false</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.DefaultDeployConfiguration</value>
</valuemap>
<valuemap type="QVariantMap" key="ProjectExplorer.Target.DeployConfiguration.1">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildConfiguration.BuildStepList.0">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildStepList.Step.0">
<value type="QString" key="CMakeProjectManager.MakeStep.BuildPreset"></value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.BuildTargets">
<value type="QString"></value>
</valuelist>
<value type="bool" key="CMakeProjectManager.MakeStep.ClearSystemEnvironment">false</value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.UserEnvironmentChanges"/>
<value type="bool" key="ProjectExplorer.BuildStep.Enabled">true</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ApplicationManagerPlugin.Deploy.CMakePackageStep</value>
</valuemap>
<valuemap type="QVariantMap" key="ProjectExplorer.BuildStepList.Step.1">
<value type="QString" key="ApplicationManagerPlugin.Deploy.InstallPackageStep.Arguments">install-package --acknowledge</value>
<value type="bool" key="ProjectExplorer.BuildStep.Enabled">true</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Application Manager-Paket installieren</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ApplicationManagerPlugin.Deploy.InstallPackageStep</value>
<valuelist type="QVariantList" key="ProjectExplorer.RunConfiguration.LastDeployedFiles"/>
<valuelist type="QVariantList" key="ProjectExplorer.RunConfiguration.LastDeployedHosts"/>
<valuelist type="QVariantList" key="ProjectExplorer.RunConfiguration.LastDeployedRemotePaths"/>
<valuelist type="QVariantList" key="ProjectExplorer.RunConfiguration.LastDeployedSysroots"/>
<valuelist type="QVariantList" key="RemoteLinux.LastDeployedLocalTimes"/>
<valuelist type="QVariantList" key="RemoteLinux.LastDeployedRemoteTimes"/>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.BuildStepList.StepsCount">2</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName">Deployment</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Deployment</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.BuildSteps.Deploy</value>
</valuemap>
<value type="int" key="ProjectExplorer.BuildConfiguration.BuildStepListCount">1</value>
<valuemap type="QVariantMap" key="ProjectExplorer.DeployConfiguration.CustomData"/>
<value type="bool" key="ProjectExplorer.DeployConfiguration.CustomDataEnabled">false</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ApplicationManagerPlugin.Deploy.Configuration</value>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.Target.DeployConfigurationCount">2</value>
<valuemap type="QVariantMap" key="ProjectExplorer.Target.RunConfiguration.0">
<value type="bool" key="Analyzer.Perf.Settings.UseGlobalSettings">true</value>
<value type="bool" key="Analyzer.QmlProfiler.Settings.UseGlobalSettings">true</value>
<value type="int" key="Analyzer.Valgrind.Callgrind.CostFormat">0</value>
<value type="bool" key="Analyzer.Valgrind.Settings.UseGlobalSettings">true</value>
<valuelist type="QVariantList" key="CustomOutputParsers"/>
<value type="int" key="PE.EnvironmentAspect.Base">2</value>
<valuelist type="QVariantList" key="PE.EnvironmentAspect.Changes"/>
<value type="bool" key="PE.EnvironmentAspect.PrintOnRun">false</value>
<value type="QString" key="PerfRecordArgsId">-e cpu-cycles --call-graph dwarf,4096 -F 250</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">yourpart-daemon</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">CMakeProjectManager.CMakeRunConfiguration.</value>
<value type="QString" key="ProjectExplorer.RunConfiguration.BuildKey">yourpart-daemon</value>
<value type="bool" key="ProjectExplorer.RunConfiguration.Customized">false</value>
<value type="bool" key="RunConfiguration.UseCppDebuggerAuto">true</value>
<value type="bool" key="RunConfiguration.UseLibrarySearchPath">true</value>
<value type="bool" key="RunConfiguration.UseQmlDebuggerAuto">true</value>
<value type="QString" key="RunConfiguration.WorkingDirectory.default">/home/torsten/Programs/yourpart-daemon/build</value>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.Target.RunConfigurationCount">1</value>
</valuemap>
</data>
<data>
<variable>ProjectExplorer.Project.TargetCount</variable>
<value type="qlonglong">1</value>
</data>
<data>
<variable>ProjectExplorer.Project.Updater.FileVersion</variable>
<value type="int">22</value>
</data>
<data>
<variable>Version</variable>
<value type="int">22</value>
</data>
</qtcreator>


@@ -1,205 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE QtCreatorProject>
<!-- Written by QtCreator 12.0.2, 2025-07-18T07:45:58. -->
<qtcreator>
<data>
<variable>EnvironmentId</variable>
<value type="QByteArray">{d36652ff-969b-426b-a63f-1edd325096c5}</value>
</data>
<data>
<variable>ProjectExplorer.Project.ActiveTarget</variable>
<value type="qlonglong">0</value>
</data>
<data>
<variable>ProjectExplorer.Project.EditorSettings</variable>
<valuemap type="QVariantMap">
<value type="bool" key="EditorConfiguration.AutoIndent">true</value>
<value type="bool" key="EditorConfiguration.AutoSpacesForTabs">false</value>
<value type="bool" key="EditorConfiguration.CamelCaseNavigation">true</value>
<valuemap type="QVariantMap" key="EditorConfiguration.CodeStyle.0">
<value type="QString" key="language">Cpp</value>
<valuemap type="QVariantMap" key="value">
<value type="QByteArray" key="CurrentPreferences">CppGlobal</value>
</valuemap>
</valuemap>
<valuemap type="QVariantMap" key="EditorConfiguration.CodeStyle.1">
<value type="QString" key="language">QmlJS</value>
<valuemap type="QVariantMap" key="value">
<value type="QByteArray" key="CurrentPreferences">QmlJSGlobal</value>
</valuemap>
</valuemap>
<value type="qlonglong" key="EditorConfiguration.CodeStyle.Count">2</value>
<value type="QByteArray" key="EditorConfiguration.Codec">UTF-8</value>
<value type="bool" key="EditorConfiguration.ConstrainTooltips">false</value>
<value type="int" key="EditorConfiguration.IndentSize">4</value>
<value type="bool" key="EditorConfiguration.KeyboardTooltips">false</value>
<value type="int" key="EditorConfiguration.MarginColumn">80</value>
<value type="bool" key="EditorConfiguration.MouseHiding">true</value>
<value type="bool" key="EditorConfiguration.MouseNavigation">true</value>
<value type="int" key="EditorConfiguration.PaddingMode">1</value>
<value type="int" key="EditorConfiguration.PreferAfterWhitespaceComments">0</value>
<value type="bool" key="EditorConfiguration.PreferSingleLineComments">false</value>
<value type="bool" key="EditorConfiguration.ScrollWheelZooming">true</value>
<value type="bool" key="EditorConfiguration.ShowMargin">false</value>
<value type="int" key="EditorConfiguration.SmartBackspaceBehavior">0</value>
<value type="bool" key="EditorConfiguration.SmartSelectionChanging">true</value>
<value type="bool" key="EditorConfiguration.SpacesForTabs">true</value>
<value type="int" key="EditorConfiguration.TabKeyBehavior">0</value>
<value type="int" key="EditorConfiguration.TabSize">8</value>
<value type="bool" key="EditorConfiguration.UseGlobal">true</value>
<value type="bool" key="EditorConfiguration.UseIndenter">false</value>
<value type="int" key="EditorConfiguration.Utf8BomBehavior">1</value>
<value type="bool" key="EditorConfiguration.addFinalNewLine">true</value>
<value type="bool" key="EditorConfiguration.cleanIndentation">true</value>
<value type="bool" key="EditorConfiguration.cleanWhitespace">true</value>
<value type="QString" key="EditorConfiguration.ignoreFileTypes">*.md, *.MD, Makefile</value>
<value type="bool" key="EditorConfiguration.inEntireDocument">false</value>
<value type="bool" key="EditorConfiguration.skipTrailingWhitespace">true</value>
<value type="bool" key="EditorConfiguration.tintMarginArea">true</value>
</valuemap>
</data>
<data>
<variable>ProjectExplorer.Project.PluginSettings</variable>
<valuemap type="QVariantMap">
<valuemap type="QVariantMap" key="AutoTest.ActiveFrameworks">
<value type="bool" key="AutoTest.Framework.Boost">true</value>
<value type="bool" key="AutoTest.Framework.CTest">false</value>
<value type="bool" key="AutoTest.Framework.Catch">true</value>
<value type="bool" key="AutoTest.Framework.GTest">true</value>
<value type="bool" key="AutoTest.Framework.QtQuickTest">true</value>
<value type="bool" key="AutoTest.Framework.QtTest">true</value>
</valuemap>
<valuemap type="QVariantMap" key="AutoTest.CheckStates"/>
<value type="int" key="AutoTest.RunAfterBuild">0</value>
<value type="bool" key="AutoTest.UseGlobal">true</value>
<valuemap type="QVariantMap" key="ClangTools">
<value type="bool" key="ClangTools.AnalyzeOpenFiles">true</value>
<value type="bool" key="ClangTools.BuildBeforeAnalysis">true</value>
<value type="QString" key="ClangTools.DiagnosticConfig">Builtin.DefaultTidyAndClazy</value>
<value type="int" key="ClangTools.ParallelJobs">8</value>
<value type="bool" key="ClangTools.PreferConfigFile">true</value>
<valuelist type="QVariantList" key="ClangTools.SelectedDirs"/>
<valuelist type="QVariantList" key="ClangTools.SelectedFiles"/>
<valuelist type="QVariantList" key="ClangTools.SuppressedDiagnostics"/>
<value type="bool" key="ClangTools.UseGlobalSettings">true</value>
</valuemap>
<valuemap type="QVariantMap" key="CppEditor.QuickFix">
<value type="bool" key="UseGlobalSettings">true</value>
</valuemap>
</valuemap>
</data>
<data>
<variable>ProjectExplorer.Project.Target.0</variable>
<valuemap type="QVariantMap">
<value type="QString" key="DeviceType">Desktop</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName">Importiertes Kit</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Importiertes Kit</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">{3c6cfc13-714d-4db1-bd45-b9794643cc67}</value>
<value type="qlonglong" key="ProjectExplorer.Target.ActiveBuildConfiguration">0</value>
<value type="qlonglong" key="ProjectExplorer.Target.ActiveDeployConfiguration">0</value>
<value type="qlonglong" key="ProjectExplorer.Target.ActiveRunConfiguration">0</value>
<valuemap type="QVariantMap" key="ProjectExplorer.Target.BuildConfiguration.0">
<value type="QString" key="CMake.Build.Type">Debug</value>
<value type="int" key="CMake.Configure.BaseEnvironment">2</value>
<value type="bool" key="CMake.Configure.ClearSystemEnvironment">false</value>
<valuelist type="QVariantList" key="CMake.Configure.UserEnvironmentChanges"/>
<value type="QString" key="CMake.Initial.Parameters">-DCMAKE_GENERATOR:STRING=Unix Makefiles
-DCMAKE_BUILD_TYPE:STRING=Build
-DCMAKE_PROJECT_INCLUDE_BEFORE:FILEPATH=%{BuildConfig:BuildDirectory:NativeFilePath}/.qtc/package-manager/auto-setup.cmake
-DQT_QMAKE_EXECUTABLE:FILEPATH=%{Qt:qmakeExecutable}
-DCMAKE_PREFIX_PATH:PATH=%{Qt:QT_INSTALL_PREFIX}
-DCMAKE_C_COMPILER:FILEPATH=%{Compiler:Executable:C}
-DCMAKE_CXX_COMPILER:FILEPATH=%{Compiler:Executable:Cxx}</value>
<value type="QString" key="CMake.Source.Directory">/home/torsten/Programs/yourpart-daemon</value>
<value type="QString" key="ProjectExplorer.BuildConfiguration.BuildDirectory">/home/torsten/Programs/yourpart-daemon/build</value>
<valuemap type="QVariantMap" key="ProjectExplorer.BuildConfiguration.BuildStepList.0">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildStepList.Step.0">
<value type="QString" key="CMakeProjectManager.MakeStep.BuildPreset"></value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.BuildTargets">
<value type="QString">all</value>
</valuelist>
<value type="bool" key="CMakeProjectManager.MakeStep.ClearSystemEnvironment">false</value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.UserEnvironmentChanges"/>
<value type="bool" key="ProjectExplorer.BuildStep.Enabled">true</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Erstellen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">CMakeProjectManager.MakeStep</value>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.BuildStepList.StepsCount">1</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName">Erstellen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Erstellen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.BuildSteps.Build</value>
</valuemap>
<valuemap type="QVariantMap" key="ProjectExplorer.BuildConfiguration.BuildStepList.1">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildStepList.Step.0">
<value type="QString" key="CMakeProjectManager.MakeStep.BuildPreset"></value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.BuildTargets">
<value type="QString">clean</value>
</valuelist>
<value type="bool" key="CMakeProjectManager.MakeStep.ClearSystemEnvironment">false</value>
<valuelist type="QVariantList" key="CMakeProjectManager.MakeStep.UserEnvironmentChanges"/>
<value type="bool" key="ProjectExplorer.BuildStep.Enabled">true</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Erstellen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">CMakeProjectManager.MakeStep</value>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.BuildStepList.StepsCount">1</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName">Bereinigen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Bereinigen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.BuildSteps.Clean</value>
</valuemap>
<value type="int" key="ProjectExplorer.BuildConfiguration.BuildStepListCount">2</value>
<value type="bool" key="ProjectExplorer.BuildConfiguration.ClearSystemEnvironment">false</value>
<valuelist type="QVariantList" key="ProjectExplorer.BuildConfiguration.CustomParsers"/>
<value type="bool" key="ProjectExplorer.BuildConfiguration.ParseStandardOutput">false</value>
<valuelist type="QVariantList" key="ProjectExplorer.BuildConfiguration.UserEnvironmentChanges"/>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Erstellen</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">CMakeProjectManager.CMakeBuildConfiguration</value>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.Target.BuildConfigurationCount">1</value>
<valuemap type="QVariantMap" key="ProjectExplorer.Target.DeployConfiguration.0">
<valuemap type="QVariantMap" key="ProjectExplorer.BuildConfiguration.BuildStepList.0">
<value type="qlonglong" key="ProjectExplorer.BuildStepList.StepsCount">0</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DefaultDisplayName">Deployment</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">Deployment</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.BuildSteps.Deploy</value>
</valuemap>
<value type="int" key="ProjectExplorer.BuildConfiguration.BuildStepListCount">1</value>
<valuemap type="QVariantMap" key="ProjectExplorer.DeployConfiguration.CustomData"/>
<value type="bool" key="ProjectExplorer.DeployConfiguration.CustomDataEnabled">false</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">ProjectExplorer.DefaultDeployConfiguration</value>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.Target.DeployConfigurationCount">1</value>
<valuemap type="QVariantMap" key="ProjectExplorer.Target.RunConfiguration.0">
<value type="bool" key="Analyzer.Perf.Settings.UseGlobalSettings">true</value>
<value type="bool" key="Analyzer.QmlProfiler.Settings.UseGlobalSettings">true</value>
<value type="int" key="Analyzer.Valgrind.Callgrind.CostFormat">0</value>
<value type="bool" key="Analyzer.Valgrind.Settings.UseGlobalSettings">true</value>
<value type="QString" key="Analyzer.Valgrind.ValgrindExecutable">/usr/bin/valgrind</value>
<valuelist type="QVariantList" key="CustomOutputParsers"/>
<value type="int" key="PE.EnvironmentAspect.Base">2</value>
<valuelist type="QVariantList" key="PE.EnvironmentAspect.Changes"/>
<value type="bool" key="PE.EnvironmentAspect.PrintOnRun">false</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.DisplayName">yourpart-daemon</value>
<value type="QString" key="ProjectExplorer.ProjectConfiguration.Id">CMakeProjectManager.CMakeRunConfiguration.yourpart-daemon</value>
<value type="QString" key="ProjectExplorer.RunConfiguration.BuildKey">yourpart-daemon</value>
<value type="bool" key="ProjectExplorer.RunConfiguration.Customized">false</value>
<value type="bool" key="RunConfiguration.UseCppDebuggerAuto">true</value>
<value type="bool" key="RunConfiguration.UseLibrarySearchPath">true</value>
<value type="bool" key="RunConfiguration.UseQmlDebuggerAuto">true</value>
<value type="QString" key="RunConfiguration.WorkingDirectory.default">/home/torsten/Programs/yourpart-daemon/build</value>
</valuemap>
<value type="qlonglong" key="ProjectExplorer.Target.RunConfigurationCount">1</value>
</valuemap>
</data>
<data>
<variable>ProjectExplorer.Project.TargetCount</variable>
<value type="qlonglong">1</value>
</data>
<data>
<variable>ProjectExplorer.Project.Updater.FileVersion</variable>
<value type="int">22</value>
</data>
<data>
<variable>Version</variable>
<value type="int">22</value>
</data>
</qtcreator>


@@ -1,168 +0,0 @@
# SSL/TLS Setup for the YourPart Daemon
This document describes how to set up SSL/TLS certificates for the YourPart Daemon.
## 🚀 Quick start
### 1. Self-signed certificate (development/testing)
```bash
./setup-ssl.sh
# Choose option 1
```
### 2. Let's Encrypt certificate (production)
```bash
./setup-ssl.sh
# Choose option 2
```
### 3. Use Apache2 certificates (recommended for Ubuntu)
```bash
./setup-ssl.sh
# Choose option 4
# Reuses existing Apache2 certificates
# ⚠️ Warning for snakeoil certificates (localhost only)
```
### 4. DNS-01 challenge (for complex setups)
```bash
./setup-ssl-dns.sh
# For Cloudflare, Route53, etc.
```
## 📋 Prerequisites
### For Apache2 certificates:
- Apache2 installed, or certificates in the standard paths
- Supported paths (prioritized by quality):
- `/etc/letsencrypt/live/your-part.de/fullchain.pem` (Let's Encrypt - recommended)
- `/etc/letsencrypt/live/$(hostname)/fullchain.pem` (Let's Encrypt)
- `/etc/apache2/ssl/apache.crt` (custom Apache2)
- `/etc/ssl/certs/ssl-cert-snakeoil.pem` (Ubuntu default - localhost only)
### For Let's Encrypt (HTTP-01 challenge):
- Port 80 must be available
- The domain `your-part.de` must point to the server
- No other service on port 80
### For the DNS-01 challenge:
- DNS provider account (Cloudflare, Route53, etc.)
- API credentials for DNS management
## 🔧 Configuration
After creating the certificates:
1. **Enable SSL in the configuration:**
```ini
# /etc/yourpart/daemon.conf
WEBSOCKET_SSL_ENABLED=true
WEBSOCKET_SSL_CERT_PATH=/etc/yourpart/server.crt
WEBSOCKET_SSL_KEY_PATH=/etc/yourpart/server.key
```
2. **Restart the daemon:**
```bash
sudo systemctl restart yourpart-daemon
```
3. **Test the connection:**
```bash
# WebSocket Secure
wss://your-part.de:4551
# Or without SSL
ws://your-part.de:4551
```
## 🔄 Automatic renewal
### Let's Encrypt certificates:
- **Cron job:** daily at 2:30 a.m.
- **Script:** `/etc/yourpart/renew-ssl.sh`
- **Log:** `/var/log/yourpart/ssl-renewal.log`
### Apache2 certificates:
- **Ubuntu snakeoil:** managed automatically by Apache2
- **Let's Encrypt:** renewed automatically when detected
- **Custom:** manual management required
## 📁 File structure
```
/etc/yourpart/
├── server.crt # certificate (symlink to Let's Encrypt)
├── server.key # private key (symlink to Let's Encrypt)
├── renew-ssl.sh # auto-renewal script
└── cloudflare.ini # Cloudflare credentials (if used)
/etc/letsencrypt/live/your-part.de/
├── fullchain.pem # full certificate chain
├── privkey.pem # private key
├── cert.pem # certificate
└── chain.pem # intermediate certificate
```
## 🛠️ Troubleshooting
### Certificate is not accepted
```bash
# Check certificate validity
openssl x509 -in /etc/yourpart/server.crt -text -noout
# Check permissions
ls -la /etc/yourpart/server.*
```
### Let's Encrypt challenge failed
```bash
# Check port 80
sudo netstat -tlnp | grep :80
# Check DNS
nslookup your-part.de
# Check the firewall
sudo ufw status
```
### Auto-renewal does not work
```bash
# Check the cron jobs
sudo crontab -l
# Test the renewal script
sudo /etc/yourpart/renew-ssl.sh
# Check the logs
tail -f /var/log/yourpart/ssl-renewal.log
```
## 🔒 Security
### Permissions
- **Certificate:** `644` (readable by all, writable by owner)
- **Private key:** `600` (readable/writable by owner only)
- **Owner:** `yourpart:yourpart`
### Firewall
```bash
# Open port 80 for the Let's Encrypt challenge
sudo ufw allow 80/tcp
# Open port 4551 for the WebSocket
sudo ufw allow 4551/tcp
```
## 📚 Further information
- [Let's Encrypt documentation](https://letsencrypt.org/docs/)
- [Certbot documentation](https://certbot.eff.org/docs/)
- [libwebsockets SSL](https://libwebsockets.org/lws-api-doc-master/html/group__ssl.html)
## 🆘 Support
If you run into problems:
1. Check the logs: `sudo journalctl -u yourpart-daemon -f`
2. Test the certificates: `openssl s_client -connect your-part.de:4551`
3. Check the firewall: `sudo ufw status`


@@ -162,18 +162,6 @@ class FalukantController {
   }
   return this.service.getProductPriceInRegion(userId, productId, regionId);
 });
-this.getProductPricesInRegionBatch = this._wrapWithUser((userId, req) => {
-  const productIds = req.query.productIds;
-  const regionId = parseInt(req.query.regionId, 10);
-  if (!productIds || Number.isNaN(regionId)) {
-    throw new Error('productIds (comma-separated) and regionId are required');
-  }
-  const productIdArray = productIds.split(',').map(id => parseInt(id.trim(), 10)).filter(id => !Number.isNaN(id));
-  if (productIdArray.length === 0) {
-    throw new Error('At least one valid productId is required');
-  }
-  return this.service.getProductPricesInRegionBatch(userId, productIdArray, regionId);
-});
 this.getProductPricesInCities = this._wrapWithUser((userId, req) => {
   const productId = parseInt(req.query.productId, 10);
   const currentPrice = parseFloat(req.query.currentPrice);


@@ -25,13 +25,11 @@ function createServer() {
     ca: TLS_CA_PATH ? fs.readFileSync(TLS_CA_PATH) : undefined,
   });
   wss = new WebSocketServer({ server: httpsServer });
-  // Direct connection: listen on all interfaces (0.0.0.0)
   httpsServer.listen(PORT, '0.0.0.0', () => {
     console.log(`[Daemon] WSS (TLS) Server gestartet auf Port ${PORT}`);
   });
 } else {
-  // Direct connection: listen on all interfaces (0.0.0.0)
-  wss = new WebSocketServer({ port: PORT, host: '0.0.0.0' });
+  wss = new WebSocketServer({ port: PORT });
   console.log(`[Daemon] WS (ohne TLS) Server startet auf Port ${PORT} ...`);
 }


@@ -1,13 +0,0 @@
-- Rollback: Remove indexes for director proposals and character queries
-- Created: 2026-01-12
DROP INDEX IF EXISTS falukant_data.idx_character_region_user_created;
DROP INDEX IF EXISTS falukant_data.idx_character_region_user;
DROP INDEX IF EXISTS falukant_data.idx_character_user_id;
DROP INDEX IF EXISTS falukant_data.idx_director_proposal_employer_character;
DROP INDEX IF EXISTS falukant_data.idx_director_character_id;
DROP INDEX IF EXISTS falukant_data.idx_director_employer_user_id;
DROP INDEX IF EXISTS falukant_data.idx_knowledge_character_id;
DROP INDEX IF EXISTS falukant_data.idx_relationship_character1_id;
DROP INDEX IF EXISTS falukant_data.idx_child_relation_father_id;
DROP INDEX IF EXISTS falukant_data.idx_child_relation_mother_id;


@@ -1,43 +0,0 @@
-- Migration: Add indexes for director proposals and character queries
-- Created: 2026-01-12
-- Index for fast lookups of NPCs in a region (with age restriction)
CREATE INDEX IF NOT EXISTS idx_character_region_user_created
ON falukant_data.character (region_id, user_id, created_at)
WHERE user_id IS NULL;
-- Index for fast lookups of NPCs without age restriction
CREATE INDEX IF NOT EXISTS idx_character_region_user
ON falukant_data.character (region_id, user_id)
WHERE user_id IS NULL;
-- Index for character lookups by user_id (important for getFamily, getDirectorForBranch)
CREATE INDEX IF NOT EXISTS idx_character_user_id
ON falukant_data.character (user_id);
-- Index for director proposals
CREATE INDEX IF NOT EXISTS idx_director_proposal_employer_character
ON falukant_data.director_proposal (employer_user_id, director_character_id);
-- Index for active directors
CREATE INDEX IF NOT EXISTS idx_director_character_id
ON falukant_data.director (director_character_id);
-- Index for director lookups by employer_user_id
CREATE INDEX IF NOT EXISTS idx_director_employer_user_id
ON falukant_data.director (employer_user_id);
-- Index for the knowledge calculation
CREATE INDEX IF NOT EXISTS idx_knowledge_character_id
ON falukant_data.knowledge (character_id);
-- Index for relationships (getFamily)
CREATE INDEX IF NOT EXISTS idx_relationship_character1_id
ON falukant_data.relationship (character1_id);
-- Index for child relations (getFamily)
CREATE INDEX IF NOT EXISTS idx_child_relation_father_id
ON falukant_data.child_relation (father_id);
CREATE INDEX IF NOT EXISTS idx_child_relation_mother_id
ON falukant_data.child_relation (mother_id);
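
As a hedged illustration of what these indexes target: the proposal-generation path looks up NPC characters per region with an age cutoff, which is exactly the shape the partial index `idx_character_region_user_created` covers. The query below is a sketch only — table and column names come from the migration above, while the service wiring, limit, and cutoff handling are assumptions.

```javascript
// Sketch of the NPC lookup that idx_character_region_user_created is meant to serve.
// Table/column names are taken from the migration; the service wiring is assumed.
import { QueryTypes } from 'sequelize';

async function findEligibleNpcs(sequelize, regionId, minAgeDays) {
  const cutoff = new Date(Date.now() - minAgeDays * 24 * 60 * 60 * 1000);
  return sequelize.query(
    `SELECT id, region_id, created_at
       FROM falukant_data.character
      WHERE region_id = :regionId
        AND user_id IS NULL          -- matches the partial index predicate
        AND created_at <= :cutoff    -- covered by (region_id, user_id, created_at)
      ORDER BY created_at
      LIMIT 50`,
    { replacements: { regionId, cutoff }, type: QueryTypes.SELECT }
  );
}
```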

View File

@@ -76,7 +76,6 @@ router.get('/politics/open', falukantController.getOpenPolitics);
router.post('/politics/open', falukantController.applyForElections);
router.get('/cities', falukantController.getRegions);
router.get('/products/price-in-region', falukantController.getProductPriceInRegion);
-router.get('/products/prices-in-region-batch', falukantController.getProductPricesInRegionBatch);
router.get('/products/prices-in-cities', falukantController.getProductPricesInCities);
router.get('/branches/:branchId/taxes', falukantController.getBranchTaxes);
router.get('/vehicles/types', falukantController.getVehicleTypes);

View File

@@ -1,55 +1,19 @@
import './config/loadEnv.js'; // .env deterministisch laden
import http from 'http';
-import https from 'https';
-import fs from 'fs';
import app from './app.js';
import { setupWebSocket } from './utils/socket.js';
import { syncDatabase } from './utils/syncDatabase.js';
-// HTTP-Server für API (Port 2020, intern, über Apache-Proxy)
-const API_PORT = Number.parseInt(process.env.PORT || '2020', 10);
-const httpServer = http.createServer(app);
-// Socket.io wird nur auf HTTPS-Server bereitgestellt, nicht auf HTTP-Server
-// setupWebSocket(httpServer); // Entfernt: Socket.io nur über HTTPS
-// HTTPS-Server für Socket.io (Port 4443, direkt erreichbar)
-let httpsServer = null;
-const SOCKET_IO_PORT = Number.parseInt(process.env.SOCKET_IO_PORT || '4443', 10);
-const USE_TLS = process.env.SOCKET_IO_TLS === '1';
-const TLS_KEY_PATH = process.env.SOCKET_IO_TLS_KEY_PATH;
-const TLS_CERT_PATH = process.env.SOCKET_IO_TLS_CERT_PATH;
-const TLS_CA_PATH = process.env.SOCKET_IO_TLS_CA_PATH;
-if (USE_TLS && TLS_KEY_PATH && TLS_CERT_PATH) {
-  try {
-    httpsServer = https.createServer({
-      key: fs.readFileSync(TLS_KEY_PATH),
-      cert: fs.readFileSync(TLS_CERT_PATH),
-      ca: TLS_CA_PATH ? fs.readFileSync(TLS_CA_PATH) : undefined,
-    }, app);
-    setupWebSocket(httpsServer);
-    console.log(`[Socket.io] HTTPS-Server für Socket.io konfiguriert auf Port ${SOCKET_IO_PORT}`);
-  } catch (err) {
-    console.error('[Socket.io] Fehler beim Laden der TLS-Zertifikate:', err.message);
-    console.error('[Socket.io] Socket.io wird nicht verfügbar sein');
-  }
-} else {
-  console.warn('[Socket.io] TLS nicht konfiguriert - Socket.io wird nicht verfügbar sein');
-}
+const server = http.createServer(app);
+setupWebSocket(server);
syncDatabase().then(() => {
-  // API-Server auf Port 2020 (intern, nur localhost)
-  httpServer.listen(API_PORT, '127.0.0.1', () => {
-    console.log(`[API] HTTP-Server läuft auf localhost:${API_PORT} (intern, über Apache-Proxy)`);
+  const port = process.env.PORT || 3001;
+  server.listen(port, () => {
+    console.log('Server is running on port', port);
  });
-  // Socket.io-Server auf Port 4443 (extern, direkt erreichbar)
-  if (httpsServer) {
-    httpsServer.listen(SOCKET_IO_PORT, '0.0.0.0', () => {
-      console.log(`[Socket.io] HTTPS-Server läuft auf Port ${SOCKET_IO_PORT} (direkt erreichbar)`);
-    });
-  }
}).catch(err => {
  console.error('Failed to sync database:', err);
  process.exit(1);

File diff suppressed because it is too large

View File

@@ -1,78 +0,0 @@
#!/bin/bash
# YourPart Daemon Local Build Script für OpenSUSE Tumbleweed
# Führen Sie dieses Script lokal auf Ihrem Entwicklungsrechner aus
set -euo pipefail
# Farben für Output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
log_info "Starte lokalen Build für YourPart Daemon auf OpenSUSE Tumbleweed..."
# Prüfe ob wir im richtigen Verzeichnis sind
if [ ! -f "CMakeLists.txt" ] || [ ! -f "daemon.conf" ]; then
log_error "Bitte führen Sie dieses Script aus dem Projektverzeichnis aus!"
exit 1
fi
# Prüfe Dependencies
log_info "Prüfe Dependencies..."
if ! command -v cmake &> /dev/null; then
log_error "CMake nicht gefunden. Führen Sie zuerst install-dependencies-opensuse.sh aus!"
exit 1
fi
if ! command -v g++ &> /dev/null; then
log_error "G++ nicht gefunden. Führen Sie zuerst install-dependencies-opensuse.sh aus!"
exit 1
fi
# Erstelle Build-Verzeichnis
log_info "Erstelle Build-Verzeichnis..."
if [ ! -d "build" ]; then
mkdir build
fi
cd build
# Konfiguriere CMake
log_info "Konfiguriere CMake..."
cmake .. -DCMAKE_BUILD_TYPE=Release
# Kompiliere
log_info "Kompiliere Projekt..."
make -j$(nproc)
cd ..
log_success "Lokaler Build abgeschlossen!"
log_info ""
log_info "Build-Ergebnisse:"
log_info "- Binärdatei: build/yourpart-daemon"
log_info "- Größe: $(du -h build/yourpart-daemon | cut -f1)"
log_info ""
log_info "Nächste Schritte:"
log_info "1. Testen Sie die Binärdatei lokal"
log_info "2. Deployen Sie auf den Server mit deploy.sh"
log_info "3. Oder verwenden Sie deploy-server.sh direkt auf dem Server"

View File

@@ -1,45 +0,0 @@
#!/bin/bash
echo "=== Apache WebSocket-Konfiguration prüfen ==="
echo ""
# Prüfe, welche Module aktiviert sind
echo "Aktivierte Apache-Module:"
apache2ctl -M 2>/dev/null | grep -E "(proxy|rewrite|ssl|headers)" || echo "Keine relevanten Module gefunden"
echo ""
# Prüfe, ob die benötigten Module aktiviert sind
REQUIRED_MODULES=("proxy" "proxy_http" "proxy_wstunnel" "rewrite" "ssl" "headers")
MISSING_MODULES=()
for module in "${REQUIRED_MODULES[@]}"; do
if ! apache2ctl -M 2>/dev/null | grep -q "${module}_module"; then
MISSING_MODULES+=("$module")
fi
done
if [ ${#MISSING_MODULES[@]} -eq 0 ]; then
echo "✅ Alle benötigten Module sind aktiviert"
else
echo "❌ Fehlende Module:"
for module in "${MISSING_MODULES[@]}"; do
echo " - $module"
done
echo ""
echo "Aktivieren mit:"
for module in "${MISSING_MODULES[@]}"; do
echo " sudo a2enmod $module"
done
fi
echo ""
echo "=== Apache-Konfiguration testen ==="
if sudo apache2ctl configtest 2>&1; then
echo "✅ Apache-Konfiguration ist gültig"
else
echo "❌ Apache-Konfiguration hat Fehler"
fi
echo ""
echo "=== Aktive VirtualHosts ==="
apache2ctl -S 2>/dev/null | grep -E "(443|4443|4551)" || echo "Keine relevanten VirtualHosts gefunden"

View File

@@ -1,175 +0,0 @@
# CMake-Skript für intelligente Konfigurationsdatei-Installation
# Fügt nur fehlende Keys hinzu, ohne bestehende Konfiguration zu überschreiben
# Pfade setzen
set(CONFIG_FILE "/etc/yourpart/daemon.conf")
set(TEMPLATE_FILE "/etc/yourpart/daemon.conf.example")
# Prüfe ob Template existiert (wurde von CMake installiert)
if(NOT EXISTS "${TEMPLATE_FILE}")
# Fallback 1: Versuche Template im Source-Verzeichnis zu finden
# CMAKE_CURRENT_LIST_DIR zeigt auf cmake/ während der Installation
get_filename_component(PROJECT_ROOT "${CMAKE_CURRENT_LIST_DIR}/.." ABSOLUTE)
set(TEMPLATE_FILE_FALLBACK "${PROJECT_ROOT}/daemon.conf")
# Fallback 2: Versuche über CMAKE_SOURCE_DIR (falls verfügbar)
if(DEFINED CMAKE_SOURCE_DIR AND EXISTS "${CMAKE_SOURCE_DIR}/daemon.conf")
set(TEMPLATE_FILE "${CMAKE_SOURCE_DIR}/daemon.conf")
message(STATUS "Verwende Template aus CMAKE_SOURCE_DIR: ${TEMPLATE_FILE}")
elseif(EXISTS "${TEMPLATE_FILE_FALLBACK}")
set(TEMPLATE_FILE "${TEMPLATE_FILE_FALLBACK}")
message(STATUS "Verwende Template aus Source-Verzeichnis: ${TEMPLATE_FILE}")
else()
message(FATAL_ERROR "Template-Datei nicht gefunden!")
message(FATAL_ERROR " Gesucht in: ${TEMPLATE_FILE}")
message(FATAL_ERROR " Fallback 1: ${TEMPLATE_FILE_FALLBACK}")
if(DEFINED CMAKE_SOURCE_DIR)
message(FATAL_ERROR " Fallback 2: ${CMAKE_SOURCE_DIR}/daemon.conf")
endif()
endif()
else()
message(STATUS "Verwende installierte Template-Datei: ${TEMPLATE_FILE}")
endif()
# Prüfe ob Ziel-Verzeichnis existiert
if(NOT EXISTS "/etc/yourpart")
message(STATUS "Erstelle Verzeichnis /etc/yourpart...")
execute_process(
COMMAND ${CMAKE_COMMAND} -E make_directory "/etc/yourpart"
RESULT_VARIABLE MKDIR_RESULT
)
if(NOT MKDIR_RESULT EQUAL 0)
message(FATAL_ERROR "Konnte Verzeichnis /etc/yourpart nicht erstellen")
endif()
endif()
# Prüfe ob Config-Datei existiert
if(NOT EXISTS "${CONFIG_FILE}")
message(STATUS "Konfigurationsdatei existiert nicht, erstelle neue...")
execute_process(
COMMAND ${CMAKE_COMMAND} -E copy "${TEMPLATE_FILE}" "${CONFIG_FILE}"
RESULT_VARIABLE COPY_RESULT
)
if(NOT COPY_RESULT EQUAL 0)
message(FATAL_ERROR "Konnte Konfigurationsdatei nicht erstellen: ${CONFIG_FILE}")
endif()
message(STATUS "Neue Konfigurationsdatei erstellt: ${CONFIG_FILE}")
else()
message(STATUS "Konfigurationsdatei existiert bereits, prüfe auf fehlende Keys...")
# Verwende ein Python-Skript für intelligentes Merging
# (CMake hat keine gute Unterstützung für komplexe String-Manipulation)
# Erstelle temporäres Python-Skript im Build-Verzeichnis
set(MERGE_SCRIPT "${CMAKE_CURRENT_BINARY_DIR}/merge-config.py")
# Erstelle Python-Skript
file(WRITE "${MERGE_SCRIPT}"
"#!/usr/bin/env python3
import sys
import re
import os
def merge_config(template_file, config_file):
\"\"\"Fügt fehlende Keys aus Template zur Config hinzu, ohne bestehende zu überschreiben\"\"\"
# Lese bestehende Config
existing_keys = {}
existing_lines = []
if os.path.exists(config_file):
with open(config_file, 'r') as f:
for line in f:
existing_lines.append(line.rstrip())
# Extrahiere Key=Value Paare
match = re.match(r'^\\s*([^#=]+?)\\s*=\\s*(.+?)\\s*$', line)
if match:
key = match.group(1).strip()
value = match.group(2).strip()
existing_keys[key] = value
# Lese Template
new_keys = {}
if not os.path.exists(template_file):
print(f'Fehler: Template-Datei {template_file} nicht gefunden!', file=sys.stderr)
return False
with open(template_file, 'r') as f:
for line in f:
# Extrahiere Key=Value Paare
match = re.match(r'^\\s*([^#=]+?)\\s*=\\s*(.+?)\\s*$', line)
if match:
key = match.group(1).strip()
value = match.group(2).strip()
new_keys[key] = value
# Füge fehlende Keys hinzu
added_count = 0
for key, value in new_keys.items():
if key not in existing_keys:
existing_lines.append(f'{key}={value}')
print(f'Füge fehlenden Key hinzu: {key}')
added_count += 1
# Schreibe aktualisierte Config
if added_count > 0:
with open(config_file, 'w') as f:
for line in existing_lines:
f.write(line + '\\n')
print(f'{added_count} neue Keys hinzugefügt')
else:
print('Keine neuen Keys hinzugefügt - Konfiguration ist aktuell')
return True
if __name__ == '__main__':
if len(sys.argv) != 3:
print('Verwendung: merge-config.py <template> <config>', file=sys.stderr)
sys.exit(1)
template_file = sys.argv[1]
config_file = sys.argv[2]
if not merge_config(template_file, config_file):
sys.exit(1)
")
# Setze Ausführungsrechte
file(CHMOD "${MERGE_SCRIPT}" PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
# Führe Merge-Skript aus
execute_process(
COMMAND python3 "${MERGE_SCRIPT}" "${TEMPLATE_FILE}" "${CONFIG_FILE}"
RESULT_VARIABLE MERGE_RESULT
OUTPUT_VARIABLE MERGE_OUTPUT
ERROR_VARIABLE MERGE_ERROR
)
if(NOT MERGE_RESULT EQUAL 0)
message(WARNING "Fehler beim Mergen der Config: ${MERGE_ERROR}")
else()
message(STATUS "${MERGE_OUTPUT}")
endif()
endif()
# Setze korrekte Berechtigungen (Fehler werden ignoriert, da Berechtigungen optional sind)
execute_process(
COMMAND chown yourpart:yourpart "${CONFIG_FILE}"
RESULT_VARIABLE CHOWN_RESULT
ERROR_QUIET
)
if(NOT CHOWN_RESULT EQUAL 0)
message(WARNING "Konnte Besitzer von ${CONFIG_FILE} nicht ändern (möglicherweise kein Root oder User existiert nicht)")
endif()
execute_process(
COMMAND chmod 600 "${CONFIG_FILE}"
RESULT_VARIABLE CHMOD_RESULT
ERROR_QUIET
)
if(NOT CHMOD_RESULT EQUAL 0)
message(WARNING "Konnte Berechtigungen von ${CONFIG_FILE} nicht ändern")
endif()
message(STATUS "Konfigurationsdatei-Verwaltung abgeschlossen: ${CONFIG_FILE}")

View File

@@ -1,10 +0,0 @@
DB_HOST=localhost
DB_PORT=5432
DB_NAME=yp3
DB_USER=yourpart
DB_PASSWORD=hitomisan
THREAD_COUNT=4
WEBSOCKET_PORT=4551
WEBSOCKET_SSL_ENABLED=false
WEBSOCKET_SSL_CERT_PATH=/home/torsten/Programs/yourpart-daemon/ssl-certs/server.crt
WEBSOCKET_SSL_KEY_PATH=/home/torsten/Programs/yourpart-daemon/ssl-certs/server.key

View File

@@ -1,5 +0,0 @@
WebSocket Server starting on port 4551 (no SSL)
[2025/09/29 08:50:10:6854] N: lws_create_context: LWS: 4.3.5-unknown, NET CLI SRV H1 H2 WS ConMon IPv6-absent
[2025/09/29 08:50:10:6874] N: __lws_lc_tag: ++ [wsi|0|pipe] (1)
[2025/09/29 08:50:10:6874] N: __lws_lc_tag: ++ [vh|0|netlink] (1)
WebSocket-Server erfolgreich gestartet auf Port 4551

View File

@@ -1,35 +0,0 @@
#!/bin/bash
echo "=== WebSocket-Header Debug ==="
echo ""
echo "Prüfe Apache-Logs für WebSocket-Upgrade-Header..."
echo ""
# Prüfe die letzten 50 Zeilen des Access-Logs für /ws/ oder /socket.io/
echo "Access-Log Einträge für /ws/ und /socket.io/:"
sudo tail -50 /var/log/apache2/yourpart.access.log | grep -E "(/ws/|/socket.io/)" | tail -10
echo ""
echo "Prüfe Error-Log für WebSocket-Fehler:"
sudo tail -50 /var/log/apache2/yourpart.error.log | grep -iE "(websocket|upgrade|proxy)" | tail -10
echo ""
echo "=== Test mit curl ==="
echo ""
echo "Teste WebSocket-Upgrade für /ws/:"
curl -i -N \
-H "Connection: Upgrade" \
-H "Upgrade: websocket" \
-H "Sec-WebSocket-Version: 13" \
-H "Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==" \
https://www.your-part.de/ws/ 2>&1 | head -20
echo ""
echo "=== Prüfe Apache-Konfiguration ==="
echo ""
echo "Aktive Rewrite-Regeln für WebSocket:"
sudo apache2ctl -S 2>/dev/null | grep -A 5 "your-part.de:443" || echo "VirtualHost nicht gefunden"
echo ""
echo "Prüfe, ob mod_proxy_wstunnel aktiviert ist:"
apache2ctl -M 2>/dev/null | grep proxy_wstunnel || echo "mod_proxy_wstunnel NICHT aktiviert!"
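
In addition to the curl probe above, the same upgrade path can be checked from Node.js with the `ws` package (the library the daemon's `WebSocketServer` comes from). This is only a sketch; the URL and path are placeholders for whatever the Apache rewrite actually exposes.

```javascript
// Hypothetical Node.js counterpart to the curl test above; URL/path are placeholders.
import WebSocket from 'ws';

const socket = new WebSocket('wss://www.your-part.de/ws/');

socket.on('open', () => {
  console.log('WebSocket upgrade succeeded');
  socket.close();
});
socket.on('unexpected-response', (req, res) => {
  // Fired when the server answers with plain HTTP instead of a 101 upgrade,
  // e.g. when mod_proxy_wstunnel is missing or the rewrite rule does not match.
  console.error(`Upgrade rejected with HTTP ${res.statusCode}`);
});
socket.on('error', (err) => console.error('WebSocket error:', err.message));
```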

View File

@@ -1,203 +0,0 @@
#!/bin/bash
# YourPart Daemon Server-Side Deployment Script
# Führen Sie dieses Script auf dem Server aus, nachdem Sie den Code hochgeladen haben
set -euo pipefail
# Farben für Output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Konfiguration
DAEMON_USER="yourpart"
PROJECT_NAME="yourpart-daemon"
REMOTE_DIR="/opt/yourpart"
SERVICE_NAME="yourpart-daemon"
BUILD_DIR="build"
# Funktionen
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Prüfe ob wir im richtigen Verzeichnis sind
if [ ! -f "CMakeLists.txt" ] || [ ! -f "daemon.conf" ]; then
log_error "Bitte führen Sie dieses Script aus dem Projektverzeichnis aus!"
log_info "Stellen Sie sicher, dass CMakeLists.txt und daemon.conf vorhanden sind."
exit 1
fi
log_info "Starte Server-Side Deployment für YourPart Daemon..."
# 1. Prüfe Dependencies
# Prüfe ob wir root-Rechte haben für bestimmte Operationen
check_sudo() {
if ! sudo -n true 2>/dev/null; then
log_info "Einige Operationen benötigen sudo-Rechte..."
fi
}
log_info "Prüfe Dependencies..."
if ! command -v cmake &> /dev/null; then
log_error "CMake nicht gefunden. Führen Sie zuerst install-dependencies-ubuntu22.sh aus!"
exit 1
fi
if ! command -v gcc-15 &> /dev/null && ! command -v gcc &> /dev/null; then
log_error "GCC nicht gefunden. Führen Sie zuerst install-dependencies-ubuntu22.sh aus!"
exit 1
fi
# 2. Baue Projekt
log_info "Baue Projekt auf dem Server..."
if [ ! -d "$BUILD_DIR" ]; then
mkdir "$BUILD_DIR"
fi
cd "$BUILD_DIR"
# Konfiguriere CMake
log_info "Konfiguriere CMake..."
if command -v gcc-15 &> /dev/null; then
log_info "Verwende GCC 15 für C++23"
cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_STANDARD=23 -DCMAKE_C_COMPILER=gcc-15 -DCMAKE_CXX_COMPILER=g++-15
elif command -v gcc-13 &> /dev/null; then
log_info "Verwende GCC 13 für C++23"
cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_STANDARD=23 -DCMAKE_C_COMPILER=gcc-13 -DCMAKE_CXX_COMPILER=g++-13
else
log_info "Verwende Standard-GCC 11 mit C++20"
cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_STANDARD=20
fi
# Kompiliere
log_info "Kompiliere Projekt..."
make -j$(nproc)
cd ..
log_success "Build abgeschlossen"
# 3. Erstelle Benutzer falls nicht vorhanden
log_info "Prüfe Benutzer $DAEMON_USER..."
if ! id "$DAEMON_USER" &>/dev/null; then
log_info "Erstelle Benutzer $DAEMON_USER..."
sudo useradd --system --shell /bin/false --home-dir "$REMOTE_DIR" --create-home "$DAEMON_USER"
log_success "Benutzer $DAEMON_USER erstellt"
else
log_info "Benutzer $DAEMON_USER existiert bereits"
fi
# 4. Erstelle Verzeichnisse
log_info "Erstelle Verzeichnisse..."
mkdir -p "$REMOTE_DIR"/{logs,config}
sudo mkdir -p /etc/yourpart
sudo mkdir -p /var/log/yourpart
# 5. Stoppe Service falls läuft
log_info "Stoppe Service falls läuft..."
if sudo systemctl is-active --quiet "$SERVICE_NAME"; then
log_info "Stoppe laufenden Service..."
sudo systemctl stop "$SERVICE_NAME"
sleep 2
fi
# 6. Kopiere Dateien
log_info "Kopiere Dateien..."
sudo cp "$BUILD_DIR/yourpart-daemon" /usr/local/bin/
# Intelligente Konfigurationsdatei-Verwaltung
log_info "Verwalte Konfigurationsdatei..."
if [ ! -f "/etc/yourpart/daemon.conf" ]; then
log_info "Konfigurationsdatei existiert nicht, kopiere neue..."
sudo cp daemon.conf /etc/yourpart/
sudo chown yourpart:yourpart /etc/yourpart/daemon.conf
else
log_info "Konfigurationsdatei existiert bereits, prüfe auf fehlende Keys..."
# Erstelle temporäre Datei mit neuen Keys
temp_conf="/tmp/daemon.conf.new"
cp daemon.conf "$temp_conf"
# Füge fehlende Keys hinzu
while IFS='=' read -r key value; do
# Überspringe Kommentare und leere Zeilen
if [[ "$key" =~ ^[[:space:]]*# ]] || [[ -z "$key" ]]; then
continue
fi
# Entferne Leerzeichen am Anfang
key=$(echo "$key" | sed 's/^[[:space:]]*//')
# Prüfe ob Key bereits existiert
if ! grep -q "^[[:space:]]*$key[[:space:]]*=" /etc/yourpart/daemon.conf; then
log_info "Füge fehlenden Key hinzu: $key"
echo "$key=$value" | sudo tee -a /etc/yourpart/daemon.conf > /dev/null
fi
done < "$temp_conf"
rm -f "$temp_conf"
fi
sudo cp yourpart-daemon.service /etc/systemd/system/
# 7. Setze Berechtigungen
log_info "Setze Berechtigungen..."
sudo chmod +x /usr/local/bin/yourpart-daemon
sudo chown -R "$DAEMON_USER:$DAEMON_USER" "$REMOTE_DIR"
sudo chown -R "$DAEMON_USER:$DAEMON_USER" /var/log/yourpart
sudo chown yourpart:yourpart /etc/yourpart/daemon.conf
sudo chmod 600 /etc/yourpart/daemon.conf
# 8. Lade systemd neu
log_info "Lade systemd Konfiguration neu..."
sudo systemctl daemon-reload
# 9. Aktiviere Service
log_info "Aktiviere Service..."
sudo systemctl enable "$SERVICE_NAME"
# 10. Starte Service
log_info "Starte Service..."
sudo systemctl start "$SERVICE_NAME" &
sleep 3
# 11. Prüfe Status
log_info "Prüfe Service-Status..."
sleep 2
if sudo systemctl is-active --quiet "$SERVICE_NAME"; then
log_success "Service läuft erfolgreich!"
sudo systemctl status "$SERVICE_NAME" --no-pager
else
log_error "Service konnte nicht gestartet werden!"
log_info "Logs anzeigen mit: sudo journalctl -u $SERVICE_NAME -f"
exit 1
fi
# 11. Zeige nützliche Befehle
log_success "Deployment erfolgreich abgeschlossen!"
log_info ""
log_info "Nützliche Befehle:"
log_info "- Service-Status: sudo systemctl status $SERVICE_NAME"
log_info "- Service stoppen: sudo systemctl stop $SERVICE_NAME"
log_info "- Service starten: sudo systemctl start $SERVICE_NAME"
log_info "- Service neustarten: sudo systemctl restart $SERVICE_NAME"
log_info "- Logs anzeigen: sudo journalctl -u $SERVICE_NAME -f"
log_info "- Logs der letzten 100 Zeilen: sudo journalctl -u $SERVICE_NAME -n 100"
log_info ""
log_info "Konfigurationsdatei: /etc/yourpart/daemon.conf"
log_info "Log-Verzeichnis: /var/log/yourpart/"
log_info "Service-Datei: /etc/systemd/system/$SERVICE_NAME.service"

201
deploy.sh
View File

@@ -1,183 +1,36 @@
+#!/bin/bash
+echo "=== YourPart Deployment Script ==="
+echo ""
+# Prüfen ob wir im richtigen Verzeichnis sind
+if [ ! -f "package.json" ]; then
+echo "Error: Please run this script from the YourPart3 root directory"
+exit 1
+fi
+# Prüfen ob sudo verfügbar ist
+if ! command -v sudo &> /dev/null; then
+echo "Error: sudo is required but not installed"
+exit 1
+fi
+# Backend deployen
+echo ""
+echo "=== Deploying Backend ==="
+./deploy-backend.sh
+# Frontend bauen und deployen
+echo ""
+echo "=== Building and Deploying Frontend ==="
+./deploy-frontend.sh
+echo ""
+echo "=== Deployment Completed! ==="
+echo "Your application should now be available at:"
+echo " HTTP: http://your-part.de (redirects to HTTPS)"
+echo " HTTPS: https://www.your-part.de"
+echo ""
+echo "To check logs:"
+echo " Backend: sudo journalctl -u yourpart.service -f"
+echo " Apache: sudo tail -f /var/log/apache2/yourpart.*.log"
# YourPart Daemon Deployment Script für Ubuntu 22
# Verwendung: ./deploy.sh [server_ip] [ssh_user]
set -euo pipefail
# Farben für Output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Konfiguration
SERVER_IP="${1:-your-part.de}"
SSH_USER="${2:-root}"
DAEMON_USER="yourpart"
PROJECT_NAME="yourpart-daemon"
REMOTE_DIR="/opt/yourpart"
SERVICE_NAME="yourpart-daemon"
# Funktionen
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Prüfe ob wir im richtigen Verzeichnis sind
if [ ! -f "CMakeLists.txt" ] || [ ! -f "daemon.conf" ]; then
log_error "Bitte führen Sie dieses Script aus dem Projektverzeichnis aus!"
exit 1
fi
log_info "Starte Deployment für YourPart Daemon..."
log_info "Server: $SERVER_IP"
log_info "SSH User: $SSH_USER"
# 1. Lokales Build
log_info "Baue Projekt lokal..."
if [ ! -d "build" ]; then
mkdir build
fi
cd build
cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_STANDARD=23
make -j$(nproc)
cd ..
log_success "Lokaler Build abgeschlossen"
# 2. Erstelle Deployment-Paket
log_info "Erstelle Deployment-Paket..."
DEPLOY_DIR="deploy_package"
rm -rf "$DEPLOY_DIR"
mkdir -p "$DEPLOY_DIR"
# Kopiere Binärdatei
cp build/yourpart-daemon "$DEPLOY_DIR/"
# Kopiere Konfigurationsdatei
cp daemon.conf "$DEPLOY_DIR/"
# Kopiere Service-Datei
cp yourpart-daemon.service "$DEPLOY_DIR/"
# Erstelle Installations-Script
cat > "$DEPLOY_DIR/install.sh" << 'EOF'
#!/bin/bash
set -euo pipefail
# Farben
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
log_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
DAEMON_USER="yourpart"
REMOTE_DIR="/opt/yourpart"
SERVICE_NAME="yourpart-daemon"
log_info "Installiere YourPart Daemon..."
# Erstelle Benutzer falls nicht vorhanden
if ! id "$DAEMON_USER" &>/dev/null; then
log_info "Erstelle Benutzer $DAEMON_USER..."
useradd --system --shell /bin/false --home-dir "$REMOTE_DIR" --create-home "$DAEMON_USER"
log_success "Benutzer $DAEMON_USER erstellt"
else
log_info "Benutzer $DAEMON_USER existiert bereits"
fi
# Erstelle Verzeichnisse
log_info "Erstelle Verzeichnisse..."
mkdir -p "$REMOTE_DIR"/{logs,config}
mkdir -p /etc/yourpart
mkdir -p /var/log/yourpart
# Kopiere Dateien
log_info "Kopiere Dateien..."
cp yourpart-daemon /usr/local/bin/
cp daemon.conf /etc/yourpart/
cp yourpart-daemon.service /etc/systemd/system/
# Setze Berechtigungen
chmod +x /usr/local/bin/yourpart-daemon
chown -R "$DAEMON_USER:$DAEMON_USER" "$REMOTE_DIR"
chown -R "$DAEMON_USER:$DAEMON_USER" /var/log/yourpart
chmod 600 /etc/yourpart/daemon.conf
# Lade systemd neu
log_info "Lade systemd Konfiguration neu..."
systemctl daemon-reload
# Aktiviere Service
log_info "Aktiviere Service..."
systemctl enable "$SERVICE_NAME"
log_success "Installation abgeschlossen!"
log_info "Verwenden Sie 'systemctl start $SERVICE_NAME' um den Service zu starten"
log_info "Verwenden Sie 'systemctl status $SERVICE_NAME' um den Status zu prüfen"
log_info "Verwenden Sie 'journalctl -u $SERVICE_NAME -f' um die Logs zu verfolgen"
EOF
chmod +x "$DEPLOY_DIR/install.sh"
# Erstelle Tarball
tar -czf "${PROJECT_NAME}_deploy.tar.gz" -C "$DEPLOY_DIR" .
log_success "Deployment-Paket erstellt: ${PROJECT_NAME}_deploy.tar.gz"
# 3. Upload zum Server
log_info "Lade Dateien zum Server hoch..."
scp "${PROJECT_NAME}_deploy.tar.gz" "$SSH_USER@$SERVER_IP:/tmp/"
# 4. Installation auf dem Server
log_info "Installiere auf dem Server..."
ssh "$SSH_USER@$SERVER_IP" << EOF
set -euo pipefail
# Entpacke Deployment-Paket
cd /tmp
tar -xzf "${PROJECT_NAME}_deploy.tar.gz"
# Führe Installation aus
./install.sh
# Starte Service
systemctl start $SERVICE_NAME
# Prüfe Status
systemctl status $SERVICE_NAME --no-pager
# Aufräumen
rm -f "${PROJECT_NAME}_deploy.tar.gz"
rm -rf /tmp/yourpart-daemon /tmp/daemon.conf /tmp/yourpart-daemon.service /tmp/install.sh
echo "Deployment erfolgreich abgeschlossen!"
EOF
# 5. Aufräumen
log_info "Räume lokale Dateien auf..."
rm -rf "$DEPLOY_DIR"
rm -f "${PROJECT_NAME}_deploy.tar.gz"
log_success "Deployment erfolgreich abgeschlossen!"
log_info "Der YourPart Daemon läuft jetzt auf $SERVER_IP"
log_info "Verwenden Sie 'ssh $SSH_USER@$SERVER_IP systemctl status $SERVICE_NAME' um den Status zu prüfen"

View File

@@ -0,0 +1,143 @@
# 3D-Animationen im Falukant-Bereich
## Benötigte Dependencies
### Three.js (Empfohlen)
```bash
npm install three
npm install @types/three --save-dev # Für TypeScript-Support
```
**Alternative Optionen:**
- **Babylon.js**: Mächtiger, aber größer (~500KB vs ~600KB)
- **A-Frame**: WebVR-fokussiert, einfacher für VR/AR
- **React Three Fiber**: Falls React verwendet wird (hier Vue)
**Empfehlung: Three.js** - am weitesten verbreitet, beste Dokumentation, große Community
### Optional: Vue-Three.js Wrapper
```bash
npm install vue-threejs # Oder troika-three-text für Text-Rendering
```
## Sinnvolle Seiten für 3D-Animationen
### 1. **OverviewView** (Hauptübersicht)
**Sinnvoll:** ⭐⭐⭐⭐⭐
- **3D-Charakter-Modell**: Rotierendes 3D-Modell des eigenen Charakters
- **Statussymbole**: 3D-Icons für Geld, Gesundheit, Reputation (schwebend/rotierend)
- **Hintergrund**: Subtile 3D-Szene (z.B. mittelalterliche Stadt im Hintergrund)
### 2. **HouseView** (Haus)
**Sinnvoll:** ⭐⭐⭐⭐⭐
- **3D-Haus-Modell**: Interaktives 3D-Modell des eigenen Hauses
- **Upgrade-Visualisierung**: Animation beim Haus-Upgrade
- **Zustand-Anzeige**: 3D-Visualisierung von Dach, Wänden, Boden, Fenstern
### 3. **BranchView** (Niederlassungen)
**Sinnvoll:** ⭐⭐⭐⭐
- **3D-Fabrik/Gebäude**: 3D-Modell der Niederlassung
- **Produktions-Animation**: 3D-Animationen für laufende Produktionen
- **Transport-Visualisierung**: 3D-Wagen/Karren für Transporte
### 4. **FamilyView** (Familie)
**Sinnvoll:** ⭐⭐⭐⭐
- **3D-Charaktere**: 3D-Modelle von Partner und Kindern
- **Beziehungs-Visualisierung**: 3D-Animationen für Beziehungsstatus
- **Geschenk-Animation**: 3D-Animation beim Verschenken
### 5. **HealthView** (Gesundheit)
**Sinnvoll:** ⭐⭐⭐
- **3D-Körper-Modell**: 3D-Visualisierung des Gesundheitszustands
- **Aktivitäts-Animationen**: 3D-Animationen für Gesundheitsaktivitäten
### 6. **NobilityView** (Sozialstatus)
**Sinnvoll:** ⭐⭐⭐
- **3D-Wappen**: Rotierendes 3D-Wappen
- **Insignien**: 3D-Krone, Schwert, etc. je nach Titel
### 7. **ChurchView** (Kirche)
**Sinnvoll:** ⭐⭐⭐
- **3D-Kirche**: 3D-Modell der Kirche
- **Taufe-Animation**: 3D-Animation bei der Taufe
### 8. **BankView** (Bank)
**Sinnvoll:** ⭐⭐
- **3D-Bankgebäude**: 3D-Modell der Bank
- **Geld-Animation**: 3D-Münzen/Geldstapel
### 9. **UndergroundView** (Untergrund)
**Sinnvoll:** ⭐⭐⭐⭐
- **3D-Dungeon**: 3D-Untergrund-Visualisierung
- **Aktivitäts-Animationen**: 3D-Animationen für Untergrund-Aktivitäten
### 10. **ReputationView** (Reputation)
**Sinnvoll:** ⭐⭐⭐
- **3D-Party-Szene**: 3D-Visualisierung von Festen
- **Reputation-Visualisierung**: 3D-Effekte für Reputationsänderungen
## Implementierungs-Strategie
### Phase 1: Basis-Setup
1. Three.js installieren
2. Basis-Komponente `ThreeScene.vue` erstellen
3. Erste einfache Animation (z.B. rotierender Würfel) auf OverviewView
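
A minimal sketch of the Phase-1 smoke test (rotating cube), assuming only that Three.js is installed as described above; the function name and the container element are placeholders for whatever `ThreeScene.vue` ends up mounting.

```javascript
// Minimal Phase-1 smoke test: a rotating cube rendered into a container element.
import * as THREE from 'three';

export function mountSpinningCube(container) {
  const scene = new THREE.Scene();
  const camera = new THREE.PerspectiveCamera(50, container.clientWidth / container.clientHeight, 0.1, 100);
  camera.position.z = 3;

  const renderer = new THREE.WebGLRenderer({ antialias: true });
  renderer.setSize(container.clientWidth, container.clientHeight);
  container.appendChild(renderer.domElement);

  const cube = new THREE.Mesh(
    new THREE.BoxGeometry(1, 1, 1),
    new THREE.MeshNormalMaterial()
  );
  scene.add(cube);

  renderer.setAnimationLoop(() => {
    cube.rotation.x += 0.01;
    cube.rotation.y += 0.01;
    renderer.render(scene, camera);
  });

  // Return a cleanup hook for the component's unmount phase.
  return () => {
    renderer.setAnimationLoop(null);
    renderer.dispose();
    container.removeChild(renderer.domElement);
  };
}
```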
### Phase 2: Charakter-Modell
1. 3D-Charakter-Modell erstellen/laden (GLTF/GLB)
2. Auf OverviewView integrieren
3. Interaktionen (Klick, Hover)
### Phase 3: Gebäude-Modelle
1. Haus-Modell für HouseView
2. Fabrik-Modell für BranchView
3. Kirche-Modell für ChurchView
### Phase 4: Animationen
1. Upgrade-Animationen
2. Status-Änderungs-Animationen
3. Interaktive Elemente
## Technische Überlegungen
### Performance
- **Lazy Loading**: 3D-Szenen nur laden, wenn Seite aktiv ist
- **Level of Detail (LOD)**: Einfache Modelle für schwächere Geräte
- **WebGL-Detection**: Fallback auf 2D, wenn WebGL nicht unterstützt wird
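
A sketch of the lazy-loading and WebGL-detection points from this list, assuming a Vue 3 setup; the import path/alias and the fallback component are placeholders.

```javascript
// Lazy-load the 3D scene and fall back to 2D when WebGL is unavailable.
import { defineAsyncComponent } from 'vue';

export function hasWebGL() {
  try {
    const canvas = document.createElement('canvas');
    return !!(canvas.getContext('webgl') || canvas.getContext('experimental-webgl'));
  } catch {
    return false;
  }
}

// The 3D bundle is only fetched (code-split) when the hosting view actually renders it.
export const ThreeSceneLazy = defineAsyncComponent(() => import('@/components/falukant/ThreeScene.vue'));

// Usage idea in a view template:
//   <ThreeSceneLazy v-if="hasWebGL()" ... />
//   <Static2DFallback v-else />
```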
### Asset-Management
- **GLTF/GLB**: Kompaktes Format für 3D-Modelle
- **Texturen**: Optimiert für Web (WebP, komprimiert)
- **CDN**: Assets über CDN laden für bessere Performance
### Browser-Kompatibilität
- **WebGL 1.0**: Mindestanforderung (95%+ Browser)
- **WebGL 2.0**: Optional für bessere Features
- **Fallback**: 2D-Versionen für ältere Browser
## Beispiel-Struktur
```
frontend/src/
components/
falukant/
ThreeScene.vue # Basis-3D-Szene-Komponente
CharacterModel.vue # 3D-Charakter-Komponente
BuildingModel.vue # 3D-Gebäude-Komponente
assets/
3d/
models/
character.glb
house.glb
factory.glb
textures/
...
```
## Nächste Schritte
1. **Three.js installieren**
2. **Basis-Komponente erstellen**
3. **Erste Animation auf OverviewView testen**
4. **3D-Modelle erstellen/beschaffen** (Blender, Sketchfab, etc.)
5. **Schrittweise auf weitere Seiten ausweiten**

171
docs/3D_ASSETS_STRUCTURE.md Normal file
View File

@@ -0,0 +1,171 @@
# 3D-Assets Struktur für Falukant
## Verzeichnisstruktur
```
frontend/public/
models/
3d/
falukant/
characters/
male.glb # Basis-Modell männlich
female.glb # Basis-Modell weiblich
male_child.glb # Männlich, Kind (0-9 Jahre)
male_teen.glb # Männlich, Teenager (10-17 Jahre)
male_adult.glb # Männlich, Erwachsen (18-39 Jahre)
male_middle.glb # Männlich, Mittelalter (40-59 Jahre)
male_elder.glb # Männlich, Älter (60+ Jahre)
female_child.glb # Weiblich, Kind
female_teen.glb # Weiblich, Teenager
female_adult.glb # Weiblich, Erwachsen
female_middle.glb # Weiblich, Mittelalter
female_elder.glb # Weiblich, Älter
buildings/
house/
house_small.glb # Kleines Haus
house_medium.glb # Mittleres Haus
house_large.glb # Großes Haus
factory/
factory_basic.glb # Basis-Fabrik
factory_advanced.glb # Erweiterte Fabrik
church/
church.glb # Kirche
bank/
bank.glb # Bank
objects/
weapons/
sword.glb
shield.glb
items/
coin.glb
gift.glb
effects/
particles/
money.glb # Geld-Effekt
health.glb # Gesundheits-Effekt
```
## Namenskonventionen
### Charaktere
- Format: `{gender}[_{ageRange}].glb`
- Beispiele:
- `male.glb` - Basis-Modell männlich (Fallback)
- `female.glb` - Basis-Modell weiblich (Fallback)
- `male_adult.glb` - Männlich, Erwachsen
- `female_teen.glb` - Weiblich, Teenager
### Gebäude
- Format: `{buildingType}_{variant}.glb`
- Beispiele:
- `house_small.glb`
- `factory_basic.glb`
- `church.glb`
### Objekte
- Format: `{category}/{item}.glb`
- Beispiele:
- `weapons/sword.glb`
- `items/coin.glb`
## Altersbereiche
Die Altersbereiche werden automatisch bestimmt:
```javascript
// In CharacterModel3D.vue
getAgeRange(age) {
if (age < 10) return 'child';
if (age < 18) return 'teen';
if (age < 40) return 'adult';
if (age < 60) return 'middle';
return 'elder';
}
```
**Fallback-Verhalten:**
- Wenn kein spezifisches Modell für den Altersbereich existiert, wird das Basis-Modell (`male.glb` / `female.glb`) verwendet
- Dies ermöglicht schrittweise Erweiterung ohne Breaking Changes
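
A small sketch of this fallback rule (age-specific file first, base model otherwise). The helper names are hypothetical; the HEAD request and content-type check mirror the fact that dev servers often answer missing static files with an HTML page.

```javascript
// Hypothetical helper: prefer the age-specific model, otherwise the base model.
function characterModelPath(gender, ageRange) {
  const base = '/models/3d/falukant/characters';
  return ageRange ? `${base}/${gender}_${ageRange}.glb` : `${base}/${gender}.glb`;
}

async function pickExistingModel(gender, ageRange) {
  const baseModel = characterModelPath(gender, null);
  const candidates = ageRange ? [characterModelPath(gender, ageRange), baseModel] : [baseModel];
  for (const path of candidates) {
    const res = await fetch(path, { method: 'HEAD' });
    const type = res.headers.get('content-type') || '';
    // Missing files are often served as an HTML index page, so check the type too.
    if (res.ok && !type.includes('text/html')) return path;
  }
  return baseModel; // last resort: the base model
}

// pickExistingModel('female', 'teen') -> '.../female_teen.glb' if present, otherwise '.../female.glb'
```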
## Dateigrößen-Empfehlungen
- **Charaktere**: 100KB - 500KB (komprimiert)
- **Gebäude**: 200KB - 1MB (komprimiert)
- **Objekte**: 10KB - 100KB (komprimiert)
## Optimierung
### Vor dem Hochladen:
1. **Blender** öffnen
2. **Decimate Modifier** anwenden (falls nötig)
3. **Texturen komprimieren** (WebP, max 1024x1024)
4. **GLB Export** mit:
- Compression aktiviert
- Texturen eingebettet
- Unnötige Animationen entfernt
### Komprimierung:
- Verwende `gltf-pipeline` oder `gltf-transform` für weitere Komprimierung
- Ziel: < 500KB pro Modell
## Verwendung im Code
```vue
<!-- CharacterModel3D.vue -->
<CharacterModel3D
:gender="character.gender"
:age="character.age"
/>
<!-- Automatisch wird geladen: -->
<!-- /models/3d/falukant/characters/male_adult.glb -->
<!-- Falls nicht vorhanden: male.glb -->
```
## Erweiterte Struktur (Optional)
Für komplexere Szenarien:
```
frontend/public/
models/
3d/
falukant/
characters/
{gender}/
base/
{gender}.glb # Basis-Modell
ages/
{gender}_{ageRange}.glb
variants/
{gender}_{variant}.glb # Z.B. verschiedene Outfits
```
## Wartung
### Neue Modelle hinzufügen:
1. GLB-Datei in entsprechendes Verzeichnis kopieren
2. Namenskonvention beachten
3. Dateigröße prüfen (< 500KB empfohlen)
4. Im Browser testen
### Modelle aktualisieren:
1. Alte Datei ersetzen
2. Browser-Cache leeren (oder Versionierung verwenden)
3. Testen
### Versionierung (Optional):
```
characters/
v1/
male.glb
v2/
male.glb
```
## Performance-Tipps
1. **Lazy Loading**: Modelle nur laden, wenn benötigt
2. **Preloading**: Wichtige Modelle vorladen
3. **Caching**: Browser-Cache nutzen
4. **CDN**: Für Produktion CDN verwenden
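
A sketch of tips 2 and 3 (preloading and caching): keep one `GLTFLoader` and memoize the load promise per URL. The cache shape is an assumption; depending on the bundler setup, the loader import may need to be `three/addons/loaders/GLTFLoader.js` instead.

```javascript
// Preload/caching sketch: share one parsed GLB per URL across components.
import { GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader.js';

const loader = new GLTFLoader();
const modelCache = new Map();

export function preloadModel(url) {
  if (!modelCache.has(url)) {
    // Store the promise so concurrent callers share a single request.
    modelCache.set(url, loader.loadAsync(url));
  }
  return modelCache.get(url);
}

// e.g. warm the most common model during app start-up:
// preloadModel('/models/3d/falukant/characters/male_adult.glb');
```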

View File

@@ -0,0 +1,159 @@
# 3D-Modell-Erstellung für Falukant
## KI-basierte Tools (Empfohlen)
### 1. **Rodin** ⭐⭐⭐⭐⭐
- **URL**: https://rodin.io/
- **Preis**: Kostenlos (mit Limits), Premium verfügbar
- **Features**:
- Text-zu-3D (z.B. "medieval character", "house")
- Sehr gute Qualität
- Export als GLB/GLTF
- **Gut für**: Charaktere, Gebäude, Objekte
### 2. **Meshy** ⭐⭐⭐⭐⭐
- **URL**: https://www.meshy.ai/
- **Preis**: Kostenlos (mit Limits), ab $9/monat
- **Features**:
- Text-zu-3D
- Bild-zu-3D
- Textur-Generierung
- Export als GLB/OBJ/FBX
- **Gut für**: Alle Arten von Modellen
### 3. **Luma AI Genie** ⭐⭐⭐⭐
- **URL**: https://lumalabs.ai/genie
- **Preis**: Kostenlos (Beta)
- **Features**:
- Text-zu-3D
- Sehr schnell
- Export als GLB
- **Gut für**: Schnelle Prototypen
### 4. **CSM (Common Sense Machines)** ⭐⭐⭐⭐
- **URL**: https://csm.ai/
- **Preis**: Kostenlos (mit Limits)
- **Features**:
- Text-zu-3D
- Bild-zu-3D
- Export als GLB/USD
- **Gut für**: Verschiedene Objekte
### 5. **Tripo AI** ⭐⭐⭐⭐
- **URL**: https://www.tripo3d.ai/
- **Preis**: Kostenlos (mit Limits), Premium verfügbar
- **Features**:
- Text-zu-3D
- Bild-zu-3D
- Export als GLB/FBX/OBJ
- **Gut für**: Charaktere und Objekte
### 6. **Masterpiece Studio** ⭐⭐⭐
- **URL**: https://masterpiecestudio.com/
- **Preis**: Ab $9/monat
- **Features**:
- Text-zu-3D
- VR-Unterstützung
- Export als GLB/FBX
- **Gut für**: Professionelle Modelle
## Traditionelle Tools (Für Nachbearbeitung)
### 1. **Blender** (Kostenlos) ⭐⭐⭐⭐⭐
- **URL**: https://www.blender.org/
- **Features**:
- Vollständige 3D-Suite
- GLB/GLTF Export
- Optimierung von KI-generierten Modellen
- **Gut für**: Nachbearbeitung, Optimierung, Animationen
### 2. **Sketchfab** (Modelle kaufen/laden)
- **URL**: https://sketchfab.com/
- **Preis**: Kostenlos (CC0 Modelle), Premium Modelle kostenpflichtig
- **Features**:
- Millionen von 3D-Modellen
- Viele kostenlose CC0 Modelle
- GLB/GLTF Download
- **Gut für**: Vorgefertigte Modelle, Inspiration
## Empfohlener Workflow
### Für Falukant-Charaktere:
1. **Rodin** oder **Meshy** verwenden
2. Prompt: "medieval character, male/female, simple style, low poly, game ready"
3. Export als GLB
4. In **Blender** optimieren (falls nötig)
5. Texturen anpassen
### Für Gebäude:
1. **Meshy** oder **Tripo AI** verwenden
2. Prompt: "medieval house, simple, low poly, game ready, front view"
3. Export als GLB
4. In **Blender** optimieren
5. Mehrere Varianten erstellen (Haus, Fabrik, Kirche)
### Für Objekte:
1. **Sketchfab** durchsuchen (kostenlose CC0 Modelle)
2. Oder **Meshy** für spezifische Objekte
3. Export als GLB
4. Optimieren falls nötig
## Prompt-Beispiele für Falukant
### Charakter:
```
"medieval character, [male/female], simple low poly style,
game ready, neutral pose, front view, no background,
GLB format, optimized for web"
```
### Haus:
```
"medieval house, simple low poly style, game ready,
front view, no background, GLB format, optimized for web"
```
### Fabrik:
```
"medieval factory building, simple low poly style,
game ready, front view, no background, GLB format"
```
### Wappen:
```
"medieval coat of arms shield, simple low poly style,
game ready, front view, no background, GLB format"
```
## Optimierung für Web
### Nach der Erstellung:
1. **Blender** öffnen
2. **Decimate Modifier** anwenden (weniger Polygone)
3. **Texture** komprimieren (WebP, 512x512 oder 1024x1024)
4. **GLB Export** mit:
- Compression aktiviert
- Texturen eingebettet
- Normals und Tangents berechnet
### Größen-Richtlinien:
- **Charaktere**: 2000-5000 Polygone
- **Gebäude**: 1000-3000 Polygone
- **Objekte**: 100-1000 Polygone
- **Texturen**: 512x512 oder 1024x1024 (nicht größer)
## Kostenlose Alternativen
### Wenn KI-Tools Limits haben:
1. **Sketchfab** durchsuchen (CC0 Modelle)
2. **Poly Haven** (https://polyhaven.com/) - kostenlose Assets
3. **Kenney.nl** - kostenlose Game Assets
4. **OpenGameArt.org** - kostenlose Game Assets
## Nächste Schritte
1. **Rodin** oder **Meshy** testen
2. Ersten Charakter erstellen
3. Als GLB exportieren
4. In Three.js testen
5. Bei Bedarf optimieren

View File

@@ -0,0 +1,334 @@
# Blender Rigging-Anleitung für Falukant-Charaktere
Diese Anleitung erklärt, wie du Bones/Gelenke zu deinen 3D-Modellen in Blender hinzufügst, damit sie animiert werden können.
## Voraussetzungen
- Blender (kostenlos, https://www.blender.org/)
- GLB-Modell von meshy.ai oder anderen Quellen
## Schritt-für-Schritt Anleitung
### 1. Modell in Blender importieren
1. Öffne Blender
2. Gehe zu `File``Import``glTF 2.0 (.glb/.gltf)`
3. Wähle dein Modell aus
4. Das Modell sollte jetzt in der Szene erscheinen
### 2. Modell vorbereiten
1. Stelle sicher, dass das Modell im **Object Mode** ist (Tab drücken, falls im Edit Mode)
2. Wähle das Modell aus (Linksklick)
3. Drücke `Alt + G` um die Position auf (0, 0, 0) zu setzen
4. Drücke `Alt + R` um die Rotation zurückzusetzen
5. Drücke `Alt + S` um die Skalierung auf 1 zu setzen
### 3. Rigging (Bones hinzufügen)
#### Option A: Automatisches Rigging mit Rigify (Empfohlen)
1. **Rigify aktivieren:**
- Gehe zu `Edit``Preferences` (oder `Blender``Preferences` auf Mac)
- Klicke auf den Tab **"Add-ons"** (links im Fenster)
- Im Suchfeld oben rechts tippe: **"rigify"** (ohne Anführungszeichen)
- Du solltest "Rigify: Auto-rigging system" sehen
- Aktiviere das **Häkchen** neben "Rigify"
- Das Add-on ist jetzt aktiviert
- Schließe das Preferences-Fenster
**Alternative Wege zu Preferences:**
- Windows/Linux: `Edit``Preferences`
- Mac: `Blender``Preferences`
- Oder: `Ctrl + ,` (Strg + Komma)
2. **Rigify-Rig hinzufügen:**
- Stelle sicher, dass du im **Object Mode** bist (Tab drücken, falls im Edit Mode)
- Wähle das Modell aus (oder nichts, das Rig wird separat erstellt)
- Drücke `Shift + A` (Add Menu)
- Wähle **`Armature`** aus
- In der Liste siehst du jetzt **`Human (Meta-Rig)`** - klicke darauf
- Ein Basis-Rig wird in der Szene erstellt
**Falls "Human (Meta-Rig)" nicht erscheint:**
- Stelle sicher, dass Rigify aktiviert ist (siehe Schritt 1)
- Starte Blender neu, falls nötig
- Prüfe, ob du die neueste Blender-Version hast (Rigify ist ab Version 2.8+ verfügbar)
3. **Rig positionieren und anpassen:**
**Schritt 1: Rig zum Modell bewegen**
- Stelle sicher, dass du im **Object Mode** bist (Tab drücken)
- Wähle das **Armature** aus (nicht das Modell)
- Drücke `G` (Grab/Move) und bewege das Rig zum Modell
- Oder: Drücke `Alt + G` um die Position zurückzusetzen, dann `G` + `X`, `Y` oder `Z` für eine Achse
**Schritt 2: Rig skalieren (falls zu groß/klein)**
- Wähle das Armature aus
- Drücke `S` (Scale) und skaliere das Rig
- Oder: `S` + `X`, `Y` oder `Z` für eine Achse
- Tipp: Drücke `Shift + X` (oder Y/Z) um diese Achse auszuschließen
**Schritt 3: Einzelne Bones anpassen**
- Wähle das Armature aus
- Wechsle in den **Edit Mode** (Tab)
- Wähle einen Bone aus (Linksklick)
- Drücke `G` um ihn zu bewegen
- Drücke `E` um einen neuen Bone zu extrudieren
- Drücke `R` um einen Bone zu rotieren
- Drücke `S` um einen Bone zu skalieren
**Wichtige Bones zum Anpassen:**
- **Root/Spine** - Sollte in der Mitte des Körpers sein (Hüfthöhe)
- **Spine1/Spine2** - Entlang der Wirbelsäule
- **Neck/Head** - Am Hals und Kopf
- **Shoulders** - An den Schultern
- **Arms** - Entlang der Arme
- **Legs** - Entlang der Beine
**Tipp:** Nutze die Zahlenansicht (Numpad) um die Positionen genau zu sehen
4. **Rig generieren:**
- Wechsle zurück in den **Object Mode** (Tab drücken)
- Wähle das **Meta-Rig (Armature)** aus (nicht das Modell!) - sollte im Outliner blau markiert sein
**Methode 1: Rigify-Button in der Toolbar (Einfachste Methode)**
- Oben in der Toolbar siehst du den Button **"Rigify"** (neben "Object")
- Klicke auf **"Rigify"** → **"Generate Rig"**
- Ein vollständiges Rig wird erstellt (dies kann einen Moment dauern)
**Methode 2: Properties-Panel (Alternative)**
- Im **Properties-Panel** (rechts):
- Klicke auf das **Wrench-Icon** (Modifier Properties) in der linken Toolbar
- Oder: Klicke auf das **Bone-Icon** (Armature Properties)
- Scrolle durch die Tabs, bis du **"Rigify"** oder **"Rigify Generation"** siehst
- In diesem Tab findest du den Button **"Generate Rig"**
- Klicke auf **"Generate Rig"**
**Wichtig:** Nach dem Generieren kannst du das Rig weiter anpassen, aber du musst es im **Pose Mode** tun (nicht Edit Mode)
**Die richtigen Tabs im Properties-Panel (von oben nach unten):**
- 📐 **Object Properties** (Würfel-Icon) - hier findest du Transform, etc.
- 🦴 **Armature Properties** (Bone-Icon) - hier findest du Armature-Einstellungen
- 🔧 **Modifier Properties** (Wrench-Icon) - hier sollte der **Rigify-Tab** sein!
- 🌍 **World Properties** (Globus-Icon) - NICHT hier suchen!
**Falls du den Rigify-Tab nicht siehst:**
- Stelle sicher, dass das **Meta-Rig** (nicht ein bereits generiertes Rig) ausgewählt ist
- Klicke auf das **Wrench-Icon** (Modifier Properties) in der linken Toolbar
- Der Rigify-Tab sollte dort erscheinen
#### Option B: Manuelles Rigging
1. **Armature erstellen:**
- Drücke `Shift + A``Armature`
- Ein Bone wird erstellt
2. **Bones hinzufügen:**
- Wechsle in den **Edit Mode** (Tab)
- Wähle den Root-Bone aus
- Drücke `E` um einen neuen Bone zu extrudieren
- Erstelle die wichtigsten Bones:
- **Spine/Spine1/Spine2** - Wirbelsäule
- **Neck/Head** - Hals und Kopf
- **LeftArm/LeftForeArm/LeftHand** - Linker Arm
- **RightArm/RightForeArm/RightHand** - Rechter Arm
- **LeftUpLeg/LeftLeg/LeftFoot** - Linkes Bein
- **RightUpLeg/RightLeg/RightFoot** - Rechtes Bein
3. **Bone-Namen vergeben:**
- Wähle jeden Bone aus
- Im Properties-Panel (rechts) unter "Bone" kannst du den Namen ändern
- **Wichtig:** Verwende diese Namen für die Animation:
- `LeftArm`, `RightArm`
- `LeftForeArm`, `RightForeArm`
- `LeftHand`, `RightHand`
- `LeftUpLeg`, `RightUpLeg`
- `LeftLeg`, `RightLeg`
- `LeftFoot`, `RightFoot`
- `Neck`, `Head`
- `Spine`, `Spine1`, `Spine2`
### 4. Modell an Bones binden (Skinning)
1. **Beide Objekte auswählen:**
- Wähle zuerst das **Mesh** aus
- Dann wähle das **Armature** aus (Shift + Linksklick)
- Drücke `Ctrl + P``With Automatic Weights`
- Blender berechnet automatisch, welche Vertices zu welchen Bones gehören
2. **Weights überprüfen:**
- Wähle das Mesh aus
- Wechsle in den **Weight Paint Mode** (Dropdown oben)
- Wähle einen Bone aus (rechts im Properties-Panel)
- Rot = vollständig gebunden, Blau = nicht gebunden
- Falls nötig, kannst du die Weights manuell anpassen
### 5. Test-Animation erstellen (Optional)
1. **Pose Mode aktivieren:**
- Wähle das **generierte Rig** aus (nicht das Meta-Rig!)
- Wechsle in den **Pose Mode** (Dropdown oben: "Object Mode" → "Pose Mode")
- Oder: `Ctrl + Tab` → "Pose Mode"
2. **Bone auswählen:**
- **Wichtig:** Arbeite im **3D-Viewport** (Hauptfenster), nicht nur im Outliner!
- **Rigify-Bone-Namen** (nach dem Generieren):
- Für **Knie beugen**: `Leg.L (IK)` oder `Leg.L (FK)` (nicht "Tweak"!)
- Für **Hand anheben**: `Arm.L (IK)` oder `Arm.L (FK)`
- Für **Fuß bewegen**: `Leg.L (IK)` (der Fuß-Controller)
- **IK** = Inverse Kinematics (einfacher, empfohlen für Anfänger)
- **FK** = Forward Kinematics (mehr Kontrolle)
- **Tweak** = Feinabstimmungen (für später, nicht für Hauptanimationen)
- Klicke auf einen **Bone** im **3D-Viewport** (nicht im Outliner!)
- Der Bone sollte orange/ausgewählt sein und im Viewport sichtbar sein
- **Tipp:** Nutze `X-Ray Mode` (Button oben im Viewport) um Bones besser zu sehen
- **Tipp:** Im Outliner kannst du Bones finden, aber die Animation machst du im Viewport
3. **Bone animieren:**
- Wähle z.B. `hand.L` (linke Hand) aus
- Drücke `R` (Rotate) und rotiere den Bone
- Oder: `R` + `Z` (um Z-Achse rotieren)
- Oder: `R` + `X` (um X-Achse rotieren)
- Bewege die Maus → Linksklick zum Bestätigen
- **Beispiel für Hand anheben:** `hand.L``R``Z` → nach oben bewegen
4. **Animation aufnehmen (Timeline):**
- Unten siehst du die **Timeline** (falls nicht sichtbar: `Shift + F12` oder `Window``Animation``Timeline`)
- Stelle den Frame auf **1** (Anfang)
- Wähle den Bone aus und positioniere ihn in der **Ausgangsposition**
- Drücke `I` (Insert Keyframe) → wähle **"Rotation"** (oder "Location" falls bewegt)
- Ein Keyframe wird erstellt (gelber Punkt in der Timeline)
- Stelle den Frame auf **30** (oder einen anderen Frame)
- Rotiere/Bewege den Bone in die **Zielposition** (z.B. Hand nach oben)
- Drücke wieder `I`**"Rotation"** (oder "Location")
- Stelle den Frame auf **60** (Rückkehr zur Ausgangsposition)
- Rotiere den Bone zurück zur Ausgangsposition
- Drücke `I`**"Rotation"**
- Drücke **Play** (Leertaste) um die Animation zu sehen
5. **Animation testen:**
- Die Animation sollte jetzt in einer Schleife abgespielt werden
- Du kannst weitere Keyframes hinzufügen (Frame 90, 120, etc.)
- **Tipp:** Nutze `Alt + A` um die Animation zu stoppen
### 6. Modell exportieren
1. **Beide Objekte auswählen:**
- Wähle das **Mesh** aus
- Shift + Linksklick auf das **generierte Rig** (nicht das Meta-Rig!)
2. **Exportieren:**
- Gehe zu `File``Export``glTF 2.0 (.glb/.gltf)`
- Wähle `.glb` Format
- Stelle sicher, dass folgende Optionen aktiviert sind:
-**Include****Selected Objects**
-**Transform****+Y Up**
-**Geometry****Apply Modifiers**
-**Animation****Bake Animation** (wichtig für Animationen!)
-**Animation****Always Sample Animations** (falls Animationen nicht korrekt exportiert werden)
- Klicke auf "Export glTF 2.0"
### 7. Modell testen
1. Kopiere die exportierte `.glb` Datei nach:
```
frontend/public/models/3d/falukant/characters/
```
2. Lade die Seite neu
3. Die Bones sollten jetzt automatisch erkannt und animiert werden
4. **Animationen testen:**
- Öffne die Browser-Konsole (F12)
- Du solltest sehen: `[ThreeScene] Found X animation(s)`
- Die Animationen sollten automatisch abgespielt werden
- Falls keine Animationen vorhanden sind, werden die Bones trotzdem mit Idle-Animationen bewegt
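
For reference, baked GLB clips are driven in Three.js roughly as in the sketch below; the actual `ThreeScene.vue` implementation may differ, and `scene`, the model path, and the render-loop wiring are assumed here.

```javascript
// Rough sketch of playing baked GLB animations with an AnimationMixer.
import * as THREE from 'three';
import { GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader.js';

const clock = new THREE.Clock();
let mixer = null;

new GLTFLoader().load('/models/3d/falukant/characters/male_adult.glb', (gltf) => {
  scene.add(gltf.scene); // `scene` is assumed to exist in the surrounding setup
  console.log(`[ThreeScene] Found ${gltf.animations.length} animation(s)`);
  if (gltf.animations.length > 0) {
    mixer = new THREE.AnimationMixer(gltf.scene);
    mixer.clipAction(gltf.animations[0]).play(); // loops the first baked clip by default
  }
});

// Inside the render loop:
function tick() {
  if (mixer) mixer.update(clock.getDelta());
  // renderer.render(scene, camera);
}
```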
## Rig anpassen - Detaillierte Anleitung
### Rig nach dem Generieren anpassen
Wenn das Rigify-Rig generiert wurde, aber nicht perfekt passt:
1. **Pose Mode verwenden:**
- Wähle das generierte Armature aus
- Wechsle in den **Pose Mode** (Dropdown oben, oder Strg+Tab → Pose Mode)
- Hier kannst du die Bones bewegen, ohne die Struktur zu zerstören
2. **Rig neu generieren (falls nötig):**
- Falls das Rig komplett neu positioniert werden muss:
- Lösche das generierte Rig (X → Delete)
- Gehe zurück zum Meta-Rig
- Passe das Meta-Rig im Edit Mode an
- Generiere das Rig erneut
3. **Snap to Mesh (Hilfsmittel):**
- Im Edit Mode: `Shift + Tab` um Snap zu aktivieren
- Oder: Rechtsklick auf das Snap-Symbol (Magnet) oben
- Wähle "Face" oder "Vertex" als Snap-Target
- Jetzt werden Bones automatisch am Mesh ausgerichtet
### Häufige Probleme und Lösungen
**Problem: Rig ist zu groß/klein**
- Lösung: Im Object Mode das Armature auswählen und mit `S` skalieren
**Problem: Rig ist an falscher Position**
- Lösung: Im Object Mode mit `G` bewegen, oder `Alt + G` zurücksetzen
**Problem: Einzelne Bones passen nicht**
- Lösung: Im Edit Mode die Bones einzeln anpassen (`G` zum Bewegen)
**Problem: Nach dem Generieren passt es nicht mehr**
- Lösung: Passe das Meta-Rig an und generiere neu, oder verwende Pose Mode
## Tipps und Tricks
### Bone-Namen für automatische Erkennung
Die Komponente erkennt Bones anhand ihrer Namen. Verwende diese Keywords:
- `arm` - für Arme
- `hand` oder `wrist` - für Hände
- `leg` oder `knee` - für Beine
- `foot` oder `ankle` - für Füße
- `shoulder` - für Schultern
- `elbow` - für Ellbogen
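
The keyword matching described above can be sketched as a plain traversal over the loaded skeleton; the exact implementation in `ThreeScene.vue` may differ, but the log line mirrors the one mentioned in the troubleshooting section.

```javascript
// Sketch: collect bones whose names contain one of the animation keywords.
const BONE_KEYWORDS = ['arm', 'hand', 'wrist', 'leg', 'knee', 'foot', 'ankle', 'shoulder', 'elbow'];

function collectAnimatableBones(root) {
  const bones = [];
  root.traverse((obj) => {
    if (obj.isBone && BONE_KEYWORDS.some((kw) => obj.name.toLowerCase().includes(kw))) {
      bones.push(obj);
    }
  });
  console.log(`[ThreeScene] Found ${bones.length} bones for animation`);
  return bones;
}
```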
### Einfacheres Rigging mit Mixamo
Alternativ kannst du:
1. Dein Modell auf [Mixamo](https://www.mixamo.com/) hochladen
2. Automatisches Rigging durchführen lassen
3. Das geriggte Modell herunterladen
4. In Blender importieren und anpassen
### Performance-Optimierung
- Verwende nicht zu viele Bones (max. 50-100 für Charaktere)
- Entferne unnötige Bones vor dem Export
- Teste die Animation im Browser, bevor du das finale Modell exportierst
## Troubleshooting
### Bones werden nicht erkannt
- Prüfe die Bone-Namen (müssen `arm`, `hand`, `leg`, etc. enthalten)
- Stelle sicher, dass das Modell korrekt an die Bones gebunden ist
- Öffne die Browser-Konsole und prüfe die Logs: `[ThreeScene] Found X bones for animation`
### Modell verformt sich falsch
- Überprüfe die Weights im Weight Paint Mode
- Passe die Bone-Positionen an
- Stelle sicher, dass alle Vertices korrekt zugewiesen sind
### Export schlägt fehl
- Stelle sicher, dass beide Objekte (Mesh + Armature) ausgewählt sind
- Prüfe, ob das Modell im Object Mode ist
- Versuche es mit einem anderen Export-Format (.gltf statt .glb)
## Weitere Ressourcen
- [Blender Rigging Tutorial](https://www.youtube.com/results?search_query=blender+rigging+tutorial)
- [Mixamo Auto-Rigging](https://www.mixamo.com/)
- [Three.js GLTF Animation Guide](https://threejs.org/docs/#manual/en/introduction/Animation-system)

View File

@@ -21,6 +21,7 @@
"dotenv": "^16.4.5", "dotenv": "^16.4.5",
"mitt": "^3.0.1", "mitt": "^3.0.1",
"socket.io-client": "^4.8.1", "socket.io-client": "^4.8.1",
"three": "^0.182.0",
"vue": "~3.4.31", "vue": "~3.4.31",
"vue-i18n": "^10.0.0-beta.2", "vue-i18n": "^10.0.0-beta.2",
"vue-multiselect": "^3.1.0", "vue-multiselect": "^3.1.0",
@@ -2834,6 +2835,12 @@
"safe-buffer": "~5.2.0" "safe-buffer": "~5.2.0"
} }
}, },
"node_modules/three": {
"version": "0.182.0",
"resolved": "https://registry.npmjs.org/three/-/three-0.182.0.tgz",
"integrity": "sha512-GbHabT+Irv+ihI1/f5kIIsZ+Ef9Sl5A1Y7imvS5RQjWgtTPfPnZ43JmlYI7NtCRDK9zir20lQpfg8/9Yd02OvQ==",
"license": "MIT"
},
"node_modules/tinyglobby": { "node_modules/tinyglobby": {
"version": "0.2.14", "version": "0.2.14",
"resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.14.tgz", "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.14.tgz",

View File

@@ -21,6 +21,7 @@
"dotenv": "^16.4.5", "dotenv": "^16.4.5",
"mitt": "^3.0.1", "mitt": "^3.0.1",
"socket.io-client": "^4.8.1", "socket.io-client": "^4.8.1",
"three": "^0.182.0",
"vue": "~3.4.31", "vue": "~3.4.31",
"vue-i18n": "^10.0.0-beta.2", "vue-i18n": "^10.0.0-beta.2",
"vue-multiselect": "^3.1.0", "vue-multiselect": "^3.1.0",

View File

@@ -0,0 +1,40 @@
# 3D-Charakter-Modelle
## Verzeichnisstruktur
Dieses Verzeichnis enthält die 3D-Modelle für Falukant-Charaktere.
## Dateinamen-Konvention
### Basis-Modelle (Fallback)
- `male.glb` - Basis-Modell männlich
- `female.glb` - Basis-Modell weiblich
### Altersspezifische Modelle
- `male_toddler.glb` - Männlich, Kleinkind (0-3 Jahre)
- `male_child.glb` - Männlich, Kind (4-7 Jahre)
- `male_preteen.glb` - Männlich, Vor-Teenager (8-12 Jahre)
- `male_teen.glb` - Männlich, Teenager (13-17 Jahre)
- `male_adult.glb` - Männlich, Erwachsen (18+ Jahre)
- `female_toddler.glb` - Weiblich, Kleinkind (0-3 Jahre)
- `female_child.glb` - Weiblich, Kind (4-7 Jahre)
- `female_preteen.glb` - Weiblich, Vor-Teenager (8-12 Jahre)
- `female_teen.glb` - Weiblich, Teenager (13-17 Jahre)
- `female_adult.glb` - Weiblich, Erwachsen (18+ Jahre)
## Fallback-Verhalten
Wenn kein spezifisches Modell für den Altersbereich existiert, wird automatisch das Basis-Modell (`male.glb` / `female.glb`) verwendet.
## Dateigröße
- Empfohlen: < 500KB pro Modell
- Maximal: 1MB pro Modell
## Optimierung
Vor dem Hochladen:
1. In Blender öffnen
2. Decimate Modifier anwenden (falls nötig)
3. Texturen komprimieren (WebP, max 1024x1024)
4. GLB Export mit Compression aktiviert

Binary file not shown.

View File

@@ -0,0 +1,225 @@
<template>
<div class="character-model-3d">
<ThreeScene
v-if="currentModelPath"
:key="currentModelPath"
:modelPath="currentModelPath"
:autoRotate="autoRotate"
:rotationSpeed="rotationSpeed"
:cameraPosition="cameraPosition"
:backgroundColor="backgroundColor"
@model-loaded="onModelLoaded"
@model-error="onModelError"
@loading-progress="onLoadingProgress"
/>
<div v-if="loading" class="loading-overlay">
<div class="loading-spinner"></div>
<p v-if="loadingProgress > 0">{{ Math.round(loadingProgress) }}%</p>
</div>
<div v-if="error" class="error-overlay">
<p>{{ error }}</p>
</div>
</div>
</template>
<script>
import ThreeScene from './ThreeScene.vue';
export default {
name: 'CharacterModel3D',
components: {
ThreeScene
},
props: {
gender: {
type: String,
required: true,
validator: (value) => ['male', 'female'].includes(value)
},
age: {
type: Number,
default: null
},
autoRotate: {
type: Boolean,
default: false
},
rotationSpeed: {
type: Number,
default: 0.5
},
cameraPosition: {
type: Object,
default: () => ({ x: 0, y: 1, z: 3 })
},
backgroundColor: {
type: String,
default: '#f0f0f0'
}
},
data() {
return {
loading: true,
loadingProgress: 0,
error: null,
currentModelPath: null
};
},
computed: {
baseModelPath() {
const basePath = '/models/3d/falukant/characters';
return `${basePath}/${this.gender}.glb`;
},
ageSpecificModelPath() {
const ageRange = this.getAgeRange(this.age);
if (!ageRange) return null;
const basePath = '/models/3d/falukant/characters';
return `${basePath}/${this.gender}_${ageRange}.glb`;
}
},
watch: {
gender() {
this.findAndLoadModel();
},
age() {
this.findAndLoadModel();
}
},
mounted() {
this.findAndLoadModel();
},
methods: {
getAgeRange(age) {
if (age === null || age === undefined) return null;
// Verfügbare Altersbereiche: toddler, child, preteen, teen, adult
// Alter ist in Tagen gespeichert (1 Tag = 1 Jahr)
if (age < 4) return 'toddler'; // 0-3 Jahre
if (age < 10) return 'child'; // 4-7 Jahre
if (age < 13) return 'preteen'; // 8-12 Jahre
if (age < 18) return 'teen'; // 13-17 Jahre
return 'adult'; // 18+ Jahre
},
async findAndLoadModel() {
this.loading = true;
this.error = null;
// Versuche zuerst altersspezifisches Modell, dann Basis-Modell
const pathsToTry = [];
if (this.ageSpecificModelPath) {
pathsToTry.push(this.ageSpecificModelPath);
}
pathsToTry.push(this.baseModelPath);
// Prüfe welche Datei existiert
for (const path of pathsToTry) {
const exists = await this.checkFileExists(path);
if (exists) {
this.currentModelPath = path;
console.log(`[CharacterModel3D] Using model: ${path}`);
return;
}
}
// Fallback: Verwende Basis-Modell auch wenn Prüfung fehlschlägt
this.currentModelPath = this.baseModelPath;
console.warn(`[CharacterModel3D] Using fallback model: ${this.baseModelPath}`);
},
async checkFileExists(path) {
try {
const response = await fetch(path, { method: 'HEAD' });
if (!response.ok) {
return false;
}
// Prüfe Content-Type - sollte nicht HTML sein
const contentType = response.headers.get('content-type') || '';
const isHTML = contentType.includes('text/html') || contentType.includes('text/plain');
if (isHTML) {
console.warn(`[CharacterModel3D] File ${path} returns HTML, probably doesn't exist`);
return false;
}
// GLB-Dateien können verschiedene Content-Types haben
return true;
} catch (error) {
console.warn(`[CharacterModel3D] Error checking file ${path}:`, error);
return false;
}
},
onModelLoaded(model) {
this.loading = false;
this.error = null;
this.$emit('model-loaded', model);
},
onModelError(error) {
// Wenn ein Fehler auftritt und wir noch nicht das Basis-Modell verwenden
if (this.currentModelPath !== this.baseModelPath) {
console.warn('[CharacterModel3D] Model failed, trying fallback...');
this.currentModelPath = this.baseModelPath;
// Der Watch-Handler wird das Modell neu laden
return;
}
this.loading = false;
this.error = 'Fehler beim Laden des 3D-Modells';
console.error('Character model error:', error);
this.$emit('model-error', error);
},
onLoadingProgress(progress) {
this.loadingProgress = progress;
}
}
};
</script>
<style scoped>
.character-model-3d {
width: 100%;
height: 100%;
position: relative;
min-height: 400px;
}
.loading-overlay,
.error-overlay {
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
background: rgba(255, 255, 255, 0.9);
z-index: 10;
}
.loading-spinner {
width: 40px;
height: 40px;
border: 4px solid #f3f3f3;
border-top: 4px solid #F9A22C;
border-radius: 50%;
animation: spin 1s linear infinite;
margin-bottom: 10px;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
.error-overlay p {
color: #d32f2f;
font-weight: bold;
}
</style>
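For reference, the fallback resolution above can also be exercised outside the component. The following is a minimal sketch that assumes the /models/3d/falukant/characters layout from the computed properties is used as-is; the helper names are illustrative and not part of the codebase.

// Minimal sketch of the model path fallback (assumed layout, illustrative helper names).
const BASE_PATH = '/models/3d/falukant/characters';

function getAgeRange(age) {
  if (age === null || age === undefined) return null;
  if (age < 4) return 'toddler';
  if (age < 10) return 'child';
  if (age < 13) return 'preteen';
  if (age < 18) return 'teen';
  return 'adult';
}

async function resolveModelPath(gender, age) {
  const range = getAgeRange(age);
  const candidates = range ? [`${BASE_PATH}/${gender}_${range}.glb`] : [];
  candidates.push(`${BASE_PATH}/${gender}.glb`);
  for (const path of candidates) {
    try {
      const response = await fetch(path, { method: 'HEAD' });
      const contentType = response.headers.get('content-type') || '';
      if (response.ok && !contentType.includes('text/html')) {
        return path;
      }
    } catch (e) {
      // Network error: try the next candidate.
    }
  }
  // Mirror the component's last resort: fall back to the base model.
  return `${BASE_PATH}/${gender}.glb`;
}

// Example: resolveModelPath('female', 7) resolves to .../female_child.glb if that file is served.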

View File

@@ -151,19 +151,6 @@ export default {
// Extrahiere Parameter aus value und effects // Extrahiere Parameter aus value und effects
params = this.extractParamsFromValue(value, n); params = this.extractParamsFromValue(value, n);
// Wenn value eine einfache Zahl ist (z.B. für overproduction), als value-Parameter verwenden
if (typeof parsed.value === 'number') {
params.value = parsed.value;
}
// Weitere Parameter aus parsed extrahieren (z.B. branch_id)
if (parsed.branch_id !== undefined) {
params.branch_id = parsed.branch_id;
}
if (parsed.region_id !== undefined) {
params.region_id = parsed.region_id;
}
} }
} catch (e) { } catch (e) {
// Bei Parse-Fehler: Alte Struktur unterstützen // Bei Parse-Fehler: Alte Struktur unterstützen
@@ -185,12 +172,9 @@ export default {
if (value && value.title && value.description) { if (value && value.title && value.description) {
// Parameter aus effects extrahieren und formatieren // Parameter aus effects extrahieren und formatieren
const formattedParams = this.formatParams(params); const formattedParams = this.formatParams(params);
// Zuerst Description interpolieren (für {amount} etc.), dann Effects hinzufügen
let description = this.interpolateString(value.description, formattedParams);
description = this.formatDescriptionWithEffects(description, value.effects || [], formattedParams);
return { return {
title: this.interpolateString(value.title, formattedParams), title: this.interpolateString(value.title, formattedParams),
description: description description: this.formatDescriptionWithEffects(value.description, value.effects || [], formattedParams)
}; };
} }
@@ -228,10 +212,6 @@ export default {
const title = this.$t(titleKey, formattedParams); const title = this.$t(titleKey, formattedParams);
let description = this.$t(descKey, formattedParams); let description = this.$t(descKey, formattedParams);
// Stelle sicher, dass auch hier die Parameter interpoliert werden (für {amount} etc.)
// Vue i18n interpoliert bereits, aber wir müssen sicherstellen, dass formatParams korrekt formatiert
description = this.interpolateString(description, formattedParams);
// Füge Effect-Details hinzu, falls vorhanden // Füge Effect-Details hinzu, falls vorhanden
if (value && value.effects) { if (value && value.effects) {
description = this.formatDescriptionWithEffects(description, value.effects, formattedParams); description = this.formatDescriptionWithEffects(description, value.effects, formattedParams);
@@ -254,27 +234,15 @@ export default {
// Geldbeträge formatieren // Geldbeträge formatieren
if (params.amount !== undefined && params.amount !== null) { if (params.amount !== undefined && params.amount !== null) {
formatted.amount = this.formatMoney(Number(params.amount)); formatted.amount = this.formatMoney(params.amount);
} }
if (params.absolute !== undefined && params.absolute !== null) { if (params.absolute !== undefined && params.absolute !== null) {
formatted.amount = this.formatMoney(Number(params.absolute)); formatted.amount = this.formatMoney(params.absolute);
} }
if (params.percent !== undefined && params.percent !== null) { if (params.percent !== undefined && params.percent !== null) {
formatted.percent = `${params.percent > 0 ? '+' : ''}${params.percent.toFixed(1)}%`; formatted.percent = `${params.percent > 0 ? '+' : ''}${params.percent.toFixed(1)}%`;
} }
// Einfache Werte (z.B. für overproduction)
if (params.value !== undefined && params.value !== null) {
formatted.value = Number(params.value);
}
// Filiale-Information
if (params.branch_id !== undefined && params.branch_id !== null) {
formatted.branch_info = ` (Filiale #${params.branch_id})`;
} else {
formatted.branch_info = '';
}
// Gesundheit formatieren // Gesundheit formatieren
if (params.change !== undefined && params.change !== null) { if (params.change !== undefined && params.change !== null) {
formatted.healthChange = params.change > 0 ? `+${params.change}` : `${params.change}`; formatted.healthChange = params.change > 0 ? `+${params.change}` : `${params.change}`;
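Read as a whole, the removed lines above turned raw notification parameters into display-ready strings. A condensed, standalone sketch of that formatting step follows (field names are taken from the diff; formatMoney is a component method not shown here, so passing it in is an assumption).

// Sketch of the removed parameter formatting, including the optional branch_info suffix.
function formatNotificationParams(params, formatMoney) {
  const formatted = {};
  if (params.amount !== undefined && params.amount !== null) {
    formatted.amount = formatMoney(Number(params.amount));
  }
  if (params.absolute !== undefined && params.absolute !== null) {
    formatted.amount = formatMoney(Number(params.absolute));
  }
  if (params.percent !== undefined && params.percent !== null) {
    formatted.percent = `${params.percent > 0 ? '+' : ''}${params.percent.toFixed(1)}%`;
  }
  if (params.value !== undefined && params.value !== null) {
    formatted.value = Number(params.value);
  }
  // branch_info interpolates to an empty string when no branch is attached to the event.
  formatted.branch_info = params.branch_id !== undefined && params.branch_id !== null
    ? ` (Filiale #${params.branch_id})`
    : '';
  return formatted;
}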

View File

@@ -0,0 +1,441 @@
<template>
<div ref="container" class="three-scene-container"></div>
</template>
<script>
import { markRaw } from 'vue';
import * as THREE from 'three';
import { GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader.js';
import { DRACOLoader } from 'three/examples/jsm/loaders/DRACOLoader.js';
import { OrbitControls } from 'three/examples/jsm/controls/OrbitControls.js';
export default {
name: 'ThreeScene',
props: {
modelPath: {
type: String,
required: true
},
autoRotate: {
type: Boolean,
default: true
},
rotationSpeed: {
type: Number,
default: 0.5
},
cameraPosition: {
type: Object,
default: () => ({ x: 0, y: 1, z: 3 })
},
backgroundColor: {
type: String,
default: '#f0f0f0'
}
},
data() {
return {
scene: null,
camera: null,
renderer: null,
controls: null,
model: null,
animationId: null,
mixer: null,
clock: null,
animationStartTime: 0,
baseY: 0, // base Y position for the idle movement animation
bones: [] // bones stored for manual limb animation
};
},
mounted() {
this.initScene();
this.loadModel();
this.animate();
window.addEventListener('resize', this.onWindowResize);
},
beforeUnmount() {
window.removeEventListener('resize', this.onWindowResize);
if (this.animationId) {
cancelAnimationFrame(this.animationId);
}
if (this.mixer) {
this.mixer.stopAllAction();
}
if (this.renderer) {
this.renderer.dispose();
}
if (this.model) {
this.disposeModel(this.model);
}
},
watch: {
modelPath() {
if (this.model) {
this.disposeModel(this.model);
this.model = null;
}
this.loadModel();
},
autoRotate(newVal) {
if (this.controls) {
this.controls.autoRotate = newVal;
}
}
},
methods: {
initScene() {
// Create the scene - markRaw prevents Vue reactivity
this.scene = markRaw(new THREE.Scene());
this.scene.background = new THREE.Color(this.backgroundColor);
// Create the camera - markRaw prevents Vue reactivity
this.camera = markRaw(new THREE.PerspectiveCamera(
50,
this.$refs.container.clientWidth / this.$refs.container.clientHeight,
0.1,
1000
));
this.camera.position.set(
this.cameraPosition.x,
this.cameraPosition.y,
this.cameraPosition.z
);
// Create the renderer - markRaw prevents Vue reactivity
this.renderer = markRaw(new THREE.WebGLRenderer({
antialias: true,
alpha: true,
powerPreference: 'high-performance'
}));
this.renderer.setSize(
this.$refs.container.clientWidth,
this.$refs.container.clientHeight
);
this.renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2)); // capped for performance
this.renderer.shadowMap.enabled = true;
this.renderer.shadowMap.type = THREE.PCFSoftShadowMap;
this.renderer.toneMapping = THREE.ACESFilmicToneMapping;
this.renderer.toneMappingExposure = 1.2; // slightly increased brightness
this.$refs.container.appendChild(this.renderer.domElement);
// Create the controls - markRaw prevents Vue reactivity
this.controls = markRaw(new OrbitControls(this.camera, this.renderer.domElement));
this.controls.enableDamping = true;
this.controls.dampingFactor = 0.05;
this.controls.autoRotate = false; // auto-rotation disabled
this.controls.enableRotate = false; // manual rotation disabled
this.controls.enableZoom = true;
this.controls.enablePan = false;
this.controls.minDistance = 2;
this.controls.maxDistance = 5;
// Clock for animations
this.clock = markRaw(new THREE.Clock());
// Improved lighting
// Ambient light - brighter for better visibility
const ambientLight = markRaw(new THREE.AmbientLight(0xffffff, 1.0));
this.scene.add(ambientLight);
// Key light from the front top
const mainLight = markRaw(new THREE.DirectionalLight(0xffffff, 1.2));
mainLight.position.set(3, 8, 4);
mainLight.castShadow = true;
mainLight.shadow.mapSize.width = 2048;
mainLight.shadow.mapSize.height = 2048;
mainLight.shadow.camera.near = 0.5;
mainLight.shadow.camera.far = 50;
this.scene.add(mainLight);
// Fill light from the left
const fillLight = markRaw(new THREE.DirectionalLight(0xffffff, 0.6));
fillLight.position.set(-4, 5, 3);
this.scene.add(fillLight);
// Rim light from the right
const rimLight = markRaw(new THREE.DirectionalLight(0xffffff, 0.5));
rimLight.position.set(4, 3, -3);
this.scene.add(rimLight);
// Point light from above for extra brightness
const pointLight = markRaw(new THREE.PointLight(0xffffff, 0.8, 20));
pointLight.position.set(0, 6, 0);
this.scene.add(pointLight);
},
loadModel() {
const loader = new GLTFLoader();
// Optional: DRACO loader for compressed models
// const dracoLoader = new DRACOLoader();
// dracoLoader.setDecoderPath('https://www.gstatic.com/draco/v1/decoders/');
// loader.setDRACOLoader(dracoLoader);
console.log('[ThreeScene] Loading model from:', this.modelPath);
console.log('[ThreeScene] Full URL:', window.location.origin + this.modelPath);
loader.load(
this.modelPath,
(gltf) => {
console.log('[ThreeScene] Model loaded successfully:', gltf);
// Remove the previous model
if (this.model) {
this.scene.remove(this.model);
this.disposeModel(this.model);
}
// Mark the model as non-reactive - prevents Vue proxy conflicts
this.model = markRaw(gltf.scene);
// Center and scale the model
const box = new THREE.Box3().setFromObject(this.model);
const center = box.getCenter(new THREE.Vector3());
const size = box.getSize(new THREE.Vector3());
console.log('[ThreeScene] Model bounds:', { center, size });
// Center the model (X and Z)
this.model.position.x = -center.x;
this.model.position.z = -center.z;
// Scale the model (larger for better visibility)
const maxSize = Math.max(size.x, size.y, size.z);
const scale = maxSize > 0 ? 3.0 / maxSize : 1;
this.model.scale.multiplyScalar(scale);
// Place the model on the ground and store the base Y position
this.baseY = -size.y * scale / 2;
this.model.position.y = this.baseY;
// Enable shadows
this.model.traverse((child) => {
if (child.isMesh) {
child.castShadow = true;
child.receiveShadow = true;
}
});
this.scene.add(this.model);
// Aim the camera at the model
this.centerCameraOnModel();
// Find bones for manual animation
this.findAndStoreBones(this.model);
// If no bones were found, leave a hint in the console
if (this.bones.length === 0) {
console.warn('[ThreeScene] No bones found in model. To enable limb animations, add bones in Blender. See docs/BLENDER_RIGGING_GUIDE.md');
}
// Load animations from the GLTF (if any)
if (gltf.animations && gltf.animations.length > 0) {
console.log(`[ThreeScene] Found ${gltf.animations.length} animation(s):`, gltf.animations.map(a => a.name));
this.mixer = markRaw(new THREE.AnimationMixer(this.model));
gltf.animations.forEach((clip) => {
const action = this.mixer.clipAction(clip);
action.play();
console.log(`[ThreeScene] Playing animation: "${clip.name}" (duration: ${clip.duration.toFixed(2)}s)`);
});
} else {
console.log('[ThreeScene] No animations found in model');
}
this.animationStartTime = this.clock.getElapsedTime();
this.$emit('model-loaded', this.model);
},
(progress) => {
// Loading-Progress
if (progress.lengthComputable) {
const percent = (progress.loaded / progress.total) * 100;
this.$emit('loading-progress', percent);
} else {
// Fallback for progress events that are not length-computable
this.$emit('loading-progress', 50);
}
},
(error) => {
console.error('[ThreeScene] Error loading model:', error);
console.error('[ThreeScene] Model path was:', this.modelPath);
console.error('[ThreeScene] Full URL:', window.location.origin + this.modelPath);
console.error('[ThreeScene] Error details:', {
message: error?.message,
stack: error?.stack,
type: error?.constructor?.name
});
// Check whether this is a 404 (a JSON parse error usually means an HTML error page was returned)
if (error?.message && (error.message.includes('JSON') || error.message.includes('Unexpected'))) {
console.error('[ThreeScene] Possible 404 error - file not found or wrong path');
console.error('[ThreeScene] Please check:');
console.error(' 1. File exists at:', this.modelPath);
console.error(' 2. Vite dev server is running');
console.error(' 3. File is in public/ directory');
// Fetch the file directly to inspect the underlying error
fetch(this.modelPath)
.then(response => {
console.error('[ThreeScene] Fetch response:', {
status: response.status,
statusText: response.statusText,
headers: Object.fromEntries(response.headers.entries())
});
return response.text();
})
.then(text => {
console.error('[ThreeScene] Response preview:', text.substring(0, 200));
})
.catch(fetchError => {
console.error('[ThreeScene] Fetch error:', fetchError);
});
}
this.$emit('model-error', error);
}
);
},
disposeModel(model) {
model.traverse((child) => {
if (child.isMesh) {
if (child.geometry) child.geometry.dispose();
if (child.material) {
if (Array.isArray(child.material)) {
child.material.forEach((mat) => mat.dispose());
} else {
child.material.dispose();
}
}
}
});
},
findAndStoreBones(object) {
this.bones = [];
object.traverse((child) => {
if (child.isBone || (child.type === 'Bone')) {
// Store bones by name for easy access
const boneName = child.name.toLowerCase();
// Typical bone names for limbs
if (boneName.includes('arm') ||
boneName.includes('hand') ||
boneName.includes('leg') ||
boneName.includes('foot') ||
boneName.includes('shoulder') ||
boneName.includes('elbow') ||
boneName.includes('knee') ||
boneName.includes('wrist') ||
boneName.includes('ankle')) {
this.bones.push({
bone: child,
name: boneName,
originalRotation: child.rotation.clone()
});
}
}
});
console.log(`[ThreeScene] Found ${this.bones.length} bones for animation`);
},
animateLimbs(time) {
// Gentle idle animation for the limbs
const animationSpeed = 1.5; // speed
const maxRotation = 0.15; // maximum rotation in radians (about 8.6 degrees)
this.bones.forEach((boneData, index) => {
const bone = boneData.bone;
const boneName = boneData.name;
// Different animations depending on the bone type
if (boneName.includes('arm') || boneName.includes('shoulder')) {
// Arms: gentle back-and-forth swing
const phase = time * animationSpeed + (index * 0.5);
bone.rotation.x = boneData.originalRotation.x + Math.sin(phase) * maxRotation * 0.3;
bone.rotation.z = boneData.originalRotation.z + Math.cos(phase * 0.7) * maxRotation * 0.2;
} else if (boneName.includes('hand') || boneName.includes('wrist')) {
// Hands: slight wobble
const phase = time * animationSpeed * 1.5 + (index * 0.3);
bone.rotation.y = boneData.originalRotation.y + Math.sin(phase) * maxRotation * 0.4;
} else if (boneName.includes('leg') || boneName.includes('knee')) {
// Legs: slight forward-and-back movement
const phase = time * animationSpeed * 0.8 + (index * 0.4);
bone.rotation.x = boneData.originalRotation.x + Math.sin(phase) * maxRotation * 0.2;
} else if (boneName.includes('foot') || boneName.includes('ankle')) {
// Feet: minimal wobble
const phase = time * animationSpeed * 1.2 + (index * 0.2);
bone.rotation.x = boneData.originalRotation.x + Math.sin(phase) * maxRotation * 0.15;
}
});
},
centerCameraOnModel() {
if (!this.model || !this.camera) return;
// Camera position for a good view of the centered model
this.camera.position.set(0, this.baseY + 1, 3);
this.camera.lookAt(0, this.baseY + 0.5, 0);
if (this.controls) {
this.controls.target.set(0, this.baseY + 0.5, 0);
this.controls.update();
}
},
animate() {
this.animationId = requestAnimationFrame(this.animate);
const delta = this.clock ? this.clock.getDelta() : 0;
// Update GLTF animations (if any)
if (this.mixer) {
this.mixer.update(delta);
}
// Limb animations
if (this.bones.length > 0) {
const time = this.clock ? this.clock.getElapsedTime() : 0;
this.animateLimbs(time);
}
if (this.controls) {
this.controls.update();
}
if (this.renderer && this.scene && this.camera) {
this.renderer.render(this.scene, this.camera);
}
},
onWindowResize() {
if (!this.$refs.container || !this.camera || !this.renderer) return;
const width = this.$refs.container.clientWidth;
const height = this.$refs.container.clientHeight;
this.camera.aspect = width / height;
this.camera.updateProjectionMatrix();
this.renderer.setSize(width, height);
}
}
};
</script>
<style scoped>
.three-scene-container {
width: 100%;
height: 100%;
position: relative;
overflow: hidden;
}
.three-scene-container canvas {
display: block;
width: 100%;
height: 100%;
}
</style>
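The loader above keeps the DRACO setup commented out. If compressed .glb assets are introduced later, enabling it would roughly follow the sketch below; the decoder URL is the one from the commented-out lines and is an assumption about where the decoder is served from. This would also make the currently unused DRACOLoader import active.

// Sketch: GLTFLoader with DRACO decompression enabled (decoder URL taken from the commented-out lines).
import { GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader.js';
import { DRACOLoader } from 'three/examples/jsm/loaders/DRACOLoader.js';

function createGltfLoader() {
  const loader = new GLTFLoader();
  const dracoLoader = new DRACOLoader();
  dracoLoader.setDecoderPath('https://www.gstatic.com/draco/v1/decoders/');
  loader.setDRACOLoader(dracoLoader);
  return loader;
}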

View File

@@ -33,7 +33,7 @@
"notifications": { "notifications": {
"notify_election_created": "Es wurde eine neue Wahl ausgeschrieben.", "notify_election_created": "Es wurde eine neue Wahl ausgeschrieben.",
"production": { "production": {
"overproduction": "Überproduktion: Deine Produktion liegt {value} Einheiten über dem Bedarf{branch_info}." "overproduction": "Überproduktion: Deine Produktion liegt {value}% über dem Bedarf."
}, },
"transport": { "transport": {
"waiting": "Transport wartet" "waiting": "Transport wartet"

View File

@@ -19,7 +19,7 @@
"notifications": { "notifications": {
"notify_election_created": "A new election has been scheduled.", "notify_election_created": "A new election has been scheduled.",
"production": { "production": {
"overproduction": "Overproduction: your production is {value} units above demand{branch_info}." "overproduction": "Overproduction: your production is {value}% above demand."
}, },
"transport": { "transport": {
"waiting": "Transport waiting" "waiting": "Transport waiting"

View File

@@ -188,29 +188,18 @@ const store = createStore({
socketIoUrl = 'http://localhost:3001'; socketIoUrl = 'http://localhost:3001';
} }
// Direkte Verbindung zu Socket.io (ohne Apache-Proxy) // Normalisiere URL (Env-Variablen enthalten teils Pfade wie /api; Port kann absichtlich gesetzt sein, z.B. :4443)
// In Produktion: direkte Verbindung zu Port 4443 (verschlüsselt) try {
const hostname = window.location.hostname; if (socketIoUrl) {
const isProduction = hostname === 'www.your-part.de' || hostname.includes('your-part.de'); const parsed = new URL(socketIoUrl, window.location.origin);
// Falls /api oder ähnliche Pfade enthalten sind → auf Origin reduzieren (inkl. Port!)
if (isProduction) { socketIoUrl = parsed.origin;
// Produktion: direkte Verbindung zu Port 4443 (verschlüsselt)
const protocol = window.location.protocol === 'https:' ? 'https:' : 'http:';
socketIoUrl = `${protocol}//${hostname}:4443`;
} else {
// Lokale Entwicklung: direkte Backend-Verbindung
if (!socketIoUrl && (import.meta.env.DEV || hostname === 'localhost' || hostname === '127.0.0.1')) {
socketIoUrl = 'http://localhost:3001';
} else if (socketIoUrl) {
try {
const parsed = new URL(socketIoUrl, window.location.origin);
socketIoUrl = parsed.origin;
} catch (e) {
socketIoUrl = window.location.origin;
}
} else {
socketIoUrl = window.location.origin;
} }
} catch (e) {
// Wenn Parsing fehlschlägt: letzte Rettung ist der aktuelle Origin
try {
socketIoUrl = window.location.origin;
} catch (_) {}
} }
const socket = io(socketIoUrl, { const socket = io(socketIoUrl, {
@@ -295,13 +284,12 @@ const store = createStore({
// Wenn Umgebungsvariable nicht gesetzt ist oder leer, verwende Fallback-Logik // Wenn Umgebungsvariable nicht gesetzt ist oder leer, verwende Fallback-Logik
if (!daemonUrl || (typeof daemonUrl === 'string' && daemonUrl.trim() === '')) { if (!daemonUrl || (typeof daemonUrl === 'string' && daemonUrl.trim() === '')) {
// Immer direkte Verbindung zum Daemon-Port 4551 (verschlüsselt) // Fallback: direkte Verbindung zum Daemon-Port 4551 (ohne Apache-Proxy)
const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:'; const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
daemonUrl = `${protocol}//${hostname}:4551/`; daemonUrl = `${protocol}//${hostname}:4551/`;
console.log('[Daemon] Verwende direkte Verbindung zu Port 4551'); console.log('[Daemon] Verwende Fallback basierend auf Hostname, Protokoll und Port 4551');
} else { } else {
// Wenn Umgebungsvariable gesetzt ist, verwende sie direkt console.log('[Daemon] Verwende Umgebungsvariable');
console.log('[Daemon] Verwende Umgebungsvariable:', daemonUrl);
} }
console.log('[Daemon] Finale Daemon-URL:', daemonUrl); console.log('[Daemon] Finale Daemon-URL:', daemonUrl);
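The normalization added above reduces whatever is configured (possibly including a path such as /api) to its origin while preserving an explicit port. As a standalone sketch of that step, with the fallback behavior from the surrounding code:

// Sketch of the Socket.io URL normalization: keep scheme, host and explicit port, drop any path.
function normalizeSocketUrl(configuredUrl, fallbackOrigin) {
  try {
    if (configuredUrl) {
      return new URL(configuredUrl, fallbackOrigin).origin;
    }
  } catch (e) {
    // Parsing failed: fall through to the fallback origin.
  }
  return fallbackOrigin;
}

// normalizeSocketUrl('https://www.your-part.de:4443/api', 'https://www.your-part.de')
// → 'https://www.your-part.de:4443'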

View File

@@ -468,10 +468,14 @@ export default {
}, },
watch: { watch: {
activeTab(newVal) { activeTab(newVal, oldVal) {
if (newVal === 'taxes') { // Nur neu laden, wenn der Tab wirklich gewechselt wurde und ein Branch ausgewählt ist
this.loadBranchTaxes(); if (!this.selectedBranch || newVal === oldVal) return;
}
// Alle Tabs neu laden, wenn gewechselt wird
this.$nextTick(() => {
this.refreshActiveTab();
});
}, },
selectedBranch: { selectedBranch: {
handler(newBranch) { handler(newBranch) {
@@ -537,6 +541,33 @@ export default {
} }
}, },
refreshActiveTab() {
// Lade die Daten für den aktiven Tab neu
switch (this.activeTab) {
case 'director':
this.$refs.directorInfo?.refresh();
break;
case 'inventory':
this.$refs.saleSection?.loadInventory();
this.$refs.saleSection?.loadTransports();
break;
case 'production':
this.$refs.productionSection?.loadProductions();
this.$refs.productionSection?.loadStorage();
this.$refs.revenueSection?.refresh && this.$refs.revenueSection.refresh();
break;
case 'taxes':
this.loadBranchTaxes();
break;
case 'storage':
this.$refs.storageSection?.loadStorageData();
break;
case 'transport':
this.loadVehicles();
break;
}
},
async onBranchSelected(newBranch) { async onBranchSelected(newBranch) {
this.selectedBranch = newBranch; this.selectedBranch = newBranch;
// Branches neu laden, um das Wetter zu aktualisieren // Branches neu laden, um das Wetter zu aktualisieren
@@ -549,13 +580,8 @@ export default {
await this.loadVehicles(); await this.loadVehicles();
await this.loadProductPricesForCurrentBranch(); await this.loadProductPricesForCurrentBranch();
this.$nextTick(() => { this.$nextTick(() => {
this.$refs.directorInfo?.refresh(); // Alle Tabs neu laden
this.$refs.saleSection?.loadInventory(); this.refreshActiveTab();
this.$refs.saleSection?.loadTransports();
this.$refs.productionSection?.loadProductions();
this.$refs.productionSection?.loadStorage();
this.$refs.storageSection?.loadStorageData();
this.$refs.revenueSection?.refresh && this.$refs.revenueSection.refresh();
}); });
// load tax info for this branch // load tax info for this branch
@@ -572,49 +598,27 @@ export default {
return; return;
} }
if (!this.products || this.products.length === 0) { // Lade Preise für alle Produkte in der aktuellen Region
this.productPricesCache = {}; const prices = {};
return; for (const product of this.products) {
} try {
const { data } = await apiClient.get('/api/falukant/products/price-in-region', {
// OPTIMIERUNG: Lade alle Preise in einem Batch-Request params: {
try { productId: product.id,
const productIds = this.products.map(p => p.id).join(','); regionId: this.selectedBranch.regionId
const { data } = await apiClient.get('/api/falukant/products/prices-in-region-batch', { }
params: { });
productIds: productIds, prices[product.id] = data.price;
regionId: this.selectedBranch.regionId } catch (error) {
} console.error(`Error loading price for product ${product.id}:`, error);
}); // Fallback auf Standard-Berechnung
this.productPricesCache = data || {}; const knowledgeFactor = product.knowledges?.[0]?.knowledge || 0;
} catch (error) { const maxPrice = product.sellCost;
console.error('Error loading prices in batch:', error); const minPrice = maxPrice * 0.6;
// Fallback: Lade Preise einzeln (aber parallel) prices[product.id] = minPrice + (maxPrice - minPrice) * (knowledgeFactor / 100);
const pricePromises = this.products.map(async (product) => { }
try {
const { data } = await apiClient.get('/api/falukant/products/price-in-region', {
params: {
productId: product.id,
regionId: this.selectedBranch.regionId
}
});
return { productId: product.id, price: data.price };
} catch (err) {
console.error(`Error loading price for product ${product.id}:`, err);
// Fallback auf Standard-Berechnung
const knowledgeFactor = product.knowledges?.[0]?.knowledge || 0;
const maxPrice = product.sellCost;
const minPrice = maxPrice * 0.6;
return { productId: product.id, price: minPrice + (maxPrice - minPrice) * (knowledgeFactor / 100) };
}
});
const results = await Promise.all(pricePromises);
this.productPricesCache = {};
results.forEach(({ productId, price }) => {
this.productPricesCache[productId] = price;
});
} }
this.productPricesCache = prices;
}, },
formatPercent(value) { formatPercent(value) {
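One side of the hunk above loads all product prices in a single batch request and only falls back to per-product requests when that fails. Extracted from the diff (endpoint paths and the knowledge-based fallback formula are taken from it; apiClient is the axios instance the view already imports), the pattern looks like this:

// Sketch of batch-first price loading with a per-product fallback (endpoints as named in the diff).
async function loadProductPrices(apiClient, products, regionId) {
  try {
    const { data } = await apiClient.get('/api/falukant/products/prices-in-region-batch', {
      params: { productIds: products.map(p => p.id).join(','), regionId }
    });
    return data || {};
  } catch (batchError) {
    // Fallback: request each price in parallel and rebuild the same { productId: price } shape.
    const entries = await Promise.all(products.map(async (product) => {
      try {
        const { data } = await apiClient.get('/api/falukant/products/price-in-region', {
          params: { productId: product.id, regionId }
        });
        return [product.id, data.price];
      } catch (err) {
        const knowledge = product.knowledges?.[0]?.knowledge || 0;
        const maxPrice = product.sellCost;
        const minPrice = maxPrice * 0.6;
        return [product.id, minPrice + (maxPrice - minPrice) * (knowledge / 100)];
      }
    }));
    return Object.fromEntries(entries);
  }
}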

View File

@@ -274,16 +274,12 @@ export default {
getEffect(gift) { getEffect(gift) {
const relationship = this.relationships[0]; const relationship = this.relationships[0];
if (!relationship || !relationship.character2) {
return 0;
}
const partner = relationship.character2; const partner = relationship.character2;
const currentMoodId = partner.moodId; const currentMoodId = partner.mood?.id ?? partner.mood_id;
const moodEntry = gift.moodsAffects.find(ma => ma.mood_id === currentMoodId); const moodEntry = gift.moodsAffects.find(ma => ma.mood_id === currentMoodId);
const moodValue = moodEntry ? moodEntry.suitability : 0; const moodValue = moodEntry ? moodEntry.suitability : 0;
let highestCharacterValue = 0; let highestCharacterValue = 0;
// traits ist ein Array von Trait-Objekten mit id und tr for (const trait of partner.characterTrait) {
for (const trait of partner.traits || []) {
const charEntry = gift.charactersAffects.find(ca => ca.trait_id === trait.id); const charEntry = gift.charactersAffects.find(ca => ca.trait_id === trait.id);
if (charEntry && charEntry.suitability > highestCharacterValue) { if (charEntry && charEntry.suitability > highestCharacterValue) {
highestCharacterValue = charEntry.suitability; highestCharacterValue = charEntry.suitability;
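The effect value combines a mood-based suitability with the partner's best trait-based suitability. The hunk ends before the two are combined, so the sketch below only returns both parts; field names follow the diff, and the mood id lookup covers the variants that appear in the two columns.

// Sketch of the gift effect inputs: mood suitability plus the partner's best matching trait suitability.
function getGiftEffectParts(gift, partner) {
  if (!partner) return { moodValue: 0, highestTraitValue: 0 };
  const currentMoodId = partner.mood?.id ?? partner.mood_id ?? partner.moodId;
  const moodEntry = gift.moodsAffects.find(ma => ma.mood_id === currentMoodId);
  const moodValue = moodEntry ? moodEntry.suitability : 0;
  let highestTraitValue = 0;
  for (const trait of partner.traits || partner.characterTrait || []) {
    const charEntry = gift.charactersAffects.find(ca => ca.trait_id === trait.id);
    if (charEntry && charEntry.suitability > highestTraitValue) {
      highestTraitValue = charEntry.suitability;
    }
  }
  // How the two values are combined happens outside the visible hunk.
  return { moodValue, highestTraitValue };
}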

View File

@@ -116,15 +116,26 @@
</table> </table>
</div> </div>
</div> </div>
<div v-if="falukantUser?.character" class="imagecontainer"> <div v-if="falukantUser?.character" class="overview-visualization">
<div :style="getAvatarStyle" class="avatar"></div> <div class="character-3d-container">
<div :style="getHouseStyle" class="house"></div> <CharacterModel3D
:gender="falukantUser.character.gender"
:age="falukantUser.character.age"
:autoRotate="true"
:rotationSpeed="0.5"
/>
</div>
<div class="imagecontainer">
<div :style="getAvatarStyle" class="avatar"></div>
<div :style="getHouseStyle" class="house"></div>
</div>
</div> </div>
</div> </div>
</template> </template>
<script> <script>
import StatusBar from '@/components/falukant/StatusBar.vue'; import StatusBar from '@/components/falukant/StatusBar.vue';
import CharacterModel3D from '@/components/falukant/CharacterModel3D.vue';
import apiClient from '@/utils/axios.js'; import apiClient from '@/utils/axios.js';
import { mapState } from 'vuex'; import { mapState } from 'vuex';
@@ -169,6 +180,7 @@ export default {
name: 'FalukantOverviewView', name: 'FalukantOverviewView',
components: { components: {
StatusBar, StatusBar,
CharacterModel3D,
}, },
data() { data() {
return { return {
@@ -481,4 +493,27 @@ h2 {
padding: 20px; padding: 20px;
color: #666; color: #666;
} }
.overview-visualization {
display: flex;
gap: 20px;
margin-top: 20px;
flex-wrap: wrap;
}
.character-3d-container {
flex: 1;
min-width: 300px;
max-width: 500px;
height: 400px;
border: 1px solid #ddd;
border-radius: 8px;
overflow: hidden;
background: #f9f9f9;
}
.imagecontainer {
flex: 1;
min-width: 300px;
}
</style> </style>

View File

@@ -50,5 +50,13 @@ export default defineConfig(({ mode }) => {
assert: 'assert', assert: 'assert',
} }
}, },
server: {
fs: {
// Erlaube Zugriff auf Dateien außerhalb des Projektverzeichnisses
strict: false
}
},
// Stelle sicher, dass GLB/GLTF-Dateien als Assets behandelt werden
assetsInclude: ['**/*.glb', '**/*.gltf']
}; };
}); });
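Taken together, the two additions relax the dev server's file-system restriction and register the binary model formats as assets. In isolation, the added fragment of the returned config object is roughly:

// Sketch of the added Vite options in isolation; the real config also defines aliases and other settings.
export default {
  server: {
    fs: {
      // Allow the dev server to serve files from outside the project root.
      strict: false
    }
  },
  // Treat GLB/GLTF model files as static assets so their URLs resolve correctly.
  assetsInclude: ['**/*.glb', '**/*.gltf']
};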

View File

@@ -1,257 +0,0 @@
#!/bin/bash
# YourPart Daemon Dependencies Installation Script für OpenSUSE Tumbleweed
# Optimiert für OpenSUSE Tumbleweed mit GCC 13
set -euo pipefail
# Farben für Output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
log_info "Installiere Dependencies für YourPart Daemon auf OpenSUSE Tumbleweed..."
# Prüfe OpenSUSE Version
if ! grep -q "openSUSE Tumbleweed" /etc/os-release; then
log_warning "Dieses Script ist für OpenSUSE Tumbleweed optimiert. Andere Versionen könnten Probleme haben."
fi
# Update Package Lists
log_info "Aktualisiere Paketlisten..."
sudo zypper refresh
# Installiere Build-Tools
log_info "Installiere Build-Tools..."
sudo zypper install -y \
gcc \
gcc-c++ \
cmake \
pkg-config \
git \
curl \
wget \
patterns-devel-C-C++-devel_C_C++
# Installiere GCC 13 (falls verfügbar)
log_info "Prüfe verfügbare GCC Versionen..."
if zypper search gcc13 2>/dev/null | grep -q "gcc13"; then
log_info "Installiere GCC 13..."
sudo zypper install -y gcc13 gcc13-c++
# Setze GCC 13 als Standard
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 100
sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-13 100
log_success "GCC 13 erfolgreich installiert und als Standard gesetzt"
else
log_info "GCC 13 nicht verfügbar, verwende Standard-GCC"
# Prüfe verfügbare GCC Versionen
log_info "Verfügbare GCC Versionen:"
gcc --version | head -1
g++ --version | head -1
fi
# Installiere PostgreSQL Development Libraries
log_info "Installiere PostgreSQL Development Libraries..."
# Prüfe welche PostgreSQL Version bereits installiert ist
if rpm -q postgresql16-devel >/dev/null 2>&1; then
log_info "PostgreSQL 16 Development Libraries bereits installiert"
sudo zypper install -y libpqxx-devel
elif zypper search postgresql16-devel 2>/dev/null | grep -q "postgresql16-devel"; then
log_info "Installiere PostgreSQL 16 Development Libraries..."
sudo zypper install -y \
postgresql16-devel \
libpqxx-devel
elif zypper search postgresql15-devel 2>/dev/null | grep -q "postgresql15-devel"; then
log_info "Verwende PostgreSQL 15..."
sudo zypper install -y \
postgresql15-devel \
libpqxx-devel \
postgresql15-server
else
log_info "Verwende PostgreSQL 14..."
sudo zypper install -y \
postgresql14-devel \
libpqxx-devel \
postgresql14-server
fi
# Installiere libwebsockets
log_info "Installiere libwebsockets..."
# Prüfe ob libwebsockets bereits installiert ist
if rpm -q libwebsockets-devel >/dev/null 2>&1; then
log_info "libwebsockets-devel bereits installiert"
else
sudo zypper install -y libwebsockets-devel
fi
# SSL und Zlib Development Libraries (nur wenn nicht bereits installiert)
if rpm -q libressl-devel >/dev/null 2>&1; then
log_info "LibreSSL Development Libraries bereits installiert"
elif ! rpm -q libopenssl-3-devel >/dev/null 2>&1; then
log_info "Installiere OpenSSL Development Libraries..."
sudo zypper install -y libopenssl-3-devel
fi
if rpm -q zlib-ng-compat-devel >/dev/null 2>&1; then
log_info "Zlib Development Libraries bereits installiert"
elif ! rpm -q zlib-devel >/dev/null 2>&1; then
log_info "Installiere Zlib Development Libraries..."
sudo zypper install -y zlib-ng-compat-devel
fi
# Installiere nlohmann-json
log_info "Installiere nlohmann-json..."
sudo zypper install -y nlohmann_json-devel
# Installiere systemd development libraries
log_info "Installiere systemd development libraries..."
sudo zypper install -y systemd-devel
# Installiere PostgreSQL Server (falls nicht vorhanden)
log_info "Prüfe PostgreSQL Installation..."
if ! systemctl is-active --quiet postgresql; then
log_info "Installiere und starte PostgreSQL Server..."
# Verwende die gleiche Version wie die Development Libraries
if zypper search postgresql16-server 2>/dev/null | grep -q "postgresql16-server"; then
sudo zypper install -y postgresql16-server
elif zypper search postgresql15-server 2>/dev/null | grep -q "postgresql15-server"; then
sudo zypper install -y postgresql15-server
else
sudo zypper install -y postgresql14-server
fi
# Starte PostgreSQL
sudo systemctl start postgresql
sudo systemctl enable postgresql
log_success "PostgreSQL installiert und gestartet"
else
log_success "PostgreSQL läuft bereits"
fi
# Erstelle Datenbank und Benutzer
log_info "Konfiguriere PostgreSQL..."
sudo -u postgres psql << EOF
-- Erstelle Benutzer falls nicht vorhanden
DO \$\$
BEGIN
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'yourpart') THEN
CREATE USER yourpart WITH PASSWORD 'hitomisan';
END IF;
END
\$\$;
-- Erstelle Datenbank falls nicht vorhanden
SELECT 'CREATE DATABASE yp3 OWNER yourpart'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'yp3')\gexec
-- Setze Berechtigungen
GRANT ALL PRIVILEGES ON DATABASE yp3 TO yourpart;
\q
EOF
log_success "PostgreSQL konfiguriert"
# Installiere systemd (sollte bereits vorhanden sein)
log_info "Prüfe systemd..."
if ! command -v systemctl &> /dev/null; then
log_error "systemd ist nicht installiert. Bitte installieren Sie OpenSUSE Tumbleweed."
exit 1
fi
log_success "systemd verfügbar"
# Installiere zusätzliche Tools für Monitoring
log_info "Installiere Monitoring-Tools..."
sudo zypper install -y \
htop \
iotop \
net-tools \
lsof
# Konfiguriere Firewall (falls firewalld installiert ist)
if command -v firewall-cmd &> /dev/null; then
log_info "Konfiguriere Firewall..."
sudo firewall-cmd --permanent --add-port=4551/tcp
sudo firewall-cmd --permanent --add-port=22/tcp
sudo firewall-cmd --reload
log_success "Firewall konfiguriert"
fi
# Erstelle Log-Verzeichnis
log_info "Erstelle Log-Verzeichnisse..."
sudo mkdir -p /var/log/yourpart
sudo chmod 755 /var/log/yourpart
# Teste Compiler-Konfiguration
log_info "Teste Compiler-Konfiguration..."
cat > /tmp/test_compile.cpp << 'EOF'
#include <iostream>
#include <string>
#include <vector>
#include <memory>
#include <string_view>
int main() {
std::cout << "C++23 Test erfolgreich!" << std::endl;
// Test C++23 Features
auto lambda = [](auto x) { return x * 2; };
std::vector<std::unique_ptr<int>> vec;
std::string_view sv = "test";
return 0;
}
EOF
if g++ -std=c++23 -o /tmp/test_compile /tmp/test_compile.cpp; then
log_success "C++23 Compilation erfolgreich"
rm -f /tmp/test_compile /tmp/test_compile.cpp
elif g++ -std=c++20 -o /tmp/test_compile /tmp/test_compile.cpp; then
log_success "C++20 Compilation erfolgreich"
rm -f /tmp/test_compile /tmp/test_compile.cpp
else
log_warning "C++23/20 Compilation fehlgeschlagen, verwende C++17"
rm -f /tmp/test_compile /tmp/test_compile.cpp
fi
log_success "Alle Dependencies erfolgreich installiert!"
log_info ""
log_info "Nächste Schritte:"
log_info "1. Führen Sie das deploy.sh Script von Ihrem Entwicklungsrechner aus"
log_info "2. Oder kopieren Sie die Binärdatei manuell und konfigurieren Sie den Service"
log_info ""
log_info "Verfügbare Services:"
log_info "- PostgreSQL: systemctl status postgresql"
log_info "- Firewall: firewall-cmd --list-all"
log_info ""
log_info "Datenbankverbindung:"
log_info "- Host: localhost"
log_info "- Port: 5432"
log_info "- Database: yp3"
log_info "- User: yourpart"
log_info "- Password: hitomisan"
log_info ""
log_info "Compiler-Info:"
log_info "- GCC Version: $(gcc --version | head -1)"
log_info "- G++ Version: $(g++ --version | head -1)"

View File

@@ -1,231 +0,0 @@
#!/bin/bash
# YourPart Daemon Dependencies Installation Script für Ubuntu 22
# Optimiert für Ubuntu 22.04 LTS mit verfügbaren Paketen
set -euo pipefail
# Farben für Output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
log_info "Installiere Dependencies für YourPart Daemon auf Ubuntu 22.04 LTS..."
# Prüfe Ubuntu Version
if ! grep -q "22.04" /etc/os-release; then
log_warning "Dieses Script ist für Ubuntu 22.04 optimiert. Andere Versionen könnten Probleme haben."
fi
# Update Package Lists
log_info "Aktualisiere Paketlisten..."
apt update
# Installiere Build-Tools
log_info "Installiere Build-Tools..."
apt install -y \
build-essential \
cmake \
pkg-config \
git \
curl \
wget \
software-properties-common
# Installiere GCC 13 für C++23 Support
log_info "Installiere GCC 13 für C++23 Support..."
apt install -y software-properties-common
# Füge Ubuntu Toolchain PPA hinzu
add-apt-repository -y ppa:ubuntu-toolchain-r/test
apt update
# Installiere GCC 13 (bessere C++23 Unterstützung als GCC 11)
if apt install -y gcc-13 g++-13; then
log_success "GCC 13 erfolgreich installiert"
# Setze GCC 13 als Standard
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 100
update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-13 100
log_info "GCC 13 ist jetzt der Standard-Compiler"
elif apt install -y gcc-15 g++-15; then
log_success "GCC 15 erfolgreich installiert"
# Setze GCC 15 als Standard
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-15 100
update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-15 100
log_info "GCC 15 ist jetzt der Standard-Compiler"
else
log_warning "GCC 13/15 nicht verfügbar, verwende GCC 11"
apt install -y gcc g++
fi
# Prüfe Compiler-Versionen
log_info "Verfügbare Compiler-Versionen:"
gcc --version | head -1
g++ --version | head -1
# Installiere PostgreSQL Repository
log_info "Füge PostgreSQL Repository hinzu..."
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
echo "deb http://apt.postgresql.org/pub/repos/apt/ jammy-pgdg main" > /etc/apt/sources.list.d/pgdg.list
apt update
# Installiere PostgreSQL Development Libraries
log_info "Installiere PostgreSQL Development Libraries..."
apt install -y \
postgresql-client-14 \
postgresql-server-dev-14 \
libpq-dev \
libpqxx-dev
# Installiere libwebsockets
log_info "Installiere libwebsockets..."
apt install -y \
libwebsockets-dev \
libssl-dev \
libz-dev
# Installiere nlohmann-json
log_info "Installiere nlohmann-json..."
apt install -y nlohmann-json3-dev
# Installiere systemd development libraries
log_info "Installiere systemd development libraries..."
apt install -y libsystemd-dev
# Installiere PostgreSQL Server (falls nicht vorhanden)
log_info "Prüfe PostgreSQL Installation..."
if ! systemctl is-active --quiet postgresql; then
log_info "Installiere PostgreSQL Server..."
apt install -y postgresql-14 postgresql-contrib-14
# Starte PostgreSQL
systemctl start postgresql
systemctl enable postgresql
log_success "PostgreSQL installiert und gestartet"
else
log_success "PostgreSQL läuft bereits"
fi
# Erstelle Datenbank und Benutzer
log_info "Konfiguriere PostgreSQL..."
sudo -u postgres psql << EOF
-- Erstelle Benutzer falls nicht vorhanden
DO \$\$
BEGIN
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'yourpart') THEN
CREATE USER yourpart WITH PASSWORD 'hitomisan';
END IF;
END
\$\$;
-- Erstelle Datenbank falls nicht vorhanden
SELECT 'CREATE DATABASE yp3 OWNER yourpart'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'yp3')\gexec
-- Setze Berechtigungen
GRANT ALL PRIVILEGES ON DATABASE yp3 TO yourpart;
\q
EOF
log_success "PostgreSQL konfiguriert"
# Installiere systemd (sollte bereits vorhanden sein)
log_info "Prüfe systemd..."
if ! command -v systemctl &> /dev/null; then
log_error "systemd ist nicht installiert. Bitte installieren Sie Ubuntu 22.04 LTS."
exit 1
fi
log_success "systemd verfügbar"
# Installiere zusätzliche Tools für Monitoring
log_info "Installiere Monitoring-Tools..."
apt install -y \
htop \
iotop \
net-tools \
lsof
# Konfiguriere Firewall (falls ufw installiert ist)
if command -v ufw &> /dev/null; then
log_info "Konfiguriere Firewall..."
ufw allow 4551/tcp comment "YourPart Daemon WebSocket"
ufw allow 22/tcp comment "SSH"
log_success "Firewall konfiguriert"
fi
# Erstelle Log-Verzeichnis
log_info "Erstelle Log-Verzeichnisse..."
mkdir -p /var/log/yourpart
chmod 755 /var/log/yourpart
# Teste Compiler-Konfiguration
log_info "Teste Compiler-Konfiguration..."
cat > /tmp/test_compile.cpp << 'EOF'
#include <iostream>
#include <string>
#include <vector>
#include <memory>
int main() {
std::cout << "C++20 Test erfolgreich!" << std::endl;
// Test C++20 Features
auto lambda = [](auto x) { return x * 2; };
std::vector<std::unique_ptr<int>> vec;
return 0;
}
EOF
if g++ -std=c++20 -o /tmp/test_compile /tmp/test_compile.cpp; then
log_success "C++20 Compilation erfolgreich"
rm -f /tmp/test_compile /tmp/test_compile.cpp
else
log_warning "C++20 Compilation fehlgeschlagen, verwende C++17"
rm -f /tmp/test_compile /tmp/test_compile.cpp
fi
log_success "Alle Dependencies erfolgreich installiert!"
log_info ""
log_info "Nächste Schritte:"
log_info "1. Führen Sie das deploy.sh Script von Ihrem Entwicklungsrechner aus"
log_info "2. Oder kopieren Sie die Binärdatei manuell und konfigurieren Sie den Service"
log_info ""
log_info "Verfügbare Services:"
log_info "- PostgreSQL: systemctl status postgresql"
log_info "- Firewall: ufw status"
log_info ""
log_info "Datenbankverbindung:"
log_info "- Host: localhost"
log_info "- Port: 5432"
log_info "- Database: yp3"
log_info "- User: yourpart"
log_info "- Password: hitomisan"
log_info ""
log_info "Compiler-Info:"
log_info "- GCC Version: $(gcc --version | head -1)"
log_info "- G++ Version: $(g++ --version | head -1)"

View File

@@ -1,157 +0,0 @@
#!/bin/bash
# YourPart Daemon Dependencies Installation Script für Ubuntu 22
# Führen Sie dieses Script auf dem Server aus, bevor Sie das Deployment durchführen
set -euo pipefail
# Farben für Output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
log_info "Installiere Dependencies für YourPart Daemon auf Ubuntu 22..."
# Update Package Lists
log_info "Aktualisiere Paketlisten..."
apt update
# Installiere Build-Tools
log_info "Installiere Build-Tools..."
apt install -y \
build-essential \
cmake \
pkg-config \
git \
curl \
wget
# Installiere C++ Compiler (Ubuntu 22 hat GCC 11, das reicht aus)
log_info "Installiere GCC 11 (Standard für Ubuntu 22)..."
apt install -y gcc g++
# Prüfe verfügbare GCC Versionen
log_info "Verfügbare GCC Versionen:"
gcc --version | head -1
g++ --version | head -1
# Installiere PostgreSQL Development Libraries
log_info "Installiere PostgreSQL Development Libraries..."
apt install -y \
postgresql-server-dev-14 \
libpq-dev \
libpqxx-dev
# Installiere libwebsockets
log_info "Installiere libwebsockets..."
apt install -y \
libwebsockets-dev \
libssl-dev \
libz-dev
# Installiere nlohmann-json
log_info "Installiere nlohmann-json..."
apt install -y nlohmann-json3-dev
# Installiere PostgreSQL Server (falls nicht vorhanden)
log_info "Prüfe PostgreSQL Installation..."
if ! systemctl is-active --quiet postgresql; then
log_info "Installiere PostgreSQL Server..."
apt install -y postgresql postgresql-contrib
# Starte PostgreSQL
systemctl start postgresql
systemctl enable postgresql
log_success "PostgreSQL installiert und gestartet"
else
log_success "PostgreSQL läuft bereits"
fi
# Erstelle Datenbank und Benutzer
log_info "Konfiguriere PostgreSQL..."
sudo -u postgres psql << EOF
-- Erstelle Benutzer falls nicht vorhanden
DO \$\$
BEGIN
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'yourpart') THEN
CREATE USER yourpart WITH PASSWORD 'hitomisan';
END IF;
END
\$\$;
-- Erstelle Datenbank falls nicht vorhanden
SELECT 'CREATE DATABASE yp3 OWNER yourpart'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'yp3')\gexec
-- Setze Berechtigungen
GRANT ALL PRIVILEGES ON DATABASE yp3 TO yourpart;
\q
EOF
log_success "PostgreSQL konfiguriert"
# Installiere systemd (sollte bereits vorhanden sein)
log_info "Prüfe systemd..."
if ! command -v systemctl &> /dev/null; then
log_error "systemd ist nicht installiert. Bitte installieren Sie Ubuntu 22 LTS."
exit 1
fi
log_success "systemd verfügbar"
# Installiere zusätzliche Tools für Monitoring
log_info "Installiere Monitoring-Tools..."
apt install -y \
htop \
iotop \
netstat-nat \
lsof
# Konfiguriere Firewall (falls ufw installiert ist)
if command -v ufw &> /dev/null; then
log_info "Konfiguriere Firewall..."
ufw allow 4551/tcp comment "YourPart Daemon WebSocket"
ufw allow 22/tcp comment "SSH"
log_success "Firewall konfiguriert"
fi
# Erstelle Log-Verzeichnis
log_info "Erstelle Log-Verzeichnisse..."
mkdir -p /var/log/yourpart
chmod 755 /var/log/yourpart
log_success "Alle Dependencies erfolgreich installiert!"
log_info ""
log_info "Nächste Schritte:"
log_info "1. Führen Sie das deploy.sh Script von Ihrem Entwicklungsrechner aus"
log_info "2. Oder kopieren Sie die Binärdatei manuell und konfigurieren Sie den Service"
log_info ""
log_info "Verfügbare Services:"
log_info "- PostgreSQL: systemctl status postgresql"
log_info "- Firewall: ufw status"
log_info ""
log_info "Datenbankverbindung:"
log_info "- Host: localhost"
log_info "- Port: 5432"
log_info "- Database: yp3"
log_info "- User: yourpart"
log_info "- Password: hitomisan"

View File

@@ -1,89 +0,0 @@
#!/bin/bash
# GCC 15 Installation für Ubuntu 22.04
# Verwendet verschiedene Quellen um GCC 15 zu bekommen
set -euo pipefail
# Farben
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
log_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "Installiere GCC 15 für Ubuntu 22.04..."
# Option 1: Ubuntu Toolchain PPA (manchmal verfügbar)
log_info "Versuche Ubuntu Toolchain PPA..."
apt update
apt install -y software-properties-common
# Füge verschiedene PPAs hinzu
add-apt-repository -y ppa:ubuntu-toolchain-r/test 2>/dev/null || log_warning "PPA bereits hinzugefügt oder nicht verfügbar"
add-apt-repository -y ppa:ubuntu-toolchain-r/ppa 2>/dev/null || log_warning "PPA bereits hinzugefügt oder nicht verfügbar"
apt update
# Versuche GCC 15 zu installieren
if apt install -y gcc-15 g++-15 2>/dev/null; then
log_success "GCC 15 erfolgreich über PPA installiert"
GCC15_AVAILABLE=true
else
log_warning "GCC 15 nicht über PPA verfügbar"
GCC15_AVAILABLE=false
fi
# Option 2: Snap (falls PPA nicht funktioniert)
if [ "$GCC15_AVAILABLE" = false ]; then
log_info "Versuche GCC 15 über Snap..."
if command -v snap &> /dev/null; then
if snap install gcc --classic 2>/dev/null; then
log_success "GCC über Snap installiert"
# Prüfe Version
SNAP_GCC_VERSION=$(snap run gcc --version | head -1)
log_info "Snap GCC Version: $SNAP_GCC_VERSION"
else
log_warning "GCC über Snap nicht verfügbar"
fi
else
log_info "Snap nicht installiert, installiere es..."
apt install -y snapd
if snap install gcc --classic 2>/dev/null; then
log_success "GCC über Snap installiert"
else
log_warning "GCC über Snap nicht verfügbar"
fi
fi
fi
# Option 3: Compile from Source (letzte Option)
if [ "$GCC15_AVAILABLE" = false ]; then
log_info "GCC 15 nicht verfügbar. Verwende GCC 11 (Standard für Ubuntu 22.04)"
apt install -y gcc g++
log_info "Verfügbare Compiler:"
gcc --version | head -1
g++ --version | head -1
log_warning "Der Code verwendet nur C++17 Features, daher ist GCC 11 ausreichend"
log_info "Falls Sie trotzdem GCC 15 brauchen, können Sie es aus dem Quellcode kompilieren"
fi
# Setze GCC 15 als Standard (falls verfügbar)
if [ "$GCC15_AVAILABLE" = true ]; then
log_info "Setze GCC 15 als Standard..."
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-15 100
update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-15 100
log_success "GCC 15 ist jetzt der Standard-Compiler"
gcc --version | head -1
g++ --version | head -1
fi
log_success "Compiler-Installation abgeschlossen!"

Binary file not shown.

View File

@@ -1,236 +0,0 @@
#!/bin/bash
# SSL/TLS Setup Script für YourPart Daemon mit DNS-01 Challenge
# Für Domains mit DNS-Provider wie Cloudflare, Route53, etc.
set -e
# Farben für Logging
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
CERT_DIR="/etc/yourpart"
CERT_FILE="$CERT_DIR/server.crt"
KEY_FILE="$CERT_DIR/server.key"
LETSENCRYPT_CERT="/etc/letsencrypt/live/your-part.de/fullchain.pem"
LETSENCRYPT_KEY="/etc/letsencrypt/live/your-part.de/privkey.pem"
log_info "SSL/TLS Setup mit DNS-01 Challenge für YourPart Daemon"
# Prüfe ob certbot installiert ist
if ! command -v certbot &> /dev/null; then
log_error "Certbot ist nicht installiert!"
log_info "Installiere Certbot..."
if command -v apt &> /dev/null; then
sudo apt update
sudo apt install -y certbot
elif command -v zypper &> /dev/null; then
sudo zypper install -y certbot
else
log_error "Paketmanager nicht unterstützt. Installiere Certbot manuell."
exit 1
fi
fi
# DNS Provider Auswahl
echo ""
log_info "Wählen Sie Ihren DNS-Provider:"
echo "1) Cloudflare"
echo "2) Route53 (AWS)"
echo "3) Manual (manuelle DNS-Einträge)"
echo ""
read -p "Ihre Wahl (1-3): " -n 1 -r
echo ""
case $REPLY in
1)
DNS_PROVIDER="cloudflare"
;;
2)
DNS_PROVIDER="route53"
;;
3)
DNS_PROVIDER="manual"
;;
*)
log_error "Ungültige Auswahl!"
exit 1
;;
esac
# Erstelle Zertifikats-Verzeichnis
if [ ! -d "$CERT_DIR" ]; then
log_info "Erstelle Zertifikats-Verzeichnis: $CERT_DIR"
sudo mkdir -p "$CERT_DIR"
fi
# DNS-Provider spezifische Konfiguration
case $DNS_PROVIDER in
"cloudflare")
log_info "Cloudflare DNS-01 Challenge Setup"
# Prüfe ob Cloudflare Plugin installiert ist
if ! certbot plugins | grep -q cloudflare; then
log_info "Installiere Cloudflare Plugin..."
if command -v apt &> /dev/null; then
sudo apt install -y python3-certbot-dns-cloudflare
elif command -v zypper &> /dev/null; then
sudo zypper install -y python3-certbot-dns-cloudflare
else
log_error "Cloudflare Plugin nicht verfügbar. Installiere manuell."
exit 1
fi
fi
# Erstelle Cloudflare Credentials Datei
log_info "Erstelle Cloudflare Credentials..."
read -p "Cloudflare API Token: " -s CF_TOKEN
echo
CF_CREDENTIALS_FILE="/etc/yourpart/cloudflare.ini"
sudo tee "$CF_CREDENTIALS_FILE" > /dev/null << EOF
dns_cloudflare_api_token = $CF_TOKEN
EOF
sudo chmod 600 "$CF_CREDENTIALS_FILE"
# Erstelle Zertifikat
log_info "Erstelle Let's Encrypt Zertifikat mit Cloudflare DNS-01 Challenge..."
sudo certbot certonly \
--dns-cloudflare \
--dns-cloudflare-credentials "$CF_CREDENTIALS_FILE" \
-d your-part.de \
--non-interactive \
--agree-tos \
--email admin@your-part.de
;;
"route53")
log_info "Route53 DNS-01 Challenge Setup"
# Prüfe ob Route53 Plugin installiert ist
if ! certbot plugins | grep -q route53; then
log_info "Installiere Route53 Plugin..."
if command -v apt &> /dev/null; then
sudo apt install -y python3-certbot-dns-route53
elif command -v zypper &> /dev/null; then
sudo zypper install -y python3-certbot-dns-route53
else
log_error "Route53 Plugin nicht verfügbar. Installiere manuell."
exit 1
fi
fi
# Erstelle Zertifikat
log_info "Erstelle Let's Encrypt Zertifikat mit Route53 DNS-01 Challenge..."
log_warning "Stelle sicher, dass AWS-Credentials konfiguriert sind!"
sudo certbot certonly \
--dns-route53 \
-d your-part.de \
--non-interactive \
--agree-tos \
--email admin@your-part.de
;;
"manual")
log_info "Manuelle DNS-01 Challenge"
log_warning "Sie müssen die DNS-TXT-Einträge manuell erstellen!"
# Erstelle Zertifikat mit manueller Bestätigung
sudo certbot certonly \
--manual \
--preferred-challenges dns \
-d your-part.de \
--agree-tos \
--email admin@your-part.de
;;
esac
if [ $? -eq 0 ]; then
log_success "Let's Encrypt Zertifikat erfolgreich erstellt!"
# Erstelle Symlinks zu den Zertifikaten
sudo ln -sf "$LETSENCRYPT_CERT" "$CERT_FILE"
sudo ln -sf "$LETSENCRYPT_KEY" "$KEY_FILE"
# Setze korrekte Berechtigungen
sudo chown yourpart:yourpart "$CERT_FILE" "$KEY_FILE"
sudo chmod 644 "$CERT_FILE"
sudo chmod 600 "$KEY_FILE"
# Zeige Zertifikats-Informationen
log_info "Let's Encrypt Zertifikats-Informationen:"
openssl x509 -in "$CERT_FILE" -text -noout | grep -E "(Subject:|Not Before|Not After|DNS:)"
# Erstelle Auto-Renewal Script
log_info "Richte automatische Zertifikats-Erneuerung ein..."
RENEWAL_SCRIPT="/etc/yourpart/renew-ssl-dns.sh"
sudo tee "$RENEWAL_SCRIPT" > /dev/null << EOF
#!/bin/bash
# Automatische SSL-Zertifikats-Erneuerung für YourPart Daemon (DNS-01)
CERT_DIR="/etc/yourpart"
LETSENCRYPT_CERT="/etc/letsencrypt/live/your-part.de/fullchain.pem"
LETSENCRYPT_KEY="/etc/letsencrypt/live/your-part.de/privkey.pem"
# Erneuere Zertifikat
certbot renew --quiet
if [ \$? -eq 0 ]; then
# Aktualisiere Symlinks
ln -sf "\$LETSENCRYPT_CERT" "\$CERT_DIR/server.crt"
ln -sf "\$LETSENCRYPT_KEY" "\$CERT_DIR/server.key"
# Setze Berechtigungen
chown yourpart:yourpart "\$CERT_DIR/server.crt" "\$CERT_DIR/server.key"
chmod 644 "\$CERT_DIR/server.crt"
chmod 600 "\$CERT_DIR/server.key"
# Starte Daemon neu
systemctl reload yourpart-daemon
echo "\$(date): SSL-Zertifikat erfolgreich erneuert" >> /var/log/yourpart/ssl-renewal.log
fi
EOF
sudo chmod +x "$RENEWAL_SCRIPT"
# Füge Cron Job hinzu (täglich um 2:30 Uhr)
(sudo crontab -l 2>/dev/null; echo "30 2 * * * $RENEWAL_SCRIPT") | sudo crontab -
log_success "Automatische Erneuerung eingerichtet (täglich um 2:30 Uhr)"
log_info ""
log_info "Nächste Schritte:"
log_info "1. Aktiviere SSL in der Konfiguration:"
log_info " WEBSOCKET_SSL_ENABLED=true"
log_info "2. Starte den Daemon neu:"
log_info " sudo systemctl restart yourpart-daemon"
log_info "3. Verbinde dich mit:"
log_info " wss://your-part.de:4551"
log_info ""
log_success "Let's Encrypt Zertifikat ist produktionsbereit!"
else
log_error "Let's Encrypt Zertifikat konnte nicht erstellt werden!"
exit 1
fi

View File

@@ -1,432 +0,0 @@
#!/bin/bash
# SSL/TLS Setup Script für YourPart Daemon
# Erstellt oder verwaltet SSL-Zertifikate für WebSocket Secure (WSS)
# Unterstützt Self-Signed Certificates und Let's Encrypt
set -e
# Farben für Logging
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
CERT_DIR="/etc/yourpart"
CERT_FILE="$CERT_DIR/server.crt"
KEY_FILE="$CERT_DIR/server.key"
CSR_FILE="$CERT_DIR/server.csr"
# Let's Encrypt Verzeichnisse
LETSENCRYPT_DIR="/etc/letsencrypt/live"
LETSENCRYPT_CERT="$LETSENCRYPT_DIR/your-part.de/fullchain.pem"
LETSENCRYPT_KEY="$LETSENCRYPT_DIR/your-part.de/privkey.pem"
# Apache2 Zertifikate (Ubuntu/Debian)
APACHE2_CERT="/etc/ssl/certs/ssl-cert-snakeoil.pem"
APACHE2_KEY="/etc/ssl/private/ssl-cert-snakeoil.key"
# Apache2 Let's Encrypt Zertifikate
APACHE2_LE_CERT="/etc/letsencrypt/live/your-part.de/fullchain.pem"
APACHE2_LE_KEY="/etc/letsencrypt/live/your-part.de/privkey.pem"
# Prüfe ob OpenSSL installiert ist
if ! command -v openssl &> /dev/null; then
log_error "OpenSSL ist nicht installiert!"
exit 1
fi
# Prüfe ob wir sudo-Rechte haben
if ! sudo -n true 2>/dev/null; then
log_info "Einige Operationen benötigen sudo-Rechte für SSL-Verzeichnisse..."
fi
# Funktionen
setup_letsencrypt() {
log_info "Let's Encrypt Setup für your-part.de"
# Prüfe ob certbot installiert ist
if ! command -v certbot &> /dev/null; then
log_error "Certbot ist nicht installiert!"
log_info "Installiere Certbot..."
if command -v apt &> /dev/null; then
sudo apt update
sudo apt install -y certbot
elif command -v zypper &> /dev/null; then
sudo zypper install -y certbot
else
log_error "Paketmanager nicht unterstützt. Installiere Certbot manuell."
exit 1
fi
fi
# Prüfe ob Let's Encrypt Zertifikate bereits existieren
if [ -f "$LETSENCRYPT_CERT" ] && [ -f "$LETSENCRYPT_KEY" ]; then
log_info "Let's Encrypt Zertifikate existieren bereits"
# Prüfe Gültigkeit
if openssl x509 -in "$LETSENCRYPT_CERT" -text -noout &> /dev/null; then
log_success "Let's Encrypt Zertifikat ist gültig"
# Zeige Zertifikats-Informationen
log_info "Let's Encrypt Zertifikats-Informationen:"
openssl x509 -in "$LETSENCRYPT_CERT" -text -noout | grep -E "(Subject:|Not Before|Not After|DNS:)"
read -p "Möchten Sie die Zertifikate erneuern? (y/N): " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
log_info "Zertifikate bleiben unverändert"
return 0
fi
else
log_warning "Let's Encrypt Zertifikat ist ungültig, erstelle neue..."
fi
fi
# Erstelle oder erneuere Let's Encrypt Zertifikat
log_info "Erstelle/erneuere Let's Encrypt Zertifikat für your-part.de..."
# Check that port 80 is free (certbot's standalone HTTP-01 challenge needs to bind it)
if sudo netstat -tlnp | grep -q ":80 "; then
log_warning "Port 80 ist bereits belegt. Stelle sicher, dass kein anderer Service läuft."
log_info "Oder verwende DNS-01 Challenge mit --dns-cloudflare oder ähnlich"
fi
# Erstelle Zertifikat mit HTTP-01 Challenge
sudo certbot certonly --standalone -d your-part.de --non-interactive --agree-tos --email admin@your-part.de
if [ $? -eq 0 ]; then
log_success "Let's Encrypt Zertifikat erfolgreich erstellt!"
# Erstelle Symlinks zu den Zertifikaten
sudo ln -sf "$LETSENCRYPT_CERT" "$CERT_FILE"
sudo ln -sf "$LETSENCRYPT_KEY" "$KEY_FILE"
# Setze korrekte Berechtigungen
sudo chown yourpart:yourpart "$CERT_FILE" "$KEY_FILE"
sudo chmod 644 "$CERT_FILE"
sudo chmod 600 "$KEY_FILE"
# Zeige Zertifikats-Informationen
log_info "Let's Encrypt Zertifikats-Informationen:"
openssl x509 -in "$CERT_FILE" -text -noout | grep -E "(Subject:|Not Before|Not After|DNS:)"
# Erstelle Auto-Renewal Cron Job
setup_auto_renewal
else
log_error "Let's Encrypt Zertifikat konnte nicht erstellt werden!"
exit 1
fi
}
setup_apache2_certificates() {
log_info "Apache2 Zertifikate Setup"
# Prüfe ob Apache2 installiert ist
if ! command -v apache2 &> /dev/null && ! command -v httpd &> /dev/null; then
log_warning "Apache2 ist nicht installiert, aber wir können trotzdem die Zertifikate verwenden"
fi
# Check common Apache2 certificate paths (ordered by preference, best first)
APACHE2_CERT_PATHS=(
"/etc/letsencrypt/live/your-part.de/fullchain.pem"
"/etc/letsencrypt/live/$(hostname)/fullchain.pem"
"/etc/apache2/ssl/apache.crt"
"/etc/httpd/ssl/apache.crt"
"/etc/ssl/certs/apache-selfsigned.crt"
"/etc/ssl/certs/ssl-cert-snakeoil.pem"
)
APACHE2_KEY_PATHS=(
"/etc/letsencrypt/live/your-part.de/privkey.pem"
"/etc/letsencrypt/live/$(hostname)/privkey.pem"
"/etc/apache2/ssl/apache.key"
"/etc/httpd/ssl/apache.key"
"/etc/ssl/private/apache-selfsigned.key"
"/etc/ssl/private/ssl-cert-snakeoil.key"
)
# Finde verfügbare Zertifikate
FOUND_CERT=""
FOUND_KEY=""
for cert_path in "${APACHE2_CERT_PATHS[@]}"; do
if sudo test -f "$cert_path"; then
FOUND_CERT="$cert_path"
log_info "Gefundenes Zertifikat: $cert_path"
break
fi
done
for key_path in "${APACHE2_KEY_PATHS[@]}"; do
if sudo test -f "$key_path"; then
FOUND_KEY="$key_path"
log_info "Gefundener Private Key: $key_path"
break
fi
done
if [ -z "$FOUND_CERT" ] || [ -z "$FOUND_KEY" ]; then
log_error "Keine Apache2-Zertifikate gefunden!"
log_info "Verfügbare Pfade:"
for path in "${APACHE2_CERT_PATHS[@]}" "${APACHE2_KEY_PATHS[@]}"; do
if sudo test -f "$path"; then
log_info "$path"
else
log_info "$path"
fi
done
exit 1
fi
# Warn about snakeoil (placeholder) certificates
if [[ "$FOUND_CERT" == *"snakeoil"* ]]; then
log_warning "ACHTUNG: Snakeoil-Zertifikat erkannt!"
log_warning "Dieses Zertifikat ist nur für localhost gültig, nicht für your-part.de"
log_warning "Für Produktionsumgebungen sollten Sie Let's Encrypt verwenden"
echo ""
read -p "Möchten Sie trotzdem fortfahren? (y/N): " -n 1 -r
echo ""
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
log_info "Setup abgebrochen. Verwenden Sie Option 2 für Let's Encrypt."
exit 0
fi
fi
# Erstelle Symlinks zu den Apache2-Zertifikaten
log_info "Erstelle Symlinks zu Apache2-Zertifikaten..."
sudo ln -sf "$FOUND_CERT" "$CERT_FILE"
sudo ln -sf "$FOUND_KEY" "$KEY_FILE"
# Setze korrekte Berechtigungen
sudo chown yourpart:yourpart "$CERT_FILE" "$KEY_FILE"
sudo chmod 644 "$CERT_FILE"
sudo chmod 600 "$KEY_FILE"
log_success "Apache2-Zertifikate erfolgreich verlinkt!"
# Zeige Zertifikats-Informationen
log_info "Apache2-Zertifikats-Informationen:"
openssl x509 -in "$CERT_FILE" -text -noout | grep -E "(Subject:|Not Before|Not After|DNS:)"
# Check whether these are Let's Encrypt certificates
if [[ "$FOUND_CERT" == *"letsencrypt"* ]]; then
log_info "Let's Encrypt-Zertifikate erkannt, richte Auto-Renewal ein..."
setup_auto_renewal
else
log_warning "Self-Signed oder andere Zertifikate erkannt - kein Auto-Renewal eingerichtet"
fi
}
setup_auto_renewal() {
log_info "Richte automatische Zertifikats-Erneuerung ein..."
# Create the renewal script
sudo tee /etc/yourpart/renew-ssl.sh > /dev/null << 'EOF'
#!/bin/bash
# Automatic SSL certificate renewal for the YourPart daemon
CERT_DIR="/etc/yourpart"
LETSENCRYPT_CERT="/etc/letsencrypt/live/your-part.de/fullchain.pem"
LETSENCRYPT_KEY="/etc/letsencrypt/live/your-part.de/privkey.pem"
# Renew the certificate
certbot renew --quiet
if [ $? -eq 0 ]; then
# Update the symlinks
ln -sf "$LETSENCRYPT_CERT" "$CERT_DIR/server.crt"
ln -sf "$LETSENCRYPT_KEY" "$CERT_DIR/server.key"
# Set ownership and permissions
chown yourpart:yourpart "$CERT_DIR/server.crt" "$CERT_DIR/server.key"
chmod 644 "$CERT_DIR/server.crt"
chmod 600 "$CERT_DIR/server.key"
# Reload the daemon so it picks up the renewed certificate
systemctl reload yourpart-daemon
echo "$(date): SSL-Zertifikat erfolgreich erneuert" >> /var/log/yourpart/ssl-renewal.log
fi
EOF
sudo chmod +x /etc/yourpart/renew-ssl.sh
# Add cron job (daily at 02:30)
(sudo crontab -l 2>/dev/null; echo "30 2 * * * /etc/yourpart/renew-ssl.sh") | sudo crontab -
log_success "Automatische Erneuerung eingerichtet (täglich um 2:30 Uhr)"
}
log_info "SSL/TLS Setup für YourPart Daemon"
# Benutzerauswahl
echo ""
log_info "Wählen Sie den Zertifikatstyp:"
echo "1) Self-Signed Certificate (für Entwicklung/Testing)"
echo "2) Let's Encrypt Certificate (für Produktion)"
echo "3) Bestehende Let's Encrypt Zertifikate verwenden"
echo "4) Apache2-Zertifikate verwenden (empfohlen für Ubuntu)"
echo ""
read -p "Ihre Wahl (1-4): " -n 1 -r
echo ""
case $REPLY in
1)
log_info "Self-Signed Certificate wird erstellt..."
CERT_TYPE="self-signed"
;;
2)
log_info "Let's Encrypt Certificate wird erstellt..."
CERT_TYPE="letsencrypt"
;;
3)
log_info "Bestehende Let's Encrypt Zertifikate werden verwendet..."
CERT_TYPE="existing-letsencrypt"
;;
4)
log_info "Apache2-Zertifikate werden verwendet..."
CERT_TYPE="apache2"
;;
*)
log_error "Ungültige Auswahl!"
exit 1
;;
esac
# Erstelle Zertifikats-Verzeichnis falls nicht vorhanden
if [ ! -d "$CERT_DIR" ]; then
log_info "Erstelle Zertifikats-Verzeichnis: $CERT_DIR"
sudo mkdir -p "$CERT_DIR"
fi
# Führe entsprechenden Setup-Typ aus
case $CERT_TYPE in
"self-signed")
# Prüfe ob bereits Zertifikate existieren
if [ -f "$CERT_FILE" ] && [ -f "$KEY_FILE" ]; then
log_info "Zertifikate existieren bereits"
# Prüfe Gültigkeit der Zertifikate
if openssl x509 -in "$CERT_FILE" -text -noout &> /dev/null; then
log_success "Zertifikat ist gültig"
# Zeige Zertifikats-Informationen
log_info "Zertifikats-Informationen:"
openssl x509 -in "$CERT_FILE" -text -noout | grep -E "(Subject:|Not Before|Not After|DNS:)"
read -p "Möchten Sie neue Zertifikate erstellen? (y/N): " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
log_info "Zertifikate bleiben unverändert"
exit 0
fi
else
log_warning "Zertifikat ist ungültig, erstelle neue..."
fi
fi
log_info "Erstelle neue Self-Signed SSL-Zertifikate..."
# Erstelle Private Key
log_info "Erstelle Private Key..."
sudo openssl genrsa -out "$KEY_FILE" 2048
sudo chmod 600 "$KEY_FILE"
sudo chown yourpart:yourpart "$KEY_FILE"
# Erstelle Certificate Signing Request (CSR)
log_info "Erstelle Certificate Signing Request..."
sudo openssl req -new -key "$KEY_FILE" -out "$CSR_FILE" -subj "/C=DE/ST=Germany/L=Berlin/O=YourPart/OU=IT/CN=your-part.de"
# Erstelle Self-Signed Certificate
log_info "Erstelle Self-Signed Certificate..."
sudo openssl x509 -req -days 365 -in "$CSR_FILE" -signkey "$KEY_FILE" -out "$CERT_FILE"
# Setze korrekte Berechtigungen
sudo chmod 644 "$CERT_FILE"
sudo chown yourpart:yourpart "$CERT_FILE"
# Lösche CSR-Datei (nicht mehr benötigt)
sudo rm -f "$CSR_FILE"
log_success "Self-Signed SSL-Zertifikate erfolgreich erstellt!"
;;
"letsencrypt")
setup_letsencrypt
;;
"existing-letsencrypt")
if [ -f "$LETSENCRYPT_CERT" ] && [ -f "$LETSENCRYPT_KEY" ]; then
log_info "Verwende bestehende Let's Encrypt Zertifikate..."
# Erstelle Symlinks zu den Zertifikaten
sudo ln -sf "$LETSENCRYPT_CERT" "$CERT_FILE"
sudo ln -sf "$LETSENCRYPT_KEY" "$KEY_FILE"
# Setze korrekte Berechtigungen
sudo chown yourpart:yourpart "$CERT_FILE" "$KEY_FILE"
sudo chmod 644 "$CERT_FILE"
sudo chmod 600 "$KEY_FILE"
log_success "Let's Encrypt Zertifikate erfolgreich verlinkt!"
# Richte Auto-Renewal ein
setup_auto_renewal
else
log_error "Let's Encrypt Zertifikate nicht gefunden in $LETSENCRYPT_DIR"
log_info "Führen Sie zuerst 'certbot certonly' aus oder wählen Sie Option 2"
exit 1
fi
;;
"apache2")
setup_apache2_certificates
;;
esac
# Zeige Zertifikats-Informationen
log_info "Zertifikats-Informationen:"
openssl x509 -in "$CERT_FILE" -text -noout | grep -E "(Subject:|Not Before|Not After|DNS:)"
log_info ""
log_info "Nächste Schritte:"
log_info "1. Aktiviere SSL in der Konfiguration:"
log_info " WEBSOCKET_SSL_ENABLED=true"
log_info "2. Starte den Daemon neu:"
log_info " sudo systemctl restart yourpart-daemon"
log_info "3. Verbinde dich mit:"
log_info " wss://your-part.de:4551"
log_info ""
case $CERT_TYPE in
"self-signed")
log_warning "Hinweis: Dies ist ein Self-Signed Certificate!"
log_warning "Für Produktionsumgebungen verwenden Sie Let's Encrypt oder Apache2-Zertifikate."
;;
"apache2")
log_success "Apache2-Zertifikate erfolgreich konfiguriert!"
log_info "Diese Zertifikate werden automatisch von Apache2 verwaltet."
;;
*)
log_success "Let's Encrypt Zertifikat ist produktionsbereit!"
;;
esac

View File

@@ -1,260 +0,0 @@
#include "character_creation_worker.h"
#include "connection_guard.h"
#include <iostream>
#include <chrono>
#include <thread>
#include <random>
CharacterCreationWorker::CharacterCreationWorker(ConnectionPool &pool, MessageBroker &broker)
: Worker(pool, broker, "CharacterCreationWorker"),
gen(std::random_device{}()),
dist(2, 3),
deathCheckRunning(true),
deathThread(&CharacterCreationWorker::monitorCharacterDeaths, this) {
}
CharacterCreationWorker::~CharacterCreationWorker() {
deathCheckRunning.store(false);
if (deathThread.joinable()) {
deathThread.join();
}
}
void CharacterCreationWorker::run() {
while (runningWorker) {
setCurrentStep("Check if previous day character was created");
if (!isTodayCharacterCreated()) {
setCurrentStep("Create characters for today");
createCharactersForToday();
}
setCurrentStep("Sleep for 60 seconds");
for (int i = 0; i < 60 && runningWorker; ++i) {
std::this_thread::sleep_for(std::chrono::seconds(1));
setCurrentStep("signalActivity()");
signalActivity();
}
setCurrentStep("Loop done");
}
}
bool CharacterCreationWorker::isTodayCharacterCreated() {
try {
setCurrentStep("Get Database Connection");
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
setCurrentStep("Execute Query");
auto results = db.query(QUERY_IS_PREVIOUS_DAY_CHARACTER_CREATED);
return !results.empty();
} catch (const std::exception &e) {
std::cerr << "[CharacterCreationWorker] Fehler in isTodayCharacterCreated: " << e.what() << std::endl;
return false;
}
}
void CharacterCreationWorker::createCharactersForToday() {
loadNames();
if (first_name_cache.empty() || last_name_cache.empty()) {
std::cerr << "[CharacterCreationWorker] Fehler: Namen konnten nicht geladen werden." << std::endl;
return;
}
auto town_ids = getTownRegionIds();
for (auto region_id : town_ids) {
createCharactersForRegion(region_id);
}
}
void CharacterCreationWorker::createCharactersForRegion(int region_id) {
std::vector<int> nobility_stands = {1, 2, 3};
std::vector<std::string> genders = {"male", "female"};
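// For every town region, create dist(gen) NPCs (2-3, see the constructor) per combination of nobility rank and gender.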
for (auto nobility : nobility_stands) {
for (const auto &gender : genders) {
int num_chars = dist(gen);
for (int i = 0; i < num_chars; ++i) {
createCharacter(region_id, gender, nobility);
}
}
}
}
void CharacterCreationWorker::createCharacter(int region_id, const std::string &gender, int title_of_nobility) {
int first_name_id = getRandomFromSet(first_name_cache[gender]);
if (first_name_id == -1) {
std::cerr << "Fehler: Kein passender Vorname gefunden." << std::endl;
return;
}
int last_name_id = getRandomFromSet(last_name_cache);
if (last_name_id == -1) {
std::cerr << "Fehler: Kein passender Nachname gefunden." << std::endl;
return;
}
try {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("insert_character", QUERY_INSERT_CHARACTER);
db.execute("insert_character", {std::to_string(region_id),
std::to_string(first_name_id),
std::to_string(last_name_id),
gender,
std::to_string(title_of_nobility)});
} catch (const std::exception &e) {
std::cerr << "[CharacterCreationWorker] Fehler in createCharacter: " << e.what() << std::endl;
}
}
void CharacterCreationWorker::monitorCharacterDeaths() {
while (deathCheckRunning) {
try {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
auto results = db.query(QUERY_GET_ELIGIBLE_NPC_FOR_DEATH);
for (const auto &row : results) {
int characterId = std::stoi(row.at("id"));
int age = std::stoi(row.at("age"));
if (calculateDeathProbability(age)) {
handleCharacterDeath(characterId);
}
}
} catch (const std::exception &e) {
std::cerr << "[CharacterCreationWorker] Fehler beim Überprüfen von Todesfällen: " << e.what() << std::endl;
}
std::this_thread::sleep_for(std::chrono::hours(1));
}
}
bool CharacterCreationWorker::calculateDeathProbability(int age) {
if (age < 60) {
return false;
}
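// 'age' arrives in days (see QUERY_GET_ELIGIBLE_NPC_FOR_DEATH); the chance ramps linearly from 1% at 60, plus one percentage point per unit above 60.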
double baseProbability = 0.01;
double increasePerYear = 0.01;
double deathProbability = baseProbability + (increasePerYear * (age - 60));
std::uniform_real_distribution<double> deathDist(0.0, 1.0);
return deathDist(gen) < deathProbability;
}
void CharacterCreationWorker::handleCharacterDeath(int characterId) {
try {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
// 1) Delete the director entry and notify the employer
db.prepare("delete_director", QUERY_DELETE_DIRECTOR);
auto dirResult = db.execute("delete_director", { std::to_string(characterId) });
if (!dirResult.empty()) {
int userId = std::stoi(dirResult[0].at("user_id"));
notifyUser(userId, "director_death");
}
// 2) Delete relationships and notify the affected users
db.prepare("delete_relationship", QUERY_DELETE_RELATIONSHIP);
auto relResult = db.execute("delete_relationship", { std::to_string(characterId) });
for (auto &row : relResult) {
int relatedUserId = std::stoi(row.at("related_user_id"));
notifyUser(relatedUserId, "relationship_death");
}
// 3) Delete child relations and notify both parents
db.prepare("delete_child_relation", QUERY_DELETE_CHILD_RELATION);
auto childResult = db.execute("delete_child_relation", { std::to_string(characterId) });
for (auto &row : childResult) {
int fatherUserId = std::stoi(row.at("father_user_id"));
int motherUserId = std::stoi(row.at("mother_user_id"));
notifyUser(fatherUserId, "child_death");
notifyUser(motherUserId, "child_death");
}
// 4) Mark the character as deceased
markCharacterAsDeceased(characterId);
} catch (const std::exception &e) {
std::cerr << "[CharacterCreationWorker] Fehler beim Bearbeiten des Todes: "
<< e.what() << std::endl;
}
}
void CharacterCreationWorker::notifyUser(int userId, const std::string &eventType) {
try {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("insert_notification", QUERY_INSERT_NOTIFICATION);
db.execute("insert_notification", { std::to_string(userId) });
// Send falukantUpdateStatus after inserting the notification
nlohmann::json updateMessage = {
{"event", "falukantUpdateStatus"},
{"user_id", userId}
};
broker.publish(updateMessage.dump());
// Also send the original event notification
nlohmann::json message = {
{"event", eventType},
{"user_id", userId}
};
broker.publish(message.dump());
} catch (const std::exception &e) {
std::cerr << "[CharacterCreationWorker] Fehler beim Senden der Benachrichtigung: "
<< e.what() << std::endl;
}
}
void CharacterCreationWorker::markCharacterAsDeceased(int characterId) {
try {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("mark_character_deceased", QUERY_MARK_CHARACTER_DECEASED);
db.execute("mark_character_deceased", {std::to_string(characterId)});
} catch (const std::exception &e) {
std::cerr << "[CharacterCreationWorker] Fehler beim Markieren des Charakters als verstorben: " << e.what() << std::endl;
}
}
std::vector<int> CharacterCreationWorker::getTownRegionIds() {
try {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
auto rows = db.query(QUERY_GET_TOWN_REGION_IDS);
std::vector<int> ids;
for (const auto &row : rows) {
ids.push_back(std::stoi(row.at("id")));
}
return ids;
} catch (const std::exception &e) {
std::cerr << "[CharacterCreationWorker] Fehler in getTownRegionIds: " << e.what() << std::endl;
return {};
}
}
void CharacterCreationWorker::loadNames() {
try {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
auto firstNameRows = db.query(QUERY_LOAD_FIRST_NAMES);
for (const auto &row : firstNameRows) {
first_name_cache[row.at("gender")].insert(std::stoi(row.at("id")));
}
auto lastNameRows = db.query(QUERY_LOAD_LAST_NAMES);
for (const auto &row : lastNameRows) {
last_name_cache.insert(std::stoi(row.at("id")));
}
} catch (const std::exception &e) {
std::cerr << "[CharacterCreationWorker] Fehler in loadNames: " << e.what() << std::endl;
}
}
int CharacterCreationWorker::getRandomFromSet(const std::unordered_set<int> &name_set) {
if (name_set.empty()) {
return -1;
}
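// Pick a uniformly distributed element; std::advance over the unordered_set is O(n), acceptable for the small name caches.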
auto it = name_set.begin();
std::advance(it, std::uniform_int_distribution<int>(0, name_set.size() - 1)(gen));
return *it;
}

View File

@@ -1,162 +0,0 @@
#pragma once
#include "worker.h"
#include <random>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <string>
#include <thread>
#include <atomic>
class CharacterCreationWorker : public Worker {
public:
CharacterCreationWorker(ConnectionPool &pool, MessageBroker &broker);
~CharacterCreationWorker() override;
protected:
void run() override;
private:
std::mt19937 gen;
std::uniform_int_distribution<int> dist;
std::unordered_map<std::string, std::unordered_set<int>> first_name_cache;
std::unordered_set<int> last_name_cache;
std::atomic<bool> deathCheckRunning{true};
std::thread deathThread;
bool isTodayCharacterCreated();
void createCharactersForToday();
void createCharactersForRegion(int region_id);
void createCharacter(int region_id, const std::string &gender, int title_of_nobility);
std::vector<int> getTownRegionIds();
void loadNames();
int getRandomFromSet(const std::unordered_set<int> &name_set);
void monitorCharacterDeaths();
void handleCharacterDeath(int characterId);
void notifyUser(int userId, const std::string &eventType);
void markCharacterAsDeceased(int characterId);
bool calculateDeathProbability(int age);
static constexpr const char *QUERY_IS_PREVIOUS_DAY_CHARACTER_CREATED = R"(
SELECT created_at
FROM falukant_data."character"
WHERE user_id IS NULL
AND created_at::date = CURRENT_DATE
ORDER BY created_at DESC
LIMIT 1;
)";
static constexpr const char *QUERY_GET_TOWN_REGION_IDS = R"(
SELECT fdr.id
FROM falukant_data.region fdr
JOIN falukant_type.region ftr ON fdr.region_type_id = ftr.id
WHERE ftr.label_tr = 'city';
)";
static constexpr const char *QUERY_LOAD_FIRST_NAMES = R"(
SELECT id, gender
FROM falukant_predefine.firstname;
)";
static constexpr const char *QUERY_LOAD_LAST_NAMES = R"(
SELECT id
FROM falukant_predefine.lastname;
)";
static constexpr const char *QUERY_INSERT_CHARACTER = R"(
INSERT INTO falukant_data."character"(
user_id, region_id, first_name, last_name,
birthdate, gender, created_at, updated_at, title_of_nobility
)
VALUES (NULL, $1, $2, $3, NOW(), $4, NOW(), NOW(), $5);
)";
static constexpr const char *QUERY_GET_ELIGIBLE_NPC_FOR_DEATH = R"(
WITH aged AS (
SELECT
c.id,
(current_date - c.birthdate::date) AS age,
c.user_id
FROM
falukant_data."character" c
WHERE
c.user_id IS NULL
AND (current_date - c.birthdate::date) > 60
),
always_sel AS (
-- Always include: everyone older than 85 days
SELECT *
FROM aged
WHERE age > 85
),
random_sel AS (
-- Random sample: everyone between 61 and 85 days, limited to 10 here as an example
SELECT *
FROM aged
WHERE age <= 85
ORDER BY random()
LIMIT 10 -- <-- adjust the desired count here
)
-- Merge the two result sets
SELECT *
FROM always_sel
UNION ALL
SELECT *
FROM random_sel;
)";
static constexpr const char *QUERY_DELETE_DIRECTOR = R"(
DELETE FROM falukant_data.director
WHERE director_character_id = $1
RETURNING employer_user_id;
)";
static constexpr const char *QUERY_DELETE_RELATIONSHIP = R"(
WITH deleted AS (
DELETE FROM falukant_data.relationship
WHERE character1_id = $1
OR character2_id = $1
RETURNING
CASE
WHEN character1_id = $1 THEN character2_id
ELSE character1_id
END AS related_character_id,
relationship_type_id
)
SELECT
c.user_id AS related_user_id
FROM deleted d
JOIN falukant_data."character" c
ON c.id = d.related_character_id;
)";
static constexpr const char *QUERY_DELETE_CHILD_RELATION = R"(
WITH deleted AS (
DELETE FROM falukant_data.child_relation
WHERE child_character_id = $1
RETURNING
father_character_id,
mother_character_id
)
SELECT
cf.user_id AS father_user_id,
cm.user_id AS mother_user_id
FROM deleted d
JOIN falukant_data."character" cf
ON cf.id = d.father_character_id
JOIN falukant_data."character" cm
ON cm.id = d.mother_character_id;
)";
static constexpr const char *QUERY_INSERT_NOTIFICATION = R"(
INSERT INTO falukant_log.notification (user_id, tr, shown, created_at, updated_at)
VALUES ($1, 'director_death', false, NOW(), NOW());
)";
static constexpr const char *QUERY_MARK_CHARACTER_DECEASED = R"(
DELETE FROM falukant_data."character"
WHERE id = $1;
)";
};

View File

@@ -1,39 +0,0 @@
#include "config.h"
#include <fstream>
#include <sstream>
#include <stdexcept>
Config::Config(const std::string &filepath)
{
load(filepath);
}
void Config::load(const std::string &filepath)
{
std::ifstream file(filepath);
if (!file)
{
throw std::runtime_error("Konfigurationsdatei konnte nicht geöffnet werden.");
}
std::string line;
while (std::getline(file, line))
{
std::istringstream iss(line);
std::string key, value;
if (std::getline(iss, key, '=') && std::getline(iss, value))
{
config_map[key] = value;
}
}
}
std::string Config::get(const std::string &key) const
{
auto it = config_map.find(key);
if (it != config_map.end())
{
return it->second;
}
throw std::runtime_error("Konfigurationsschlüssel nicht gefunden: " + key);
}

View File

@@ -1,14 +0,0 @@
#pragma once
#include <string>
#include <map>
class Config
{
public:
Config(const std::string &filepath);
std::string get(const std::string &key) const;
private:
std::map<std::string, std::string> config_map;
void load(const std::string &filepath);
};

View File

@@ -1,24 +0,0 @@
#pragma once
#include "connection_pool.h"
#include <memory>
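// RAII helper: borrows a connection from the pool on construction and returns it automatically when the guard goes out of scope.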
class ConnectionGuard {
public:
ConnectionGuard(ConnectionPool &pool)
: pool(pool), connection(pool.getConnection()) {}
~ConnectionGuard() {
if (connection) {
pool.releaseConnection(connection);
}
}
Database &get() {
return *connection;
}
private:
ConnectionPool &pool;
std::shared_ptr<Database> connection;
};

View File

@@ -1,49 +0,0 @@
#include "connection_pool.h"
#include <iostream>
#include "connection_guard.h"
ConnectionPool::ConnectionPool(const std::string &host, const std::string &port,
const std::string &name, const std::string &user,
const std::string &password, int pool_size)
: host(host), port(port), name(name), user(user), password(password) {
createPool(pool_size);
}
void ConnectionPool::createPool(int pool_size) {
std::string conninfo = "host=" + host + " port=" + port + " dbname=" + name +
" user=" + user + " password=" + password;
for (int i = 0; i < pool_size; ++i) {
auto conn = std::make_shared<Database>(conninfo);
pool.push(conn);
}
}
std::shared_ptr<Database> ConnectionPool::getConnection() {
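// Blocks until a pooled connection becomes available; a stale connection is transparently re-created below.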
std::unique_lock<std::mutex> lock(pool_mutex);
pool_cv.wait(lock, [this]() { return !pool.empty(); });
auto conn = pool.front();
pool.pop();
if (!conn->isValid()) {
std::cerr << "[ConnectionPool] Ungültige Verbindung. Erstelle neu.\n";
std::string conninfo = "host=" + host +
" port=" + port +
" dbname=" + name +
" user=" + user +
" password=" + password;
conn = std::make_shared<Database>(conninfo);
if (!conn->isValid()) {
std::cerr << "[ConnectionPool] Erneut fehlgeschlagen.\n";
return nullptr;
}
}
return conn;
}
void ConnectionPool::releaseConnection(std::shared_ptr<Database> conn) {
{
std::lock_guard<std::mutex> lock(pool_mutex);
pool.push(conn);
}
pool_cv.notify_one();
}

View File

@@ -1,31 +0,0 @@
#pragma once
#include <queue>
#include <memory>
#include <mutex>
#include <condition_variable>
#include "database.h"
class ConnectionPool {
public:
ConnectionPool(const std::string &host, const std::string &port,
const std::string &name, const std::string &user,
const std::string &password, int pool_size);
std::shared_ptr<Database> getConnection();
void releaseConnection(std::shared_ptr<Database> conn);
private:
std::queue<std::shared_ptr<Database>> pool;
std::mutex pool_mutex;
std::condition_variable pool_cv;
std::string host;
std::string port;
std::string name;
std::string user;
std::string password;
void createPool(int pool_size);
void refreshConnection(std::shared_ptr<Database> &conn);
};

View File

@@ -1,174 +0,0 @@
#include "database.h"
#include <pqxx/pqxx>
#include <vector>
#include <unordered_map>
#include <string>
#include <iostream>
Database::Database(const std::string &conninfo)
{
try {
connection_ = std::make_unique<pqxx::connection>(conninfo);
if (!connection_->is_open()) {
throw std::runtime_error("Konnte DB-Verbindung nicht öffnen!");
}
} catch (const std::exception &e) {
std::cerr << "[Database] Fehler beim Verbinden: " << e.what() << std::endl;
throw;
}
}
std::vector<std::map<std::string, std::string>>
Database::query(const std::string &sql)
{
std::vector<std::map<std::string, std::string>> rows;
try {
pqxx::work txn(*connection_);
pqxx::result r = txn.exec(sql);
txn.commit();
// Pre-allocate memory for better performance
rows.reserve(r.size());
for (const auto& row : r) {
std::map<std::string, std::string> oneRow;
for (auto f = 0u; f < row.size(); f++) {
const std::string colName = r.column_name(f);
const char* value = row[f].c_str();
oneRow.emplace(colName, value ? value : "");
}
rows.emplace_back(std::move(oneRow));
}
} catch (const std::exception &ex) {
std::cerr << "[Database] query-Fehler: " << ex.what() << "\nSQL: " << sql << std::endl;
}
return rows;
}
void Database::prepare(const std::string &stmtName, const std::string &sql)
{
try {
// First try to remove an old statement with this name, if one exists
try {
remove(stmtName);
} catch (...) {
// Ignore removal errors - the statement may simply not exist yet
}
// Create the new prepared statement
pqxx::work txn(*connection_);
txn.conn().prepare(stmtName, sql);
txn.commit();
} catch (const std::exception &ex) {
std::cerr << "[Database] prepare-Fehler: " << ex.what()
<< "\nSQL: " << sql << std::endl;
}
}
Database::FieldList Database::execute(const std::string& stmtName,
const std::vector<std::string>& params)
{
try {
pqxx::work txn(*connection_);
pqxx::result res;
if (params.empty()) {
res = txn.exec_prepared(stmtName);
} else {
// Compatibility for libpqxx 6.x (Ubuntu 22) and 7.x (openSUSE Tumbleweed)
#if PQXX_VERSION_MAJOR >= 7
pqxx::params p;
for (const auto& v : params) p.append(v);
res = txn.exec_prepared(stmtName, p);
#else
// libpqxx 6.x: pass the parameters variadically to exec_prepared
if (params.size() == 1) {
res = txn.exec_prepared(stmtName, params[0]);
} else if (params.size() == 2) {
res = txn.exec_prepared(stmtName, params[0], params[1]);
} else if (params.size() == 3) {
res = txn.exec_prepared(stmtName, params[0], params[1], params[2]);
} else if (params.size() == 4) {
res = txn.exec_prepared(stmtName, params[0], params[1], params[2], params[3]);
} else if (params.size() == 5) {
res = txn.exec_prepared(stmtName, params[0], params[1], params[2], params[3], params[4]);
} else {
// More than 5 parameters: build an explicit EXECUTE statement and run it via exec_params
std::string sql = "EXECUTE " + stmtName;
if (!params.empty()) {
sql += "(";
for (size_t i = 0; i < params.size(); ++i) {
if (i > 0) sql += ", ";
sql += "$" + std::to_string(i + 1);
}
sql += ")";
}
// Expand the vector into individual arguments for exec_params
if (params.size() == 6) {
res = txn.exec_params(sql, params[0], params[1], params[2], params[3], params[4], params[5]);
} else if (params.size() == 7) {
res = txn.exec_params(sql, params[0], params[1], params[2], params[3], params[4], params[5], params[6]);
} else if (params.size() == 8) {
res = txn.exec_params(sql, params[0], params[1], params[2], params[3], params[4], params[5], params[6], params[7]);
} else if (params.size() == 9) {
res = txn.exec_params(sql, params[0], params[1], params[2], params[3], params[4], params[5], params[6], params[7], params[8]);
} else if (params.size() == 10) {
res = txn.exec_params(sql, params[0], params[1], params[2], params[3], params[4], params[5], params[6], params[7], params[8], params[9]);
} else {
// More than 10 parameters are not supported by this fallback
throw std::runtime_error("Zu viele Parameter für prepared statement: " + std::to_string(params.size()));
}
}
#endif
}
FieldList out;
out.reserve(res.size());
for (const auto& row : res) {
std::unordered_map<std::string, std::string> m;
m.reserve(row.size()); // Pre-allocate for better performance
for (const auto& f : row) {
// Use string_view for better performance (C++17+)
const std::string_view name = f.name();
const char* value = f.c_str();
m.emplace(name, f.is_null() ? std::string{} : std::string(value));
}
out.emplace_back(std::move(m));
}
txn.commit();
return out;
} catch (const std::exception& e) {
std::cerr << "[Database] execute-Fehler: " << e.what()
<< "\n\nStatement: " << stmtName << std::endl;
return {};
}
}
void Database::remove(const std::string &stmtName) {
pqxx::work txn(*connection_);
txn.conn().unprepare(stmtName);
txn.commit();
}
bool Database::isValid() const {
try {
if (!connection_ || !connection_->is_open()) {
return false;
}
pqxx::work txn(*connection_);
txn.exec("SELECT 1"); // Einfacher Ping
txn.commit();
return true;
} catch (const std::exception &ex) {
std::cerr << "[Database] Verbindung ungültig: " << ex.what() << "\n";
return false;
}
}

View File

@@ -1,29 +0,0 @@
#pragma once
#include <string>
#include <vector>
#include <map>
#include <unordered_map>
#include <memory>
#include <pqxx/pqxx>
class Database {
public:
Database(const std::string &conninfo);
typedef std::unordered_map<std::string, std::string> FieldMap;
typedef std::vector<FieldMap> FieldList;
std::vector<std::map<std::string, std::string>> query(const std::string &sql);
void prepare(const std::string &stmtName, const std::string &sql);
FieldList execute(
const std::string &stmtName,
const std::vector<std::string> &params = {}
);
void remove(const std::string &stmtName);
bool isOpen() const { return connection_ && connection_->is_open(); }
bool isValid() const;
private:
std::unique_ptr<pqxx::connection> connection_;
};

View File

@@ -1,197 +0,0 @@
#include "director_worker.h"
#include <iostream>
DirectorWorker::DirectorWorker(ConnectionPool &pool, MessageBroker &broker)
: Worker(pool, broker, "DirectorWorker") {
}
DirectorWorker::~DirectorWorker() {
}
void DirectorWorker::run() {
auto lastExecutionTime = std::chrono::steady_clock::now();
while (runningWorker) {
signalActivity();
auto now = std::chrono::steady_clock::now();
auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(now - lastExecutionTime).count();
if (elapsed >= 60) {
try {
performTask();
paySalary();
calculateSatisfaction();
lastExecutionTime = now;
} catch (const std::exception &e) {
std::cerr << "[DirectorWorker] Fehler beim Ausführen der Aufgabe: " << e.what() << std::endl;
}
}
std::this_thread::sleep_for(std::chrono::seconds(1));
}
}
void DirectorWorker::performTask() {
try {
setCurrentStep("Get Database Connection");
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
setCurrentStep("Get director actions");
db.prepare("QUERY_GET_DIRECTORS", QUERY_GET_DIRECTORS);
const auto directors = db.execute("QUERY_GET_DIRECTORS");
// Use const references and string_view for better performance
for (const auto &director: directors) {
const auto& mayProduce = director.at("may_produce");
const auto& mayTransport = director.at("may_start_transport");
const auto& maySell = director.at("may_sell");
if (mayProduce == "t") {
startProductions(director);
}
if (mayTransport == "t") {
startTransports(director);
}
if (maySell == "t") {
startSellings(director);
}
}
} catch (const std::exception &e) {
std::cerr << "[DirectorWorker] Fehler bei der Datenbankoperation: " << e.what() << std::endl;
}
}
void DirectorWorker::startProductions(std::unordered_map<std::string, std::string> director) {
auto parseIntOrZero = [&](const std::string &s){
if (s.empty() || s == "null") return 0;
try {
return std::stoi(s);
} catch(...) {
return 0;
}
};
setCurrentStep("Get Database Connection - Production");
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
setCurrentStep("Get to produce");
db.prepare("get_to_produce", QUERY_GET_BEST_PRODUCTION);
const auto productions = db.execute("get_to_produce", { director.at("id") });
if (productions.empty()) return;
const auto &production = productions.at(0);
int runningProductions = parseIntOrZero(production.at("running_productions"));
if (runningProductions >= 2) {
return;
}
setCurrentStep("Add production to DB");
int availableStock = parseIntOrZero(production.at("stock_size"));
int usedStock = parseIntOrZero(production.at("used_in_stock"));
int freeCapacity = availableStock - usedStock - runningProductions;
int certificate = parseIntOrZero(production.at("certificate"));
int onePieceCost = certificate * 6;
int money = parseIntOrZero(production.at("money"));
int maxMoneyProduction = onePieceCost > 0 ? money / onePieceCost : 0;
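// Cap the order at free stock capacity, at what the user can afford, and at an absolute limit of 300 pieces.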
int toProduce = std::min(std::min(freeCapacity, maxMoneyProduction), 300);
if (toProduce < 1) {
return;
}
int falukantUserId = parseIntOrZero(production.at("falukant_user_id"));
int productionCost = toProduce * onePieceCost;
nlohmann::json msg1 = { { "event", "falukantUpdateStatus" } };
setCurrentStep("Update money");
changeFalukantUserMoney(falukantUserId, -productionCost, "director starts production", msg1);
setCurrentStep("Insert production");
db.prepare("insert_production", QUERY_INSERT_PRODUCTION);
int remaining = toProduce;
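// Insert the order in batches of at most 100 pieces per production row.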
while (remaining > 0) {
int batch = std::min(100, remaining);
db.execute("insert_production", {
production.at("branch_id"),
production.at("product_id"),
std::to_string(batch)
});
remaining -= batch;
}
nlohmann::json msg2 = {
{ "event", "production_started" },
{ "branch_id", production.at("branch_id") }
};
sendMessageToFalukantUsers(falukantUserId, msg2);
}
void DirectorWorker::startTransports(std::unordered_map<std::string, std::string>) {
}
void DirectorWorker::startSellings(std::unordered_map<std::string, std::string> director) {
setCurrentStep("Get Database Connection - Production");
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
setCurrentStep("Get to sell");
db.prepare("get_to_sell", QUERY_GET_INVENTORY);
const auto inventory = db.execute("get_to_sell", { director.at("id") });
for (const auto &item: inventory) {
const auto inventoryId = std::stoi(item.at("id"));
const auto productId = std::stoi(item.at("product_id"));
const auto quantity = std::stoi(item.at("quantity"));
const auto quality = std::stoi(item.at("quality"));
const auto maxSellPrice = std::stod(item.at("sell_cost"));
auto falukantUserId = std::stoi(item.at("user_id"));
const auto regionId = std::stoi(item.at("region_id"));
if (quantity > 0) {
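// Sale price scales linearly with quality: 60% of sell_cost at quality 0 up to 100% at quality 100.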
const auto minPrice = maxSellPrice * 0.6;
const auto pieceSellPrice = minPrice + (double)(maxSellPrice - minPrice) * (quality / 100.0);
const auto sellPrice = pieceSellPrice * quantity;
const nlohmann::json changeMessage = {
{ "productId", productId },
{ "event", "falukantUpdateStatus" }
};
changeFalukantUserMoney(falukantUserId, sellPrice, "sell products", changeMessage);
db.prepare("QUERY_ADD_SELL_LOG", QUERY_ADD_SELL_LOG);
db.execute("QUERY_ADD_SELL_LOG", { std::to_string(regionId), std::to_string(productId), std::to_string(quantity),
std::to_string(falukantUserId) });
}
db.prepare("remove_inventory", QUERY_REMOVE_INVENTORY);
db.execute("remove_inventory", { std::to_string(inventoryId) });
nlohmann::json message = {
{ "event", "selled_items" },
{ "branch_id", item.at("branch_id") },
};
sendMessageToFalukantUsers(falukantUserId, message);
}
}
void DirectorWorker::paySalary() {
setCurrentStep("salary - load to pay");
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("QUERY_GET_SALARY_TO_PAY", QUERY_GET_SALARY_TO_PAY);
const auto &salariesToPay = db.execute("QUERY_GET_SALARY_TO_PAY");
nlohmann::json message = {
{ "event", "falukantUpdateStatus" }
};
for (auto const &item: salariesToPay) {
changeFalukantUserMoney(std::stoi(item.at("employer_user_id")), -std::stoi(item.at("income")), "director payed out", message);
db.prepare("QUERY_SET_SALARY_PAYED", QUERY_SET_SALARY_PAYED);
db.execute("QUERY_SET_SALARY_PAYED", { std::to_string(std::stoi(item.at("id"))) });
}
}
void DirectorWorker::calculateSatisfaction() {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("QUERY_UPDATE_SATISFACTION", QUERY_UPDATE_SATISFACTION);
const auto &changedDirectors = db.execute("QUERY_UPDATE_SATISFACTION");
nlohmann::json message = {
{ "event", "directorchanged" }
};
for (auto const &director: changedDirectors) {
sendMessageToFalukantUsers(std::stoi(director.at("employer_user_id")), message);
}
}

View File

@@ -1,155 +0,0 @@
#ifndef DIRECTOR_WORKER_H
#define DIRECTOR_WORKER_H
#include "worker.h"
class DirectorWorker : public Worker {
public:
explicit DirectorWorker(ConnectionPool &pool, MessageBroker &broker);
~DirectorWorker() override;
protected:
void run() override;
private:
void performTask();
void startProductions(std::unordered_map<std::string, std::string> director);
void startTransports(std::unordered_map<std::string, std::string>);
void startSellings(std::unordered_map<std::string, std::string>);
void paySalary();
void calculateSatisfaction();
static constexpr const char *QUERY_GET_DIRECTORS = R"(
select d.may_produce, d.may_sell, d.may_start_transport, b.id branch_id, fu.id falukantUserId, d.id
from falukant_data.director d
join falukant_data.falukant_user fu
on fu.id = d.employer_user_id
join falukant_data."character" c
on c.id = d.director_character_id
join falukant_data.branch b
on b.region_id = c.region_id
and b.falukant_user_id = fu.id
where current_time between '08:00:00' and '17:00:00'
)";
static constexpr const char *QUERY_GET_BEST_PRODUCTION = R"(
select fdu."id" falukant_user_id, fdu."money", fdu."certificate", ftp."id" product_id, ftp.label_tr,(select sum("quantity")
from "falukant_data"."stock" fds
where fds."branch_id" = fdb."id") stock_size, coalesce((select sum(coalesce(fdi."quantity", 0))
from "falukant_data"."stock" fds
join "falukant_data"."inventory" fdi
on fdi."stock_id" = fds."id"
where fds."branch_id" = fdb."id"), 0) used_in_stock,
(ftp."sell_cost" * (fdtpw."worth_percent" + (fdk_character."knowledge" * 2 + fdk_director."knowledge") / 3) / 100 - 6 * ftp.category) / (300.0 * ftp. production_time) worth,
fdb."id" branch_id,
(select count("id") from "falukant_data"."production" where "branch_id" = fdb."id") running_productions,
coalesce((select sum(coalesce(fdp.quantity, 0)) quantity from
falukant_data.production fdp where fdp.branch_id = fdb.id), 0) running_productions
from "falukant_data"."director" fdd
join "falukant_data".character fdc
on fdc.id = fdd.director_character_id
join "falukant_data"."falukant_user" fdu
on fdd."employer_user_id" = fdu."id"
join "falukant_data"."character" user_character
on user_character."user_id" = fdu."id"
join "falukant_data"."branch" fdb
on fdb."falukant_user_id" = fdu."id"
and fdb."region_id" = fdc."region_id"
join "falukant_data"."town_product_worth" fdtpw
on fdtpw."region_id" = fdb."region_id"
join "falukant_data"."knowledge" fdk_character
on
fdk_character."product_id" = fdtpw."product_id"
and fdk_character."character_id" = user_character."id"
and fdk_character."product_id" = fdtpw."product_id"
join "falukant_data"."knowledge" fdk_director
on
fdk_director."product_id" = fdtpw."product_id"
and fdk_director."character_id" = fdd."director_character_id"
and fdk_director."product_id" = fdtpw."product_id"
join "falukant_type"."product" ftp
on
ftp."id" = fdtpw."product_id"
and ftp.category <= fdu.certificate
where fdd."id" = $1
order by worth desc
limit 1;
)";
static constexpr const char *QUERY_INSERT_PRODUCTION = R"(
insert into "falukant_data"."production" ("branch_id", "product_id", "quantity")
values ($1, $2, $3)
)";
static constexpr const char *QUERY_GET_INVENTORY = R"(
select i.id, i.product_id, i.quantity, i.quality, p.sell_cost, fu.id user_id, b.region_id, b.id branch_id
from falukant_data.inventory i
join falukant_data.stock s
on s.id = i.stock_id
join falukant_data.branch b
on b.id = s.branch_id
join falukant_data.falukant_user fu
on fu.id = b.falukant_user_id
join falukant_data.director d
on d.employer_user_id = fu.id
join falukant_type.product p
on p.id = i.product_id
where d.id = $1
)";
static constexpr const char *QUERY_REMOVE_INVENTORY = R"(
delete from falukant_data.inventory
where id = $1
)";
static constexpr const char *QUERY_ADD_SELL_LOG = R"(
INSERT INTO falukant_log.sell ("region_id", "product_id", "quantity", "seller_id")
values ($1, $2, $3, $4)
ON CONFLICT ("region_id", "product_id", "seller_id")
DO UPDATE
SET "quantity" = falukant_log.sell."quantity" + EXCLUDED.quantity
)";
static constexpr const char *QUERY_GET_SALARY_TO_PAY = R"(
select d.id, d.employer_user_id, d.income
from falukant_data.director d
where date(d.last_salary_payout) < date(now())
)";
static constexpr const char *QUERY_SET_SALARY_PAYED = R"(
update falukant_data.director
set last_salary_payout = NOW()
where id = $1
)";
static constexpr const char *QUERY_UPDATE_SATISFACTION = R"(
WITH new_sats AS (
SELECT
d.id,
ROUND(
d.income::numeric
/
(
c.title_of_nobility
* POWER(1.231, AVG(k.knowledge) / 1.5)
)
* 100
) AS new_satisfaction
FROM falukant_data.director d
JOIN falukant_data.knowledge k
ON d.director_character_id = k.character_id
JOIN falukant_data.character c
ON c.id = d.director_character_id
GROUP BY d.id, c.title_of_nobility, d.income
)
UPDATE falukant_data.director dir
SET satisfaction = ns.new_satisfaction
FROM new_sats ns
WHERE dir.id = ns.id
-- Only update rows whose value actually changes:
AND dir.satisfaction IS DISTINCT FROM ns.new_satisfaction
RETURNING dir.employer_user_id;
)";
};
#endif // DIRECTOR_WORKER_H

View File

@@ -1,78 +0,0 @@
#include "houseworker.h"
#include <iostream>
HouseWorker::HouseWorker(ConnectionPool &pool, MessageBroker &broker):
Worker(pool, broker, "HouseWorker") {
}
HouseWorker::~HouseWorker() {
}
void HouseWorker::run() {
auto lastExecutionTime = std::chrono::steady_clock::now();
auto lastHouseStateChange = std::chrono::system_clock::now();
while (runningWorker) {
signalActivity();
auto now = std::chrono::steady_clock::now();
auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(now - lastExecutionTime).count();
if (elapsed >= 3600) {
performTask();
lastExecutionTime = now;
}
auto nowSystem = std::chrono::system_clock::now();
auto lastDay = floor<std::chrono::days>(lastHouseStateChange);
auto today = floor<std::chrono::days>(nowSystem);
if (lastDay < today) {
performHouseStateChange();
lastHouseStateChange = nowSystem;
}
std::this_thread::sleep_for(std::chrono::seconds(1));
}
}
void HouseWorker::performTask() {
try {
setCurrentStep("Get Database Connection");
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
setCurrentStep("Get new houses data");
db.prepare("QUERY_GET_NEW_HOUSE_DATA", QUERY_GET_NEW_HOUSE_DATA);
const auto newHouses = db.execute("QUERY_GET_NEW_HOUSE_DATA");
for (const auto &newHouse: newHouses) {
db.prepare("QUERY_ADD_NEW_BUYABLE_HOUSE", QUERY_ADD_NEW_BUYABLE_HOUSE);
db.execute("QUERY_ADD_NEW_BUYABLE_HOUSE", { newHouse.at("house_id") });
}
} catch (const std::exception &e) {
std::cerr << "[HouseWorker] Fehler bei der Datenbankoperation: " << e.what() << std::endl;
}
}
void HouseWorker::performHouseStateChange() {
try {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
// Remove old prepared statements if they already exist
try {
db.remove("QUERY_UPDATE_BUYABLE_HOUSE_STATE");
} catch (...) {
// Ignore removal errors
}
try {
db.remove("QUERY_UPDATE_USER_HOUSE_STATE");
} catch (...) {
// Ignore removal errors
}
// Prepare the new statements
db.prepare("QUERY_UPDATE_BUYABLE_HOUSE_STATE", QUERY_UPDATE_BUYABLE_HOUSE_STATE);
db.prepare("QUERY_UPDATE_USER_HOUSE_STATE", QUERY_UPDATE_USER_HOUSE_STATE);
// Execute the statements
db.execute("QUERY_UPDATE_BUYABLE_HOUSE_STATE");
db.execute("QUERY_UPDATE_USER_HOUSE_STATE");
} catch(const std::exception &e) {
std::cerr << "[HouseWorker] Fehler bei der Datenbankoperation: " << e.what() << std::endl;
}
}

View File

@@ -1,54 +0,0 @@
#ifndef HOUSEWORKER_H
#define HOUSEWORKER_H
#include "worker.h"
class HouseWorker : public Worker {
public:
HouseWorker(ConnectionPool &pool, MessageBroker &broker);
~HouseWorker() override;
protected:
void run() override;
private:
void performTask();
void performHouseStateChange();
static constexpr const char *QUERY_GET_NEW_HOUSE_DATA = R"(
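-- Each run gives every house type a 0.01% chance of spawning a new buyable house; 'under_bridge' is excluded.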
SELECT
h.id AS house_id
FROM
falukant_type.house AS h
WHERE
random() < 0.0001
and "label_tr" != 'under_bridge';
)";
static constexpr const char *QUERY_ADD_NEW_BUYABLE_HOUSE = R"(
insert into falukant_data.buyable_house (house_type_id) values ($1);
)";
static constexpr const char *QUERY_UPDATE_BUYABLE_HOUSE_STATE = R"(
update falukant_data.buyable_house
set roof_condition = round(roof_condition - random() * (3 + 0 * id)),
floor_condition = round(floor_condition - random() * (3 + 0 * id)),
wall_condition = round(wall_condition - random() * (3 + 0 * id)),
window_condition = round(window_condition - random() * (3 + 0 * id))
)";
static constexpr const char *QUERY_UPDATE_USER_HOUSE_STATE = R"(
update falukant_data.user_house
set roof_condition = round(roof_condition - random() * (3 + 0 * id)),
floor_condition = round(floor_condition - random() * (3 + 0 * id)),
wall_condition = round(wall_condition - random() * (3 + 0 * id)),
window_condition = round(window_condition - random() * (3 + 0 * id))
where house_type_id not in (
select id
from falukant_type.house h
where h.label_tr = 'under_bridge'
)
)";
};
#endif // HOUSEWORKER_H

View File

@@ -1,135 +0,0 @@
#include "character_creation_worker.h"
#include "produce_worker.h"
#include "stockagemanager.h"
#include "director_worker.h"
#include "valuerecalculationworker.h"
#include "connection_pool.h"
#include "websocket_server.h"
#include "message_broker.h"
#include "usercharacterworker.h"
#include "houseworker.h"
#include "politics_worker.h"
#include "underground_worker.h"
#include "config.h"
#include <csignal>
#include <atomic>
#include <iostream>
#include <thread>
#include <vector>
#include <memory>
#include <cstdlib>
#include <systemd/sd-daemon.h>
std::atomic<bool> keepRunning(true);
std::atomic<bool> shutdownRequested(false);
void handleSignal(int signal) {
std::cerr << "Signal erhalten: " << signal << ". Beende Anwendung..." << std::endl;
if (signal == SIGINT || signal == SIGTERM) {
std::cerr << "Setze Shutdown-Flags..." << std::endl;
keepRunning.store(false, std::memory_order_relaxed);
shutdownRequested.store(true, std::memory_order_relaxed);
std::cerr << "Shutdown-Flags gesetzt. keepRunning=" << keepRunning.load() << ", shutdownRequested=" << shutdownRequested.load() << std::endl;
}
}
int main() {
std::signal(SIGINT, handleSignal);
std::signal(SIGTERM, handleSignal);
try {
Config config("/etc/yourpart/daemon.conf");
ConnectionPool pool(
config.get("DB_HOST"),
config.get("DB_PORT"),
config.get("DB_NAME"),
config.get("DB_USER"),
config.get("DB_PASSWORD"),
10
);
int websocketPort = std::stoi(config.get("WEBSOCKET_PORT"));
bool sslEnabled = config.get("WEBSOCKET_SSL_ENABLED") == "true";
std::string certPath = sslEnabled ? config.get("WEBSOCKET_SSL_CERT_PATH") : "";
std::string keyPath = sslEnabled ? config.get("WEBSOCKET_SSL_KEY_PATH") : "";
MessageBroker broker;
WebSocketServer websocketServer(websocketPort, pool, broker, sslEnabled, certPath, keyPath);
// Create all background workers up front
std::vector<std::unique_ptr<Worker>> workers;
workers.reserve(9); // Pre-allocate for better performance
workers.emplace_back(std::make_unique<CharacterCreationWorker>(pool, broker));
workers.emplace_back(std::make_unique<ProduceWorker>(pool, broker));
workers.emplace_back(std::make_unique<StockageManager>(pool, broker));
workers.emplace_back(std::make_unique<DirectorWorker>(pool, broker));
workers.emplace_back(std::make_unique<ValueRecalculationWorker>(pool, broker));
workers.emplace_back(std::make_unique<UserCharacterWorker>(pool, broker));
workers.emplace_back(std::make_unique<HouseWorker>(pool, broker));
workers.emplace_back(std::make_unique<PoliticsWorker>(pool, broker));
workers.emplace_back(std::make_unique<UndergroundWorker>(pool, broker));
websocketServer.setWorkers(workers);
broker.start();
websocketServer.run();
for (auto &worker : workers) {
worker->startWorkerThread();
worker->enableWatchdog();
}
// Notify systemd that the service is ready
sd_notify(0, "READY=1");
// Main loop; exits once a shutdown signal has been received
std::cerr << "Hauptschleife gestartet. keepRunning=" << keepRunning.load() << ", shutdownRequested=" << shutdownRequested.load() << std::endl;
while (keepRunning.load() && !shutdownRequested.load()) {
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
std::cerr << "Hauptschleife beendet. keepRunning=" << keepRunning.load() << ", shutdownRequested=" << shutdownRequested.load() << std::endl;
std::cerr << "Starte sauberes Herunterfahren..." << std::endl;
auto shutdownStart = std::chrono::steady_clock::now();
const auto maxShutdownTime = std::chrono::seconds(10);
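// Allow at most 10 seconds in total for stopping workers, watchdogs, server and broker.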
// Stop all worker threads
std::cerr << "Stoppe Worker-Threads..." << std::endl;
for (auto &worker : workers) {
worker->stopWorkerThread();
if (std::chrono::steady_clock::now() - shutdownStart > maxShutdownTime) {
std::cerr << "Shutdown-Timeout erreicht, erzwinge Beendigung..." << std::endl;
break;
}
}
// Stop the watchdog threads
std::cerr << "Stoppe Watchdog-Threads..." << std::endl;
for (auto &worker : workers) {
worker->stopWatchdogThread();
if (std::chrono::steady_clock::now() - shutdownStart > maxShutdownTime) {
std::cerr << "Shutdown-Timeout erreicht, erzwinge Beendigung..." << std::endl;
break;
}
}
// Stop the WebSocket server
std::cerr << "Stoppe WebSocket-Server..." << std::endl;
websocketServer.stop();
// Stop the message broker
std::cerr << "Stoppe Message Broker..." << std::endl;
broker.stop();
auto shutdownDuration = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() - shutdownStart);
std::cerr << "Anwendung erfolgreich beendet in " << shutdownDuration.count() << "ms." << std::endl;
// Force immediate process exit after shutdown
std::cerr << "Erzwinge Prozess-Beendigung..." << std::endl;
std::_Exit(0);
} catch (const std::exception &e) {
std::cerr << "Fehler: " << e.what() << std::endl;
return 1;
}
return 0;
}

View File

@@ -1,45 +0,0 @@
#include "message_broker.h"
#include <iostream>
void MessageBroker::publish(const std::string &message) {
std::lock_guard<std::mutex> lock(mutex);
std::cout << "[MessageBroker] Nachricht gepubliziert: " << message << std::endl;
messageQueue.push(message);
cv.notify_all();
}
void MessageBroker::subscribe(const MessageCallback &callback) {
std::lock_guard<std::mutex> lock(mutex);
subscribers.push_back(callback);
}
void MessageBroker::start() {
running = true;
brokerThread = std::thread([this]() { processMessages(); });
}
void MessageBroker::stop() {
running = false;
cv.notify_all();
if (brokerThread.joinable()) {
brokerThread.join();
}
}
void MessageBroker::processMessages() {
while (running) {
std::unique_lock<std::mutex> lock(mutex);
cv.wait(lock, [this]() { return !messageQueue.empty() || !running; });
if (!running) break;
while (!messageQueue.empty()) {
std::string message = std::move(messageQueue.front());
messageQueue.pop();
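// Dispatch outside the lock so publishers are not blocked while subscriber callbacks run.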
lock.unlock();
std::cout << "[MessageBroker] Sende Nachricht an " << subscribers.size() << " Subscribers: " << message << std::endl;
for (const auto &callback : subscribers) {
callback(message);
}
lock.lock();
}
}
}

View File

@@ -1,29 +0,0 @@
#pragma once
#include <functional>
#include <string>
#include <queue>
#include <mutex>
#include <condition_variable>
#include <thread>
#include <atomic>
#include <vector>
class MessageBroker {
public:
using MessageCallback = std::function<void(const std::string &message)>;
void publish(const std::string &message);
void subscribe(const MessageCallback &callback);
void start();
void stop();
private:
std::queue<std::string> messageQueue;
std::vector<MessageCallback> subscribers;
std::mutex mutex;
std::condition_variable cv;
std::atomic<bool> running{false};
std::thread brokerThread;
void processMessages();
};

View File

@@ -1,134 +0,0 @@
#pragma once
#include <string_view>
#include <chrono>
#include <memory>
#include <vector>
#include <unordered_map>
namespace PerformanceUtils {
// C++23: std::expected-like error handling
template<typename T, typename E>
class Expected {
private:
union {
T value_;
E error_;
};
bool has_value_;
public:
Expected(T&& value) : value_(std::move(value)), has_value_(true) {}
Expected(const E& error) : error_(error), has_value_(false) {}
~Expected() { if (has_value_) value_.~T(); else error_.~E(); }
bool has_value() const noexcept { return has_value_; }
const T& value() const { return value_; }
const E& error() const { return error_; }
T&& move_value() { return std::move(value_); }
};
// C++23: std::optional with better performance
template<typename T>
class FastOptional {
private:
alignas(T) char storage_[sizeof(T)];
bool has_value_;
public:
FastOptional() : has_value_(false) {}
template<typename... Args>
FastOptional(Args&&... args) : has_value_(true) {
new(storage_) T(std::forward<Args>(args)...);
}
~FastOptional() {
if (has_value_) {
reinterpret_cast<T*>(storage_)->~T();
}
}
bool has_value() const noexcept { return has_value_; }
T& value() { return *reinterpret_cast<T*>(storage_); }
const T& value() const { return *reinterpret_cast<const T*>(storage_); }
};
// String interning for better memory usage
class StringInterner {
private:
std::unordered_map<std::string_view, std::string> interned_strings_;
public:
std::string_view intern(std::string_view str) {
auto it = interned_strings_.find(str);
if (it != interned_strings_.end()) {
return it->second;
}
auto [new_it, inserted] = interned_strings_.emplace(str, std::string(str));
return new_it->second;
}
};
// Performance timer
class Timer {
private:
std::chrono::high_resolution_clock::time_point start_;
public:
Timer() : start_(std::chrono::high_resolution_clock::now()) {}
auto elapsed() const {
return std::chrono::high_resolution_clock::now() - start_;
}
auto elapsed_ms() const {
return std::chrono::duration_cast<std::chrono::milliseconds>(elapsed()).count();
}
};
// Memory pool for frequent allocations
template<typename T>
class MemoryPool {
private:
std::vector<std::unique_ptr<T[]>> blocks_;
std::vector<T*> free_list_;
size_t block_size_;
size_t current_block_;
size_t current_index_;
public:
MemoryPool(size_t block_size = 1024)
: block_size_(block_size), current_block_(0), current_index_(0) {
allocate_block();
}
T* allocate() {
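// Reuse a freed slot if available, otherwise hand out the next slot from the current block (allocating a new block when full).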
if (!free_list_.empty()) {
T* ptr = free_list_.back();
free_list_.pop_back();
return ptr;
}
if (current_index_ >= block_size_) {
allocate_block();
}
return &blocks_[current_block_][current_index_++];
}
void deallocate(T* ptr) {
free_list_.push_back(ptr);
}
private:
void allocate_block() {
blocks_.emplace_back(std::make_unique<T[]>(block_size_));
current_block_ = blocks_.size() - 1;
current_index_ = 0;
}
};
}

View File

@@ -1,251 +0,0 @@
// File: politics_worker.cpp
#include "politics_worker.h"
#include <iostream>
#include <chrono>
PoliticsWorker::PoliticsWorker(ConnectionPool &pool, MessageBroker &broker)
: Worker(pool, broker, "PoliticsWorker")
{
}
PoliticsWorker::~PoliticsWorker()
{
}
void PoliticsWorker::run() {
auto lastExecutionDate = std::chrono::system_clock::time_point{};
while (runningWorker) {
signalActivity();
auto now = std::chrono::system_clock::now();
auto todayFloor = std::chrono::floor<std::chrono::days>(now);
auto targetTime = todayFloor + std::chrono::hours(3); // 03:00
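// Run the daily politics task once per day, at or after the 03:00 target time.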
if (now >= targetTime && lastExecutionDate < todayFloor) {
signalActivity();
performDailyPoliticsTask();
lastExecutionDate = todayFloor;
}
for (int i = 0; i < 5 && runningWorker.load(); ++i) {
signalActivity();
std::this_thread::sleep_for(std::chrono::seconds(1));
}
}
}
void PoliticsWorker::performDailyPoliticsTask() {
try {
// … (steps for notifications and evaluatePoliticalPositions) …
// 3) Create elections and add **2 × posts_to_fill** candidates each
{
setCurrentStep("Schedule Elections and Insert Candidates");
// 3a) Create new elections (now also returns posts_to_fill)
auto elections = scheduleElections();
if (!elections.empty()) {
for (auto const & tpl : elections) {
int electionId = std::get<0>(tpl);
int regionId = std::get<1>(tpl);
int postsToFill = std::get<2>(tpl);
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("INSERT_CANDIDATES", QUERY_INSERT_CANDIDATES);
// $1 = electionId, $2 = regionId, $3 = postsToFill
db.execute("INSERT_CANDIDATES", {
std::to_string(electionId),
std::to_string(regionId),
std::to_string(postsToFill)
});
}
}
}
// … after scheduleElections() & candidates …
{
setCurrentStep("Process Elections After 3 Days");
auto newOffices = processElections();
for (auto const &tup : newOffices) {
notifyOfficeFilled({tup});
}
}
} catch (std::exception const & e) {
std::cerr << "[PoliticsWorker] Fehler bei performDailyPoliticsTask: " << e.what() << "\n";
}
}
void PoliticsWorker::evaluatePoliticalPositions(
std::unordered_map<int,int>& requiredPerRegion,
std::unordered_map<int,int>& occupiedPerRegion
) {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
signalActivity();
db.prepare("COUNT_OFFICES_PER_REGION", QUERY_COUNT_OFFICES_PER_REGION);
signalActivity();
const auto result = db.execute("COUNT_OFFICES_PER_REGION");
signalActivity();
for (const auto &row : result) {
int regionId = std::stoi(row.at("region_id"));
int reqCount = std::stoi(row.at("required_count"));
int occCount = std::stoi(row.at("occupied_count"));
requiredPerRegion[regionId] = reqCount;
occupiedPerRegion[regionId] = occCount;
signalActivity();
}
}
// politics_worker.cpp (excerpt)
std::vector<std::tuple<int,int,int>> PoliticsWorker::scheduleElections() {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
signalActivity();
db.prepare("SELECT_NEEDED_ELECTIONS", QUERY_SELECT_NEEDED_ELECTIONS);
signalActivity();
auto result = db.execute("SELECT_NEEDED_ELECTIONS");
signalActivity();
std::vector<std::tuple<int,int,int>> created;
created.reserve(result.size());
for (auto const & row : result) {
int electionId = std::stoi(row.at("election_id"));
int regionId = std::stoi(row.at("region_id"));
int postsToFill = std::stoi(row.at("posts_to_fill"));
created.emplace_back(electionId, regionId, postsToFill);
signalActivity();
}
return created;
}
std::vector<std::tuple<int,int,int,int>> PoliticsWorker::processExpiredOfficesAndFill() {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
signalActivity();
db.prepare("PROCESS_EXPIRED_AND_FILL", QUERY_PROCESS_EXPIRED_AND_FILL);
signalActivity();
const auto result = db.execute("PROCESS_EXPIRED_AND_FILL");
signalActivity();
std::vector<std::tuple<int,int,int,int>> created;
for (const auto &row : result) {
int officeId = std::stoi(row.at("office_id"));
int officeTypeId = std::stoi(row.at("office_type_id"));
int characterId = std::stoi(row.at("character_id"));
int regionId = std::stoi(row.at("region_id"));
created.emplace_back(officeId, officeTypeId, characterId, regionId);
signalActivity();
}
return created;
}
std::vector<int> PoliticsWorker::getUserIdsInCitiesOfRegions(const std::vector<int>& regionIds) {
if (regionIds.empty()) {
return {};
}
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
std::vector<int> userIds;
for (int rid : regionIds) {
signalActivity();
db.prepare("GET_USERS_IN_CITIES", QUERY_USERS_IN_CITIES_OF_REGIONS);
signalActivity();
const auto rows = db.execute("GET_USERS_IN_CITIES", { std::to_string(rid) });
signalActivity();
for (const auto &row : rows) {
int uid = std::stoi(row.at("user_id"));
userIds.push_back(uid);
signalActivity();
}
}
return userIds;
}
void PoliticsWorker::notifyOfficeExpirations() {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
signalActivity();
db.prepare("NOTIFY_OFFICE_EXPIRATION", QUERY_NOTIFY_OFFICE_EXPIRATION);
signalActivity();
db.execute("NOTIFY_OFFICE_EXPIRATION");
signalActivity();
// Send falukantUpdateStatus to all affected users
db.prepare("GET_USERS_WITH_EXPIRING_OFFICES", QUERY_GET_USERS_WITH_EXPIRING_OFFICES);
const auto affectedUsers = db.execute("GET_USERS_WITH_EXPIRING_OFFICES");
for (const auto &user : affectedUsers) {
int userId = std::stoi(user.at("user_id"));
nlohmann::json message = { { "event", "falukantUpdateStatus" } };
sendMessageToFalukantUsers(userId, message);
}
}
void PoliticsWorker::notifyElectionCreated(const std::vector<std::pair<int,int>>& elections) {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("NOTIFY_ELECTION_CREATED", QUERY_NOTIFY_ELECTION_CREATED);
for (const auto &pr : elections) {
signalActivity();
db.execute("NOTIFY_ELECTION_CREATED", { std::to_string(pr.first) });
signalActivity();
}
// Send falukantUpdateStatus to all affected users
db.prepare("GET_USERS_IN_REGIONS_WITH_ELECTIONS", QUERY_GET_USERS_IN_REGIONS_WITH_ELECTIONS);
const auto affectedUsers = db.execute("GET_USERS_IN_REGIONS_WITH_ELECTIONS");
for (const auto &user : affectedUsers) {
int userId = std::stoi(user.at("user_id"));
nlohmann::json message = { { "event", "falukantUpdateStatus" } };
sendMessageToFalukantUsers(userId, message);
}
}
void PoliticsWorker::notifyOfficeFilled(const std::vector<std::tuple<int,int,int,int>>& newOffices) {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("NOTIFY_OFFICE_FILLED", QUERY_NOTIFY_OFFICE_FILLED);
for (const auto &tup : newOffices) {
int characterId = std::get<2>(tup);
signalActivity();
db.execute("NOTIFY_OFFICE_FILLED", { std::to_string(characterId) });
signalActivity();
}
// Send falukantUpdateStatus to all affected users
db.prepare("GET_USERS_WITH_FILLED_OFFICES", QUERY_GET_USERS_WITH_FILLED_OFFICES);
const auto affectedUsers = db.execute("GET_USERS_WITH_FILLED_OFFICES");
for (const auto &user : affectedUsers) {
int userId = std::stoi(user.at("user_id"));
nlohmann::json message = { { "event", "falukantUpdateStatus" } };
sendMessageToFalukantUsers(userId, message);
}
}
std::vector<std::tuple<int,int,int,int>> PoliticsWorker::processElections() {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("PROCESS_ELECTIONS", QUERY_PROCESS_ELECTIONS);
auto result = db.execute("PROCESS_ELECTIONS", {});
std::vector<std::tuple<int,int,int,int>> created;
for (auto const &row : result) {
int officeId = std::stoi(row.at("office_id"));
int officeTypeId = std::stoi(row.at("office_type_id"));
int characterId = std::stoi(row.at("character_id"));
int regionId = std::stoi(row.at("region_id"));
created.emplace_back(officeId, officeTypeId, characterId, regionId);
}
return created;
}

View File

@@ -1,513 +0,0 @@
// File: politics_worker.h
#ifndef POLITICS_WORKER_H
#define POLITICS_WORKER_H
#include "worker.h"
#include <tuple>
#include <vector>
#include <unordered_map>
class PoliticsWorker : public Worker {
public:
PoliticsWorker(ConnectionPool &pool, MessageBroker &broker);
~PoliticsWorker() override;
protected:
void run() override;
private:
void performDailyPoliticsTask();
void evaluatePoliticalPositions(
std::unordered_map<int,int>& requiredPerRegion,
std::unordered_map<int,int>& occupiedPerRegion
);
std::vector<std::tuple<int,int,int>> scheduleElections();
std::vector<std::tuple<int,int,int,int>> processExpiredOfficesAndFill();
std::vector<int> getUserIdsInCitiesOfRegions(const std::vector<int>& regionIds);
void notifyOfficeExpirations();
void notifyElectionCreated(const std::vector<std::pair<int,int>>& elections);
void notifyOfficeFilled(const std::vector<std::tuple<int,int,int,int>>& newOffices);
std::vector<std::tuple<int, int, int, int> > processElections();
// ------------------------------------------------------------
// QUERY: Count per region how many seats are required vs. occupied
// ------------------------------------------------------------
static constexpr const char* QUERY_COUNT_OFFICES_PER_REGION = R"(
WITH
seats_per_region AS (
SELECT
pot.id AS office_type_id,
rt.id AS region_id,
pot.seats_per_region AS seats_total
FROM
falukant_type.political_office_type AS pot
JOIN
falukant_type.region AS rt
ON pot.region_type = rt.label_tr
),
occupied AS (
SELECT
po.office_type_id,
po.region_id,
COUNT(*) AS occupied_count
FROM
falukant_data.political_office AS po
GROUP BY
po.office_type_id, po.region_id
),
combined AS (
SELECT
spr.region_id,
spr.seats_total AS required_count,
COALESCE(o.occupied_count, 0) AS occupied_count
FROM
seats_per_region AS spr
LEFT JOIN
occupied AS o
ON spr.office_type_id = o.office_type_id
AND spr.region_id = o.region_id
)
SELECT
region_id,
SUM(required_count) AS required_count,
SUM(occupied_count) AS occupied_count
FROM combined
GROUP BY region_id;
)";
// ------------------------------------------------------------
// STEP 1: Create only those elections for which no election already
//         exists for the same date (NOW()+2 days).
// ------------------------------------------------------------
static constexpr const char* QUERY_SELECT_NEEDED_ELECTIONS = R"(
WITH
-- 1) Define today's date once as a reference
target_date AS (
SELECT NOW()::date AS election_date
),
-- 2) Delete only those offices whose expiry date is reached today,
--    and remember their (office_type_id, region_id)
expired_today AS (
DELETE FROM falukant_data.political_office AS po
USING falukant_type.political_office_type AS pot
WHERE po.office_type_id = pot.id
AND (po.created_at + (pot.term_length * INTERVAL '1 day'))::date = (SELECT election_date FROM target_date)
RETURNING
pot.id AS office_type_id,
po.region_id AS region_id
),
-- 3) Group by type+region and count how many seats became vacant today
gaps_per_region AS (
SELECT
office_type_id,
region_id,
COUNT(*) AS gaps
FROM expired_today
GROUP BY office_type_id, region_id
),
-- 4) Keep only those type+region combinations for which **no** election
--    with exactly this date has been created yet
to_schedule AS (
SELECT
g.office_type_id,
g.region_id,
g.gaps,
td.election_date
FROM
gaps_per_region AS g
CROSS JOIN
target_date AS td
WHERE NOT EXISTS (
SELECT 1
FROM falukant_data.election AS e
WHERE e.office_type_id = g.office_type_id
AND e.region_id = g.region_id
AND e."date"::date = td.election_date
)
),
-- 5) Create exactly one election for each remaining combination
new_elections AS (
INSERT INTO falukant_data.election
(office_type_id, "date", posts_to_fill, created_at, updated_at, region_id)
SELECT
ts.office_type_id,
ts.election_date AS "date",
ts.gaps AS posts_to_fill,
NOW() AS created_at,
NOW() AS updated_at,
ts.region_id
FROM
to_schedule AS ts
RETURNING
id AS election_id,
region_id,
posts_to_fill
)
-- 6) Return all newly created elections
SELECT
ne.election_id,
ne.region_id,
ne.posts_to_fill
FROM
new_elections AS ne
ORDER BY
ne.region_id,
ne.election_id;
)";
// -----------------------------------------------------------------------
// 2) Inserts exactly LIMIT = ($3 * 2) candidates for a given election,
//    e.g. posts_to_fill = 3 yields 6 candidates:
//    $1 = election_id, $2 = region_id, $3 = number of seats (posts_to_fill)
// -----------------------------------------------------------------------
static constexpr const char* QUERY_INSERT_CANDIDATES = R"(
INSERT INTO falukant_data.candidate
(election_id, character_id, created_at, updated_at)
SELECT
$1 AS election_id,
sub.id AS character_id,
NOW() AS created_at,
NOW() AS updated_at
FROM (
WITH RECURSIVE region_tree AS (
SELECT r.id
FROM falukant_data.region AS r
WHERE r.id = $2
UNION ALL
SELECT r2.id
FROM falukant_data.region AS r2
JOIN region_tree AS rt
ON r2.parent_id = rt.id
)
SELECT
ch.id
FROM
falukant_data."character" AS ch
JOIN
region_tree AS rt2
ON ch.region_id = rt2.id
WHERE
ch.user_id IS NULL
AND ch.birthdate <= NOW() - INTERVAL '21 days'
AND ch.title_of_nobility IN (
SELECT id
FROM falukant_type.title
WHERE label_tr != 'noncivil'
)
ORDER BY RANDOM()
LIMIT ($3 * 2)
) AS sub(id);
)";
// ------------------------------------------------------------
// STEP 2: Insert a single new election and return the new election_id
// $1 = office_type_id
// $2 = gaps (posts_to_fill)
// ------------------------------------------------------------
static constexpr const char* QUERY_INSERT_ELECTION = R"(
INSERT INTO falukant_data.election
(office_type_id, "date", posts_to_fill, created_at, updated_at)
VALUES
(
$1,
NOW() + INTERVAL '2 days',
$2,
NOW(),
NOW()
)
RETURNING id;
)";
// ------------------------------------------------------------
// QUERY: Process Expired Offices & Refill (Winner + Random)
// ------------------------------------------------------------
static constexpr const char* QUERY_PROCESS_EXPIRED_AND_FILL = R"(
WITH
expired_offices AS (
DELETE FROM falukant_data.political_office AS po
USING falukant_type.political_office_type AS pot
WHERE po.office_type_id = pot.id
AND (po.created_at + (pot.term_length * INTERVAL '1 day')) <= NOW()
RETURNING
pot.id AS office_type_id,
po.region_id AS region_id
),
distinct_types AS (
SELECT DISTINCT
office_type_id,
region_id
FROM expired_offices
),
votes_per_candidate AS (
SELECT
dt.office_type_id,
dt.region_id,
c.character_id,
COUNT(v.id) AS vote_count
FROM distinct_types AS dt
JOIN falukant_data.election AS e
ON e.office_type_id = dt.office_type_id
JOIN falukant_data.vote AS v
ON v.election_id = e.id
JOIN falukant_data.candidate AS c
ON c.election_id = e.id
AND c.id = v.candidate_id
WHERE e."date" >= (NOW() - INTERVAL '30 days')
GROUP BY
dt.office_type_id,
dt.region_id,
c.character_id
),
ranked_winners AS (
SELECT
vpc.office_type_id,
vpc.region_id,
vpc.character_id,
ROW_NUMBER() OVER (
PARTITION BY vpc.office_type_id, vpc.region_id
ORDER BY vpc.vote_count DESC
) AS rn
FROM votes_per_candidate AS vpc
),
selected_winners AS (
SELECT
rw.office_type_id,
rw.region_id,
rw.character_id
FROM ranked_winners AS rw
JOIN falukant_type.political_office_type AS pot
ON pot.id = rw.office_type_id
WHERE rw.rn <= pot.seats_per_region
),
insert_winners AS (
INSERT INTO falukant_data.political_office
(office_type_id, character_id, created_at, updated_at, region_id)
SELECT
sw.office_type_id,
sw.character_id,
NOW() AS created_at,
NOW() AS updated_at,
sw.region_id
FROM selected_winners AS sw
RETURNING
id AS new_office_id,
office_type_id,
character_id,
region_id
),
count_inserted AS (
SELECT
office_type_id,
region_id,
COUNT(*) AS inserted_count
FROM insert_winners
GROUP BY office_type_id, region_id
),
needed_to_fill AS (
SELECT
dt.office_type_id,
dt.region_id,
(pot.seats_per_region - COALESCE(ci.inserted_count, 0)) AS gaps
FROM distinct_types AS dt
JOIN falukant_type.political_office_type AS pot
ON pot.id = dt.office_type_id
LEFT JOIN count_inserted AS ci
ON ci.office_type_id = dt.office_type_id
AND ci.region_id = dt.region_id
WHERE (pot.seats_per_region - COALESCE(ci.inserted_count, 0)) > 0
),
random_candidates AS (
SELECT
rtf.office_type_id,
rtf.region_id,
ch.id AS character_id,
ROW_NUMBER() OVER (
PARTITION BY rtf.office_type_id, rtf.region_id
ORDER BY RANDOM()
) AS rn
FROM needed_to_fill AS rtf
JOIN falukant_data."character" AS ch
ON ch.region_id = rtf.region_id
AND ch.user_id IS NULL
AND ch.birthdate <= NOW() - INTERVAL '21 days'
AND ch.title_of_nobility IN (
SELECT id FROM falukant_type.title WHERE label_tr != 'noncivil'
)
AND NOT EXISTS (
SELECT 1
FROM falukant_data.political_office AS po2
JOIN falukant_type.political_office_type AS pot2
ON pot2.id = po2.office_type_id
WHERE po2.character_id = ch.id
AND (po2.created_at + (pot2.term_length * INTERVAL '1 day')) > NOW() + INTERVAL '2 days'
)
),
insert_random AS (
INSERT INTO falukant_data.political_office
(office_type_id, character_id, created_at, updated_at, region_id)
SELECT
rc.office_type_id,
rc.character_id,
NOW() AS created_at,
NOW() AS updated_at,
rc.region_id
FROM random_candidates AS rc
JOIN needed_to_fill AS rtf
ON rtf.office_type_id = rc.office_type_id
AND rtf.region_id = rc.region_id
WHERE rc.rn <= rtf.gaps
RETURNING
id AS new_office_id,
office_type_id,
character_id,
region_id
)
SELECT
new_office_id AS office_id,
office_type_id,
character_id,
region_id
FROM insert_winners
UNION ALL
SELECT
new_office_id AS office_id,
office_type_id,
character_id,
region_id
FROM insert_random;
)";
// ------------------------------------------------------------
// QUERY: Fetch user IDs in all cities of subordinate regions:
// ------------------------------------------------------------
static constexpr const char* QUERY_USERS_IN_CITIES_OF_REGIONS = R"(
WITH RECURSIVE region_tree AS (
SELECT id
FROM falukant_data.region
WHERE id = $1
UNION ALL
SELECT r2.id
FROM falukant_data.region AS r2
JOIN region_tree AS rt
ON r2.parent_id = rt.id
)
SELECT DISTINCT
ch.user_id
FROM
falukant_data."character" AS ch
JOIN
region_tree AS rt2
ON ch.region_id = rt2.id
WHERE
ch.user_id IS NOT NULL;
)";
// ------------------------------------------------------------
// QUERY: Notify users whose office expires in 2 days
// ------------------------------------------------------------
static constexpr const char* QUERY_NOTIFY_OFFICE_EXPIRATION = R"(
INSERT INTO falukant_log.notification
(user_id, tr, created_at, updated_at)
SELECT
po.character_id,
'notify_office_expiring',
NOW(), NOW()
FROM
falukant_data.political_office AS po
JOIN
falukant_type.political_office_type AS pot
ON po.office_type_id = pot.id
WHERE
(po.created_at + (pot.term_length * INTERVAL '1 day'))
BETWEEN (NOW() + INTERVAL '2 days')
AND (NOW() + INTERVAL '2 days' + INTERVAL '1 second');
)";
// ------------------------------------------------------------
// QUERY: Notify users when an election has been created
// ------------------------------------------------------------
static constexpr const char* QUERY_NOTIFY_ELECTION_CREATED = R"(
INSERT INTO falukant_log.notification
(user_id, tr, created_at, updated_at)
VALUES
($1, 'notify_election_created', NOW(), NOW());
)";
// ------------------------------------------------------------
// QUERY: Notify users when an office has been newly filled
// ------------------------------------------------------------
static constexpr const char* QUERY_NOTIFY_OFFICE_FILLED = R"(
INSERT INTO falukant_log.notification
(user_id, tr, created_at, updated_at)
VALUES
($1, 'notify_office_filled', NOW(), NOW());
)";
// ------------------------------------------------------------
// QUERY: Fetch all users whose office expires in 2 days
// ------------------------------------------------------------
static constexpr const char* QUERY_GET_USERS_WITH_EXPIRING_OFFICES = R"(
SELECT DISTINCT
ch.user_id
FROM
falukant_data.political_office AS po
JOIN
falukant_type.political_office_type AS pot
ON po.office_type_id = pot.id
JOIN
falukant_data."character" AS ch
ON po.character_id = ch.id
WHERE
ch.user_id IS NOT NULL
AND (po.created_at + (pot.term_length * INTERVAL '1 day'))
BETWEEN (NOW() + INTERVAL '2 days')
AND (NOW() + INTERVAL '2 days' + INTERVAL '1 second');
)";
// ------------------------------------------------------------
// QUERY: Fetch all users in regions with new elections
// ------------------------------------------------------------
static constexpr const char* QUERY_GET_USERS_IN_REGIONS_WITH_ELECTIONS = R"(
SELECT DISTINCT
ch.user_id
FROM
falukant_data.election AS e
JOIN
falukant_data."character" AS ch
ON ch.region_id = e.region_id
WHERE
ch.user_id IS NOT NULL
AND e."date" >= NOW() - INTERVAL '1 day';
)";
// ------------------------------------------------------------
// QUERY: Fetch all users whose office was newly filled
// ------------------------------------------------------------
static constexpr const char* QUERY_GET_USERS_WITH_FILLED_OFFICES = R"(
SELECT DISTINCT
ch.user_id
FROM
falukant_data.political_office AS po
JOIN
falukant_data."character" AS ch
ON po.character_id = ch.id
WHERE
ch.user_id IS NOT NULL
AND po.created_at >= NOW() - INTERVAL '1 minute';
)";
static constexpr const char* QUERY_PROCESS_ELECTIONS = R"(
SELECT office_id, office_type_id, character_id, region_id
FROM falukant_data.process_elections();
)";
};
#endif // POLITICS_WORKER_H

View File

@@ -1,202 +0,0 @@
#include "produce_worker.h"
#include "connection_guard.h"
#include <iostream>
#include <algorithm>
#include <thread>
#include <nlohmann/json.hpp>
ProduceWorker::ProduceWorker(ConnectionPool &pool, MessageBroker &broker)
: Worker(pool, broker, "ProduceWorker") {}
ProduceWorker::~ProduceWorker() {
}
void ProduceWorker::run() {
auto lastIterationTime = std::chrono::steady_clock::now();
while (runningWorker.load()) {
setCurrentStep("Check runningWorker Variable");
if (!runningWorker.load()) {
break;
}
setCurrentStep("Calculate elapsed time");
auto now = std::chrono::steady_clock::now();
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(now - lastIterationTime);
if (elapsed < std::chrono::milliseconds(200)) {
// Shorter sleep intervals for better shutdown responsiveness
auto sleepTime = std::chrono::milliseconds(200) - elapsed;
for (int i = 0; i < sleepTime.count() && runningWorker.load(); i += 10) {
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
}
if (!runningWorker.load()) break;
lastIterationTime = std::chrono::steady_clock::now();
setCurrentStep("Process Productions");
processProductions();
setCurrentStep("Signal Activity");
signalActivity();
setCurrentStep("Loop Done");
}
}
void ProduceWorker::processProductions() {
try {
setCurrentStep("Get Database Connection");
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
setCurrentStep("Fetch Finished Productions");
auto finishedProductions = getFinishedProductions(db);
setCurrentStep("Process Finished Productions");
for (const auto &production : finishedProductions) {
if (production.find("branch_id") == production.end() ||
production.find("product_id") == production.end() ||
production.find("quantity") == production.end() ||
production.find("quality") == production.end() ||
production.find("user_id") == production.end()) {
continue;
}
int branchId = std::stoi(production.at("branch_id"));
int productId = std::stoi(production.at("product_id"));
int quantity = std::stoi(production.at("quantity"));
int quality = std::stoi(production.at("quality"));
int userId = std::stoi(production.at("user_id"));
int regionId = std::stoi(production.at("region_id"));
addToInventory(db, branchId, productId, quantity, quality, userId);
deleteProduction(db, production.at("production_id"));
addProductionToLog(regionId, userId, productId, quantity);
const nlohmann::json message = {
{"event", "production_ready"},
{"branch_id", std::to_string(branchId) }
};
sendMessageToFalukantUsers(userId, message);
}
} catch (const std::exception &e) {
std::cerr << "[ProduceWorker] Fehler in processProductions: " << e.what() << std::endl;
}
}
std::vector<std::unordered_map<std::string, std::string>> ProduceWorker::getFinishedProductions(Database &db) {
try {
db.prepare("get_finished_productions", QUERY_GET_FINISHED_PRODUCTIONS);
return db.execute("get_finished_productions");
} catch (const std::exception &e) {
std::cerr << "[ProduceWorker] Fehler beim Abrufen abgeschlossener Produktionen: "
<< e.what() << std::endl;
}
return {};
}
bool ProduceWorker::addToInventory(Database &db,
int branchId,
int productId,
int quantity,
int quality,
int userId) {
try {
db.prepare("get_stocks", QUERY_GET_AVAILABLE_STOCKS);
auto stocks = db.execute("get_stocks", {std::to_string(branchId)});
int remainingQuantity = quantity;
for (const auto &stock : stocks) {
int stockId = std::stoi(stock.at("id"));
int totalCapacity = std::stoi(stock.at("total_capacity"));
int filledCapacity = std::stoi(stock.at("filled"));
int freeCapacity = totalCapacity - filledCapacity;
if (freeCapacity <= 0) {
continue;
}
int toStore = std::min(remainingQuantity, freeCapacity);
if (!storeInStock(db, stockId, productId, toStore, quality)) {
return false;
}
remainingQuantity -= toStore;
if (remainingQuantity <= 0) {
break;
}
}
if (remainingQuantity == 0) {
sendProductionReadyEvent(userId, productId, quantity, quality, branchId);
return true;
}
db.prepare("QUERY_ADD_OVERPRODUCTION_NOTIFICATION", QUERY_ADD_OVERPRODUCTION_NOTIFICATION);
nlohmann::json notification = {
{"tr", "production.overproduction"},
{"value", remainingQuantity}
};
db.execute("QUERY_ADD_OVERPRODUCTION_NOTIFICATION", {std::to_string(userId), notification.dump()});
// Send falukantUpdateStatus after inserting the notification
nlohmann::json updateMessage = {
{"event", "falukantUpdateStatus"},
{"user_id", userId}
};
broker.publish(updateMessage.dump());
return true;
} catch (const std::exception &e) {
std::cerr << "[ProduceWorker] Fehler in addToInventory: " << e.what() << std::endl;
}
return false;
}
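// Minimal sketch of the capacity-splitting rule used in addToInventory above
// (pure function, illustration only; the name splitAcrossStocksSketch is
// hypothetical): with free capacities {30, 50} and a produced quantity of 60
// it stores 30 + 30 and returns 0; with 100 it returns 20, which would become
// the overproduction notification.
[[maybe_unused]] static int splitAcrossStocksSketch(const std::vector<int> &freeCapacities, int quantity) {
    for (int freeCapacity : freeCapacities) {
        if (quantity <= 0) break;
        if (freeCapacity <= 0) continue;
        quantity -= std::min(quantity, freeCapacity); // store as much as fits here
    }
    return quantity; // remainder that did not fit into any stock
}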
bool ProduceWorker::storeInStock(Database &db,
int stockId,
int productId,
int quantity,
int quality) {
try {
db.prepare("insert_inventory", QUERY_INSERT_INVENTORY);
db.execute("insert_inventory", {std::to_string(stockId),
std::to_string(productId),
std::to_string(quantity),
std::to_string(quality)});
return true;
} catch (const std::exception &e) {
std::cerr << "[ProduceWorker] Fehler in storeInStock: " << e.what() << std::endl;
}
return false;
}
void ProduceWorker::deleteProduction(Database &db, const std::string &productionId) {
try {
db.prepare("delete_production", QUERY_DELETE_PRODUCTION);
db.execute("delete_production", {productionId});
} catch (const std::exception &e) {
std::cerr << "[ProduceWorker] Fehler beim Löschen der Produktion: " << e.what() << std::endl;
}
}
void ProduceWorker::sendProductionReadyEvent(int userId,
int productId,
int quantity,
int quality,
int branchId)
{
try {
nlohmann::json message = {
{"event", "production_ready"},
{"user_id", userId},
{"product_id", productId},
{"quantity", quantity},
{"quality", quality},
{"branch_id", branchId}
};
broker.publish(message.dump());
} catch (const std::exception &e) {
std::cerr << "[ProduceWorker] Fehler beim Senden des Production Ready Events: "
<< e.what() << std::endl;
}
}
void ProduceWorker::addProductionToLog(int regionId, int userId, int productId, int quantity) {
    try {
        ConnectionGuard connGuard(pool);
        auto &db = connGuard.get();
        db.prepare("QUERY_INSERT_UPDATE_PRODUCTION_LOG", QUERY_INSERT_UPDATE_PRODUCTION_LOG);
        // $1 = region_id, $2 = product_id, $3 = quantity, $4 = producer_id
        db.execute("QUERY_INSERT_UPDATE_PRODUCTION_LOG", { std::to_string(regionId), std::to_string(productId),
                                                           std::to_string(quantity), std::to_string(userId) });
    } catch (const std::exception &e) {
        std::cerr << "[ProduceWorker] Error in addProductionToLog: " << e.what() << std::endl;
    }
}

View File

@@ -1,89 +0,0 @@
#pragma once
#include "worker.h"
#include <vector>
#include <string>
class ProduceWorker : public Worker {
public:
explicit ProduceWorker(ConnectionPool &pool, MessageBroker &broker);
~ProduceWorker() override;
protected:
void run() override;
private:
void processProductions();
std::vector<std::unordered_map<std::string, std::string>> getFinishedProductions(Database &db);
bool addToInventory(Database &db, int branchId, int productId, int quantity, int quality, int userId);
bool storeInStock(Database &db, int stockId, int productId, int quantity, int quality);
void deleteProduction(Database &db, const std::string &productionId);
void sendProductionReadyEvent(int userId, int productId, int quantity, int quality, int branchId);
void addProductionToLog(int regionId, int userId, int productId, int quantity);
static constexpr const char *QUERY_GET_FINISHED_PRODUCTIONS = R"(
SELECT DISTINCT
p.id AS production_id,
p.branch_id,
p.product_id,
p.quantity,
p.start_timestamp,
pr.production_time,
k.character_id,
case when k2.id is not null then (k.knowledge * 2 + k2.knowledge) / 3 else k.knowledge end AS quality,
br.region_id,
br.falukant_user_id user_id
FROM falukant_data.production p
JOIN falukant_type.product pr ON p.product_id = pr.id
JOIN falukant_data.branch br ON p.branch_id = br.id
JOIN falukant_data.character c ON c.user_id = br.falukant_user_id
JOIN falukant_data.knowledge k ON p.product_id = k.product_id AND k.character_id = c.id
JOIN falukant_data.stock s ON s.branch_id = br.id
LEFT JOIN falukant_data.director d on d.employer_user_id = c.user_id
LEFT JOIN falukant_data.knowledge k2 on k2.character_id = d.director_character_id and k2.product_id = p.product_id
WHERE p.start_timestamp + interval '1 minute' * pr.production_time <= NOW()
ORDER BY p.start_timestamp;
)";
static constexpr const char *QUERY_GET_AVAILABLE_STOCKS = R"(
SELECT stock.id, stock.quantity AS total_capacity, (
SELECT COALESCE(SUM(inventory.quantity), 0)
FROM falukant_data.inventory
WHERE inventory.stock_id = stock.id
) AS filled, stock.branch_id
FROM falukant_data.stock stock
JOIN falukant_data.branch branch
ON stock.branch_id = branch.id
WHERE branch.id = $1
ORDER BY total_capacity DESC;
)";
static constexpr const char *QUERY_DELETE_PRODUCTION = R"(
DELETE FROM falukant_data.production WHERE id = $1;
)";
static constexpr const char *QUERY_INSERT_INVENTORY = R"(
INSERT INTO falukant_data.inventory (stock_id, product_id, quantity, quality, produced_at)
VALUES ($1, $2, $3, $4, NOW());
)";
static constexpr const char *QUERY_INSERT_UPDATE_PRODUCTION_LOG = R"(
INSERT INTO falukant_log.production (
region_id,
product_id,
quantity,
producer_id,
production_date
)
VALUES ($1, $2, $3, $4, CURRENT_DATE)
ON CONFLICT (producer_id, product_id, region_id, production_date)
DO UPDATE
SET quantity = falukant_log.production.quantity + EXCLUDED.quantity;
)";
static constexpr const char *QUERY_ADD_OVERPRODUCTION_NOTIFICATION = R"(
INSERT INTO falukant_log.notification
(user_id, tr, shown, created_at, updated_at)
VALUES($1, $2, false, now(), now());
)";
};

View File

@@ -1,98 +0,0 @@
#include "stockagemanager.h"
#include "connection_guard.h"
#include <iostream>
#include <random>
#include <chrono>
#include <thread>
#include <cmath>
StockageManager::StockageManager(ConnectionPool &pool, MessageBroker &broker)
: Worker(pool, broker, "StockageManager") {}
StockageManager::~StockageManager() {
addStocksRunning = false;
if (addStocksThread.joinable()) addStocksThread.join();
}
void StockageManager::run() {
addStocksThread = std::thread([this]() { addLocalStocks(); });
while (runningWorker) {
setCurrentStep("Main loop: Running...");
std::this_thread::sleep_for(std::chrono::seconds(1));
signalActivity();
}
}
void StockageManager::addLocalStocks() {
auto lastExecutionTime = std::chrono::steady_clock::now();
std::uniform_real_distribution<> dist(0.0, 1.0);
while (addStocksRunning) {
signalActivity();
auto now = std::chrono::steady_clock::now();
auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(now - lastExecutionTime).count();
if (elapsed >= 60) {
try {
setCurrentStep("Add Local Stocks: Fetch Town IDs");
auto townIds = getTownIds();
for (const auto &townId : townIds) {
static thread_local std::mt19937 gen(std::random_device{}()); // seed once, not on every iteration
double chance = std::round(dist(gen) * 2160);
if (chance <= 1) {
addStockForTown(townId);
}
}
} catch (const std::exception &e) {
std::cerr << "[StockageManager] Fehler in addLocalStocks: " << e.what() << std::endl;
}
lastExecutionTime = now;
}
cleanupBuyableStock();
std::this_thread::sleep_for(std::chrono::seconds(1));
}
}
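// Rough frequency of the random drop above (illustration): round(U * 2160) <= 1
// holds when U < 1.5 / 2160, i.e. roughly once in 1440 draws, and the check
// runs about once per minute per town, so each town receives on the order of
// one new buyable stock per day on average.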
std::vector<int> StockageManager::getTownIds() {
try {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("get_towns", QUERY_GET_TOWNS);
const auto towns = db.execute("get_towns");
std::vector<int> townIds;
for (const auto &town: towns) {
auto id = town.at("id");
townIds.push_back(std::stoi(id));
}
return townIds;
} catch (const std::exception &e) {
std::cerr << "[ProduceWorker] Fehler beim Abrufen abgeschlossener Produktionen: "
<< e.what() << std::endl;
}
return {};
}
void StockageManager::addStockForTown(int townId) {
try {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("add_stock", QUERY_INSERT_STOCK);
db.execute("add_stock", {std::to_string(townId)});
nlohmann::json message = {
{"event", "stock_change"},
{"branch", std::to_string(townId) }
};
sendMessageToRegionUsers(townId, message);
} catch (const std::exception &e) {
std::cerr << "[StockageManager] Fehler in addStockForTown: " << e.what() << std::endl;
}
}
void StockageManager::cleanupBuyableStock() {
try {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("cleanup_stock", QUERY_CLEANUP_STOCK);
db.execute("cleanup_stock", {});
} catch (const std::exception &e) {
std::cerr << "[StockageManager] Fehler bei stock cleanup: " << e.what() << std::endl;
}
}

View File

@@ -1,59 +0,0 @@
#pragma once
#include "worker.h"
#include <thread>
#include <atomic>
class StockageManager : public Worker {
public:
explicit StockageManager(ConnectionPool &pool, MessageBroker &broker);
~StockageManager() override;
protected:
void run() override;
private:
void addLocalStocks();
std::vector<int> getTownIds();
void addStockForTown(int townId);
std::atomic<bool> addStocksRunning{true};
std::thread addStocksThread;
static constexpr const char *QUERY_GET_TOWNS = R"(
SELECT fdr.id
from falukant_data.region fdr
join falukant_type.region ftr
on ftr.id = fdr.region_type_id
where ftr.label_tr = 'city'
)";
static constexpr const char *QUERY_INSERT_STOCK = R"(
INSERT INTO falukant_data.buyable_stock (region_id, stock_type_id, quantity)
SELECT
$1 AS region_id,
s.id AS stock_type_id,
GREATEST(1, ROUND(RANDOM() * 5 * COUNT(br.id))) AS quantity
FROM
falukant_data.branch AS br
CROSS JOIN
falukant_type.stock AS s
WHERE
br.region_id = $1
GROUP BY
s.id
ORDER BY
RANDOM()
LIMIT
GREATEST(
ROUND(RANDOM() * (SELECT COUNT(id) FROM falukant_type.stock)),
1
);
)";
static constexpr const char *QUERY_CLEANUP_STOCK = R"(
delete from falukant_data.buyable_stock
where quantity <= 0
)";
void cleanupBuyableStock();
};

View File

@@ -1,448 +0,0 @@
#include "underground_worker.h"
#include <random>
#include <algorithm>
#include <numeric>
#include <chrono>
#include <thread>
#include <cmath>
using json = nlohmann::json;
UndergroundWorker::~UndergroundWorker() = default;
static std::mt19937& rng() {
static thread_local std::mt19937 g{std::random_device{}()};
return g;
}
int UndergroundWorker::randomInt(int lo,int hi){
std::uniform_int_distribution<int> d(lo,hi);
return d(rng());
}
long long UndergroundWorker::randomLL(long long lo,long long hi){
std::uniform_int_distribution<long long> d(lo,hi);
return d(rng());
}
std::vector<size_t> UndergroundWorker::randomIndices(size_t n,size_t k){
std::vector<size_t> idx(n);
std::iota(idx.begin(),idx.end(),0);
std::shuffle(idx.begin(),idx.end(),rng());
if(k<idx.size()) idx.resize(k);
return idx;
}
void UndergroundWorker::run(){
using namespace std::chrono;
while(runningWorker){
setCurrentStep("Process underground jobs");
signalActivity();
tick();
setCurrentStep("Idle");
for(int i=0;i<60 && runningWorker;++i){
std::this_thread::sleep_for(seconds(1));
signalActivity();
}
}
}
void UndergroundWorker::tick(){
setCurrentStep("Fetch pending underground jobs");
ConnectionGuard g(pool);
auto& db=g.get();
db.prepare("UG_SELECT_PENDING",Q_SELECT_PENDING);
db.prepare("UG_UPDATE_RESULT",Q_UPDATE_RESULT);
const auto rows=db.execute("UG_SELECT_PENDING");
for(const auto& r:rows){
try{
auto res=executeRow(r);
int id=std::stoi(r.at("id"));
updateResult(id,res);
broker.publish(json{{"event","underground_processed"},{"id",id},{"type",r.at("underground_type")}}.dump());
}catch(const std::exception& e){
try{
int id=std::stoi(r.at("id"));
updateResult(id,json{{"status","error"},{"message",e.what()}});
}catch(...){}
}
}
}
std::vector<UndergroundWorker::Row> UndergroundWorker::fetchPending(){
ConnectionGuard g(pool);
auto& db=g.get();
db.prepare("UG_SELECT_PENDING",Q_SELECT_PENDING);
return db.execute("UG_SELECT_PENDING");
}
nlohmann::json UndergroundWorker::executeRow(const Row& r){
int performerId=std::stoi(r.at("performer_id"));
int victimId=std::stoi(r.at("victim_id"));
std::string type=r.at("underground_type");
std::string params=r.at("parameters");
return handleTask(type,performerId,victimId,params);
}
nlohmann::json UndergroundWorker::handleTask(const std::string& type,int performerId,int victimId,const std::string& paramsJson){
json p; try{ p=json::parse(paramsJson);} catch(...){ p=json::object(); }
if(type=="spyin") return spyIn(performerId,victimId,p);
if(type=="assassin") return assassin(performerId,victimId,p);
if(type=="sabotage") return sabotage(performerId,victimId,p);
if(type=="corrupt_politician") return corruptPolitician(performerId,victimId,p);
if(type=="rob") return rob(performerId,victimId,p);
return {{"status","unknown_type"},{"type",type}};
}
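// Illustrative parameter payloads for the task types above (field names are
// taken from the handlers below; the concrete values are made up):
//   sabotage (house):   {"target":"house","conditions":["roof_condition","window_condition"]}
//   sabotage (storage): {"target":"storage","branch_id":42,"stock_type_ids":[1,2]}
//   rob:                {"branch_id":42}   (branch_id is optional; the victim's main branch is the fallback)
//   spyin, assassin, corrupt_politician: {} (no extra parameters required)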
nlohmann::json UndergroundWorker::spyIn(int performerId,int victimId,const json& p){
ConnectionGuard g(pool);
auto& db=g.get();
db.prepare("UG_SELECT_BY_PERFORMER",Q_SELECT_BY_PERFORMER);
const auto rows = db.execute("UG_SELECT_BY_PERFORMER",{ std::to_string(victimId) });
json activities = json::array();
for(const auto& r : rows){
json params = json::object();
try{ params = json::parse(r.at("parameters")); }catch(...){}
json result = nullptr;
auto it = r.find("result_text");
if(it != r.end()){
try{ result = json::parse(it->second); }catch(...){}
}
std::string status = "pending";
if(result.is_object()){
if(auto s = result.find("status"); s!=result.end() && s->is_string()) {
status = s->get<std::string>();
} else {
status = "done";
}
}
activities.push_back({
{"id", std::stoi(r.at("id"))},
{"type", r.at("underground_type")},
{"performed_by", std::stoi(r.at("performer_id"))},
{"victim_id", std::stoi(r.at("victim_id"))},
{"created_at", r.at("created_at")},
{"parameters", params},
{"result", result},
{"status", status}
});
}
return {
{"status","success"},
{"action","spyin"},
{"performer_id", performerId},
{"victim_id", victimId},
{"details", p},
{"victim_illegal_activity_count", activities.size()},
{"victim_illegal_activities", activities}
};
}
nlohmann::json UndergroundWorker::assassin(int performerId,int victimId,const json& p){
ConnectionGuard g(pool);
auto& db=g.get();
db.prepare("UG_SELECT_CHAR_HEALTH",Q_SELECT_CHAR_HEALTH);
db.prepare("UG_UPDATE_CHAR_HEALTH",Q_UPDATE_CHAR_HEALTH);
const auto rows=db.execute("UG_SELECT_CHAR_HEALTH",{std::to_string(victimId)});
if(rows.empty()) return {{"status","error"},{"action","assassin"},{"performer_id",performerId},{"victim_id",victimId},{"message","victim_not_found"},{"details",p}};
int current=std::stoi(rows.front().at("health"));
std::uniform_int_distribution<int> dist(0,current);
int new_health=dist(rng());
db.execute("UG_UPDATE_CHAR_HEALTH",{std::to_string(victimId),std::to_string(new_health)});
return {{"status","success"},{"action","assassin"},{"performer_id",performerId},{"victim_id",victimId},{"details",p},{"previous_health",current},{"new_health",new_health},{"reduced_by",current-new_health}};
}
nlohmann::json UndergroundWorker::sabotage(int performerId,int victimId,const json& p){
const auto target=p.value("target",std::string{});
if(target=="house") return sabotageHouse(performerId,victimId,p);
if(target=="storage") return sabotageStorage(performerId,victimId,p);
return {{"status","error"},{"action","sabotage"},{"message","unknown_target"},{"performer_id",performerId},{"victim_id",victimId},{"details",p}};
}
int UndergroundWorker::getUserIdForCharacter(int characterId){
ConnectionGuard g(pool);
auto& db=g.get();
db.prepare("UG_SELECT_CHAR_USER",Q_SELECT_CHAR_USER);
const auto r=db.execute("UG_SELECT_CHAR_USER",{std::to_string(characterId)});
if(r.empty()) return -1;
return std::stoi(r.front().at("user_id"));
}
std::optional<UndergroundWorker::HouseConditions> UndergroundWorker::getHouseByUser(int userId){
ConnectionGuard g(pool);
auto& db=g.get();
db.prepare("UG_SELECT_HOUSE_BY_USER",Q_SELECT_HOUSE_BY_USER);
const auto r=db.execute("UG_SELECT_HOUSE_BY_USER",{std::to_string(userId)});
if(r.empty()) return std::nullopt;
HouseConditions h{
std::stoi(r.front().at("id")),
std::stoi(r.front().at("roof_condition")),
std::stoi(r.front().at("floor_condition")),
std::stoi(r.front().at("wall_condition")),
std::stoi(r.front().at("window_condition"))
};
return h;
}
void UndergroundWorker::updateHouse(const HouseConditions& h){
ConnectionGuard g(pool);
auto& db=g.get();
db.prepare("UG_UPDATE_HOUSE",Q_UPDATE_HOUSE);
db.execute("UG_UPDATE_HOUSE",{
std::to_string(h.id),
std::to_string(std::clamp(h.roof,0,100)),
std::to_string(std::clamp(h.floor,0,100)),
std::to_string(std::clamp(h.wall,0,100)),
std::to_string(std::clamp(h.windowc,0,100))
});
}
nlohmann::json UndergroundWorker::sabotageHouse(int performerId,int victimId,const json& p){
int userId=getUserIdForCharacter(victimId);
if(userId<0) return {{"status","error"},{"action","sabotage"},{"target","house"},{"message","victim_not_found"},{"performer_id",performerId},{"victim_id",victimId},{"details",p}};
auto hopt=getHouseByUser(userId);
if(!hopt) return {{"status","error"},{"action","sabotage"},{"target","house"},{"message","house_not_found"},{"performer_id",performerId},{"victim_id",victimId},{"details",p}};
auto h=*hopt;
std::vector<std::string> allow;
if(p.contains("conditions") && p["conditions"].is_array())
for(const auto& s:p["conditions"]) if(s.is_string()) allow.push_back(s.get<std::string>());
std::vector<std::pair<std::string,int*>> fields={
{"roof_condition",&h.roof},
{"floor_condition",&h.floor},
{"wall_condition",&h.wall},
{"window_condition",&h.windowc}
};
std::vector<std::pair<std::string,int*>> pool;
for(auto& f:fields) if(allow.empty() || std::find(allow.begin(),allow.end(),f.first)!=allow.end()) pool.push_back(f);
if(pool.empty()) return {{"status","error"},{"action","sabotage"},{"target","house"},{"message","no_conditions_selected"},{"performer_id",performerId},{"victim_id",victimId},{"details",p}};
size_t k=static_cast<size_t>(randomInt(1,(int)pool.size()));
std::vector<size_t> picks=randomIndices(pool.size(),k);
json changed=json::array();
for(size_t i: picks){
int& cur=*pool[i].second;
if(cur>0){
int red=randomInt(1,cur);
cur=std::clamp(cur-red,0,100);
}
changed.push_back(pool[i].first);
}
updateHouse(h);
return {
{"status","success"},
{"action","sabotage"},
{"target","house"},
{"performer_id",performerId},
{"victim_id",victimId},
{"details",p},
{"changed_conditions",changed},
{"new_conditions",{
{"roof_condition",h.roof},
{"floor_condition",h.floor},
{"wall_condition",h.wall},
{"window_condition",h.windowc}
}}
};
}
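// Worked example for sabotageHouse above: with parameters
// {"target":"house","conditions":["roof_condition"]} and a current
// roof_condition of 80, only that field can be picked; it is reduced by a
// random value between 1 and 80 and clamped to the 0..100 range before the
// house row is updated.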
std::vector<UndergroundWorker::Row> UndergroundWorker::selectStockByBranch(int branchId){
ConnectionGuard g(pool);
auto& db=g.get();
db.prepare("UG_SELECT_STOCK_BY_BRANCH",Q_SELECT_STOCK_BY_BRANCH);
return db.execute("UG_SELECT_STOCK_BY_BRANCH",{std::to_string(branchId)});
}
std::vector<UndergroundWorker::Row> UndergroundWorker::filterByStockTypes(const std::vector<Row>& rows,const std::vector<int>& allowed){
if(allowed.empty()) return rows;
std::vector<Row> out;
out.reserve(rows.size());
for(const auto& r:rows){
int t=std::stoi(r.at("stock_type_id"));
if(std::find(allowed.begin(),allowed.end(),t)!=allowed.end()) out.push_back(r);
}
return out;
}
void UndergroundWorker::updateStockQty(int id,long long qty){
ConnectionGuard g(pool);
auto& db=g.get();
db.prepare("UG_UPDATE_STOCK_QTY",Q_UPDATE_STOCK_QTY);
db.execute("UG_UPDATE_STOCK_QTY",{std::to_string(id),std::to_string(qty)});
}
nlohmann::json UndergroundWorker::sabotageStorage(int performerId,int victimId,const json& p){
if(!p.contains("branch_id") || !p["branch_id"].is_number_integer())
return {{"status","error"},{"action","sabotage"},{"target","storage"},{"message","branch_id_required"},{"performer_id",performerId},{"victim_id",victimId},{"details",p}};
int branchId=p["branch_id"].get<int>();
std::vector<int> allowed;
if(p.contains("stock_type_ids") && p["stock_type_ids"].is_array())
for(const auto& v:p["stock_type_ids"]) if(v.is_number_integer()) allowed.push_back(v.get<int>());
auto rows=filterByStockTypes(selectStockByBranch(branchId),allowed);
if(rows.empty()) return {{"status","success"},{"action","sabotage"},{"target","storage"},{"performer_id",performerId},{"victim_id",victimId},{"details",p},{"removed_total",0},{"affected_rows",json::array()}};
long long total=0;
for(const auto& r:rows) total+=std::stoll(r.at("quantity"));
if(total<=0) return {{"status","success"},{"action","sabotage"},{"target","storage"},{"performer_id",performerId},{"victim_id",victimId},{"details",p},{"removed_total",0},{"affected_rows",json::array()}};
long long cap=total/4;
if(cap<=0) return {{"status","success"},{"action","sabotage"},{"target","storage"},{"performer_id",performerId},{"victim_id",victimId},{"details",p},{"removed_total",0},{"affected_rows",json::array()}};
long long to_remove=randomLL(1,cap);
std::shuffle(rows.begin(),rows.end(),rng());
json affected=json::array();
for(const auto& r:rows){
if(to_remove==0) break;
int id=std::stoi(r.at("id"));
long long q=std::stoll(r.at("quantity"));
if(q<=0) continue;
long long take=randomLL(1,std::min(q,to_remove));
long long newq=q-take;
updateStockQty(id,newq);
to_remove-=take;
affected.push_back({{"id",id},{"stock_type_id",std::stoi(r.at("stock_type_id"))},{"previous_quantity",q},{"new_quantity",newq},{"removed",take}});
}
long long removed=0;
for(const auto& a:affected) removed+=a.at("removed").get<long long>();
return {
{"status","success"},
{"action","sabotage"},
{"target","storage"},
{"performer_id",performerId},
{"victim_id",victimId},
{"details",p},
{"removed_total",removed},
{"affected_rows",affected}
};
}
nlohmann::json UndergroundWorker::corruptPolitician(int performerId,int victimId,const json& p){
return {{"status","success"},{"action","corrupt_politician"},{"performer_id",performerId},{"victim_id",victimId},{"details",p}};
}
nlohmann::json UndergroundWorker::rob(int performerId,int victimId,const json& p){
int userId=getUserIdForCharacter(victimId);
if(userId<0) return {{"status","error"},{"action","rob"},{"message","victim_not_found"},{"performer_id",performerId},{"victim_id",victimId},{"details",p}};
ConnectionGuard g(pool);
auto& db=g.get();
db.prepare("UG_SELECT_FALUKANT_USER",Q_SELECT_FALUKANT_USER);
const auto fu=db.execute("UG_SELECT_FALUKANT_USER",{std::to_string(userId)});
if(fu.empty()) return {{"status","error"},{"action","rob"},{"message","falukant_user_not_found"},{"performer_id",performerId},{"victim_id",victimId},{"details",p}};
int falukantUserId=std::stoi(fu.front().at("id"));
double money=std::stod(fu.front().at("money"));
int defaultBranch=std::stoi(fu.front().at("main_branch_region_id"));
bool stealGoods = (randomInt(0,1)==1);
if(stealGoods){
int branchId = p.contains("branch_id") && p["branch_id"].is_number_integer()
? p["branch_id"].get<int>()
: defaultBranch;
if(branchId<=0){
return {{"status","success"},{"action","rob"},{"mode","goods"},{"performer_id",performerId},{"victim_id",victimId},{"details",p},{"removed_total",0},{"affected_rows",json::array()}};
}
auto rows = selectStockByBranch(branchId);
if(rows.empty()){
return {{"status","success"},{"action","rob"},{"mode","goods"},{"performer_id",performerId},{"victim_id",victimId},{"details",p},{"removed_total",0},{"affected_rows",json::array()}};
}
long long total=0;
for(const auto& r:rows) total+=std::stoll(r.at("quantity"));
if(total<=0){
return {{"status","success"},{"action","rob"},{"mode","goods"},{"performer_id",performerId},{"victim_id",victimId},{"details",p},{"removed_total",0},{"affected_rows",json::array()}};
}
long long cap = std::max<long long>(1, total/2);
long long to_remove = randomLL(1, cap);
std::shuffle(rows.begin(),rows.end(),rng());
json affected = json::array();
for(const auto& r:rows){
if(to_remove==0) break;
int id=std::stoi(r.at("id"));
long long q=std::stoll(r.at("quantity"));
if(q<=0) continue;
long long take=randomLL(1,std::min(q,to_remove));
long long newq=q-take;
updateStockQty(id,newq);
to_remove-=take;
affected.push_back({
{"id",id},
{"stock_type_id",std::stoi(r.at("stock_type_id"))},
{"previous_quantity",q},
{"new_quantity",newq},
{"removed",take}
});
}
long long removed=0;
for(const auto& a:affected) removed+=a.at("removed").get<long long>();
return {
{"status","success"},
{"action","rob"},
{"mode","goods"},
{"performer_id",performerId},
{"victim_id",victimId},
{"details",p},
{"removed_total",removed},
{"affected_rows",affected}
};
} else {
if(money<=0.0){
return {{"status","success"},{"action","rob"},{"mode","money"},{"performer_id",performerId},{"victim_id",victimId},{"details",p},{"stolen",0.0},{"balance_before",0.0},{"balance_after",0.0}};
}
double rate = randomDouble(0.0,0.18);
double amount = std::floor(money * rate * 100.0 + 0.5) / 100.0;
if(amount < 0.01) amount = 0.01;
if(amount > money) amount = money;
json msg = {
{"event","money_changed"},
{"reason","robbery"},
{"delta",-amount},
{"performer_id",performerId},
{"victim_id",victimId}
};
changeFalukantUserMoney(falukantUserId, -amount, "robbery", msg);
double after = std::floor((money - amount) * 100.0 + 0.5)/100.0;
return {
{"status","success"},
{"action","rob"},
{"mode","money"},
{"performer_id",performerId},
{"victim_id",victimId},
{"details",p},
{"stolen",amount},
{"rate",rate},
{"balance_before",money},
{"balance_after",after}
};
}
}
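// Minimal sketch of the money branch above as a pure function (illustration
// only; robberyAmountSketch is a hypothetical name, not used by the worker):
// the stolen amount is a random share of up to 18 % of the victim's balance,
// rounded to cents and clamped to [0.01, money]. E.g. money = 250.00 with
// rate = 0.10 yields 25.00.
[[maybe_unused]] static double robberyAmountSketch(double money, double rate) {
    double amount = std::floor(money * rate * 100.0 + 0.5) / 100.0; // round to cents
    if (amount < 0.01) amount = 0.01;
    if (amount > money) amount = money;
    return amount;
}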
void UndergroundWorker::updateResult(int id,const nlohmann::json& result){
ConnectionGuard g(pool);
auto& db=g.get();
db.prepare("UG_UPDATE_RESULT",Q_UPDATE_RESULT);
db.execute("UG_UPDATE_RESULT",{std::to_string(id),result.dump()});
}
double UndergroundWorker::randomDouble(double lo,double hi){
std::uniform_real_distribution<double> d(lo,hi);
return d(rng());
}

View File

@@ -1,101 +0,0 @@
#pragma once
#include <unordered_map>
#include <optional>
#include <nlohmann/json.hpp>
#include "worker.h"
class UndergroundWorker final: public Worker{
using Row = std::unordered_map<std::string,std::string>;
struct HouseConditions { int id; int roof; int floor; int wall; int windowc; };
public:
UndergroundWorker(ConnectionPool& pool,MessageBroker& broker):Worker(pool,broker,"UndergroundWorker"){}
~UndergroundWorker() override;
protected:
void run() override;
private:
void tick();
std::vector<Row> fetchPending();
nlohmann::json executeRow(const Row& r);
nlohmann::json handleTask(const std::string& type,int performerId,int victimId,const std::string& paramsJson);
nlohmann::json spyIn(int performerId,int victimId,const nlohmann::json& p);
nlohmann::json assassin(int performerId,int victimId,const nlohmann::json& p);
nlohmann::json sabotage(int performerId,int victimId,const nlohmann::json& p);
nlohmann::json corruptPolitician(int performerId,int victimId,const nlohmann::json& p);
nlohmann::json rob(int performerId,int victimId,const nlohmann::json& p);
void updateResult(int id,const nlohmann::json& result);
nlohmann::json sabotageHouse(int performerId,int victimId,const nlohmann::json& p);
nlohmann::json sabotageStorage(int performerId,int victimId,const nlohmann::json& p);
int getUserIdForCharacter(int characterId);
std::optional<HouseConditions> getHouseByUser(int userId);
void updateHouse(const HouseConditions& h);
std::vector<Row> selectStockByBranch(int branchId);
std::vector<Row> filterByStockTypes(const std::vector<Row>& rows,const std::vector<int>& allowed);
void updateStockQty(int id,long long qty);
static int randomInt(int lo,int hi);
static long long randomLL(long long lo,long long hi);
static std::vector<size_t> randomIndices(size_t n,size_t k);
static double randomDouble(double lo,double hi);
private:
static constexpr const char* Q_SELECT_BY_PERFORMER=R"SQL(
SELECT u.id, t.tr AS underground_type, u.performer_id, u.victim_id,
to_char(u.created_at,'YYYY-MM-DD"T"HH24:MI:SS"Z"') AS created_at,
COALESCE(u.parameters::text,'{}') AS parameters,
COALESCE(u.result::text,'null') AS result_text
FROM falukant_data.underground u
JOIN falukant_type.underground t ON t.tr=u.underground_type_id
WHERE u.performer_id=$1
ORDER BY u.created_at DESC
)SQL";
static constexpr const char* Q_SELECT_PENDING=R"SQL(
SELECT u.id,t.tr AS underground_type,u.performer_id,u.victim_id,COALESCE(u.parameters::text,'{}') AS parameters
FROM falukant_data.underground u
JOIN falukant_type.underground t ON t.tr=u.underground_type_id
WHERE u.result IS NULL AND u.created_at<=NOW()-INTERVAL '1 day'
ORDER BY u.created_at ASC
LIMIT 200
)SQL";
static constexpr const char* Q_UPDATE_RESULT=R"SQL(
UPDATE falukant_data.underground SET result=$2::jsonb,updated_at=NOW() WHERE id=$1
)SQL";
static constexpr const char* Q_SELECT_CHAR_USER=R"SQL(
SELECT user_id FROM falukant_data."character" WHERE id=$1
)SQL";
static constexpr const char* Q_SELECT_HOUSE_BY_USER=R"SQL(
SELECT id, roof_condition, floor_condition, wall_condition, window_condition
FROM falukant_data.user_house
WHERE user_id=$1
LIMIT 1
)SQL";
static constexpr const char* Q_UPDATE_HOUSE=R"SQL(
UPDATE falukant_data.user_house
SET roof_condition=$2, floor_condition=$3, wall_condition=$4, window_condition=$5
WHERE id=$1
)SQL";
static constexpr const char* Q_SELECT_STOCK_BY_BRANCH=R"SQL(
SELECT id, stock_type_id, quantity
FROM falukant_data.stock
WHERE branch_id=$1
ORDER BY quantity DESC
)SQL";
static constexpr const char* Q_UPDATE_STOCK_QTY=R"SQL(
UPDATE falukant_data.stock SET quantity=$2 WHERE id=$1
)SQL";
static constexpr const char* Q_SELECT_CHAR_HEALTH=R"SQL(
SELECT health FROM falukant_data."character" WHERE id=$1
)SQL";
static constexpr const char* Q_UPDATE_CHAR_HEALTH=R"SQL(
UPDATE falukant_data."character" SET health=$2, updated_at=NOW() WHERE id=$1
)SQL";
static constexpr const char* Q_SELECT_FALUKANT_USER=R"SQL(
SELECT id, money, COALESCE(main_branch_region_id,0) AS main_branch_region_id
FROM falukant_data.falukant_user
WHERE user_id=$1
LIMIT 1
)SQL";
};

View File

@@ -1,427 +0,0 @@
#include "usercharacterworker.h"
#include "connection_guard.h"
#include <iostream>
#include <chrono>
#include <thread>
#include <vector>
#include <cmath>
#include "utils.h"
UserCharacterWorker::UserCharacterWorker(ConnectionPool &pool, MessageBroker &broker)
: Worker(pool, broker, "UserCharacterWorker"),
gen(rd()), dist(0.0, 1.0) {}
UserCharacterWorker::~UserCharacterWorker() {}
void UserCharacterWorker::run() {
using namespace std::chrono;
auto lastExecutionTime = steady_clock::now();
int lastPregnancyDay = -1;
while (runningWorker) {
signalActivity();
// 1-hour block
auto nowSteady = steady_clock::now();
auto elapsed = duration_cast<seconds>(nowSteady - lastExecutionTime).count();
if (elapsed >= 3600) {
try {
processCharacterEvents();
updateCharactersMood();
handleCredits();
} catch (const std::exception &e) {
std::cerr << "[UserCharacterWorker] Fehler in processCharacterEvents: " << e.what() << std::endl;
}
lastExecutionTime = nowSteady;
}
// Pregnancy processing: initially, then once per day at 06:00
auto nowSys = system_clock::now();
std::time_t t = system_clock::to_time_t(nowSys);
std::tm local_tm;
localtime_r(&t, &local_tm);
if (lastPregnancyDay == -1 || (local_tm.tm_hour == 6 && local_tm.tm_yday != lastPregnancyDay)) {
try {
processPregnancies();
} catch (const std::exception &e) {
std::cerr << "[UserCharacterWorker] Fehler in processPregnancies: " << e.what() << std::endl;
}
lastPregnancyDay = local_tm.tm_yday;
}
std::this_thread::sleep_for(seconds(1));
recalculateKnowledge();
}
}
void UserCharacterWorker::processCharacterEvents() {
setCurrentStep("Get character data");
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare(QUERY_GET_USERS_TO_UPDATE, QUERY_GET_USERS_TO_UPDATE);
auto rows = db.execute(QUERY_GET_USERS_TO_UPDATE);
std::vector<Character> characters;
for (const auto &row : rows) {
characters.push_back({ std::stoi(row.at("id")), std::stoi(row.at("age")), std::stoi(row.at("health")) });
}
for (auto &character : characters) {
updateCharacterHealth(character);
}
}
void UserCharacterWorker::updateCharacterHealth(Character& character) {
int healthChange = calculateHealthChange(character.age);
if (healthChange != 0) {
character.health = std::max(0, character.health + healthChange);
if (character.health == 0) {
handleCharacterDeath(character.id);
return;
}
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("QUERY_UPDATE_CHARACTERS_HEALTH", QUERY_UPDATE_CHARACTERS_HEALTH);
db.execute("QUERY_UPDATE_CHARACTERS_HEALTH",
{ std::to_string(character.health), std::to_string(character.id) });
}
}
void UserCharacterWorker::updateCharactersMood() {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("QUERY_UPDATE_MOOD", QUERY_UPDATE_MOOD);
db.execute("QUERY_UPDATE_MOOD");
}
int UserCharacterWorker::calculateHealthChange(int age) {
if (age < 30) {
return 0;
}
if (age >= 45) {
double probability = std::min(1.0, 0.1 + (age - 45) * 0.02);
if (dist(gen) < probability) {
return -std::uniform_int_distribution<int>(1, 10)(gen);
}
return 0;
}
double probability = (age - 30) / 30.0;
return (dist(gen) < probability) ? -1 : 0;
}
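// Worked examples for calculateHealthChange above: below age 30 health never
// drops; at age 39 the chance of losing 1 point per hourly tick is
// (39 - 30) / 30 = 0.30; from age 45 on, e.g. at age 60, the chance is
// min(1, 0.1 + 15 * 0.02) = 0.40 of losing a random 1..10 points.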
void UserCharacterWorker::handleCharacterDeath(int characterId) {
setHeir(characterId);
nlohmann::json deathEvent = {
{"event", "CharacterDeath"},
{"character_id", characterId}
};
broker.publish(deathEvent.dump());
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
try {
// 1) Delete director entry (if the character is a director)
db.prepare("delete_director", QUERY_DELETE_DIRECTOR);
db.execute("delete_director", { std::to_string(characterId) });
// 2) Delete relationships (spouse, etc.)
db.prepare("delete_relationship", QUERY_DELETE_RELATIONSHIP);
db.execute("delete_relationship", { std::to_string(characterId) });
// 3) Delete child relations (as child, father, or mother)
db.prepare("delete_child_relation", QUERY_DELETE_CHILD_RELATION);
db.execute("delete_child_relation", { std::to_string(characterId) });
// 4) Delete knowledge
db.prepare("delete_knowledge", QUERY_DELETE_KNOWLEDGE);
db.execute("delete_knowledge", { std::to_string(characterId) });
// 5) Delete debtors_prism entries
db.prepare("delete_debtors_prism", QUERY_DELETE_DEBTORS_PRISM);
db.execute("delete_debtors_prism", { std::to_string(characterId) });
// 6) Delete political office
db.prepare("delete_political_office", QUERY_DELETE_POLITICAL_OFFICE);
db.execute("delete_political_office", { std::to_string(characterId) });
// 7) Delete election candidacy
db.prepare("delete_election_candidate", QUERY_DELETE_ELECTION_CANDIDATE);
db.execute("delete_election_candidate", { std::to_string(characterId) });
// 8) Delete the character itself
db.prepare("delete_character", "DELETE FROM falukant_data.character WHERE id = $1");
db.execute("delete_character", { std::to_string(characterId) });
} catch (const std::exception &e) {
std::cerr << "[UserCharacterWorker] Fehler beim Löschen der Character-Verknüpfungen: "
<< e.what() << std::endl;
}
}
void UserCharacterWorker::setHeir(int characterId) {
auto falukantUserId = getFalukantUserId(characterId);
auto heirId = getHeirFromChildren(characterId);
auto newMoney = calculateNewMoney(falukantUserId, true);
if (heirId < 1) {
heirId = getRandomHeir(characterId);
newMoney = calculateNewMoney(falukantUserId, false);
}
setNewCharacter(falukantUserId, heirId);
setNewMoney(falukantUserId, newMoney);
}
int UserCharacterWorker::getFalukantUserId(int characterId) {
ConnectionGuard guard(pool);
auto &db = guard.get();
db.prepare("QUERY_GET_FALUKANT_USER_ID", QUERY_GET_FALUKANT_USER_ID);
const auto rows = db.execute("QUERY_GET_FALUKANT_USER_ID", { std::to_string(characterId) });
if (!rows.empty() && !rows.front().at("user_id").empty()) {
return std::stoi(rows.front().at("user_id"));
}
return -1;
}
int UserCharacterWorker::getHeirFromChildren(int deceasedCharacterId) {
ConnectionGuard guard(pool);
auto &db = guard.get();
db.prepare("QUERY_GET_HEIR", QUERY_GET_HEIR);
const auto rows = db.execute("QUERY_GET_HEIR", { std::to_string(deceasedCharacterId) });
if (!rows.empty()) {
return std::stoi(rows.front().at("child_character_id"));
}
return -1;
}
int UserCharacterWorker::getRandomHeir(int deceasedCharacterId) {
ConnectionGuard guard(pool);
auto &db = guard.get();
db.prepare("QUERY_RANDOM_HEIR", QUERY_RANDOM_HEIR);
const auto rows = db.execute("QUERY_RANDOM_HEIR", { std::to_string(deceasedCharacterId) });
if (!rows.empty()) {
return std::stoi(rows.front().at("child_character_id"));
}
return -1;
}
void UserCharacterWorker::setNewCharacter(int falukantUserId, int heirCharacterId) {
if (heirCharacterId < 1) return;
ConnectionGuard guard(pool);
auto &db = guard.get();
db.prepare("QUERY_SET_CHARACTER_USER", QUERY_SET_CHARACTER_USER);
db.execute("QUERY_SET_CHARACTER_USER", {
std::to_string(falukantUserId),
std::to_string(heirCharacterId)
});
}
void UserCharacterWorker::setNewMoney(int falukantUserId, double newAmount) {
ConnectionGuard guard(pool);
auto &db = guard.get();
db.prepare("QUERY_UPDATE_USER_MONEY", QUERY_UPDATE_USER_MONEY);
db.execute("QUERY_UPDATE_USER_MONEY", {
std::to_string(newAmount),
std::to_string(falukantUserId)
});
}
void UserCharacterWorker::recalculateKnowledge() {
setCurrentStep("Get character data");
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("QUERY_UPDATE_GET_ITEMS_TO_UPDATE", QUERY_UPDATE_GET_ITEMS_TO_UPDATE);
auto rows = db.execute("QUERY_UPDATE_GET_ITEMS_TO_UPDATE");
for (const auto &updateItem: rows) {
if (std::stoi(updateItem.at("quantity")) >= 10) {
db.prepare("QUERY_UPDATE_GET_CHARACTER_IDS", QUERY_UPDATE_GET_CHARACTER_IDS);
auto charactersData = db.execute("QUERY_UPDATE_GET_CHARACTER_IDS", { updateItem.at("producer_id") });
for (const auto &characterRow: charactersData) {
db.prepare("QUERY_UPDATE_KNOWLEDGE", QUERY_UPDATE_KNOWLEDGE);
if (characterRow.at("director_id") == "") {
db.execute("QUERY_UPDATE_KNOWLEDGE", { characterRow.at("character_id"), updateItem.at("product_id"), "2" });
} else {
db.execute("QUERY_UPDATE_KNOWLEDGE", { characterRow.at("character_id"), updateItem.at("product_id"), "1" });
db.execute("QUERY_UPDATE_KNOWLEDGE", { characterRow.at("director_id"), updateItem.at("product_id"), "1" });
}
}
}
db.prepare("QUERY_DELETE_LOG_ENTRY", QUERY_DELETE_LOG_ENTRY);
db.execute("QUERY_DELETE_LOG_ENTRY", { updateItem.at("id") });
const nlohmann::json message = {
{"event", "knowledge_update"},
};
sendMessageToFalukantUsers(std::stoi(updateItem.at("producer_id")), message);
}
}
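// Knowledge recalculation (summary of the loop above): every production log entry with a
// quantity of at least 10 raises knowledge for the producing user's character by 2 when no
// director is assigned, or by 1 each for the character and the director. The log entry is
// then deleted and a "knowledge_update" event is sent to the producer.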
void UserCharacterWorker::processPregnancies() {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("QUERY_AUTOBATISM", QUERY_AUTOBATISM);
db.execute("QUERY_AUTOBATISM");
db.prepare("get_candidates", QUERY_GET_PREGNANCY_CANDIDATES);
auto rows = db.execute("get_candidates");
const nlohmann::json message = {
{"event", "children_update"},
};
for (const auto &row : rows) {
int fatherCid = Utils::optionalStoiOrDefault(row, "father_cid", -1);
int motherCid = Utils::optionalStoiOrDefault(row, "mother_cid", -1);
if (fatherCid < 0 || motherCid < 0) {
continue; // skip invalid data
}
int titleOfNobility = Utils::optionalStoiOrDefault(row, "title_of_nobility", 0);
int lastName = Utils::optionalStoiOrDefault(row, "last_name", 0);
int regionId = Utils::optionalStoiOrDefault(row, "region_id", 0);
auto fatherUidOpt = Utils::optionalUid(row.at("father_uid"));
auto motherUidOpt = Utils::optionalUid(row.at("mother_uid"));
// random gender
std::string gender = (dist(gen) < 0.5) ? "male" : "female";
db.prepare("insert_child", QUERY_INSERT_CHILD);
auto resChild = db.execute("insert_child", {
std::to_string(regionId), // $1
gender, // $2
std::to_string(lastName), // $3
std::to_string(titleOfNobility) // $4
});
if (resChild.empty()) continue;
int childCid = Utils::optionalStoiOrDefault(resChild.front(), "child_cid", -1);
if (childCid < 0) continue;
db.prepare("insert_relation", QUERY_INSERT_CHILD_RELATION);
db.execute("insert_relation", {
std::to_string(fatherCid),
std::to_string(motherCid),
std::to_string(childCid)
});
if (fatherUidOpt) {
sendMessageToFalukantUsers(*fatherUidOpt, message);
// Send falukantUpdateStatus after the child has been created
nlohmann::json updateMessage = { { "event", "falukantUpdateStatus" } };
sendMessageToFalukantUsers(*fatherUidOpt, updateMessage);
}
if (motherUidOpt) {
sendMessageToFalukantUsers(*motherUidOpt, message);
// Send falukantUpdateStatus after the child has been created
nlohmann::json updateMessage = { { "event", "falukantUpdateStatus" } };
sendMessageToFalukantUsers(*motherUidOpt, updateMessage);
}
}
}
void UserCharacterWorker::handleCredits() {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("QUERY_GET_OPEN_CREDITS", QUERY_GET_OPEN_CREDITS);
const auto &credits = db.execute("QUERY_GET_OPEN_CREDITS");
const nlohmann::json message = {
{ "event", "falukantUpdateStatus" }
};
db.prepare("QUERY_UPDATE_CREDIT", QUERY_UPDATE_CREDIT);
db.prepare("QUERY_ADD_CHARACTER_TO_DEBTORS_PRISM", QUERY_ADD_CHARACTER_TO_DEBTORS_PRISM);
for (const auto &credit: credits) {
const auto userMoney = std::stod(credit.at("money"));
auto remainingAmount = std::stod(credit.at("remaining_amount"));
const auto amount = std::stod(credit.at("amount"));
const auto fee = std::stoi(credit.at("interest_rate"));
const auto falukantUserId = std::stoi(credit.at("user_id"));
const auto payRate = amount / 10 + amount * fee / 100;
remainingAmount -= payRate;
if (payRate <= userMoney - (payRate * 3)) {
changeFalukantUserMoney(falukantUserId, -payRate, "credit pay rate", message);
} else {
if (credit.at("prism_started_previously") == "t") {
changeFalukantUserMoney(falukantUserId, payRate, "debitor_prism", message);
} else {
db.execute("QUERY_ADD_CHARACTER_TO_DEBTORS_PRISM", { credit.at("character_id") });
}
}
db.execute("QUERY_UPDATE_CREDIT", { std::to_string(remainingAmount), std::to_string(falukantUserId) });
}
db.prepare("QUERY_CLEANUP_CREDITS", QUERY_CLEANUP_CREDITS);
db.execute("QUERY_CLEANUP_CREDITS");
}
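// Illustrative installment calculation for handleCredits() above (hypothetical numbers):
// for amount = 1000 and interest_rate = 5, payRate = 1000 / 10 + 1000 * 5 / 100 = 150 per
// processing run. The rate is only deducted while the user keeps a buffer of three further
// installments; otherwise the character is sent to the debtors' prism.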
double UserCharacterWorker::getCurrentMoney(int falukantUserId) {
ConnectionGuard g(pool); auto &db = g.get();
db.prepare("GET_CURRENT_MONEY", QUERY_GET_CURRENT_MONEY);
auto rows = db.execute("GET_CURRENT_MONEY", {std::to_string(falukantUserId)});
return rows.empty()? 0.0 : std::stod(rows.front().at("sum"));
}
double UserCharacterWorker::getHouseValue(int falukantUserId) {
ConnectionGuard g(pool); auto &db = g.get();
db.prepare("HOUSE_VALUE", QUERY_HOUSE_VALUE);
auto rows = db.execute("HOUSE_VALUE", {std::to_string(falukantUserId)});
return rows.empty()? 0.0 : std::stod(rows.front().at("sum"));
}
double UserCharacterWorker::getSettlementValue(int falukantUserId) {
ConnectionGuard g(pool); auto &db = g.get();
db.prepare("SETTLEMENT_VALUE", QUERY_SETTLEMENT_VALUE);
auto rows = db.execute("SETTLEMENT_VALUE", {std::to_string(falukantUserId)});
return rows.empty()? 0.0 : std::stod(rows.front().at("sum"));
}
double UserCharacterWorker::getInventoryValue(int falukantUserId) {
ConnectionGuard g(pool); auto &db = g.get();
db.prepare("INVENTORY_VALUE", QUERY_INVENTORY_VALUE);
auto rows = db.execute("INVENTORY_VALUE", {std::to_string(falukantUserId)});
return rows.empty()? 0.0 : std::stod(rows.front().at("sum"));
}
double UserCharacterWorker::getCreditDebt(int falukantUserId) {
ConnectionGuard guard(pool);
auto &db = guard.get();
db.prepare("CREDIT_DEBT", QUERY_CREDIT_DEBT);
auto rows = db.execute("CREDIT_DEBT", { std::to_string(falukantUserId) });
return rows.empty()
? 0.0
: std::stod(rows.front().at("sum"));
}
int UserCharacterWorker::getChildCount(int deceasedUserId) {
ConnectionGuard g(pool); auto &db = g.get();
db.prepare("COUNT_CHILDREN", QUERY_COUNT_CHILDREN);
auto rows = db.execute("COUNT_CHILDREN", {std::to_string(deceasedUserId)});
return rows.empty()? 0 : std::stoi(rows.front().at("cnt"));
}
double UserCharacterWorker::calculateNewMoney(int falukantUserId, bool hasHeir) {
if (!hasHeir) {
return 800.0;
}
double cash = getCurrentMoney(falukantUserId);
double houses = getHouseValue(falukantUserId);
double sets = getSettlementValue(falukantUserId);
double inv = getInventoryValue(falukantUserId);
double debt = getCreditDebt(falukantUserId);
double totalAssets = cash + houses + sets + inv - debt;
int childCount = getChildCount(falukantUserId);
bool single = (childCount <= 1);
double heirShare = single ? totalAssets : totalAssets * 0.8;
double net = heirShare - (houses + sets + inv + debt);
if (net <= 1000.0) {
return 1000.0;
}
return net;
}
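// Illustrative inheritance example for calculateNewMoney() (hypothetical numbers):
// cash 2000, houses 500, settlements 300, inventory 200, open credit 100 -> totalAssets = 2900.
// With two children the heir's share is 2900 * 0.8 = 2320, and the heir's starting money is
// 2320 - (500 + 300 + 200 + 100) = 1220 (values below 1000 are rounded up to 1000).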

View File

@@ -1,414 +0,0 @@
#ifndef USERCHARACTERWORKER_H
#define USERCHARACTERWORKER_H
#include "worker.h"
#include <random>
class UserCharacterWorker : public Worker {
public:
UserCharacterWorker(ConnectionPool &pool, MessageBroker &broker);
~UserCharacterWorker() override;
protected:
void run() override;
private:
struct Character {
int id;
int age;
int health;
};
void processCharacterEvents();
void updateCharacterHealth(Character& character);
void updateCharactersMood();
int calculateHealthChange(int age);
void handleCharacterDeath(int characterId);
void recalculateKnowledge();
void processPregnancies();
void handleCredits();
void setHeir(int characterId);
int getFalukantUserId(int characterId);
int getHeirFromChildren(int deceasedCharacterId);
int getRandomHeir(int deceasedCharacterId);
void setNewCharacter(int falukantUserId, int heirCharacterId);
void setNewMoney(int falukantUserId, double newAmount);
double getHouseValue(int falukantUserId);
double getSettlementValue(int falukantUserId);
double getInventoryValue(int falukantUserId);
double getCreditDebt(int falukantUserId);
double getCurrentMoney(int falukantUserId);
double calculateNewMoney(int falukantUserId, bool hasHeir);
int getChildCount(int deceasedUserId);
std::random_device rd;
std::mt19937 gen;
std::uniform_real_distribution<> dist;
bool didRunToday { false };
static constexpr const char *QUERY_GET_USERS_TO_UPDATE = R"(
SELECT "id", CURRENT_DATE - birthdate::date AS age, "health"
FROM "falukant_data"."character"
WHERE "user_id" IS NOT NULL;
)";
static constexpr const char *QUERY_UPDATE_CHARACTERS_HEALTH = R"(
UPDATE "falukant_data"."character"
SET health = $1
WHERE id = $2
)";
static constexpr const char *QUERY_UPDATE_GET_ITEMS_TO_UPDATE = R"(
SELECT id, product_id, producer_id, quantity
FROM falukant_log.production p
WHERE p.production_timestamp::date < current_date
)";
static constexpr const char *QUERY_UPDATE_GET_CHARACTER_IDS = R"(
select fu.id user_id, c.id character_id, c2.id director_id
from falukant_data.falukant_user fu
join falukant_data."character" c
on c.user_id = fu.id
left join falukant_data.director d
on d.employer_user_id = fu.id
left join falukant_data."character" c2
on c2.id = d.director_character_id
where fu.id = $1
)";
static constexpr const char *QUERY_UPDATE_KNOWLEDGE = R"(
update falukant_data.knowledge
set knowledge = least(knowledge + $3, 100)
where character_id = $1
and product_id = $2
)";
static constexpr const char *QUERY_DELETE_LOG_ENTRY = R"(
delete from falukant_log.production
where id = $1
)";
static constexpr char const* QUERY_GET_PREGNANCY_CANDIDATES = R"(
SELECT
r.character1_id AS father_cid,
r.character2_id AS mother_cid,
c1.title_of_nobility,
c1.last_name,
c1.region_id,
fu1.id AS father_uid,
fu2.id AS mother_uid,
-- average age in days
((NOW()::date - c1.birthdate::date)
+ (NOW()::date - c2.birthdate::date)) / 2 AS avg_age_days,
-- adjusted pregnancy probability in percent
100.0 /
(1
+ EXP(
0.0647 * (
((NOW()::date - c1.birthdate::date)
+ (NOW()::date - c2.birthdate::date)) / 2
)
- 0.0591
)
) AS prob_pct
FROM falukant_data.relationship r
JOIN falukant_type.relationship r2
ON r2.id = r.relationship_type_id
AND r2.tr = 'married'
JOIN falukant_data."character" c1
ON c1.id = r.character1_id
JOIN falukant_data."character" c2
ON c2.id = r.character2_id
LEFT JOIN falukant_data.falukant_user fu1
ON fu1.id = c1.user_id
LEFT JOIN falukant_data.falukant_user fu2
ON fu2.id = c2.user_id
WHERE random()*100 < (
100.0 /
(1
+ EXP(
0.11166347 * (
((NOW()::date - c1.birthdate::date)
+ (NOW()::date - c2.birthdate::date)) / 2
)
- 2.638267
)
)
) / 2; -- birth rate halved
)";
static constexpr char const* QUERY_INSERT_CHILD = R"(
INSERT INTO falukant_data."character" (
user_id,
region_id,
first_name,
last_name,
birthdate,
gender,
title_of_nobility,
mood_id,
created_at,
updated_at
) VALUES (
NULL,
$1::int, -- region_id
/* random first name matching the gender */
(
SELECT id
FROM falukant_predefine.firstname
WHERE gender = $2
ORDER BY RANDOM()
LIMIT 1
),
$3::int, -- last_name (parents' last name)
NOW(),
$2::varchar, -- gender
$4::int, -- title_of_nobility
/* random mood */
(
SELECT id
FROM falukant_type.mood
ORDER BY RANDOM()
LIMIT 1
),
NOW(),
NOW()
)
RETURNING id AS child_cid
)";
static constexpr char const* QUERY_INSERT_CHILD_RELATION = R"(
-- QUERY_INSERT_CHILD_RELATION
INSERT INTO falukant_data.child_relation (
father_character_id,
mother_character_id,
child_character_id,
name_set,
created_at,
updated_at
)
VALUES (
$1::int, -- father_cid
$2::int, -- mother_cid
$3::int, -- child_cid
false,
NOW(), NOW()
)
RETURNING
father_character_id,
-- father's user
(SELECT user_id FROM falukant_data."character" WHERE id = father_character_id) AS father_user_id,
mother_character_id,
-- mother's user
(SELECT user_id FROM falukant_data."character" WHERE id = mother_character_id) AS mother_user_id,
child_character_id,
-- child's user
(SELECT user_id FROM falukant_data."character" WHERE id = child_character_id) AS child_user_id;
)";
static constexpr char const* QUERY_AUTOBATISM = R"(
update falukant_data.child_relation
set name_set = true
where id in (
select cr.id
from falukant_data.child_relation cr
join falukant_data."character" c
on c.id = cr.child_character_id
where cr.name_set = false
and c.birthdate < current_date - interval '5 days'
)
)";
static constexpr char const* QUERY_UPDATE_MOOD = R"(
UPDATE falukant_data."character" AS c
SET mood_id = falukant_data.get_random_mood_id()
WHERE c.health > 0;
)";
static constexpr char const* QUERY_GET_OPEN_CREDITS = R"(
select c.id credit_id, c.amount, c.remaining_amount, c.interest_rate, fu.id user_id, fu."money", c2.id character_id, dp.created_at debitor_prism_start,
dp.created_at::date < current_date prism_started_previously
from falukant_data.credit c
join falukant_data.falukant_user fu
on fu.id = c.falukant_user_id
join falukant_data."character" c2
on c2.user_id = c.falukant_user_id
left join falukant_data.debtors_prism dp
on dp.character_id = c2.id
where c.remaining_amount > 0
and c.updated_at::date < current_date
)";
static constexpr char const* QUERY_UPDATE_CREDIT = R"(
update falukant_data.credit c
set remaining_amount = $1
where falukant_user_id = $2
)";
static constexpr char const* QUERY_CLEANUP_CREDITS = R"(
delete from falukant_data.credit
where remaining_amount < 0.01
)";
static constexpr char const* QUERY_ADD_CHARACTER_TO_DEBTORS_PRISM = R"(
insert into falukant_data.debtors_prism (character_id) values ($1)
)";
static constexpr const char* QUERY_GET_HEIR = R"(
SELECT child_character_id
FROM falukant_data.child_relation
WHERE father_character_id = $1
OR mother_character_id = $1
ORDER BY (is_heir IS TRUE) DESC,
updated_at DESC
LIMIT 1
)";
static constexpr const char* QUERY_RANDOM_HEIR = R"(
WITH chosen AS (
SELECT
cr.id AS relation_id,
cr.child_character_id
FROM
falukant_data.child_relation AS cr
JOIN
falukant_data."character" AS ch
ON ch.id = cr.child_character_id
WHERE
(cr.father_character_id = $1 OR cr.mother_character_id = $1)
-- same region as the deceased
AND ch.region_id = (
SELECT region_id
FROM falukant_data."character"
WHERE id = $1
)
-- no older than 10 days
AND ch.birthdate >= NOW() - INTERVAL '10 days'
-- Titel "noncivil"
AND ch.title_of_nobility = (
SELECT id
FROM falukant_type.title
WHERE label_tr = 'noncivil'
)
ORDER BY RANDOM()
LIMIT 1
)
UPDATE
falukant_data.child_relation AS cr2
SET
is_heir = true,
updated_at = NOW()
FROM
chosen
WHERE
cr2.id = chosen.relation_id
RETURNING
chosen.child_character_id
)";
static constexpr const char* QUERY_SET_CHARACTER_USER = R"(
UPDATE falukant_data."character"
SET user_id = $1,
updated_at = NOW()
WHERE id = $2
)";
static constexpr const char* QUERY_UPDATE_USER_MONEY = R"(
UPDATE falukant_data.falukant_user
SET money = $1,
updated_at = NOW()
WHERE user_id = $2
)";
static constexpr const char* QUERY_GET_FALUKANT_USER_ID = R"(
SELECT user_id
FROM falukant_data."character"
WHERE id = $1
LIMIT 1
)";
// SubQueries
static constexpr const char* QUERY_GET_CURRENT_MONEY = R"(
SELECT COALESCE(money,0) AS sum
FROM falukant_data.falukant_user
WHERE user_id = $1
)";
static constexpr const char* QUERY_HOUSE_VALUE = R"(
SELECT COALESCE(SUM(h.cost),0) AS sum
FROM falukant_data.user_house AS uh
JOIN falukant_type.house AS h ON uh.house_type_id = h.id
WHERE uh.user_id = $1
)";
static constexpr const char* QUERY_SETTLEMENT_VALUE = R"(
SELECT COALESCE(SUM(b.base_cost),0) AS sum
FROM falukant_data.branch AS br
JOIN falukant_type.branch AS b ON br.branch_type_id = b.id
WHERE br.falukant_user_id = $1
)";
static constexpr const char* QUERY_INVENTORY_VALUE = R"(
SELECT COALESCE(SUM(i.quantity * p.sell_cost),0) AS sum
FROM falukant_data.inventory AS i
JOIN falukant_type.product AS p ON i.product_id = p.id
JOIN falukant_data.branch AS br ON i.stock_id = br.id
WHERE br.falukant_user_id = $1
)";
static constexpr const char* QUERY_CREDIT_DEBT = R"(
SELECT COALESCE(SUM(remaining_amount),0) AS sum
FROM falukant_data.credit
WHERE falukant_user_id = $1
)";
static constexpr const char* QUERY_COUNT_CHILDREN = R"(
SELECT COUNT(*) AS cnt
FROM falukant_data.child_relation
WHERE father_character_id = $1
OR mother_character_id = $1
)";
// Queries for deleting character references on death
static constexpr const char *QUERY_DELETE_DIRECTOR = R"(
DELETE FROM falukant_data.director
WHERE director_character_id = $1
RETURNING employer_user_id;
)";
static constexpr const char *QUERY_DELETE_RELATIONSHIP = R"(
DELETE FROM falukant_data.relationship
WHERE character1_id = $1
OR character2_id = $1;
)";
static constexpr const char *QUERY_DELETE_CHILD_RELATION = R"(
DELETE FROM falukant_data.child_relation
WHERE child_character_id = $1
OR father_character_id = $1
OR mother_character_id = $1;
)";
static constexpr const char *QUERY_DELETE_KNOWLEDGE = R"(
DELETE FROM falukant_data.knowledge
WHERE character_id = $1;
)";
static constexpr const char *QUERY_DELETE_DEBTORS_PRISM = R"(
DELETE FROM falukant_data.debtors_prism
WHERE character_id = $1;
)";
static constexpr const char *QUERY_DELETE_POLITICAL_OFFICE = R"(
DELETE FROM falukant_data.political_office
WHERE character_id = $1;
)";
static constexpr const char *QUERY_DELETE_ELECTION_CANDIDATE = R"(
DELETE FROM falukant_data.election_candidate
WHERE character_id = $1;
)";
};
#endif // USERCHARACTERWORKER_H

View File

@@ -1,94 +0,0 @@
#include "utils.h"
#include <sstream>
#include <iomanip>
#include <ctime>
int Utils::optionalStoiOrDefault(const std::unordered_map<std::string, std::string>& row,
const std::string& key, int def) {
auto it = row.find(key);
if (it == row.end()) return def;
const std::string& val = it->second;
if (isNullOrEmpty(val)) return def;
try {
return std::stoi(val);
} catch (...) {
return def;
}
}
double Utils::optionalStodOrDefault(const std::unordered_map<std::string, std::string>& row,
const std::string& key, double def) {
auto it = row.find(key);
if (it == row.end()) return def;
const std::string& val = it->second;
if (isNullOrEmpty(val)) return def;
try {
return std::stod(val);
} catch (...) {
return def;
}
}
bool Utils::isNullOrEmpty(const std::string& s) {
return s.empty() || s == "NULL";
}
std::optional<std::chrono::system_clock::time_point> Utils::parseTimestamp(const std::string& iso) {
std::istringstream ss(iso);
std::tm tm = {};
ss >> std::get_time(&tm, "%Y-%m-%dT%H:%M:%S");
if (ss.fail()) {
ss.clear();
ss.str(iso);
ss >> std::get_time(&tm, "%Y-%m-%d %H:%M:%S");
if (ss.fail()) return std::nullopt;
}
std::time_t time_c = std::mktime(&tm);
if (time_c == -1) return std::nullopt;
return std::chrono::system_clock::from_time_t(time_c);
}
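// Accepted inputs, for example: parseTimestamp("2026-01-12T08:33:26") and
// parseTimestamp("2026-01-12 08:33:26") (example values); anything else yields std::nullopt.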
std::optional<int> Utils::computeAgeYears(const std::string& birthdate_iso) {
auto birth_tp = parseTimestamp(birthdate_iso);
if (!birth_tp) return std::nullopt;
auto now = std::chrono::system_clock::now();
std::time_t birth_time = std::chrono::system_clock::to_time_t(*birth_tp);
std::time_t now_time = std::chrono::system_clock::to_time_t(now);
std::tm birth_tm;
std::tm now_tm;
#if defined(_WIN32) || defined(_WIN64)
localtime_s(&birth_tm, &birth_time);
localtime_s(&now_tm, &now_time);
#else
localtime_r(&birth_time, &birth_tm);
localtime_r(&now_time, &now_tm);
#endif
int years = now_tm.tm_year - birth_tm.tm_year;
if (now_tm.tm_mon < birth_tm.tm_mon ||
(now_tm.tm_mon == birth_tm.tm_mon && now_tm.tm_mday < birth_tm.tm_mday)) {
years--;
}
return years;
}
std::string Utils::buildPgIntArrayLiteral(const std::vector<int>& elems) {
std::string res = "{";
for (size_t i = 0; i < elems.size(); ++i) {
res += std::to_string(elems[i]);
if (i + 1 < elems.size()) res += ",";
}
res += "}";
return res;
}
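// Example: buildPgIntArrayLiteral({1, 2, 3}) returns "{1,2,3}"; an empty vector returns "{}".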
std::optional<int> Utils::optionalUid(const std::string& val) {
if (isNullOrEmpty(val)) return std::nullopt;
try {
return std::stoi(val);
} catch (...) {
return std::nullopt;
}
}

View File

@@ -1,30 +0,0 @@
#pragma once
#include <string>
#include <unordered_map>
#include <vector>
#include <optional>
#include <chrono>
class Utils {
public:
// Safe conversions with fallback
static int optionalStoiOrDefault(const std::unordered_map<std::string, std::string>& row,
const std::string& key, int def = -1);
static double optionalStodOrDefault(const std::unordered_map<std::string, std::string>& row,
const std::string& key, double def = 0.0);
static bool isNullOrEmpty(const std::string& s);
// Parse timestamp from common ISO / SQL formats into time_point
static std::optional<std::chrono::system_clock::time_point> parseTimestamp(const std::string& iso);
// Compute full years age from birthdate string; returns nullopt on parse failure.
static std::optional<int> computeAgeYears(const std::string& birthdate_iso);
// Build Postgres integer array literal "{1,2,3}"
static std::string buildPgIntArrayLiteral(const std::vector<int>& elems);
// Safely parse a nullable integer-like string
static std::optional<int> optionalUid(const std::string& val);
};

View File

@@ -1,168 +0,0 @@
#include "valuerecalculationworker.h"
ValueRecalculationWorker::ValueRecalculationWorker(ConnectionPool &pool, MessageBroker &broker)
: Worker(pool, broker, "ValueRecalculationWorker"),
activities{
{"productKnowledge", Activity(std::chrono::system_clock::from_time_t(0),
[this]() { calculateProductKnowledge(); },
std::chrono::hours(0))}, // 00:00
{"regionalSellPrice", Activity(std::chrono::system_clock::from_time_t(0),
[this]() { calculateRegionalSellPrice(); },
std::chrono::hours(12) + std::chrono::minutes(0))} // 12:00
}
{
}
ValueRecalculationWorker::~ValueRecalculationWorker() {
}
void ValueRecalculationWorker::run() {
while (runningWorker) {
setCurrentStep("Check if activity has to run");
auto now = std::chrono::system_clock::now();
for (auto &[key, activity] : activities) {
if (shouldRunToday(activity)) {
activity.lastRun = now;
activity.callMethod();
}
}
setCurrentStep("CalculateMarriages");
calculateMarriages();
calculateStudying();
setCurrentStep("Sleep for 60 seconds");
for (int i = 0; i < 60 && runningWorker; ++i) {
std::this_thread::sleep_for(std::chrono::seconds(1));
setCurrentStep("signalActivity()");
signalActivity();
}
setCurrentStep("Loop done");
}
}
bool ValueRecalculationWorker::shouldRunToday(const Activity& activity) {
auto now = std::chrono::system_clock::now();
auto todayScheduledTime = getNextScheduledTime(activity.scheduledTime);
return now >= todayScheduledTime && activity.lastRun < todayScheduledTime;
}
std::chrono::system_clock::time_point ValueRecalculationWorker::getNextScheduledTime(std::chrono::system_clock::duration scheduledDuration) {
auto now = std::chrono::system_clock::now();
std::time_t now_c = std::chrono::system_clock::to_time_t(now);
std::tm now_tm = *std::localtime(&now_c);
now_tm.tm_hour = std::chrono::duration_cast<std::chrono::hours>(scheduledDuration).count();
now_tm.tm_min = std::chrono::duration_cast<std::chrono::minutes>(scheduledDuration).count() % 60;
now_tm.tm_sec = 0;
return std::chrono::system_clock::from_time_t(std::mktime(&now_tm));
}
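// Example (derived from the two functions above): for scheduledTime = 12h,
// getNextScheduledTime() returns today's date at 12:00 local time, and shouldRunToday()
// fires exactly once per day - on the first loop pass after 12:00 whose lastRun still lies
// before that scheduled time.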
void ValueRecalculationWorker::calculateProductKnowledge() {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("QUERY_UPDATE_PRODUCT_KNOWLEDGE_USER", QUERY_UPDATE_PRODUCT_KNOWLEDGE_USER);
db.execute("QUERY_UPDATE_PRODUCT_KNOWLEDGE_USER");
db.prepare("QUERY_GET_PRODUCERS_LAST_DAY", QUERY_GET_PRODUCERS_LAST_DAY);
const auto &usersToInform = db.execute("QUERY_GET_PRODUCERS_LAST_DAY");
const nlohmann::json message = {
{ "event", "price_update" }
};
for (const auto &user: usersToInform) {
const auto userId = std::stoi(user.at("producer_id"));
sendMessageToFalukantUsers(userId, message);
}
db.prepare("QUERY_DELETE_OLD_PRODUCTIONS", QUERY_DELETE_OLD_PRODUCTIONS);
db.execute("QUERY_DELETE_OLD_PRODUCTIONS");
}
void ValueRecalculationWorker::calculateRegionalSellPrice() {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("QUERY_UPDATE_REGION_SELL_PRICE", QUERY_UPDATE_REGION_SELL_PRICE);
db.execute("QUERY_UPDATE_REGION_SELL_PRICE");
db.prepare("QUERY_GET_SELL_REGIONS", QUERY_GET_SELL_REGIONS);
const auto &regionsWithSells = db.execute("QUERY_GET_SELL_REGIONS");
const nlohmann::json message = {
{ "event", "price_update" }
};
for (const auto &region: regionsWithSells) {
const auto regionId = std::stoi(region.at("region_id"));
sendMessageToRegionUsers(regionId, message);
}
db.prepare("QUERY_DELETE_REGION_SELL_PRICE", QUERY_DELETE_REGION_SELL_PRICE);
db.execute("QUERY_DELETE_REGION_SELL_PRICE");
}
void ValueRecalculationWorker::calculateMarriages() {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("QUERY_SET_MARRIAGES_BY_PARTY", QUERY_SET_MARRIAGES_BY_PARTY);
const auto &usersFromUpdatedRelationships = db.execute("QUERY_SET_MARRIAGES_BY_PARTY");
const nlohmann::json message = {
{ "event", "relationship_changed" }
};
for (const auto &userFromUpdatedRelationships: usersFromUpdatedRelationships) {
if (userFromUpdatedRelationships.at("character1_user") != "") {
const auto user1Id = std::stoi(userFromUpdatedRelationships.at("character1_user"));
sendMessageToRegionUsers(user1Id, message);
}
if (userFromUpdatedRelationships.at("character2_user") != "") {
const auto user2Id = std::stoi(userFromUpdatedRelationships.at("character2_user"));
sendMessageToRegionUsers(user2Id, message);
}
}
}
void ValueRecalculationWorker::calculateStudying() {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("QUERY_GET_STUDYINGS_TO_EXECUTE", QUERY_GET_STUDYINGS_TO_EXECUTE);
db.prepare("QUERY_SET_LEARNING_DONE", QUERY_SET_LEARNING_DONE);
const auto studies = db.execute("QUERY_GET_STUDYINGS_TO_EXECUTE");
for (const auto &study: studies) {
if (study.at("tr") == "self") {
calculateStudyingSelf(study);
} else if (study.at("tr") == "children" || study.at("tr") == "director") {
caclulateStudyingForAssociatedCharacter(study);
}
db.execute("QUERY_SET_LEARNING_DONE", {study.at("id")});
}
}
void ValueRecalculationWorker::calculateStudyingSelf(Database::FieldMap entry) {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("QUERY_GET_OWN_CHARACTER_ID", QUERY_GET_OWN_CHARACTER_ID);
const auto ownCharacterIdResult = db.execute("QUERY_GET_OWN_CHARACTER_ID", { entry.at("associated_falukant_user_id") });
if (ownCharacterIdResult.size() > 0) {
auto characterId = std::stoi(ownCharacterIdResult.at(0).at("id"));
auto learnAll = entry.at("learn_all_products") == "t" || entry.at("product_id") == "";
int productId = learnAll ? 0 : std::stoi(entry.at("product_id"));
calculateStudyingCharacter(characterId, learnAll, productId, std::stoi(entry.at("learning_recipient_id")));
}
}
void ValueRecalculationWorker::caclulateStudyingForAssociatedCharacter(Database::FieldMap entry) {
auto characterId = std::stoi(entry.at("associated_learning_character_id"));
auto learnAll = entry.at("learn_all_products") == "t" || entry.at("product_id") == "";
int productId = learnAll ? 0 : std::stoi(entry.at("product_id"));
calculateStudyingCharacter(characterId, learnAll, productId, std::stoi(entry.at("learning_recipient_id")));
}
void ValueRecalculationWorker::calculateStudyingCharacter(int characterId, bool all, int productId, int falukantUserId) {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
if (all) {
db.prepare("QUERY_INCREASE_ALL_PRODUCTS_KNOWLEDGE", QUERY_INCREASE_ALL_PRODUCTS_KNOWLEDGE);
db.execute("QUERY_INCREASE_ALL_PRODUCTS_KNOWLEDGE", { "1", std::to_string(characterId) });
} else {
db.prepare("QUERY_INCREASE_ONE_PRODUCT_KNOWLEDGE", QUERY_INCREASE_ONE_PRODUCT_KNOWLEDGE);
db.execute("QUERY_INCREASE_ONE_PRODUCT_KNOWLEDGE", { "5", std::to_string(characterId), std::to_string(productId) });
}
const nlohmann::json message = {
{ "event", "knowledge_updated" }
};
sendMessageToFalukantUsers(falukantUserId, message);
}

View File

@@ -1,175 +0,0 @@
#ifndef VALUERECALCULATIONWORKER_H
#define VALUERECALCULATIONWORKER_H
#include "worker.h"
#include <unordered_map>
#include <functional>
#include <chrono>
class ValueRecalculationWorker : public Worker {
public:
ValueRecalculationWorker(ConnectionPool &pool, MessageBroker &broker);
~ValueRecalculationWorker() override;
protected:
void run() override;
private:
struct Activity {
std::chrono::system_clock::time_point lastRun;
std::function<void()> callMethod;
std::chrono::system_clock::duration scheduledTime;
Activity(std::chrono::system_clock::time_point lr, std::function<void()> cm, std::chrono::system_clock::duration st)
: lastRun(lr), callMethod(std::move(cm)), scheduledTime(st) {}
};
std::unordered_map<std::string, Activity> activities;
void calculateProductKnowledge();
void calculateRegionalSellPrice();
void calculateMarriages();
void calculateStudying();
void calculateStudyingSelf(Database::FieldMap entry);
void caclulateStudyingForAssociatedCharacter(Database::FieldMap entry);
void calculateStudyingCharacter(int characterId, bool all, int productId, int falukantUserId);
bool shouldRunToday(const Activity& activity);
std::chrono::system_clock::time_point getNextScheduledTime(std::chrono::system_clock::duration scheduledDuration);
static constexpr const char *QUERY_UPDATE_PRODUCT_KNOWLEDGE_USER = R"(
UPDATE falukant_data.knowledge k
SET knowledge = LEAST(100, k.knowledge + 1)
FROM falukant_data."character" c
JOIN falukant_log.production p
ON DATE(p.production_timestamp) = CURRENT_DATE - INTERVAL '1 day'
WHERE c.id = k.character_id
AND c.user_id = 18
AND k.product_id = 10
)";
static constexpr const char *QUERY_DELETE_OLD_PRODUCTIONS = R"(
delete from falukant_log.production flp
where date(flp.production_timestamp) < CURRENT_DATE
)";
static constexpr const char *QUERY_GET_PRODUCERS_LAST_DAY = R"(
select p."producer_id"
from falukant_log.production p
where date(p."production_timestamp") = CURRENT_DATE - interval '1 day'
group by producer_id
)";
static constexpr const char *QUERY_UPDATE_REGION_SELL_PRICE = R"(
UPDATE falukant_data.town_product_worth tpw
SET worth_percent =
GREATEST(
0,
LEAST(
CASE
WHEN s.quantity > avg_sells THEN tpw.worth_percent - 1
WHEN s.quantity < avg_sells THEN tpw.worth_percent + 1
ELSE tpw.worth_percent
END,
100
)
)
FROM (
SELECT region_id, product_id, quantity,
(SELECT AVG(quantity)
FROM falukant_log.sell avs
WHERE avs.product_id = s.product_id) AS avg_sells
FROM falukant_log.sell s
WHERE DATE(s.sell_timestamp) = CURRENT_DATE - INTERVAL '1 day'
) s
WHERE tpw.region_id = s.region_id
AND tpw.product_id = s.product_id
)";
static constexpr const char *QUERY_DELETE_REGION_SELL_PRICE = R"(
delete from falukant_log.sell s
where date(s.sell_timestamp) < CURRENT_DATE
)";
static constexpr const char *QUERY_GET_SELL_REGIONS = R"(
select s."region_id"
from falukant_log.sell s
where date(s."sell_timestamp") = CURRENT_DATE - interval '1 day'
group by "region_id"
)";
static constexpr const char * QUERY_SET_MARRIAGES_BY_PARTY = R"(
WITH updated_relations AS (
UPDATE falukant_data.relationship AS rel
SET relationship_type_id = (
SELECT id
FROM falukant_type.relationship AS rt
WHERE rt.tr = 'married'
)
WHERE rel.id IN (
SELECT rel2.id
FROM falukant_data.party AS p
JOIN falukant_type.party AS pt
ON pt.id = p.party_type_id
AND pt.tr = 'wedding'
JOIN falukant_data.falukant_user AS fu
ON fu.id = p.falukant_user_id
JOIN falukant_data."character" AS c
ON c.user_id = fu.id
JOIN falukant_data.relationship AS rel2
ON rel2.character1_id = c.id
OR rel2.character2_id = c.id
JOIN falukant_type.relationship AS rt2
ON rt2.id = rel2.relationship_type_id
AND rt2.tr = 'engaged'
WHERE p.created_at <= NOW() - INTERVAL '1 day'
)
RETURNING character1_id, character2_id
)
SELECT
c1.user_id AS character1_user,
c2.user_id AS character2_user
FROM updated_relations AS ur
JOIN falukant_data."character" AS c1
ON c1.id = ur.character1_id
JOIN falukant_data."character" AS c2
ON c2.id = ur.character2_id;
)";
static constexpr const char * QUERY_GET_STUDYINGS_TO_EXECUTE = R"(
select l.id, l.associated_falukant_user_id, l.associated_learning_character_id, l.learn_all_products, l.learning_recipient_id, l.product_id,
lr.tr
from falukant_data.learning l
join falukant_type.learn_recipient lr
on lr.id = l.learning_recipient_id
where l.learning_is_executed = false
and l.created_at + interval '1 day' < now();
)";
static constexpr const char * QUERY_GET_OWN_CHARACTER_ID = R"(
select id
from falukant_data."character" c
where c.user_id = $1
)";
static constexpr const char *QUERY_INCREASE_ONE_PRODUCT_KNOWLEDGE = R"(
update falukant_data.knowledge k
set knowledge = LEAST(100, k.knowledge + $1)
where k.character_id = $2
and k.product_id = $3
)";
static constexpr const char *QUERY_INCREASE_ALL_PRODUCTS_KNOWLEDGE = R"(
update falukant_data.knowledge k
set knowledge = LEAST(100, k.knowledge + $1)
where k.character_id = $2
)";
static constexpr const char *QUERY_SET_LEARNING_DONE = R"(
update falukant_data.learning
set learning_is_executed = true
where id = $1
)";
};
#endif // VALUERECALCULATIONWORKER_H

View File

@@ -1,994 +0,0 @@
#include "websocket_server.h"
#include "connection_guard.h"
#include "worker.h"
#include <iostream>
#include <chrono>
#include <cstring>
#include <future>
#include <algorithm>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
using json = nlohmann::json;
// Protocols array definition
struct lws_protocols WebSocketServer::protocols[] = {
{
"", // Leeres Protokoll für Standard-WebSocket-Verbindungen
WebSocketServer::wsCallback,
sizeof(WebSocketUserData),
4096
},
{
"yourpart-protocol",
WebSocketServer::wsCallback,
sizeof(WebSocketUserData),
4096
},
{ nullptr, nullptr, 0, 0 }
};
// Static instance pointer
WebSocketServer* WebSocketServer::instance = nullptr;
WebSocketServer::WebSocketServer(int port, ConnectionPool &pool, MessageBroker &broker,
bool useSSL, const std::string& certPath, const std::string& keyPath)
: port(port), pool(pool), broker(broker), useSSL(useSSL), certPath(certPath), keyPath(keyPath) {
instance = this;
}
WebSocketServer::~WebSocketServer() {
stop();
instance = nullptr;
}
void WebSocketServer::run() {
running = true;
broker.subscribe([this](const std::string &msg) {
{
std::lock_guard<std::mutex> lock(queueMutex);
messageQueue.push(msg);
}
queueCV.notify_one();
});
serverThread = std::thread([this](){ startServer(); });
messageThread = std::thread([this](){ processMessageQueue(); });
pingThread = std::thread([this](){ pingClients(); });
// Wait briefly until all threads have started
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
void WebSocketServer::stop() {
running = false;
if (context) lws_cancel_service(context);
// Stop threads with a timeout
std::vector<std::future<void>> futures;
if (serverThread.joinable()) {
futures.push_back(std::async(std::launch::async, [this]() { serverThread.join(); }));
}
if (messageThread.joinable()) {
futures.push_back(std::async(std::launch::async, [this]() { messageThread.join(); }));
}
if (pingThread.joinable()) {
futures.push_back(std::async(std::launch::async, [this]() { pingThread.join(); }));
}
// Wait for all threads with a timeout
for (auto& future : futures) {
if (future.wait_for(std::chrono::milliseconds(1000)) == std::future_status::timeout) {
std::cerr << "WebSocket-Thread beendet sich nicht, erzwinge Beendigung..." << std::endl;
}
}
// Force-detach all threads
if (serverThread.joinable()) serverThread.detach();
if (messageThread.joinable()) messageThread.detach();
if (pingThread.joinable()) pingThread.detach();
if (context) {
lws_context_destroy(context);
context = nullptr;
}
}
void WebSocketServer::startServer() {
// Short delay in case a previous process is still releasing the port
std::this_thread::sleep_for(std::chrono::milliseconds(100));
struct lws_context_creation_info info;
memset(&info, 0, sizeof(info));
info.port = port;
info.protocols = protocols;
// Set the socket options callback for SO_REUSEADDR
// Note: older libwebsockets versions require SO_REUSEADDR to be set manually;
// we attempt it via a callback if available.
// Server options for multiple concurrent connections
info.options = LWS_SERVER_OPTION_VALIDATE_UTF8 |
LWS_SERVER_OPTION_HTTP_HEADERS_SECURITY_BEST_PRACTICES_ENFORCE |
LWS_SERVER_OPTION_SKIP_SERVER_CANONICAL_NAME;
// Allow multiple connections per IP
info.ka_time = 60;
info.ka_probes = 10;
info.ka_interval = 10;
// SSL/TLS configuration
if (useSSL) {
if (certPath.empty() || keyPath.empty()) {
throw std::runtime_error("SSL enabled but certificate or key path not provided");
}
info.options |= LWS_SERVER_OPTION_DO_SSL_GLOBAL_INIT;
info.ssl_cert_filepath = certPath.c_str();
info.ssl_private_key_filepath = keyPath.c_str();
std::cout << "WebSocket SSL Server starting on port " << port << " with certificates: "
<< certPath << " / " << keyPath << std::endl;
} else {
std::cout << "WebSocket Server starting on port " << port << " (no SSL)" << std::endl;
}
// Raise the log level for better debugging
setenv("LWS_LOG_LEVEL", "7", 1); // 7 = all log categories
context = lws_create_context(&info);
if (!context) {
std::string errorMsg = "Failed to create LWS context on port " + std::to_string(port);
errorMsg += ". Port may be in use or insufficient permissions.";
std::cerr << errorMsg << std::endl;
throw std::runtime_error(errorMsg);
}
std::cout << "WebSocket-Server erfolgreich gestartet auf Port " << port << std::endl;
while (running) {
int ret = lws_service(context, 50);
if (ret < 0) {
std::cerr << "WebSocket-Server Fehler: lws_service returned " << ret << std::endl;
// Exit on critical errors, otherwise keep running
if (ret == -1) {
std::cerr << "Kritischer Fehler im WebSocket-Server, beende..." << std::endl;
break;
}
}
// Short pause for better shutdown responsiveness
if (running) {
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
}
std::cout << "WebSocket-Server wird beendet..." << std::endl;
}
void WebSocketServer::processMessageQueue() {
while (running) {
std::unique_lock<std::mutex> lock(queueMutex);
queueCV.wait_for(lock, std::chrono::milliseconds(100), [this](){ return !messageQueue.empty() || !running; });
while (!messageQueue.empty() && running) {
std::string msg = std::move(messageQueue.front());
messageQueue.pop();
lock.unlock();
handleBrokerMessage(msg);
lock.lock();
}
}
}
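// Queue-draining pattern used above: the condition variable is waited on with a 100 ms
// timeout so shutdown is noticed promptly, and the lock is released while
// handleBrokerMessage() runs so producers are never blocked by message handling.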
void WebSocketServer::pingClients() {
while (running) {
// Shorter sleep intervals for better shutdown responsiveness
for (int i = 0; i < WebSocketUserData::PING_INTERVAL_SECONDS * 10 && running; ++i) {
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
if (!running || !context) continue;
auto now = std::chrono::steady_clock::now();
std::vector<struct lws*> toDisconnect;
// Check all connections for timeouts
{
std::shared_lock<std::shared_mutex> lock(connectionsMutex);
for (auto& pair : connections) {
for (auto* wsi : pair.second) {
auto* ud = reinterpret_cast<WebSocketUserData*>(lws_wsi_user(wsi));
if (!ud) continue;
// Check whether the pong timeout has been reached
auto timeSincePing = std::chrono::duration_cast<std::chrono::seconds>(now - ud->lastPingTime).count();
auto timeSincePong = std::chrono::duration_cast<std::chrono::seconds>(now - ud->lastPongTime).count();
if (!ud->pongReceived && timeSincePing > WebSocketUserData::PONG_TIMEOUT_SECONDS) {
ud->pingTimeoutCount++;
std::cout << "Ping-Timeout für User " << ud->userId << " (Versuch " << ud->pingTimeoutCount << "/" << WebSocketUserData::MAX_PING_TIMEOUTS << ")" << std::endl;
if (ud->pingTimeoutCount >= WebSocketUserData::MAX_PING_TIMEOUTS) {
std::cout << "Verbindung wird getrennt: Zu viele Ping-Timeouts für User " << ud->userId << std::endl;
toDisconnect.push_back(wsi);
} else {
// Reset for the next attempt
ud->pongReceived = true;
ud->lastPongTime = now;
}
}
}
}
}
// Disconnect problematic connections
for (auto* wsi : toDisconnect) {
lws_close_reason(wsi, LWS_CLOSE_STATUS_POLICY_VIOLATION, (unsigned char*)"Ping timeout", 12);
}
// Send pings to all active connections
if (running) {
lws_callback_on_writable_all_protocol(context, &protocols[0]);
}
}
}
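// Ping/pong policy implemented above: a ping frame is requested every PING_INTERVAL_SECONDS;
// if no pong arrives within PONG_TIMEOUT_SECONDS the timeout counter increases, and after
// MAX_PING_TIMEOUTS consecutive misses the connection is closed with a policy-violation status.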
int WebSocketServer::wsCallback(struct lws *wsi,
enum lws_callback_reasons reason,
void *user, void *in, size_t len) {
if (!instance) return 0;
auto *ud = reinterpret_cast<WebSocketUserData*>(user);
switch (reason) {
case LWS_CALLBACK_ESTABLISHED: {
if (!ud) {
std::cerr << "[ESTABLISHED] ud ist nullptr" << std::endl;
return 0;
}
ud->pongReceived = true;
ud->connectionTime = std::chrono::steady_clock::now();
ud->lastPingTime = std::chrono::steady_clock::now();
ud->lastPongTime = std::chrono::steady_clock::now();
ud->pingTimeoutCount = 0;
// Add the connection to the list of all connections
{
std::unique_lock<std::shared_mutex> lock(instance->connectionsMutex);
instance->allConnections.push_back(wsi);
}
const char* protocolName = lws_get_protocol(wsi)->name;
std::cout << "WebSocket-Verbindung hergestellt (Protokoll: " << (protocolName ? protocolName : "Standard") << ")" << std::endl;
char client_addr[128];
lws_get_peer_simple(wsi, client_addr, sizeof(client_addr));
std::cout << "Client-Adresse: " << client_addr << std::endl;
break;
}
case LWS_CALLBACK_RECEIVE_PONG:
// WebSocket pong frame received (automatic reply to ping)
if (!ud) {
std::cerr << "[RECEIVE_PONG] ud ist nullptr" << std::endl;
return 0;
}
ud->pongReceived = true;
ud->lastPongTime = std::chrono::steady_clock::now();
ud->pingTimeoutCount = 0;
// std::cout << "Pong-Frame von Client empfangen" << std::endl;
return 0;
case LWS_CALLBACK_RECEIVE: {
if (!ud) {
std::cerr << "[RECEIVE] ud ist nullptr" << std::endl;
return 0;
}
std::string msg(reinterpret_cast<char*>(in), len);
std::cout << "WebSocket-Nachricht empfangen: " << msg << std::endl;
// Fallback: pong as a text message (for compatibility)
if (msg == "pong") {
ud->pongReceived = true;
ud->lastPongTime = std::chrono::steady_clock::now();
ud->pingTimeoutCount = 0;
std::cout << "Pong (Text) von Client empfangen" << std::endl;
break;
}
try {
json parsed = json::parse(msg);
std::cout << "[RECEIVE] Nachricht empfangen: " << msg << std::endl;
if (parsed.contains("event")) {
std::string event = parsed["event"].get<std::string>();
if (event == "setUserId") {
if (parsed.contains("data") && parsed["data"].contains("userId")) {
std::string newUserId = parsed["data"]["userId"].get<std::string>();
// If the connection is already registered under a different userId, remove the old registration
if (!ud->userId.empty() && ud->userId != newUserId) {
std::cout << "[RECEIVE] User-ID ändert sich von " << ud->userId << " zu " << newUserId << ", entferne alte Registrierung" << std::endl;
instance->removeConnection(ud->userId, wsi);
}
ud->userId = newUserId;
std::cout << "[RECEIVE] User-ID gesetzt: " << ud->userId << std::endl;
// Store the connection in the map
instance->addConnection(ud->userId, wsi);
std::cout << "[RECEIVE] Verbindung gespeichert" << std::endl;
} else {
std::cerr << "[RECEIVE] setUserId-Event ohne data.userId-Feld" << std::endl;
}
} else if (event == "getConnections") {
// Admin function: list all active connections
std::cout << "[RECEIVE] getConnections: Start" << std::endl;
if (!ud) {
std::cerr << "[RECEIVE] getConnections: ud ist nullptr" << std::endl;
break;
}
std::cout << "[RECEIVE] getConnections: ud ist gültig" << std::endl;
// Check whether ud is still valid before accessing it
try {
volatile bool test = ud->pongReceived;
(void)test;
std::cout << "[RECEIVE] getConnections: ud-Zugriff erfolgreich" << std::endl;
} catch (...) {
std::cerr << "[RECEIVE] getConnections: ud ist ungültig (Exception beim Zugriff)" << std::endl;
break;
}
if (ud->userId.empty()) {
std::cerr << "[RECEIVE] getConnections: User-ID nicht gesetzt" << std::endl;
// Do not send the error response during the callback; enqueue it
// and trigger the WRITEABLE callback later
try {
std::cout << "[RECEIVE] getConnections: make response" << std::endl;
json errorResponse = {
{"event", "getConnectionsResponse"},
{"success", false},
{"error", "User-ID nicht gesetzt"}
};
std::cout << "errorResponse: " << errorResponse.dump() << std::endl;
if (instance && wsi && ud) {
std::cout << "instance: " << instance << std::endl;
std::cout << "wsi: " << wsi << std::endl;
std::cout << "ud: " << ud << std::endl;
// Check whether ud is still valid by attempting to access a simple field
try {
// Test access to ud to verify it is valid
volatile bool test = ud->pongReceived;
(void)test; // suppress unused-variable warning
std::cout << "ud ist gültig, pongReceived: " << ud->pongReceived << std::endl;
} catch (...) {
std::cerr << "[RECEIVE] ud ist ungültig (Exception beim Zugriff)" << std::endl;
break;
}
// Try to send the message directly without using the queue;
// this avoids problems with the mutex during the callback
try {
std::string messageStr = errorResponse.dump();
std::cout << "[RECEIVE] Versuche Nachricht direkt zu senden: " << messageStr.length() << " Bytes" << std::endl;
// Make sure the message is not too large
if (messageStr.length() > 4096) {
std::cerr << "[RECEIVE] Warnung: Nachricht zu groß (" << messageStr.length() << " Bytes), wird abgeschnitten" << std::endl;
messageStr = messageStr.substr(0, 4096);
}
// Try to send the message directly.
// In libwebsockets we may call lws_write during a RECEIVE callback,
// but only if the socket is writable; otherwise we must call lws_callback_on_writable.
unsigned char buf[LWS_PRE + messageStr.length()];
memcpy(buf + LWS_PRE, messageStr.c_str(), messageStr.length());
int ret = lws_write(wsi, buf + LWS_PRE, messageStr.length(), LWS_WRITE_TEXT);
if (ret < 0) {
// Socket is not writable; use lws_callback_on_writable
// and keep the message in a temporary variable
std::cout << "[RECEIVE] Socket nicht schreibbar (ret=" << ret << "), verwende callback_on_writable" << std::endl;
// Try to put the message into the queue, but with additional checks
if (instance && wsi && ud) {
// Check that ud and the mutex are valid
try {
// Test access to ud to make sure it is valid
volatile bool test = ud->pongReceived;
(void)test;
// Try to lock the mutex;
// use try_lock to check whether the mutex is available
std::unique_lock<std::mutex> lock(ud->messageQueueMutex, std::try_to_lock);
if (lock.owns_lock()) {
ud->messageQueue.push(messageStr);
std::cout << "[RECEIVE] Nachricht zur Queue hinzugefügt" << std::endl;
lws_callback_on_writable(wsi);
} else {
std::cerr << "[RECEIVE] Mutex konnte nicht gelockt werden, Nachricht wird verworfen" << std::endl;
}
} catch (...) {
std::cerr << "[RECEIVE] Fehler beim Zugriff auf Queue, Nachricht wird verworfen" << std::endl;
}
}
} else {
std::cout << "[RECEIVE] Nachricht direkt gesendet (" << ret << " Bytes)" << std::endl;
}
} catch (const std::exception &e) {
std::cerr << "[RECEIVE] Exception beim Senden der Nachricht: " << e.what() << std::endl;
} catch (...) {
std::cerr << "[RECEIVE] Unbekannte Exception beim Senden der Nachricht" << std::endl;
}
// Use lws_cancel_service to notify the service loop
if (instance->context) {
std::cout << "Rufe lws_cancel_service auf..." << std::endl;
lws_cancel_service(instance->context);
std::cout << "lws_cancel_service(instance->context) done" << std::endl;
} else {
std::cerr << "[RECEIVE] instance->context ist nullptr" << std::endl;
}
} else {
std::cerr << "[RECEIVE] instance, wsi oder ud ist nullptr" << std::endl;
}
} catch (const std::exception &e) {
std::cerr << "[RECEIVE] Fehler beim Senden der Fehlerantwort: " << e.what() << std::endl;
} catch (...) {
std::cerr << "[RECEIVE] Unbekannter Fehler beim Senden der Fehlerantwort" << std::endl;
}
break;
}
// Check main-admin privileges and send the response asynchronously
// (not during the callback, to avoid connection problems)
try {
if (!instance || !instance->isMainAdmin(ud->userId)) {
std::cerr << "[RECEIVE] getConnections: Zugriff verweigert für User " << ud->userId << std::endl;
json errorResponse = {
{"event", "getConnectionsResponse"},
{"success", false},
{"error", "Zugriff verweigert: Nur Mainadmin-User können Verbindungen abfragen"}
};
if (instance && wsi && ud) {
// Enqueue the message without immediately calling lws_callback_on_writable
{
std::lock_guard<std::mutex> lock(ud->messageQueueMutex);
ud->messageQueue.push(errorResponse.dump());
}
// Use lws_cancel_service to notify the service loop
if (instance->context) {
lws_cancel_service(instance->context);
}
}
break;
}
// Fetch active connections and send the response.
// Important: getActiveConnections() should be fast and avoid long-running operations
json connections = instance->getActiveConnections();
json response = {
{"event", "getConnectionsResponse"},
{"success", true},
{"data", connections}
};
if (instance && wsi && ud) {
// Use sendMessageToConnection, which already performs all necessary checks
instance->sendMessageToConnection(wsi, ud, response.dump());
std::cout << "[RECEIVE] getConnections: Verbindungen an Mainadmin gesendet (" << response.dump().length() << " Bytes)" << std::endl;
}
} catch (const std::exception &e) {
std::cerr << "[RECEIVE] Fehler bei getConnections: " << e.what() << std::endl;
// Send error response
try {
json errorResponse = {
{"event", "getConnectionsResponse"},
{"success", false},
{"error", std::string("Fehler beim Abrufen der Verbindungen: ") + e.what()}
};
if (instance && wsi && ud) {
// Enqueue the message without immediately calling lws_callback_on_writable
{
std::lock_guard<std::mutex> lock(ud->messageQueueMutex);
ud->messageQueue.push(errorResponse.dump());
}
// Use lws_cancel_service to notify the service loop
if (instance->context) {
lws_cancel_service(instance->context);
}
}
} catch (...) {
// Ignore errors while sending the error response
}
}
} else {
std::cout << "[RECEIVE] Unbekanntes Event: " << event << std::endl;
}
} else {
std::cout << "[RECEIVE] Nachricht ohne event-Feld" << std::endl;
}
} catch (const std::exception &e) {
std::cerr << "[RECEIVE] Fehler beim Parsen der WebSocket-Nachricht: " << e.what() << std::endl;
}
break;
}
case LWS_CALLBACK_SERVER_WRITEABLE: {
if (!ud) {
std::cerr << "[WRITEABLE] ud ist nullptr" << std::endl;
return 0;
}
// Check whether there is a message to send
std::string messageToSend;
{
std::lock_guard<std::mutex> lock(ud->messageQueueMutex);
if (!ud->messageQueue.empty()) {
messageToSend = std::move(ud->messageQueue.front());
ud->messageQueue.pop();
}
}
if (!messageToSend.empty()) {
// Check whether the message is too large (max 4096 bytes)
if (messageToSend.length() > 4096) {
std::cerr << "[WRITEABLE] Warnung: Nachricht zu groß (" << messageToSend.length() << " Bytes), wird abgeschnitten" << std::endl;
messageToSend = messageToSend.substr(0, 4096);
}
// Send the message
std::cout << "[WRITEABLE] Sende Nachricht (" << messageToSend.length() << " Bytes): " << (messageToSend.length() > 100 ? messageToSend.substr(0, 100) + "..." : messageToSend) << std::endl;
unsigned char buf[LWS_PRE + messageToSend.length()];
memcpy(buf + LWS_PRE, messageToSend.c_str(), messageToSend.length());
int ret = lws_write(wsi, buf + LWS_PRE, messageToSend.length(), LWS_WRITE_TEXT);
if (ret < 0) {
std::cerr << "[WRITEABLE] Fehler beim Senden: lws_write returned " << ret << " - Verbindung wird möglicherweise geschlossen" << std::endl;
// On error the connection is probably being closed; drain the queue
{
std::lock_guard<std::mutex> lock(ud->messageQueueMutex);
// Empty the queue, since the connection no longer works
while (!ud->messageQueue.empty()) {
ud->messageQueue.pop();
}
}
// No further action - the connection will be closed by libwebsockets
return -1; // signals libwebsockets that the connection should be closed
} else if (ret != static_cast<int>(messageToSend.length())) {
std::cerr << "[WRITEABLE] Warnung: Nur " << ret << " von " << messageToSend.length() << " Bytes gesendet" << std::endl;
} else {
std::cout << "[WRITEABLE] Nachricht erfolgreich gesendet (" << ret << " Bytes)" << std::endl;
}
// If more messages remain in the queue, request writability again
{
std::lock_guard<std::mutex> lock(ud->messageQueueMutex);
if (!ud->messageQueue.empty()) {
lws_callback_on_writable(wsi);
}
}
} else {
// Send a WebSocket ping frame (not a text message!)
ud->lastPingTime = std::chrono::steady_clock::now();
ud->pongReceived = false;
// Send an empty ping frame (browsers reply automatically with a pong)
unsigned char buf[LWS_PRE + 0];
lws_write(wsi, buf + LWS_PRE, 0, LWS_WRITE_PING);
// std::cout << "Ping-Frame an Client gesendet" << std::endl;
}
break;
}
case LWS_CALLBACK_CLOSED:
// Remove the connection from the map
if (ud) {
if (!ud->userId.empty()) {
instance->removeConnection(ud->userId, wsi);
std::cout << "WebSocket-Verbindung geschlossen für User: " << ud->userId << std::endl;
} else {
// If no userId is set, remove the connection from all possible entries
// (fallback in case setUserId was never called)
instance->removeConnectionByWsi(wsi);
std::cout << "WebSocket-Verbindung geschlossen (ohne User-ID, entferne aus allen Einträgen)" << std::endl;
}
} else {
std::cout << "WebSocket-Verbindung geschlossen (ud ist nullptr)" << std::endl;
}
// Remove from allConnections
{
std::unique_lock<std::shared_mutex> lock(instance->connectionsMutex);
instance->allConnections.erase(
std::remove(instance->allConnections.begin(), instance->allConnections.end(), wsi),
instance->allConnections.end()
);
}
break;
case LWS_CALLBACK_WSI_DESTROY:
// The connection is being destroyed - clean up if necessary
if (ud) {
if (!ud->userId.empty()) {
instance->removeConnection(ud->userId, wsi);
} else {
instance->removeConnectionByWsi(wsi);
}
}
break;
case LWS_CALLBACK_CLIENT_CONNECTION_ERROR:
// Client connection error (in case we act as a client)
std::cerr << "WebSocket client connection error" << std::endl;
break;
case LWS_CALLBACK_HTTP:
// Allow WebSocket upgrade requests, reject other HTTP requests
// libwebsockets handles WebSocket upgrades automatically, so return 0
return 0;
case LWS_CALLBACK_FILTER_PROTOCOL_CONNECTION:
// Protocol filter for better compatibility
return 0;
case LWS_CALLBACK_RAW_CONNECTED:
// Handle raw connections
return 0;
case LWS_CALLBACK_RAW_ADOPT_FILE:
case LWS_CALLBACK_RAW_ADOPT:
// Set SO_REUSEADDR on the socket (if not already set)
// Note: these callbacks may not be invoked for listen sockets
{
int fd = lws_get_socket_fd(wsi);
if (fd >= 0) {
int reuse = 1;
setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));
}
}
return 0;
default:
break;
}
return 0;
}
void WebSocketServer::handleBrokerMessage(const std::string &message) {
try {
std::cout << "[handleBrokerMessage] Nachricht empfangen: " << message << std::endl;
json parsed = json::parse(message);
if (parsed.contains("user_id")) {
int fid;
if (parsed["user_id"].is_string()) {
fid = std::stoi(parsed["user_id"].get<std::string>());
} else {
fid = parsed["user_id"].get<int>();
}
auto userId = getUserIdFromFalukantUserId(fid);
std::cout << "[handleBrokerMessage] Broker-Nachricht für Falukant-User " << fid << " -> User-ID " << userId << std::endl;
// Prüfe ob User-ID gefunden wurde
if (userId.empty()) {
std::cerr << "[handleBrokerMessage] WARNUNG: User-ID für Falukant-User " << fid << " nicht gefunden! Nachricht wird nicht gesendet." << std::endl;
return;
}
std::shared_lock<std::shared_mutex> lock(connectionsMutex);
std::cout << "[handleBrokerMessage] Aktive User-Verbindungen: " << connections.size() << std::endl;
auto it = connections.find(userId);
if (it != connections.end() && !it->second.empty()) {
std::cout << "[handleBrokerMessage] Sende Nachricht an User " << userId << " (" << it->second.size() << " Verbindungen): " << message << std::endl;
// Nachricht an alle Verbindungen des Users senden
for (auto* wsi : it->second) {
auto *ud = reinterpret_cast<WebSocketUserData*>(lws_wsi_user(wsi));
if (ud) {
bool wasEmpty = false;
{
std::lock_guard<std::mutex> lock(ud->messageQueueMutex);
wasEmpty = ud->messageQueue.empty();
ud->messageQueue.push(message);
std::cout << "[handleBrokerMessage] Nachricht zur Queue hinzugefügt (Queue-Größe: " << ud->messageQueue.size() << ")" << std::endl;
}
// Nur wenn die Queue leer war, den Callback aufrufen
// (sonst wird er bereits durch den WRITEABLE-Handler aufgerufen)
if (wasEmpty) {
lws_callback_on_writable(wsi);
}
} else {
std::cerr << "[handleBrokerMessage] FEHLER: ud ist nullptr für eine Verbindung!" << std::endl;
}
}
} else {
std::cout << "[handleBrokerMessage] Keine aktive Verbindung für User " << userId << " gefunden" << std::endl;
std::cout << "[handleBrokerMessage] Verfügbare User-IDs in connections:" << std::endl;
for (const auto& pair : connections) {
std::cout << " - " << pair.first << " (" << pair.second.size() << " Verbindungen)" << std::endl;
}
}
} else {
std::cout << "[handleBrokerMessage] Nachricht enthält kein user_id-Feld!" << std::endl;
}
} catch (const std::exception &e) {
std::cerr << "[handleBrokerMessage] Error processing broker message: " << e.what() << std::endl;
}
}
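// Resolves the public hashed_id of the community.user that owns the given falukant_user id.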
std::string WebSocketServer::getUserIdFromFalukantUserId(int userId) {
ConnectionGuard guard(pool);
auto &db = guard.get();
std::string sql = R"(
SELECT u.hashed_id
FROM community.user u
JOIN falukant_data.falukant_user fu ON u.id = fu.user_id
WHERE fu.id = $1
)";
db.prepare("get_user_id", sql);
auto res = db.execute("get_user_id", {std::to_string(userId)});
return (!res.empty()) ? res[0]["hashed_id"] : std::string();
}
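// Returns true if the user identified by this hashed_id holds the 'mainadmin' right.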
bool WebSocketServer::isMainAdmin(const std::string &hashedUserId) {
ConnectionGuard guard(pool);
auto &db = guard.get();
std::string sql = R"(
SELECT COUNT(*) as count
FROM community.user u
JOIN community.user_right ur ON u.id = ur.user_id
JOIN "type".user_right tr ON ur.right_type_id = tr.id
WHERE u.hashed_id = $1
AND tr.title = 'mainadmin'
)";
db.prepare("check_mainadmin", sql);
auto res = db.execute("check_mainadmin", {hashedUserId});
if (res.empty()) {
return false;
}
int count = std::stoi(res[0]["count"].c_str());
return count > 0;
}
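// Builds a JSON report of all active connections, grouped by user id,
// plus one pseudo-entry for connections that have not authenticated yet.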
nlohmann::json WebSocketServer::getActiveConnections() {
json result = json::array();
std::shared_lock<std::shared_mutex> lock(connectionsMutex);
// Count connections without a userId
size_t unauthenticatedCount = 0;
for (auto* wsi : allConnections) {
auto* ud = reinterpret_cast<WebSocketUserData*>(lws_wsi_user(wsi));
if (ud && ud->userId.empty()) {
unauthenticatedCount++;
}
}
// Iterate over the member variable this->connections (not a local variable)
for (const auto& pair : this->connections) {
const std::string& userId = pair.first;
const auto& connList = pair.second;
json userConnections = {
{"userId", userId},
{"connectionCount", connList.size()},
{"connections", json::array()}
};
for (auto* wsi : connList) {
if (!wsi) continue;
auto* ud = reinterpret_cast<WebSocketUserData*>(lws_wsi_user(wsi));
if (!ud) continue;
try {
// Calculate the connection duration since ESTABLISHED
// Use lastPongTime as a fallback if connectionTime is not set
auto now = std::chrono::steady_clock::now();
auto connectionTime = ud->connectionTime.time_since_epoch().count() != 0
? ud->connectionTime
: ud->lastPongTime;
auto connectionDuration = std::chrono::duration_cast<std::chrono::seconds>(
now - connectionTime).count();
// Calculate the time since the last pong
auto timeSinceLastPong = std::chrono::duration_cast<std::chrono::seconds>(
now - ud->lastPongTime).count();
json connInfo = {
{"connectionDurationSeconds", connectionDuration},
{"timeSinceLastPongSeconds", timeSinceLastPong},
{"pingTimeoutCount", ud->pingTimeoutCount},
{"pongReceived", ud->pongReceived}
};
userConnections["connections"].push_back(connInfo);
} catch (const std::exception &e) {
std::cerr << "[getActiveConnections] Fehler beim Verarbeiten einer Verbindung: " << e.what() << std::endl;
}
}
result.push_back(userConnections);
}
// Add unauthenticated connections
if (unauthenticatedCount > 0) {
json unauthenticatedConnections = {
{"userId", ""},
{"connectionCount", unauthenticatedCount},
{"connections", json::array()}
};
for (auto* wsi : allConnections) {
if (!wsi) continue;
auto* ud = reinterpret_cast<WebSocketUserData*>(lws_wsi_user(wsi));
if (!ud || !ud->userId.empty()) continue;
try {
auto now = std::chrono::steady_clock::now();
// Use lastPongTime as a fallback if connectionTime is not set
auto connectionTime = ud->connectionTime.time_since_epoch().count() != 0
? ud->connectionTime
: ud->lastPongTime;
auto connectionDuration = std::chrono::duration_cast<std::chrono::seconds>(
now - connectionTime).count();
auto timeSinceLastPong = std::chrono::duration_cast<std::chrono::seconds>(
now - ud->lastPongTime).count();
json connInfo = {
{"connectionDurationSeconds", connectionDuration},
{"timeSinceLastPongSeconds", timeSinceLastPong},
{"pingTimeoutCount", ud->pingTimeoutCount},
{"pongReceived", ud->pongReceived},
{"status", "unauthenticated"}
};
unauthenticatedConnections["connections"].push_back(connInfo);
} catch (const std::exception &e) {
std::cerr << "[getActiveConnections] Fehler beim Verarbeiten einer unauthentifizierten Verbindung: " << e.what() << std::endl;
}
}
result.push_back(unauthenticatedConnections);
}
return result;
}
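// Convenience overload: resolves the per-connection user data from wsi and delegates to the two-argument variant.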
void WebSocketServer::sendMessageToConnection(struct lws *wsi, const std::string &message) {
if (!wsi) {
std::cerr << "[sendMessageToConnection] wsi ist nullptr" << std::endl;
return;
}
auto* ud = reinterpret_cast<WebSocketUserData*>(lws_wsi_user(wsi));
if (!ud) {
std::cerr << "[sendMessageToConnection] ud ist nullptr" << std::endl;
return;
}
sendMessageToConnection(wsi, ud, message);
}
void WebSocketServer::sendMessageToConnection(struct lws *wsi, WebSocketUserData *ud, const std::string &message) {
if (!wsi) {
std::cerr << "[sendMessageToConnection] wsi ist nullptr" << std::endl;
return;
}
if (!ud) {
std::cerr << "[sendMessageToConnection] ud ist nullptr" << std::endl;
return;
}
if (!context) {
std::cerr << "[sendMessageToConnection] context ist nullptr" << std::endl;
return;
}
// Copy the message to make sure it stays valid
std::string messageCopy = message;
// Fetch ud from wsi again to make sure it is still valid
// This matters when we are called from inside a callback
auto* udFromWsi = reinterpret_cast<WebSocketUserData*>(lws_wsi_user(wsi));
if (!udFromWsi || udFromWsi != ud) {
std::cerr << "[sendMessageToConnection] ud does not match wsi or is invalid" << std::endl;
return;
}
// Use udFromWsi instead of ud to make sure we access the correct instance
ud = udFromWsi;
try {
bool wasEmpty = false;
{
std::lock_guard<std::mutex> lock(ud->messageQueueMutex);
wasEmpty = ud->messageQueue.empty();
ud->messageQueue.push(messageCopy);
}
// Only notify the service if the queue was empty
// (otherwise the WRITEABLE handler already takes care of it)
if (wasEmpty) {
// Use lws_cancel_service to wake up the service loop
// This is safer when we are inside a callback
if (context) {
lws_cancel_service(context);
}
}
} catch (const std::exception &e) {
std::cerr << "[sendMessageToConnection] Fehler: " << e.what() << std::endl;
} catch (...) {
std::cerr << "[sendMessageToConnection] Unbekannter Fehler" << std::endl;
}
}
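// Stores raw pointers to the workers; ownership remains with the caller's unique_ptr vector.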
void WebSocketServer::setWorkers(const std::vector<std::unique_ptr<Worker>> &workerList) {
workers.clear();
workers.reserve(workerList.size());
for (const auto &wptr : workerList) {
workers.push_back(wptr.get());
}
}
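// Registers a connection under the given user id; a single user may hold several parallel connections.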
void WebSocketServer::addConnection(const std::string &userId, struct lws *wsi) {
std::unique_lock<std::shared_mutex> lock(connectionsMutex);
connections[userId].push_back(wsi);
size_t totalConnections = 0;
for (const auto& pair : connections) {
totalConnections += pair.second.size();
}
std::cout << "[addConnection] Verbindung für User " << userId << " gespeichert (User hat " << connections[userId].size() << " Verbindung(en), insgesamt: " << totalConnections << " Verbindungen)" << std::endl;
}
void WebSocketServer::removeConnection(const std::string &userId, struct lws *wsi) {
std::unique_lock<std::shared_mutex> lock(connectionsMutex);
auto it = connections.find(userId);
if (it != connections.end()) {
// Remove this specific connection from the vector
auto& connList = it->second;
connList.erase(std::remove(connList.begin(), connList.end(), wsi), connList.end());
// Remember the remaining count before the entry may be erased
size_t remainingConnections = connList.size();
// If no connections are left, remove the entry
if (connList.empty()) {
connections.erase(it);
}
size_t totalConnections = 0;
for (const auto& pair : connections) {
totalConnections += pair.second.size();
}
std::cout << "[removeConnection] Verbindung für User " << userId << " entfernt (User hat noch " << remainingConnections << " Verbindung(en), insgesamt: " << totalConnections << " Verbindungen)" << std::endl;
} else {
std::cout << "[removeConnection] Warnung: Keine Verbindungen für User " << userId << " gefunden" << std::endl;
}
// Entferne auch aus allConnections
allConnections.erase(
std::remove(allConnections.begin(), allConnections.end(), wsi),
allConnections.end()
);
}
void WebSocketServer::removeConnectionByWsi(struct lws *wsi) {
// Removes a connection from every entry in the connections map
// Used when the userId is not known (e.g. when the socket closes early)
std::unique_lock<std::shared_mutex> lock(connectionsMutex);
std::vector<std::string> usersToRemove;
for (auto it = connections.begin(); it != connections.end(); ++it) {
auto& connList = it->second;
auto wsiIt = std::find(connList.begin(), connList.end(), wsi);
if (wsiIt != connList.end()) {
connList.erase(wsiIt);
std::cout << "[removeConnectionByWsi] Verbindung entfernt von User " << it->first << std::endl;
// Wenn keine Verbindungen mehr vorhanden sind, markiere für Entfernung
if (connList.empty()) {
usersToRemove.push_back(it->first);
}
}
}
// Remove empty entries
for (const auto& userId : usersToRemove) {
connections.erase(userId);
std::cout << "[removeConnectionByWsi] Removed empty entry for user " << userId << std::endl;
}
// Also remove from allConnections
allConnections.erase(
std::remove(allConnections.begin(), allConnections.end(), wsi),
allConnections.end()
);
}

View File

@@ -1,89 +0,0 @@
#pragma once
#include <libwebsockets.h>
#include "connection_guard.h"
#include "connection_pool.h"
#include "message_broker.h"
#include <nlohmann/json.hpp>
#include <string>
#include <atomic>
#include <thread>
#include <mutex>
#include <shared_mutex>
#include <queue>
#include <condition_variable>
#include <unordered_map>
#include <vector>
#include <memory>
#include <chrono>
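// Per-connection state for each libwebsockets session, accessed in the callbacks via lws_wsi_user().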
struct WebSocketUserData {
std::string userId;
bool pongReceived = true;
std::queue<std::string> messageQueue;
std::mutex messageQueueMutex;
std::chrono::steady_clock::time_point connectionTime; // Time when the connection was established
std::chrono::steady_clock::time_point lastPingTime;
std::chrono::steady_clock::time_point lastPongTime;
int pingTimeoutCount = 0;
static constexpr int MAX_PING_TIMEOUTS = 5; // More attempts before disconnecting
static constexpr int PING_INTERVAL_SECONDS = 30;
static constexpr int PONG_TIMEOUT_SECONDS = 60; // Longer timeout (browsers can take a while to respond)
};
class Worker; // forward
class WebSocketServer {
public:
WebSocketServer(int port, ConnectionPool &pool, MessageBroker &broker,
bool useSSL = false, const std::string& certPath = "", const std::string& keyPath = "");
~WebSocketServer();
void run();
void stop();
void setWorkers(const std::vector<std::unique_ptr<Worker>> &workerList);
private:
void startServer();
void processMessageQueue();
void pingClients();
void handleBrokerMessage(const std::string &message);
std::string getUserIdFromFalukantUserId(int falukantUserId);
bool isMainAdmin(const std::string &hashedUserId);
nlohmann::json getActiveConnections();
void sendMessageToConnection(struct lws *wsi, const std::string &message);
void sendMessageToConnection(struct lws *wsi, WebSocketUserData *ud, const std::string &message);
void addConnection(const std::string &userId, struct lws *wsi);
void removeConnection(const std::string &userId, struct lws *wsi);
void removeConnectionByWsi(struct lws *wsi); // Removes the connection from all entries (fallback)
static int wsCallback(struct lws *wsi,
enum lws_callback_reasons reason,
void *user, void *in, size_t len);
int port;
ConnectionPool &pool;
MessageBroker &broker;
bool useSSL;
std::string certPath;
std::string keyPath;
std::atomic<bool> running{false};
struct lws_context *context = nullptr;
std::thread serverThread;
std::thread messageThread;
std::thread pingThread;
std::mutex queueMutex;
std::condition_variable queueCV;
std::queue<std::string> messageQueue;
std::shared_mutex connectionsMutex;
std::unordered_map<std::string, std::vector<struct lws*>> connections;
std::vector<struct lws*> allConnections; // All active connections (including those without a userId)
std::vector<Worker*> workers;
static struct lws_protocols protocols[];
static WebSocketServer* instance;
};

View File

@@ -1,201 +0,0 @@
#pragma once
#include <atomic>
#include <thread>
#include <mutex>
#include <chrono>
#include <iostream>
#include <future>
#include <nlohmann/json.hpp>
#include "connection_pool.h"
#include "message_broker.h"
#include "database.h"
#include "connection_guard.h"
class Worker {
public:
Worker(ConnectionPool &pool, MessageBroker &broker, std::string name)
: pool(pool),
broker(broker),
workerName(std::move(name)),
runningWorker(false),
runningWatchdog(false)
{}
virtual ~Worker() {
stopWorkerThread();
stopWatchdogThread();
}
void startWorkerThread() {
if (runningWorker.load()) {
std::cerr << "[" << workerName << "] Worker thread already running, skipping start.\n";
return;
}
runningWorker.store(true);
workerThread = std::thread([this]() { run(); });
}
void stopWorkerThread() {
runningWorker.store(false);
if (workerThread.joinable()) {
// Timeout for thread shutdown
auto future = std::async(std::launch::async, [this]() {
workerThread.join();
});
if (future.wait_for(std::chrono::milliseconds(500)) == std::future_status::timeout) {
std::cerr << "[" << workerName << "] Worker-Thread beendet sich nicht, erzwinge Beendigung..." << std::endl;
// Thread wird beim Destruktor automatisch detached
workerThread.detach();
}
}
}
void enableWatchdog() {
if (runningWatchdog.load()) {
std::cerr << "[" << workerName << "] Watchdog already enabled, skipping.\n";
return;
}
runningWatchdog.store(true);
watchdogThread = std::thread([this]() { watchdog(); });
}
void stopWatchdogThread() {
runningWatchdog.store(false);
if (watchdogThread.joinable()) {
// Timeout for watchdog thread shutdown
auto future = std::async(std::launch::async, [this]() {
watchdogThread.join();
});
if (future.wait_for(std::chrono::milliseconds(200)) == std::future_status::timeout) {
std::cerr << "[" << workerName << "] Watchdog-Thread beendet sich nicht, erzwinge Beendigung..." << std::endl;
watchdogThread.detach();
}
}
}
std::string getCurrentStep() {
std::lock_guard<std::mutex> lock(stepMutex);
return currentStep;
}
std::string getStatus() {
std::lock_guard<std::mutex> lock(stepMutex);
return "{\"worker\":\"" + workerName + "\", \"currentStep\":\"" + currentStep + "\"}";
}
protected:
virtual void run() = 0;
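// Watches the activity flag that run() is expected to set via signalActivity();
// if no activity is seen within an interval, the worker thread is restarted.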
void watchdog() {
try {
while (runningWatchdog.load()) {
// Shorter sleep intervals for better shutdown responsiveness
for (int i = 0; i < 10 && runningWatchdog.load(); ++i) {
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
if (!runningWatchdog.load()) break;
bool isActive = false;
{
std::lock_guard<std::mutex> lock(activityMutex);
isActive = active;
active = false;
}
if (!isActive) {
std::cerr << "[" << workerName << "] Watchdog: Keine Aktivität! Starte Worker neu...\n";
std::cerr << "[" << workerName << "] Letzte Aktivität: " << getCurrentStep() << "\n";
stopWorkerThread();
if (runningWatchdog.load()) { // Nur neu starten wenn nicht shutdown
startWorkerThread();
}
}
}
} catch (const std::exception &e) {
std::cerr << "[" << workerName << "] Watchdog: Ausnahme gefangen: " << e.what() << "\n";
} catch (...) {
std::cerr << "[" << workerName << "] Watchdog: Unbekannte Ausnahme gefangen.\n";
}
}
void signalActivity() {
std::lock_guard<std::mutex> lock(activityMutex);
active = true;
}
void setCurrentStep(const std::string &step) {
std::lock_guard<std::mutex> lock(stepMutex);
currentStep = step;
}
void sendMessageToRegionUsers(const int &regionId, nlohmann::json message) {
ConnectionGuard guard(pool);
auto &db = guard.get();
db.prepare("QUERY_GET_REGION_USERS", QUERY_GET_REGION_USERS);
auto users = db.execute("QUERY_GET_REGION_USERS", {std::to_string(regionId)});
for (const auto &user: users) {
message["user_id"] = user.at("user_id");
broker.publish(message.dump());
}
}
void sendMessageToFalukantUsers(const int &falukantUserId, nlohmann::json message) {
message["user_id"] = falukantUserId;
broker.publish(message.dump());
}
void changeFalukantUserMoney(int falukantUserId, double moneyChange, std::string action, nlohmann::json message) {
try {
ConnectionGuard connGuard(pool);
auto &db = connGuard.get();
db.prepare("QUERY_UPDATE_MONEY", QUERY_UPDATE_MONEY);
db.execute("QUERY_UPDATE_MONEY", {
std::to_string(falukantUserId),
std::to_string(moneyChange),
action
});
sendMessageToFalukantUsers(falukantUserId, message);
} catch (const std::exception &e) {
std::cerr << "[" << workerName << "] Fehler in changeFalukantUserMoney: " << e.what() << "\n";
}
}
time_t getLastActivity() {
return lastActivity;
}
protected:
ConnectionPool &pool;
MessageBroker &broker;
std::string workerName;
std::atomic<bool> runningWorker;
std::atomic<bool> runningWatchdog;
std::atomic<bool> active{false};
std::thread workerThread;
std::thread watchdogThread;
std::mutex activityMutex;
std::chrono::seconds watchdogInterval{10};
std::mutex stepMutex;
std::string currentStep;
time_t lastActivity;
private:
static constexpr const char *QUERY_GET_REGION_USERS = R"(
select c.user_id
from falukant_data."character" c
where c.region_id = $1
and c.user_id is not null;
)";
static constexpr const char *QUERY_UPDATE_MONEY = R"(
SELECT falukant_data.update_money(
$1,
$2,
$3
);
)";
};

View File

@@ -1,33 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIFnTCCA4WgAwIBAgIUf7ObINpycsL8eoD5HWWZlQWXjJ0wDQYJKoZIhvcNAQEL
BQAwXjELMAkGA1UEBhMCREUxEDAOBgNVBAgMB0dlcm1hbnkxDzANBgNVBAcMBkJl
cmxpbjERMA8GA1UECgwIWW91clBhcnQxGTAXBgNVBAMMEHd3dy55b3VyLXBhcnQu
ZGUwHhcNMjUwOTI5MTEyNTM0WhcNMjYwOTI5MTEyNTM0WjBeMQswCQYDVQQGEwJE
RTEQMA4GA1UECAwHR2VybWFueTEPMA0GA1UEBwwGQmVybGluMREwDwYDVQQKDAhZ
b3VyUGFydDEZMBcGA1UEAwwQd3d3LnlvdXItcGFydC5kZTCCAiIwDQYJKoZIhvcN
AQEBBQADggIPADCCAgoCggIBAJt0zR/ez1S7uidVTITbeoKAfHfYzTt0/73Iqmn5
28zT160/2Q/Cf2I6VJ6O50GY7p3M2vMO13vJwcZJ/KZn4371Tm9jwu10OMYBld4t
ZXZ8kv1n9kLyOMAoLvrT8r4qDlsl43bE2vh509aisvjEph8OETquwiWFy0Rx46vy
ilNLgwzQJcdAyR3SsYyHGbwTqyN5PdkJ6ok7gG5ZbCMD0ZYbI2KoSHoQIHZLbnLg
VB/YUK6LHvSrgAHl9c0e4dJaEpssRGZaCUPZ+zwqwPvEeCvkO244ErSXYSGkTn3Y
WDeg7cFoCn8MVp8OEBel0mHPCNlnEYoWtYr+rx8C8FdFcIU4Dx5n5GX53a+ePN3B
Tu0cEZ4HL7IcVPsAOl2/xZl2efRBsZpp+Sp+MstXQKbNp2ylYquSFm9ZAbqdN+hZ
CAmm6Cqg9fKoFSQL9ljb5traS9HeLm/rCtnQpacpzmTcTi8grNa3ydLoF6OgxUba
RlcRAI4vvJgj5c1Q65Wlu7k1ttiFZXxMuW2QiZW03/5M0msr5JO2TVTBZtVd1Xll
ON42SEhwyeq6PgfJz4gCRIFQqD8os2cVZV6DfZcSupXgpfWpQl5Z5wWNrPLeBJWm
iCveM5wXpauook3bBJDVHKhNX4XIVjpy0ZDI/INxAGxfNfTFoVuPbYvWVvf8y4Bu
0orxAgMBAAGjUzBRMB0GA1UdDgQWBBTDF/IEVy993K4Tbo+vt3y0nFaexTAfBgNV
HSMEGDAWgBTDF/IEVy993K4Tbo+vt3y0nFaexTAPBgNVHRMBAf8EBTADAQH/MA0G
CSqGSIb3DQEBCwUAA4ICAQA0EoB748+ssgldnLNqB6f0HRyrX8YP7lLc34LEp7Mj
FGB1aWTGSXVeZIz96fFKkOR3h9SgLGtiyI3L3QsADXdUmntiVun1+7ejj9/7BPQE
LiMYuln+erRJOiYDqNHjlIMIIW5mA9yO8Pup4W0pD7wGTRQbBU9jnndYViex4TFc
mpPTtFkD+sAAuh7LFIA05X4jI3eAzGK3qUDvq6z1ojcmXBeZEijuhaaClJTRPwoO
HNjxYSM17zd5DHbAPW8xEZLkf7mh+SwYO/SjMKwXs6yiTmSmo4/cjkvX/OrZX67U
oNPovGvAgfSVT2RfY2sagr5Vv8uH8np8aH6a4BbjPUI4vC5Gs23iM//YILgWOoQr
+k0CfOyO+WVTc2capgN1xJ2IcnOrN9SMOtMdaLjbk1TPfZBlHnamcholXbcor8Fp
M1Si9uCO160Lkk96VpE55AFYldxrV0a5HwjK1zCdzS4XO8GP83Qqy1ZJk8WrD/Qm
HK3q+eAWpEnVKCOPjRKJD4gJgR2/SEnBNfm4SI+v58oIF56Uq+RY+1UTR0pQS0GF
D29Es18R5toNX7j93ccyi+j2igpV9yKouKEDq78NI1KU7t8MI0Pt8gBlJQI/eBJS
L7RGWEMdjxUsm+u+gniIizGCU4gtCNRkcR+XAeKUW22qZx0otjJ4DThEeXzlsJ2y
ag==
-----END CERTIFICATE-----

View File

@@ -1,52 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCbdM0f3s9Uu7on
VUyE23qCgHx32M07dP+9yKpp+dvM09etP9kPwn9iOlSejudBmO6dzNrzDtd7ycHG
SfymZ+N+9U5vY8LtdDjGAZXeLWV2fJL9Z/ZC8jjAKC760/K+Kg5bJeN2xNr4edPW
orL4xKYfDhE6rsIlhctEceOr8opTS4MM0CXHQMkd0rGMhxm8E6sjeT3ZCeqJO4Bu
WWwjA9GWGyNiqEh6ECB2S25y4FQf2FCuix70q4AB5fXNHuHSWhKbLERmWglD2fs8
KsD7xHgr5DtuOBK0l2EhpE592Fg3oO3BaAp/DFafDhAXpdJhzwjZZxGKFrWK/q8f
AvBXRXCFOA8eZ+Rl+d2vnjzdwU7tHBGeBy+yHFT7ADpdv8WZdnn0QbGaafkqfjLL
V0CmzadspWKrkhZvWQG6nTfoWQgJpugqoPXyqBUkC/ZY2+ba2kvR3i5v6wrZ0KWn
Kc5k3E4vIKzWt8nS6BejoMVG2kZXEQCOL7yYI+XNUOuVpbu5NbbYhWV8TLltkImV
tN/+TNJrK+STtk1UwWbVXdV5ZTjeNkhIcMnquj4Hyc+IAkSBUKg/KLNnFWVeg32X
ErqV4KX1qUJeWecFjazy3gSVpogr3jOcF6WrqKJN2wSQ1RyoTV+FyFY6ctGQyPyD
cQBsXzX0xaFbj22L1lb3/MuAbtKK8QIDAQABAoICAAd5JyyKXP4cP3npN8pOBQrh
p4IpLu4WHP6EF12sfl6fmz9j2bDwUyh/KH7eHLPOiN+XB9pODwm/WHB6cXH0Pfd2
Ll7sXURGLV3G+Rv/A5D9coFKQnhjzbq+n8oM/v8ZdVrYRKHquyJddHOtuwP6q6gD
6IwBN1n/j2bXIQhcyr2v/FEFD2Dfnl9/t8t7Oe9sxGIaX7DXsUHHRZCAfeJlyklA
nRwOvhu4m1/mds0A1+h3QSMv8tU1KqxksEMr8jQXIox5RYFEYCxF7hYNkd0UnAiT
onAFM/CAs8Ge1Qtnl2+WreFZqaIDj0U6k0dYwFc1gU3Wvq0MVA5GWbe4X+KZJuxc
W1/IIO5+rQn9vYwVrDhWcfL8PFsX4P9bWSc8Hpg/uf2UFKgxO4ydPOepy9+i4xVS
Bun2XcWh6GlyG1OEtvu6CVmAcvQ4s+K53r+2W/la9tmqLObLVCJqB2vrz8ISwy7N
glXJj55Kc5A9Mhjnct9Ap7Mv9hoVG3cZp3jaTlPbhPVlCJb+gePQ26ao6zpjOQpy
WWXBzhFAELiC8FKCNDzPwsU1mP0Z+Kkn5XT/GiCG0KabvQ1ZB2bMsZ6UzpwpEVVt
V+PjZ8GWv4qm2BxgdttVTum7/EJQDdZ6N2SpKZ0TRSdW0rmCrcs7poFx4AuopJBc
emHF9YzpgHidQIC/Yy5BAoIBAQDWZfZqdrSj6WwxXLsiQCsFnJ7hTizalkfNPmQo
D5sHJ67TFAFSss1T/vNHbnK4j89VU2NBntoqe2RWhZtwhIvyzq78yJNIbkAJke8w
SB6mrn0/Q+mbeGf5HXFNiqzy8gECAOJbC+ep6/bdE/6r9Iv81ojY6xKegjcLQdZy
J+bcMom7zGC7IfRx/uGj2k5OyqpGijrv8rUoTRpthqnvvvOFrC7J0466kIkTqphU
9muScWJYiaOR9gLb6JJxk7NfJANgZpldysmP4Hu5+3eIa5fhu3g0s1b3TTAOJlzN
m3mUBrChK+zR5Be9Wx86u+0TfwO007LGqfgCZmVT66F9+kbBAoIBAQC5nvKIrDGE
eFcq/qFfNj9Vn/3RT+eRHSUutRffpVjkQkEeqVucI17c9SX2154S85yXwOmecre5
3SeuBq3SfPw2HK3mHAAiflW14dlcvyIunF7SQQIYQ2Yp1jWbnIEZHMFMWfQ35nZO
QXn4DKpwtpBig2y35m+pXv/hDY2iKVQPlEqk5Gn0/I3LO35Dw98DPdSUK9MVTDOB
7L58WYUiLf9jypsnSLIKjgYJoUp+zTvm9agC4PoyXhw7bskQrfrAUYYUKK8ospPW
lRfKtafRqW92uSvksbLOgEaSIDUxXUdfx6qKob8yJEgZRFtEsj1u4+ai2vRhPRry
OM9CDTTPqwAxAoIBAFRMMt8ZXV01YjzmYQ3OfRvvwOiFfE1V2VVxup+lwybFC5Ai
xYJmmyTzkWP8oU6//J1n9BbRBqa9bW43ii5rbztA2Ly7gG1yK+uXXZx2Ptb6tHQz
l55xcTAZy3rZk7bSQGMxRc7Wl3fQN3glbHTf6kq3b4capm98c3gRouevmK3rkQwu
B7qMVzibJszuAOwp81lY8GN34pK9/i4iTJ7fTZC5aowginYAbmU1JkABw7oIqsp1
E8NIH0en4iyWDmjSGCHHNXYTTb0sXnl5zj3tUAKJW3IdMYx65PIrU0HkZ6E0IC6+
vpaoQE1LjrPhQA3yWtq2ggxquAD5kc21UAHgbgECggEBAIJm+OotVmhDBrHsyr+R
47KqwGzA3uTifdGvZYM3rRhGt2rr/bDWZHmEO9SLK8ESpesymq022H3ZsVuf05Ox
PJpjUYP8HdgduucZMFPo7wGh1zeMdgVHrEkt9OFKdKOIwP97nod6/5gAhchOVZrz
lsGupL0ZRU7Or6KSm/LVZ/m96yamVQ3IM3EYbZ77xvuG/4XMt/EZZIIdKMFBPreB
aw7XMmLJvlKN7g3r4uLsGe4qnIrRNNQXq2vRa62tHCDp5PDamBtWQWgZu+or7ibs
CqN0eTKj6AMMuQdFWzk/17mhEt1rvl9if8hIbnn3YhM6RjgY7GA3xmtun6Q+lOBj
uLECggEBAKa14EHMADkcVIJrWplLmPVHbgSZtCCL3O2CGfqxTcn8urSu5wK24KCE
xUtVXgHx2KuP9cWlinAF8kYT9UGNja/fMooLix3POlyp0W3W1274/cYPUqmEQwZn
CNRrSiizCXi07PFyVScrx1rTb/5wuUAMyF0Vawo2dX9zITjxbI2Jaw68c5LU6zKY
Tq8HO/4KznfSPx9DhnO0NDJgKMVyfP+Il3ItruA1lVtU/N1Eubn4uvNRhNR9BIgt
i4G/jE3lC2SIyOMLSWNt7deyiMkiXvEUb3GBPyBWmZNspH8Xh3shmC1zRx/aiGjb
Vnk0Wqf704tn4ss7Mfo2SwcZxAjov58=
-----END PRIVATE KEY-----

View File

@@ -1,43 +0,0 @@
#!/bin/bash
echo "=== Testing WebSocket configuration ==="
echo ""
# Check Apache modules
echo "1. Checking Apache modules:"
REQUIRED_MODULES=("proxy" "proxy_http" "proxy_wstunnel" "rewrite" "ssl" "headers")
for module in "${REQUIRED_MODULES[@]}"; do
if apache2ctl -M 2>/dev/null | grep -q "${module}_module"; then
echo "$module is enabled"
else
echo "$module is NOT enabled"
fi
done
echo ""
echo "2. Checking Apache configuration:"
if sudo apache2ctl configtest 2>&1 | grep -q "Syntax OK"; then
echo " ✅ Configuration is valid"
else
echo " ❌ Configuration has errors:"
sudo apache2ctl configtest 2>&1
fi
echo ""
echo "3. Checking active VirtualHosts:"
apache2ctl -S 2>/dev/null | grep -E "(443|4443|4551)" || echo " No relevant VirtualHosts found"
echo ""
echo "4. Checking Apache logs (last 20 lines):"
echo " Error log:"
sudo tail -20 /var/log/apache2/yourpart.error.log 2>/dev/null || echo " No errors found"
echo ""
echo " Access log (last 10 lines containing /ws/ or /socket.io/):"
sudo tail -100 /var/log/apache2/yourpart.access.log 2>/dev/null | grep -E "(/ws/|/socket.io/)" | tail -10 || echo " No relevant entries found"
echo ""
echo "5. Test the WebSocket connections:"
echo " Socket.io: wss://www.your-part.de/socket.io/"
echo " Daemon: wss://www.your-part.de/ws/"
echo ""
echo " Please test in the browser and then check the logs."

View File

@@ -1,86 +0,0 @@
#!/bin/bash
# Smart configuration file management for the YourPart daemon
# Only adds missing keys without overwriting the existing configuration
set -e
# Colors for logging
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
CONFIG_FILE="/etc/yourpart/daemon.conf"
TEMPLATE_FILE="daemon.conf"
if [ ! -f "$TEMPLATE_FILE" ]; then
log_error "Template-Datei $TEMPLATE_FILE nicht gefunden!"
exit 1
fi
log_info "Verwalte Konfigurationsdatei: $CONFIG_FILE"
if [ ! -f "$CONFIG_FILE" ]; then
log_info "Konfigurationsdatei existiert nicht, erstelle neue..."
sudo cp "$TEMPLATE_FILE" "$CONFIG_FILE"
sudo chown yourpart:yourpart "$CONFIG_FILE"
sudo chmod 600 "$CONFIG_FILE"
log_success "Neue Konfigurationsdatei erstellt"
else
log_info "Konfigurationsdatei existiert bereits, prüfe auf fehlende Keys..."
# Erstelle temporäre Datei mit neuen Keys
temp_conf="/tmp/daemon.conf.new"
cp "$TEMPLATE_FILE" "$temp_conf"
added_keys=0
# Add missing keys
while IFS='=' read -r key value; do
# Skip comments and empty lines
if [[ "$key" =~ ^[[:space:]]*# ]] || [[ -z "$key" ]]; then
continue
fi
# Strip leading whitespace
key=$(echo "$key" | sed 's/^[[:space:]]*//')
# Check whether the key already exists
if ! grep -q "^[[:space:]]*$key[[:space:]]*=" "$CONFIG_FILE"; then
log_info "Adding missing key: $key"
echo "$key=$value" | sudo tee -a "$CONFIG_FILE" > /dev/null
((added_keys++))
fi
done < "$temp_conf"
rm -f "$temp_conf"
if [ $added_keys -eq 0 ]; then
log_success "Keine neuen Keys hinzugefügt - Konfiguration ist aktuell"
else
log_success "$added_keys neue Keys hinzugefügt"
fi
fi
# Set correct permissions
sudo chown yourpart:yourpart "$CONFIG_FILE"
sudo chmod 600 "$CONFIG_FILE"
log_success "Konfigurationsdatei-Verwaltung abgeschlossen"

View File

@@ -1,89 +0,0 @@
#!/bin/bash
# Upgrade libpqxx to version 7.x on Ubuntu 22
# Run this script on the Ubuntu 22 server
set -euo pipefail
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
log_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "Upgrade libpqxx auf Ubuntu 22 für bessere C++23 Kompatibilität..."
# Prüfe aktuelle Version
log_info "Aktuelle libpqxx Version:"
dpkg -l | grep libpqxx || log_info "libpqxx nicht installiert"
# Installiere Build-Dependencies
log_info "Installiere Build-Dependencies..."
apt update
apt install -y \
build-essential \
cmake \
pkg-config \
git \
libpq-dev \
postgresql-server-dev-14
# Download and build libpqxx 7.x
log_info "Downloading libpqxx 7.x..."
cd /tmp
if [ -d "libpqxx" ]; then
rm -rf libpqxx
fi
git clone https://github.com/jtv/libpqxx.git
cd libpqxx
# Check out version 7.9.2 (stable release)
git checkout 7.9.2
# Build and install
log_info "Building libpqxx 7.x..."
mkdir build
cd build
cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local
make -j$(nproc)
make install
# Update library cache
ldconfig
log_success "libpqxx 7.x erfolgreich installiert!"
# Prüfe neue Version
log_info "Neue libpqxx Version:"
pkg-config --modversion libpqxx
log_info "Teste Kompilierung..."
cat > /tmp/test_pqxx.cpp << 'EOF'
#include <pqxx/pqxx>
#include <iostream>
int main() {
std::cout << "libpqxx Version: " << PQXX_VERSION << std::endl;
std::cout << "Major Version: " << PQXX_VERSION_MAJOR << std::endl;
return 0;
}
EOF
if g++ -o /tmp/test_pqxx /tmp/test_pqxx.cpp -lpqxx -lpq; then
log_success "Kompilierung erfolgreich!"
/tmp/test_pqxx
rm -f /tmp/test_pqxx /tmp/test_pqxx.cpp
else
log_error "Kompilierung fehlgeschlagen!"
rm -f /tmp/test_pqxx /tmp/test_pqxx.cpp
fi
log_success "libpqxx Upgrade abgeschlossen!"
log_info "Sie können jetzt das Projekt mit der neuesten libpqxx Version kompilieren."

View File

@@ -1,45 +0,0 @@
[Unit]
Description=YourPart Daemon Service
Documentation=https://your-part.de
After=network.target postgresql.service
Wants=postgresql.service
[Service]
Type=notify
User=yourpart
Group=yourpart
WorkingDirectory=/opt/yourpart
ExecStart=/usr/local/bin/yourpart-daemon
ExecStop=/bin/kill -TERM $MAINPID
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
KillSignal=SIGTERM
TimeoutStartSec=30
TimeoutStopSec=30
Restart=always
RestartSec=5
NotifyAccess=main
StandardOutput=journal
StandardError=journal
SyslogIdentifier=yourpart-daemon
# Security settings
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=/opt/yourpart/logs /var/log/yourpart
ProtectKernelTunables=true
ProtectKernelModules=true
ProtectControlGroups=true
# Environment variables
Environment=NODE_ENV=production
Environment=PYTHONUNBUFFERED=1
# Resource limits
LimitNOFILE=65536
LimitNPROC=4096
[Install]
WantedBy=multi-user.target

View File

@@ -1,74 +0,0 @@
<IfModule mod_ssl.c>
<VirtualHost your-part.de:443>
ServerAdmin webmaster@your-part.de
ServerName your-part.de
ServerAlias www.your-part.de
DocumentRoot /opt/yourpart/frontend/dist
DirectoryIndex index.html
# Frontend static files
<Directory "/opt/yourpart/frontend/dist">
AllowOverride None
Options -Indexes +FollowSymLinks
Require all granted
# Fallback for Vue Router
FallbackResource /index.html
</Directory>
# www redirect (must come first)
RewriteEngine on
RewriteCond %{SERVER_NAME} =your-part.de
RewriteRule ^ https://www.%{SERVER_NAME}%{REQUEST_URI} [END,NE,R=permanent]
# Proxy settings
ProxyPreserveHost On
ProxyRequests Off
RequestHeader set X-Forwarded-Proto "https"
AllowEncodedSlashes NoDecode
# Forward API requests to the backend
ProxyPass "/api/" "http://localhost:2020/api/"
ProxyPassReverse "/api/" "http://localhost:2020/api/"
# Socket.io: WebSocket and HTTP polling via Location blocks
<LocationMatch "^/socket.io/">
# WebSocket upgrade
RewriteEngine on
RewriteCond %{HTTP:Upgrade} websocket [NC]
RewriteCond %{HTTP:Connection} upgrade [NC]
RewriteRule .* "ws://localhost:2020%{REQUEST_URI}" [P,L]
# HTTP fallback for polling
ProxyPass "http://localhost:2020/socket.io/"
ProxyPassReverse "http://localhost:2020/socket.io/"
</LocationMatch>
# Daemon: WebSocket via Location block
<LocationMatch "^/ws/">
# WebSocket upgrade
RewriteEngine on
RewriteCond %{HTTP:Upgrade} websocket [NC]
RewriteCond %{HTTP:Connection} upgrade [NC]
RewriteRule .* "ws://localhost:4551%{REQUEST_URI}" [P,L]
# HTTP fallback (should not actually be needed)
ProxyPass "http://localhost:4551/"
ProxyPassReverse "http://localhost:4551/"
</LocationMatch>
ErrorLog /var/log/apache2/yourpart.error.log
CustomLog /var/log/apache2/yourpart.access.log combined
HostnameLookups Off
UseCanonicalName Off
ServerSignature On
# SSL configuration
Include /etc/letsencrypt/options-ssl-apache.conf
SSLCertificateFile /etc/letsencrypt/live/www.your-part.de/fullchain.pem
SSLCertificateKeyFile /etc/letsencrypt/live/www.your-part.de/privkey.pem
</VirtualHost>
</IfModule>

View File

@@ -18,23 +18,28 @@
         FallbackResource /index.html
     </Directory>
-    # Proxy settings
-    ProxyPreserveHost On
-    ProxyRequests Off
-    RequestHeader set X-Forwarded-Proto "https"
-    AllowEncodedSlashes NoDecode
-    # www redirect (must come first, but not for API paths)
-    RewriteEngine on
-    RewriteCond %{SERVER_NAME} =your-part.de
-    RewriteCond %{REQUEST_URI} !^/api/
-    RewriteRule ^ https://www.%{SERVER_NAME}%{REQUEST_URI} [END,NE,R=permanent]
-    # Forward API requests to the backend (Location block takes precedence)
-    <Location "/api/">
-        ProxyPass "http://localhost:2020/api/"
-        ProxyPassReverse "http://localhost:2020/api/"
-    </Location>
-    # WebSocket proxy for daemon connections with a custom protocol
-    ProxyPass "/ws/" "ws://localhost:4551/" upgrade=websocket
-    ProxyPassReverse "/ws/" "ws://localhost:4551/"
+    # Forward API requests to the backend
+    ProxyPass "/api/" "http://localhost:2020/api/"
+    ProxyPassReverse "/api/" "http://localhost:2020/api/"
+    # Forward WebSocket requests to the backend
+    ProxyPass "/socket.io/" "http://localhost:2020/socket.io/"
+    ProxyPassReverse "/socket.io/" "http://localhost:2020/socket.io/"
+    # WebSocket upgrade headers for Socket.io
+    RewriteEngine on
+    RewriteCond %{HTTP:Upgrade} websocket [NC]
+    RewriteCond %{HTTP:Connection} upgrade [NC]
+    RewriteRule ^/socket.io/(.*)$ "ws://localhost:2020/socket.io/$1" [P,L]
+    # WebSocket upgrade headers for daemon connections
+    RewriteCond %{HTTP:Upgrade} websocket [NC]
+    RewriteCond %{HTTP:Connection} upgrade [NC]
+    RewriteRule ^/ws/(.*)$ "ws://localhost:4551/$1" [P,L]
     ErrorLog /var/log/apache2/yourpart.error.log
     CustomLog /var/log/apache2/yourpart.access.log combined
@@ -47,5 +52,9 @@
     Include /etc/letsencrypt/options-ssl-apache.conf
     SSLCertificateFile /etc/letsencrypt/live/www.your-part.de/fullchain.pem
     SSLCertificateKeyFile /etc/letsencrypt/live/www.your-part.de/privkey.pem
+    # www redirect
+    RewriteCond %{SERVER_NAME} =your-part.de
+    RewriteRule ^ https://www.%{SERVER_NAME}%{REQUEST_URI} [END,NE,R=permanent]
 </VirtualHost>
 </IfModule>

Some files were not shown because too many files have changed in this diff.