Refactor WebSocket server access-control logic for worker schedules: update the permission check in handle_connection to run via tokio::task::spawn_blocking, and improve the error handling and debugging information carried in the WorkerScheduleAccessDebug struct. This makes the access checks more robust and provides clearer feedback when access is denied.
All checks were successful
Deploy yourpart (blue-green) / deploy (push) Successful in 1m40s

This commit is contained in:
Torsten Schulz (local)
2026-05-08 11:15:53 +02:00
parent d27230a0b5
commit dd8d8be79b

View File

@@ -480,10 +480,27 @@ async fn handle_connection<S>(
let guard = user_id_for_incoming.lock().await; let guard = user_id_for_incoming.lock().await;
guard.clone() guard.clone()
}; };
let access = uid_opt let access = match uid_opt {
.as_deref() Some(uid) => {
.map(|uid| user_can_read_worker_schedules(&pool_for_incoming, uid)) let pool_for_check = pool_for_incoming.clone();
.unwrap_or(WorkerScheduleAccessDebug { match tokio::task::spawn_blocking(move || {
user_can_read_worker_schedules(&pool_for_check, &uid)
})
.await
{
Ok(v) => v,
Err(e) => WorkerScheduleAccessDebug {
requested_user_id: "".to_string(),
resolved_community_user_id: None,
matched_by: "spawn_blocking_failed".to_string(),
allowed_direct: false,
allowed_falukant: false,
allowed: false,
error: Some(format!("spawn_blocking_failed: {e}")),
},
}
}
None => WorkerScheduleAccessDebug {
requested_user_id: "".to_string(), requested_user_id: "".to_string(),
resolved_community_user_id: None, resolved_community_user_id: None,
matched_by: "missing_setUserId".to_string(), matched_by: "missing_setUserId".to_string(),
@@ -491,7 +508,8 @@ async fn handle_connection<S>(
allowed_falukant: false, allowed_falukant: false,
allowed: false, allowed: false,
error: Some("missing_setUserId".to_string()), error: Some("missing_setUserId".to_string()),
}); },
};
if !access.allowed { if !access.allowed {
let payload = serde_json::json!({ let payload = serde_json::json!({
"event": "getWorkerSchedulesResponse", "event": "getWorkerSchedulesResponse",
@@ -523,10 +541,27 @@ async fn handle_connection<S>(
let guard = user_id_for_incoming.lock().await; let guard = user_id_for_incoming.lock().await;
guard.clone() guard.clone()
}; };
let access = uid_opt let access = match uid_opt {
.as_deref() Some(uid) => {
.map(|uid| user_can_read_worker_schedules(&pool_for_incoming, uid)) let pool_for_check = pool_for_incoming.clone();
.unwrap_or(WorkerScheduleAccessDebug { match tokio::task::spawn_blocking(move || {
user_can_read_worker_schedules(&pool_for_check, &uid)
})
.await
{
Ok(v) => v,
Err(e) => WorkerScheduleAccessDebug {
requested_user_id: "".to_string(),
resolved_community_user_id: None,
matched_by: "spawn_blocking_failed".to_string(),
allowed_direct: false,
allowed_falukant: false,
allowed: false,
error: Some(format!("spawn_blocking_failed: {e}")),
},
}
}
None => WorkerScheduleAccessDebug {
requested_user_id: "".to_string(), requested_user_id: "".to_string(),
resolved_community_user_id: None, resolved_community_user_id: None,
matched_by: "missing_setUserId".to_string(), matched_by: "missing_setUserId".to_string(),
@@ -534,7 +569,8 @@ async fn handle_connection<S>(
allowed_falukant: false, allowed_falukant: false,
allowed: false, allowed: false,
error: Some("missing_setUserId".to_string()), error: Some("missing_setUserId".to_string()),
}); },
};
if !access.allowed { if !access.allowed {
let payload = serde_json::json!({ let payload = serde_json::json!({
"event": "getWorkerSchedulesDetailedResponse", "event": "getWorkerSchedulesDetailedResponse",