From 75313b83c0b6bd4fed136462d9ce92b043db706d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pawe=C5=82=20Orzech?=
Date: Sun, 1 Mar 2026 23:02:34 +0100
Subject: [PATCH] Add multi-user workflows/pipelines and error tracking
Add multi-user automation features and per-user error tracking.
- Database migrations: add workflow_configs/workflow_runs (004), app_errors (005), pipeline_configs/pipeline_runs (006), and add user_token_hash to app_errors (007).
- Backend: introduce per-request token handling (X-API-Token) via app.api.deps and update many API routes (auth, automations, bank, characters, dashboard, events, exchange, logs) to use user-scoped Artifacts client and character scoping. Auth endpoints no longer store tokens server-side (validate-only); clear is a no-op on server.
- New Errors API and services: endpoint to list, filter, resolve, and report errors scoped to the requesting user; add error models, schemas, middleware/error handler and error_service for recording/hashing tokens.
- Pipelines & Workflows: add API routers, models, schemas and engine modules (pipeline/worker/coordinator, workflow runner/conditions) and action_executor updates to support workflow/pipeline execution.
- Logs: logs endpoint now prefers fetching recent action logs from the game API (with fallback to local DB), supports paging and filtering, and scopes results to the user.
- Frontend: add pipeline/workflow builders, lists, progress components and hooks (use-errors, use-pipelines, use-workflows), sentry client config, and updates to API client/constants/types.
- Misc: add middleware error handler, various engine strategy tweaks, tests adjusted.
Overall this change enables per-user API tokens, scopes DB queries to each user, introduces pipelines/workflows runtime support, and centralizes application error tracking.
---
.../versions/004_add_workflow_tables.py | 114 +
.../versions/005_add_app_errors_table.py | 102 +
.../versions/006_add_pipeline_tables.py | 112 +
.../007_add_user_token_hash_to_app_errors.py | 40 +
backend/app/api/auth.py | 67 +-
backend/app/api/automations.py | 10 +-
backend/app/api/bank.py | 179 +-
backend/app/api/characters.py | 10 +-
backend/app/api/dashboard.py | 8 +-
backend/app/api/deps.py | 47 +
backend/app/api/errors.py | 189 ++
backend/app/api/events.py | 8 +-
backend/app/api/exchange.py | 14 +-
backend/app/api/logs.py | 130 +-
backend/app/api/pipelines.py | 267 +++
backend/app/api/workflows.py | 261 +++
backend/app/config.py | 4 +
backend/app/engine/action_executor.py | 150 ++
backend/app/engine/manager.py | 475 ++++-
backend/app/engine/pipeline/__init__.py | 4 +
backend/app/engine/pipeline/coordinator.py | 444 ++++
backend/app/engine/pipeline/worker.py | 241 +++
backend/app/engine/runner.py | 164 +-
backend/app/engine/strategies/base.py | 49 +-
backend/app/engine/strategies/combat.py | 42 +-
backend/app/engine/strategies/crafting.py | 33 +-
backend/app/engine/strategies/gathering.py | 32 +-
backend/app/engine/strategies/leveling.py | 73 +-
backend/app/engine/strategies/trading.py | 49 +-
backend/app/engine/workflow/__init__.py | 4 +
backend/app/engine/workflow/conditions.py | 159 ++
backend/app/engine/workflow/runner.py | 543 +++++
backend/app/main.py | 57 +-
backend/app/middleware/__init__.py | 0
backend/app/middleware/error_handler.py | 71 +
backend/app/models/__init__.py | 8 +
backend/app/models/app_error.py | 51 +
backend/app/models/pipeline.py | 98 +
backend/app/models/workflow.py | 94 +
backend/app/schemas/errors.py | 44 +
backend/app/schemas/pipeline.py | 127 ++
backend/app/schemas/workflow.py | 146 ++
backend/app/services/artifacts_client.py | 89 +-
backend/app/services/error_service.py | 77 +
backend/pyproject.toml | 1 +
backend/tests/test_base_strategy.py | 5 +-
backend/tests/test_crafting_strategy.py | 30 +-
backend/tests/test_leveling_strategy.py | 42 +-
frontend/next.config.ts | 6 +-
frontend/package.json | 1 +
frontend/pnpm-lock.yaml | 1900 ++++++++++++++++-
frontend/sentry.client.config.ts | 10 +
frontend/src/app/automations/page.tsx | 64 +-
.../app/automations/pipelines/[id]/page.tsx | 218 ++
.../app/automations/pipelines/new/page.tsx | 39 +
.../app/automations/workflows/[id]/page.tsx | 349 +++
.../app/automations/workflows/new/page.tsx | 46 +
frontend/src/app/characters/[name]/page.tsx | 974 ++++++++-
frontend/src/app/error.tsx | 16 +
frontend/src/app/errors/page.tsx | 395 ++++
frontend/src/app/exchange/page.tsx | 441 +++-
frontend/src/app/logs/page.tsx | 132 +-
.../src/components/auth/auth-provider.tsx | 47 +-
.../components/character/equipment-grid.tsx | 64 +-
.../components/character/inventory-grid.tsx | 310 ++-
.../components/exchange/buy-equip-dialog.tsx | 299 +++
frontend/src/components/layout/sidebar.tsx | 2 +
.../components/pipeline/pipeline-builder.tsx | 714 +++++++
.../src/components/pipeline/pipeline-list.tsx | 352 +++
.../components/pipeline/pipeline-progress.tsx | 239 +++
.../pipeline/pipeline-template-gallery.tsx | 131 ++
.../components/pipeline/pipeline-templates.ts | 346 +++
.../components/workflow/transition-editor.tsx | 188 ++
.../components/workflow/workflow-builder.tsx | 311 +++
.../src/components/workflow/workflow-list.tsx | 334 +++
.../components/workflow/workflow-progress.tsx | 205 ++
.../workflow/workflow-step-card.tsx | 259 +++
.../workflow/workflow-template-gallery.tsx | 121 ++
.../components/workflow/workflow-templates.ts | 1168 ++++++++++
frontend/src/hooks/use-analytics.ts | 21 +-
frontend/src/hooks/use-errors.ts | 49 +
frontend/src/hooks/use-pipelines.ts | 140 ++
frontend/src/hooks/use-workflows.ts | 141 ++
frontend/src/lib/api-client.ts | 232 +-
frontend/src/lib/constants.ts | 1 +
frontend/src/lib/types.ts | 193 ++
86 files changed, 14835 insertions(+), 587 deletions(-)
create mode 100644 backend/alembic/versions/004_add_workflow_tables.py
create mode 100644 backend/alembic/versions/005_add_app_errors_table.py
create mode 100644 backend/alembic/versions/006_add_pipeline_tables.py
create mode 100644 backend/alembic/versions/007_add_user_token_hash_to_app_errors.py
create mode 100644 backend/app/api/deps.py
create mode 100644 backend/app/api/errors.py
create mode 100644 backend/app/api/pipelines.py
create mode 100644 backend/app/api/workflows.py
create mode 100644 backend/app/engine/action_executor.py
create mode 100644 backend/app/engine/pipeline/__init__.py
create mode 100644 backend/app/engine/pipeline/coordinator.py
create mode 100644 backend/app/engine/pipeline/worker.py
create mode 100644 backend/app/engine/workflow/__init__.py
create mode 100644 backend/app/engine/workflow/conditions.py
create mode 100644 backend/app/engine/workflow/runner.py
create mode 100644 backend/app/middleware/__init__.py
create mode 100644 backend/app/middleware/error_handler.py
create mode 100644 backend/app/models/app_error.py
create mode 100644 backend/app/models/pipeline.py
create mode 100644 backend/app/models/workflow.py
create mode 100644 backend/app/schemas/errors.py
create mode 100644 backend/app/schemas/pipeline.py
create mode 100644 backend/app/schemas/workflow.py
create mode 100644 backend/app/services/error_service.py
create mode 100644 frontend/sentry.client.config.ts
create mode 100644 frontend/src/app/automations/pipelines/[id]/page.tsx
create mode 100644 frontend/src/app/automations/pipelines/new/page.tsx
create mode 100644 frontend/src/app/automations/workflows/[id]/page.tsx
create mode 100644 frontend/src/app/automations/workflows/new/page.tsx
create mode 100644 frontend/src/app/errors/page.tsx
create mode 100644 frontend/src/components/exchange/buy-equip-dialog.tsx
create mode 100644 frontend/src/components/pipeline/pipeline-builder.tsx
create mode 100644 frontend/src/components/pipeline/pipeline-list.tsx
create mode 100644 frontend/src/components/pipeline/pipeline-progress.tsx
create mode 100644 frontend/src/components/pipeline/pipeline-template-gallery.tsx
create mode 100644 frontend/src/components/pipeline/pipeline-templates.ts
create mode 100644 frontend/src/components/workflow/transition-editor.tsx
create mode 100644 frontend/src/components/workflow/workflow-builder.tsx
create mode 100644 frontend/src/components/workflow/workflow-list.tsx
create mode 100644 frontend/src/components/workflow/workflow-progress.tsx
create mode 100644 frontend/src/components/workflow/workflow-step-card.tsx
create mode 100644 frontend/src/components/workflow/workflow-template-gallery.tsx
create mode 100644 frontend/src/components/workflow/workflow-templates.ts
create mode 100644 frontend/src/hooks/use-errors.ts
create mode 100644 frontend/src/hooks/use-pipelines.ts
create mode 100644 frontend/src/hooks/use-workflows.ts
diff --git a/backend/alembic/versions/004_add_workflow_tables.py b/backend/alembic/versions/004_add_workflow_tables.py
new file mode 100644
index 0000000..a0820e7
--- /dev/null
+++ b/backend/alembic/versions/004_add_workflow_tables.py
@@ -0,0 +1,114 @@
+"""Add workflow_configs, workflow_runs tables
+
+Revision ID: 004_workflows
+Revises: 003_price_event
+Create Date: 2026-03-01
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision: str = "004_workflows"
+down_revision: Union[str, None] = "003_price_event"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # workflow_configs
+ op.create_table(
+ "workflow_configs",
+ sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
+ sa.Column("name", sa.String(length=100), nullable=False),
+ sa.Column("character_name", sa.String(length=100), nullable=False),
+ sa.Column("description", sa.Text(), nullable=False, server_default=sa.text("''")),
+ sa.Column(
+ "steps",
+ sa.JSON(),
+ nullable=False,
+ comment="JSON array of workflow steps",
+ ),
+ sa.Column("loop", sa.Boolean(), nullable=False, server_default=sa.text("false")),
+ sa.Column("max_loops", sa.Integer(), nullable=False, server_default=sa.text("0")),
+ sa.Column("enabled", sa.Boolean(), nullable=False, server_default=sa.text("true")),
+ sa.Column(
+ "created_at",
+ sa.DateTime(timezone=True),
+ server_default=sa.text("now()"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.DateTime(timezone=True),
+ server_default=sa.text("now()"),
+ nullable=False,
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ )
+ op.create_index(
+ op.f("ix_workflow_configs_character_name"),
+ "workflow_configs",
+ ["character_name"],
+ unique=False,
+ )
+
+ # workflow_runs
+ op.create_table(
+ "workflow_runs",
+ sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
+ sa.Column(
+ "workflow_id",
+ sa.Integer(),
+ sa.ForeignKey("workflow_configs.id", ondelete="CASCADE"),
+ nullable=False,
+ ),
+ sa.Column(
+ "status",
+ sa.String(length=20),
+ nullable=False,
+ server_default=sa.text("'running'"),
+ comment="Status: running, paused, stopped, completed, error",
+ ),
+ sa.Column("current_step_index", sa.Integer(), nullable=False, server_default=sa.text("0")),
+ sa.Column(
+ "current_step_id",
+ sa.String(length=100),
+ nullable=False,
+ server_default=sa.text("''"),
+ ),
+ sa.Column("loop_count", sa.Integer(), nullable=False, server_default=sa.text("0")),
+ sa.Column("total_actions_count", sa.Integer(), nullable=False, server_default=sa.text("0")),
+ sa.Column("step_actions_count", sa.Integer(), nullable=False, server_default=sa.text("0")),
+ sa.Column(
+ "started_at",
+ sa.DateTime(timezone=True),
+ server_default=sa.text("now()"),
+ nullable=False,
+ ),
+ sa.Column("stopped_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("error_message", sa.Text(), nullable=True),
+ sa.Column(
+ "step_history",
+ sa.JSON(),
+ nullable=False,
+ server_default=sa.text("'[]'::json"),
+ comment="JSON array of completed step records",
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ )
+ op.create_index(
+ op.f("ix_workflow_runs_workflow_id"),
+ "workflow_runs",
+ ["workflow_id"],
+ unique=False,
+ )
+
+
+def downgrade() -> None:
+ op.drop_index(op.f("ix_workflow_runs_workflow_id"), table_name="workflow_runs")
+ op.drop_table("workflow_runs")
+ op.drop_index(op.f("ix_workflow_configs_character_name"), table_name="workflow_configs")
+ op.drop_table("workflow_configs")
diff --git a/backend/alembic/versions/005_add_app_errors_table.py b/backend/alembic/versions/005_add_app_errors_table.py
new file mode 100644
index 0000000..9599369
--- /dev/null
+++ b/backend/alembic/versions/005_add_app_errors_table.py
@@ -0,0 +1,102 @@
+"""Add app_errors table for error tracking
+
+Revision ID: 005_app_errors
+Revises: 004_workflows
+Create Date: 2026-03-01
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision: str = "005_app_errors"
+down_revision: Union[str, None] = "004_workflows"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ op.create_table(
+ "app_errors",
+ sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
+ sa.Column(
+ "severity",
+ sa.String(length=20),
+ nullable=False,
+ server_default="error",
+ comment="error | warning | critical",
+ ),
+ sa.Column(
+ "source",
+ sa.String(length=50),
+ nullable=False,
+ comment="backend | frontend | automation | middleware",
+ ),
+ sa.Column(
+ "error_type",
+ sa.String(length=200),
+ nullable=False,
+ comment="Exception class name or error category",
+ ),
+ sa.Column("message", sa.Text(), nullable=False),
+ sa.Column("stack_trace", sa.Text(), nullable=True),
+ sa.Column(
+ "context",
+ sa.JSON(),
+ nullable=True,
+ comment="Arbitrary JSON context",
+ ),
+ sa.Column(
+ "correlation_id",
+ sa.String(length=36),
+ nullable=True,
+ comment="Request correlation ID",
+ ),
+ sa.Column(
+ "resolved",
+ sa.Boolean(),
+ nullable=False,
+ server_default=sa.text("false"),
+ ),
+ sa.Column(
+ "created_at",
+ sa.DateTime(timezone=True),
+ server_default=sa.text("now()"),
+ nullable=False,
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ )
+ op.create_index(
+ op.f("ix_app_errors_correlation_id"),
+ "app_errors",
+ ["correlation_id"],
+ unique=False,
+ )
+ op.create_index(
+ op.f("ix_app_errors_created_at"),
+ "app_errors",
+ ["created_at"],
+ unique=False,
+ )
+ op.create_index(
+ op.f("ix_app_errors_severity"),
+ "app_errors",
+ ["severity"],
+ unique=False,
+ )
+ op.create_index(
+ op.f("ix_app_errors_source"),
+ "app_errors",
+ ["source"],
+ unique=False,
+ )
+
+
+def downgrade() -> None:
+ op.drop_index(op.f("ix_app_errors_source"), table_name="app_errors")
+ op.drop_index(op.f("ix_app_errors_severity"), table_name="app_errors")
+ op.drop_index(op.f("ix_app_errors_created_at"), table_name="app_errors")
+ op.drop_index(op.f("ix_app_errors_correlation_id"), table_name="app_errors")
+ op.drop_table("app_errors")
diff --git a/backend/alembic/versions/006_add_pipeline_tables.py b/backend/alembic/versions/006_add_pipeline_tables.py
new file mode 100644
index 0000000..c2505b3
--- /dev/null
+++ b/backend/alembic/versions/006_add_pipeline_tables.py
@@ -0,0 +1,112 @@
+"""Add pipeline_configs, pipeline_runs tables for multi-character pipelines
+
+Revision ID: 006_pipelines
+Revises: 005_app_errors
+Create Date: 2026-03-01
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision: str = "006_pipelines"
+down_revision: Union[str, None] = "005_app_errors"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # pipeline_configs
+ op.create_table(
+ "pipeline_configs",
+ sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
+ sa.Column("name", sa.String(length=100), nullable=False),
+ sa.Column("description", sa.Text(), nullable=False, server_default=sa.text("''")),
+ sa.Column(
+ "stages",
+ sa.JSON(),
+ nullable=False,
+ comment="JSON array of pipeline stages with character_steps",
+ ),
+ sa.Column("loop", sa.Boolean(), nullable=False, server_default=sa.text("false")),
+ sa.Column("max_loops", sa.Integer(), nullable=False, server_default=sa.text("0")),
+ sa.Column("enabled", sa.Boolean(), nullable=False, server_default=sa.text("true")),
+ sa.Column(
+ "created_at",
+ sa.DateTime(timezone=True),
+ server_default=sa.text("now()"),
+ nullable=False,
+ ),
+ sa.Column(
+ "updated_at",
+ sa.DateTime(timezone=True),
+ server_default=sa.text("now()"),
+ nullable=False,
+ ),
+ sa.PrimaryKeyConstraint("id"),
+ )
+
+ # pipeline_runs
+ op.create_table(
+ "pipeline_runs",
+ sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
+ sa.Column(
+ "pipeline_id",
+ sa.Integer(),
+ sa.ForeignKey("pipeline_configs.id", ondelete="CASCADE"),
+ nullable=False,
+ ),
+ sa.Column(
+ "status",
+ sa.String(length=20),
+ nullable=False,
+ server_default=sa.text("'running'"),
+ comment="Status: running, paused, stopped, completed, error",
+ ),
+ sa.Column("current_stage_index", sa.Integer(), nullable=False, server_default=sa.text("0")),
+ sa.Column(
+ "current_stage_id",
+ sa.String(length=100),
+ nullable=False,
+ server_default=sa.text("''"),
+ ),
+ sa.Column("loop_count", sa.Integer(), nullable=False, server_default=sa.text("0")),
+ sa.Column("total_actions_count", sa.Integer(), nullable=False, server_default=sa.text("0")),
+ sa.Column(
+ "character_states",
+ sa.JSON(),
+ nullable=False,
+ server_default=sa.text("'{}'::json"),
+ comment="Per-character state JSON",
+ ),
+ sa.Column(
+ "stage_history",
+ sa.JSON(),
+ nullable=False,
+ server_default=sa.text("'[]'::json"),
+ comment="JSON array of completed stage records",
+ ),
+ sa.Column(
+ "started_at",
+ sa.DateTime(timezone=True),
+ server_default=sa.text("now()"),
+ nullable=False,
+ ),
+ sa.Column("stopped_at", sa.DateTime(timezone=True), nullable=True),
+ sa.Column("error_message", sa.Text(), nullable=True),
+ sa.PrimaryKeyConstraint("id"),
+ )
+ op.create_index(
+ op.f("ix_pipeline_runs_pipeline_id"),
+ "pipeline_runs",
+ ["pipeline_id"],
+ unique=False,
+ )
+
+
+def downgrade() -> None:
+ op.drop_index(op.f("ix_pipeline_runs_pipeline_id"), table_name="pipeline_runs")
+ op.drop_table("pipeline_runs")
+ op.drop_table("pipeline_configs")
diff --git a/backend/alembic/versions/007_add_user_token_hash_to_app_errors.py b/backend/alembic/versions/007_add_user_token_hash_to_app_errors.py
new file mode 100644
index 0000000..f4317a9
--- /dev/null
+++ b/backend/alembic/versions/007_add_user_token_hash_to_app_errors.py
@@ -0,0 +1,40 @@
+"""Add user_token_hash column to app_errors for per-user scoping
+
+Revision ID: 007_user_token_hash
+Revises: 006_pipelines
+Create Date: 2026-03-01
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision: str = "007_user_token_hash"
+down_revision: Union[str, None] = "006_pipelines"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ op.add_column(
+ "app_errors",
+ sa.Column(
+ "user_token_hash",
+ sa.String(length=64),
+ nullable=True,
+ comment="SHA-256 hash of the user API token",
+ ),
+ )
+ op.create_index(
+ op.f("ix_app_errors_user_token_hash"),
+ "app_errors",
+ ["user_token_hash"],
+ unique=False,
+ )
+
+
+def downgrade() -> None:
+ op.drop_index(op.f("ix_app_errors_user_token_hash"), table_name="app_errors")
+ op.drop_column("app_errors", "user_token_hash")
diff --git a/backend/app/api/auth.py b/backend/app/api/auth.py
index 7163300..8e55178 100644
--- a/backend/app/api/auth.py
+++ b/backend/app/api/auth.py
@@ -1,8 +1,9 @@
-"""Auth endpoints for runtime API token management.
+"""Auth endpoints for per-user API token management.
-When no ARTIFACTS_TOKEN is set in the environment, users can provide
-their own token through the UI. The token is stored in memory only
-and must be re-sent if the backend restarts.
+Each user provides their own Artifacts API token via the frontend.
+The token is stored in the browser's localStorage and sent with every
+request as the ``X-API-Token`` header. The backend validates the token
+but does NOT store it globally — this allows true multi-user support.
"""
import logging
@@ -12,7 +13,6 @@ from fastapi import APIRouter, Request
from pydantic import BaseModel
from app.config import settings
-from app.services.artifacts_client import ArtifactsClient
logger = logging.getLogger(__name__)
@@ -21,7 +21,7 @@ router = APIRouter(prefix="/api/auth", tags=["auth"])
class AuthStatus(BaseModel):
has_token: bool
- source: str # "env", "user", or "none"
+ source: str # "header", "env", or "none"
class SetTokenRequest(BaseModel):
@@ -37,15 +37,24 @@ class SetTokenResponse(BaseModel):
@router.get("/status", response_model=AuthStatus)
async def auth_status(request: Request) -> AuthStatus:
- client: ArtifactsClient = request.app.state.artifacts_client
- return AuthStatus(
- has_token=client.has_token,
- source=client.token_source,
- )
+ """Check whether the *requesting* client has a valid token.
+
+ The frontend sends the token in the ``X-API-Token`` header.
+ This endpoint tells the frontend whether that token is present.
+ """
+ token = request.headers.get("X-API-Token")
+ if token:
+ return AuthStatus(has_token=True, source="header")
+ return AuthStatus(has_token=False, source="none")
@router.post("/token", response_model=SetTokenResponse)
-async def set_token(body: SetTokenRequest, request: Request) -> SetTokenResponse:
+async def validate_token(body: SetTokenRequest) -> SetTokenResponse:
+ """Validate an Artifacts API token.
+
+ Does NOT store the token on the server. The frontend is responsible
+ for persisting it in localStorage and sending it with every request.
+ """
token = body.token.strip()
if not token:
return SetTokenResponse(success=False, source="none", error="Token cannot be empty")
@@ -78,37 +87,11 @@ async def set_token(body: SetTokenRequest, request: Request) -> SetTokenResponse
error="Could not validate token. Check your network connection.",
)
- # Token is valid — apply it
- client: ArtifactsClient = request.app.state.artifacts_client
- client.set_token(token)
-
- # Reconnect WebSocket with new token
- game_ws_client = getattr(request.app.state, "game_ws_client", None)
- if game_ws_client is not None:
- try:
- await game_ws_client.reconnect_with_token(token)
- except Exception:
- logger.exception("Failed to reconnect WebSocket with new token")
-
- logger.info("API token updated via UI (source: user)")
+ logger.info("API token validated via UI")
return SetTokenResponse(success=True, source="user")
@router.delete("/token")
-async def clear_token(request: Request) -> AuthStatus:
- client: ArtifactsClient = request.app.state.artifacts_client
- client.clear_token()
-
- # Reconnect WebSocket with env token (or empty)
- game_ws_client = getattr(request.app.state, "game_ws_client", None)
- if game_ws_client is not None and settings.artifacts_token:
- try:
- await game_ws_client.reconnect_with_token(settings.artifacts_token)
- except Exception:
- logger.exception("Failed to reconnect WebSocket after token clear")
-
- logger.info("API token cleared, reverted to env")
- return AuthStatus(
- has_token=client.has_token,
- source=client.token_source,
- )
+async def clear_token() -> AuthStatus:
+ """No-op on the backend — the frontend clears its own localStorage."""
+ return AuthStatus(has_token=False, source="none")
diff --git a/backend/app/api/automations.py b/backend/app/api/automations.py
index 244bc71..f5390a7 100644
--- a/backend/app/api/automations.py
+++ b/backend/app/api/automations.py
@@ -6,6 +6,7 @@ from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload
+from app.api.deps import get_user_character_names
from app.database import async_session_factory
from app.engine.manager import AutomationManager
from app.models.automation import AutomationConfig, AutomationLog, AutomationRun
@@ -46,9 +47,14 @@ def _get_manager(request: Request) -> AutomationManager:
@router.get("/", response_model=list[AutomationConfigResponse])
async def list_configs(request: Request) -> list[AutomationConfigResponse]:
- """List all automation configurations with their current status."""
+ """List automation configurations belonging to the current user."""
+ user_chars = await get_user_character_names(request)
async with async_session_factory() as db:
- stmt = select(AutomationConfig).order_by(AutomationConfig.id)
+ stmt = (
+ select(AutomationConfig)
+ .where(AutomationConfig.character_name.in_(user_chars))
+ .order_by(AutomationConfig.id)
+ )
result = await db.execute(stmt)
configs = result.scalars().all()
return [AutomationConfigResponse.model_validate(c) for c in configs]
diff --git a/backend/app/api/bank.py b/backend/app/api/bank.py
index adeadf5..eb07eb8 100644
--- a/backend/app/api/bank.py
+++ b/backend/app/api/bank.py
@@ -5,8 +5,8 @@ from fastapi import APIRouter, HTTPException, Request
from httpx import HTTPStatusError
from pydantic import BaseModel, Field
+from app.api.deps import get_user_client
from app.database import async_session_factory
-from app.services.artifacts_client import ArtifactsClient
from app.services.bank_service import BankService
from app.services.game_data_cache import GameDataCacheService
@@ -15,10 +15,6 @@ logger = logging.getLogger(__name__)
router = APIRouter(prefix="/api", tags=["bank"])
-def _get_client(request: Request) -> ArtifactsClient:
- return request.app.state.artifacts_client
-
-
def _get_cache_service(request: Request) -> GameDataCacheService:
return request.app.state.cache_service
@@ -33,11 +29,17 @@ class ManualActionRequest(BaseModel):
action: str = Field(
...,
- description="Action to perform: 'move', 'fight', 'gather', 'rest'",
+ description=(
+ "Action to perform: move, fight, gather, rest, equip, unequip, "
+ "use_item, deposit, withdraw, deposit_gold, withdraw_gold, "
+ "craft, recycle, ge_buy, ge_create_buy, ge_sell, ge_fill, ge_cancel, "
+ "task_new, task_trade, task_complete, task_exchange, task_cancel, "
+ "npc_buy, npc_sell"
+ ),
)
params: dict = Field(
default_factory=dict,
- description="Action parameters (e.g. {x, y} for move)",
+ description="Action parameters (varies per action type)",
)
@@ -49,7 +51,7 @@ class ManualActionRequest(BaseModel):
@router.get("/bank")
async def get_bank(request: Request) -> dict[str, Any]:
"""Return bank details with enriched item data from game cache."""
- client = _get_client(request)
+ client = get_user_client(request)
cache_service = _get_cache_service(request)
bank_service = BankService()
@@ -75,7 +77,7 @@ async def get_bank(request: Request) -> dict[str, Any]:
@router.get("/bank/summary")
async def get_bank_summary(request: Request) -> dict[str, Any]:
"""Return a summary of bank contents: gold, item count, slots."""
- client = _get_client(request)
+ client = get_user_client(request)
bank_service = BankService()
try:
@@ -87,6 +89,16 @@ async def get_bank_summary(request: Request) -> dict[str, Any]:
) from exc
+def _require(params: dict, *keys: str) -> None:
+ """Raise 400 if any required key is missing from params."""
+ missing = [k for k in keys if params.get(k) is None]
+ if missing:
+ raise HTTPException(
+ status_code=400,
+ detail=f"Missing required params: {', '.join(missing)}",
+ )
+
+
@router.post("/characters/{name}/action")
async def manual_action(
name: str,
@@ -95,35 +107,154 @@ async def manual_action(
) -> dict[str, Any]:
"""Execute a manual action on a character.
- Supported actions:
- - **move**: Move to coordinates. Params: {"x": int, "y": int}
- - **fight**: Fight the monster at the current tile. No params.
- - **gather**: Gather the resource at the current tile. No params.
- - **rest**: Rest to recover HP. No params.
+ Supported actions and their params:
+ - **move**: {x: int, y: int}
+ - **fight**: no params
+ - **gather**: no params
+ - **rest**: no params
+ - **equip**: {code: str, slot: str, quantity?: int}
+ - **unequip**: {slot: str, quantity?: int}
+ - **use_item**: {code: str, quantity?: int}
+ - **deposit**: {code: str, quantity: int}
+ - **withdraw**: {code: str, quantity: int}
+ - **deposit_gold**: {quantity: int}
+ - **withdraw_gold**: {quantity: int}
+ - **craft**: {code: str, quantity?: int}
+ - **recycle**: {code: str, quantity?: int}
+ - **ge_buy**: {id: str, quantity: int} — buy from an existing sell order
+ - **ge_create_buy**: {code: str, quantity: int, price: int} — create a standing buy order
+ - **ge_sell**: {code: str, quantity: int, price: int} — create a sell order
+ - **ge_fill**: {id: str, quantity: int} — fill an existing buy order
+ - **ge_cancel**: {order_id: str}
+ - **task_new**: no params
+ - **task_trade**: {code: str, quantity: int}
+ - **task_complete**: no params
+ - **task_exchange**: no params
+ - **task_cancel**: no params
+ - **npc_buy**: {code: str, quantity: int}
+ - **npc_sell**: {code: str, quantity: int}
"""
- client = _get_client(request)
+ client = get_user_client(request)
+ p = body.params
try:
match body.action:
+ # --- Basic actions ---
case "move":
- x = body.params.get("x")
- y = body.params.get("y")
- if x is None or y is None:
- raise HTTPException(
- status_code=400,
- detail="Move action requires 'x' and 'y' in params",
- )
- result = await client.move(name, int(x), int(y))
+ _require(p, "x", "y")
+ result = await client.move(name, int(p["x"]), int(p["y"]))
case "fight":
result = await client.fight(name)
case "gather":
result = await client.gather(name)
case "rest":
result = await client.rest(name)
+
+ # --- Equipment ---
+ case "equip":
+ _require(p, "code", "slot")
+ result = await client.equip(
+ name, p["code"], p["slot"], int(p.get("quantity", 1))
+ )
+ case "unequip":
+ _require(p, "slot")
+ result = await client.unequip(
+ name, p["slot"], int(p.get("quantity", 1))
+ )
+
+ # --- Consumables ---
+ case "use_item":
+ _require(p, "code")
+ result = await client.use_item(
+ name, p["code"], int(p.get("quantity", 1))
+ )
+
+ # --- Bank ---
+ case "deposit":
+ _require(p, "code", "quantity")
+ result = await client.deposit_item(
+ name, p["code"], int(p["quantity"])
+ )
+ case "withdraw":
+ _require(p, "code", "quantity")
+ result = await client.withdraw_item(
+ name, p["code"], int(p["quantity"])
+ )
+ case "deposit_gold":
+ _require(p, "quantity")
+ result = await client.deposit_gold(name, int(p["quantity"]))
+ case "withdraw_gold":
+ _require(p, "quantity")
+ result = await client.withdraw_gold(name, int(p["quantity"]))
+
+ # --- Crafting ---
+ case "craft":
+ _require(p, "code")
+ result = await client.craft(
+ name, p["code"], int(p.get("quantity", 1))
+ )
+ case "recycle":
+ _require(p, "code")
+ result = await client.recycle(
+ name, p["code"], int(p.get("quantity", 1))
+ )
+
+ # --- Grand Exchange ---
+ case "ge_buy":
+ _require(p, "id", "quantity")
+ result = await client.ge_buy(
+ name, str(p["id"]), int(p["quantity"])
+ )
+ case "ge_create_buy":
+ _require(p, "code", "quantity", "price")
+ result = await client.ge_create_buy_order(
+ name, p["code"], int(p["quantity"]), int(p["price"])
+ )
+ case "ge_sell":
+ _require(p, "code", "quantity", "price")
+ result = await client.ge_sell_order(
+ name, p["code"], int(p["quantity"]), int(p["price"])
+ )
+ case "ge_fill":
+ _require(p, "id", "quantity")
+ result = await client.ge_fill_buy_order(
+ name, str(p["id"]), int(p["quantity"])
+ )
+ case "ge_cancel":
+ _require(p, "order_id")
+ result = await client.ge_cancel(name, p["order_id"])
+
+ # --- Tasks ---
+ case "task_new":
+ result = await client.task_new(name)
+ case "task_trade":
+ _require(p, "code", "quantity")
+ result = await client.task_trade(
+ name, p["code"], int(p["quantity"])
+ )
+ case "task_complete":
+ result = await client.task_complete(name)
+ case "task_exchange":
+ result = await client.task_exchange(name)
+ case "task_cancel":
+ result = await client.task_cancel(name)
+
+ # --- NPC ---
+ case "npc_buy":
+ _require(p, "code", "quantity")
+ result = await client.npc_buy(
+ name, p["code"], int(p["quantity"])
+ )
+ case "npc_sell":
+ _require(p, "code", "quantity")
+ result = await client.npc_sell(
+ name, p["code"], int(p["quantity"])
+ )
+
case _:
raise HTTPException(
status_code=400,
- detail=f"Unknown action: {body.action!r}. Supported: move, fight, gather, rest",
+ detail=f"Unknown action: {body.action!r}",
)
except HTTPStatusError as exc:
raise HTTPException(
diff --git a/backend/app/api/characters.py b/backend/app/api/characters.py
index 8f2629f..ae3496c 100644
--- a/backend/app/api/characters.py
+++ b/backend/app/api/characters.py
@@ -1,17 +1,13 @@
from fastapi import APIRouter, HTTPException, Request
from httpx import HTTPStatusError
+from app.api.deps import get_user_client
from app.schemas.game import CharacterSchema
-from app.services.artifacts_client import ArtifactsClient
from app.services.character_service import CharacterService
router = APIRouter(prefix="/api/characters", tags=["characters"])
-def _get_client(request: Request) -> ArtifactsClient:
- return request.app.state.artifacts_client
-
-
def _get_service(request: Request) -> CharacterService:
return request.app.state.character_service
@@ -19,7 +15,7 @@ def _get_service(request: Request) -> CharacterService:
@router.get("/", response_model=list[CharacterSchema])
async def list_characters(request: Request) -> list[CharacterSchema]:
"""Return all characters belonging to the authenticated account."""
- client = _get_client(request)
+ client = get_user_client(request)
service = _get_service(request)
try:
return await service.get_all(client)
@@ -33,7 +29,7 @@ async def list_characters(request: Request) -> list[CharacterSchema]:
@router.get("/{name}", response_model=CharacterSchema)
async def get_character(name: str, request: Request) -> CharacterSchema:
"""Return a single character by name."""
- client = _get_client(request)
+ client = get_user_client(request)
service = _get_service(request)
try:
return await service.get_one(client, name)
diff --git a/backend/app/api/dashboard.py b/backend/app/api/dashboard.py
index e21e6f1..a4c711a 100644
--- a/backend/app/api/dashboard.py
+++ b/backend/app/api/dashboard.py
@@ -3,8 +3,8 @@ import logging
from fastapi import APIRouter, HTTPException, Request
from httpx import HTTPStatusError
+from app.api.deps import get_user_client
from app.schemas.game import DashboardData
-from app.services.artifacts_client import ArtifactsClient
from app.services.character_service import CharacterService
logger = logging.getLogger(__name__)
@@ -12,10 +12,6 @@ logger = logging.getLogger(__name__)
router = APIRouter(prefix="/api", tags=["dashboard"])
-def _get_client(request: Request) -> ArtifactsClient:
- return request.app.state.artifacts_client
-
-
def _get_service(request: Request) -> CharacterService:
return request.app.state.character_service
@@ -23,7 +19,7 @@ def _get_service(request: Request) -> CharacterService:
@router.get("/dashboard", response_model=DashboardData)
async def get_dashboard(request: Request) -> DashboardData:
"""Return aggregated dashboard data: all characters + server status."""
- client = _get_client(request)
+ client = get_user_client(request)
service = _get_service(request)
try:
diff --git a/backend/app/api/deps.py b/backend/app/api/deps.py
new file mode 100644
index 0000000..59cfe25
--- /dev/null
+++ b/backend/app/api/deps.py
@@ -0,0 +1,47 @@
+"""Shared FastAPI dependencies for API endpoints."""
+
+import hashlib
+
+from fastapi import HTTPException, Request
+
+from app.services.artifacts_client import ArtifactsClient
+
+
+def get_user_client(request: Request) -> ArtifactsClient:
+ """Return an ArtifactsClient scoped to the requesting user's token.
+
+ Reads the ``X-API-Token`` header sent by the frontend and creates a
+ lightweight clone of the global client that uses that token. Falls
+ back to the global client when no per-request token is provided (e.g.
+ for public / unauthenticated endpoints).
+ """
+ token = request.headers.get("X-API-Token")
+ base_client: ArtifactsClient = request.app.state.artifacts_client
+
+ if token:
+ return base_client.with_token(token)
+
+ # No per-request token — use the global client if it has a token
+ if base_client.has_token:
+ return base_client
+
+ raise HTTPException(status_code=401, detail="No API token provided")
+
+
+async def get_user_character_names(request: Request) -> list[str]:
+ """Return the character names belonging to the requesting user.
+
+ Calls the Artifacts API with the user's token to get their characters,
+ then returns just the names. Used to scope DB queries to a single user.
+ """
+ client = get_user_client(request)
+ characters = await client.get_characters()
+ return [c.name for c in characters]
+
+
+def get_token_hash(request: Request) -> str | None:
+ """Return a SHA-256 hash of the user's API token, or None."""
+ token = request.headers.get("X-API-Token")
+ if token:
+ return hashlib.sha256(token.encode()).hexdigest()
+ return None
diff --git a/backend/app/api/errors.py b/backend/app/api/errors.py
new file mode 100644
index 0000000..f18c28c
--- /dev/null
+++ b/backend/app/api/errors.py
@@ -0,0 +1,189 @@
+"""Errors API router - browse, filter, resolve, and report errors.
+
+All read endpoints are scoped to the requesting user's token so that
+one user never sees errors belonging to another user.
+"""
+
+import logging
+from datetime import datetime, timedelta, timezone
+
+from fastapi import APIRouter, HTTPException, Query, Request
+from sqlalchemy import func, select
+
+from app.database import async_session_factory
+from app.models.app_error import AppError
+from app.schemas.errors import (
+ AppErrorListResponse,
+ AppErrorResponse,
+ AppErrorStats,
+ FrontendErrorReport,
+)
+from app.services.error_service import hash_token, log_error
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/api/errors", tags=["errors"])
+
+
+def _get_user_hash(request: Request) -> str | None:
+ """Extract and hash the user token from the request."""
+ token = request.headers.get("X-API-Token")
+ return hash_token(token) if token else None
+
+
+@router.get("/", response_model=AppErrorListResponse)
+async def list_errors(
+ request: Request,
+ severity: str = Query(default="", description="Filter by severity"),
+ source: str = Query(default="", description="Filter by source"),
+ resolved: str = Query(default="", description="Filter by resolved status: true/false"),
+ page: int = Query(default=1, ge=1),
+ size: int = Query(default=50, ge=1, le=100),
+) -> AppErrorListResponse:
+ """List errors with optional filtering and pagination.
+
+ Only returns errors belonging to the authenticated user.
+ """
+ user_hash = _get_user_hash(request)
+ if not user_hash:
+ return AppErrorListResponse(errors=[], total=0, page=1, pages=1)
+
+ async with async_session_factory() as db:
+ user_filter = AppError.user_token_hash == user_hash
+ stmt = select(AppError).where(user_filter).order_by(AppError.created_at.desc())
+ count_stmt = select(func.count(AppError.id)).where(user_filter)
+
+ if severity:
+ stmt = stmt.where(AppError.severity == severity)
+ count_stmt = count_stmt.where(AppError.severity == severity)
+ if source:
+ stmt = stmt.where(AppError.source == source)
+ count_stmt = count_stmt.where(AppError.source == source)
+ if resolved in ("true", "false"):
+ val = resolved == "true"
+ stmt = stmt.where(AppError.resolved == val)
+ count_stmt = count_stmt.where(AppError.resolved == val)
+
+ total = (await db.execute(count_stmt)).scalar() or 0
+ pages = max(1, (total + size - 1) // size)
+
+ offset = (page - 1) * size
+ stmt = stmt.offset(offset).limit(size)
+ result = await db.execute(stmt)
+ rows = result.scalars().all()
+
+ return AppErrorListResponse(
+ errors=[AppErrorResponse.model_validate(r) for r in rows],
+ total=total,
+ page=page,
+ pages=pages,
+ )
+
+
+@router.get("/stats", response_model=AppErrorStats)
+async def error_stats(request: Request) -> AppErrorStats:
+ """Aggregated error statistics scoped to the authenticated user."""
+ user_hash = _get_user_hash(request)
+ if not user_hash:
+ return AppErrorStats()
+
+ async with async_session_factory() as db:
+ user_filter = AppError.user_token_hash == user_hash
+
+ total = (
+ await db.execute(select(func.count(AppError.id)).where(user_filter))
+ ).scalar() or 0
+ unresolved = (
+ await db.execute(
+ select(func.count(AppError.id)).where(
+ user_filter, AppError.resolved == False # noqa: E712
+ )
+ )
+ ).scalar() or 0
+
+ one_hour_ago = datetime.now(timezone.utc) - timedelta(hours=1)
+ last_hour = (
+ await db.execute(
+ select(func.count(AppError.id)).where(
+ user_filter, AppError.created_at >= one_hour_ago
+ )
+ )
+ ).scalar() or 0
+
+ # By severity
+ sev_rows = (
+ await db.execute(
+ select(AppError.severity, func.count(AppError.id))
+ .where(user_filter)
+ .group_by(AppError.severity)
+ )
+ ).all()
+ by_severity = {row[0]: row[1] for row in sev_rows}
+
+ # By source
+ src_rows = (
+ await db.execute(
+ select(AppError.source, func.count(AppError.id))
+ .where(user_filter)
+ .group_by(AppError.source)
+ )
+ ).all()
+ by_source = {row[0]: row[1] for row in src_rows}
+
+ return AppErrorStats(
+ total=total,
+ unresolved=unresolved,
+ last_hour=last_hour,
+ by_severity=by_severity,
+ by_source=by_source,
+ )
+
+
+@router.post("/{error_id}/resolve", response_model=AppErrorResponse)
+async def resolve_error(error_id: int, request: Request) -> AppErrorResponse:
+ """Mark an error as resolved (only if it belongs to the requesting user)."""
+ user_hash = _get_user_hash(request)
+
+ async with async_session_factory() as db:
+ stmt = select(AppError).where(AppError.id == error_id)
+ if user_hash:
+ stmt = stmt.where(AppError.user_token_hash == user_hash)
+ result = await db.execute(stmt)
+ record = result.scalar_one_or_none()
+ if record is None:
+ raise HTTPException(status_code=404, detail="Error not found")
+ record.resolved = True
+ await db.commit()
+ await db.refresh(record)
+ return AppErrorResponse.model_validate(record)
+
+
+@router.post("/report", status_code=201)
+async def report_frontend_error(
+ body: FrontendErrorReport, request: Request
+) -> dict[str, str]:
+ """Receive error reports from the frontend."""
+ user_hash = _get_user_hash(request)
+
+ await log_error(
+ async_session_factory,
+ severity=body.severity,
+ source="frontend",
+ error_type=body.error_type,
+ message=body.message,
+ context=body.context,
+ user_token_hash=user_hash,
+ )
+
+ # Also capture in Sentry if available
+ try:
+ import sentry_sdk
+
+ sentry_sdk.capture_message(
+ f"[Frontend] {body.error_type}: {body.message}",
+ level="error",
+ )
+ except Exception:
+ pass
+
+ return {"status": "recorded"}
diff --git a/backend/app/api/events.py b/backend/app/api/events.py
index 2a3d897..afe429f 100644
--- a/backend/app/api/events.py
+++ b/backend/app/api/events.py
@@ -7,23 +7,19 @@ from fastapi import APIRouter, HTTPException, Query, Request
from httpx import HTTPStatusError
from sqlalchemy import select
+from app.api.deps import get_user_client
from app.database import async_session_factory
from app.models.event_log import EventLog
-from app.services.artifacts_client import ArtifactsClient
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/api/events", tags=["events"])
-def _get_client(request: Request) -> ArtifactsClient:
- return request.app.state.artifacts_client
-
-
@router.get("/")
async def get_active_events(request: Request) -> dict[str, Any]:
"""Get currently active game events from the Artifacts API."""
- client = _get_client(request)
+ client = get_user_client(request)
try:
events = await client.get_events()
diff --git a/backend/app/api/exchange.py b/backend/app/api/exchange.py
index f8c04ef..65fef71 100644
--- a/backend/app/api/exchange.py
+++ b/backend/app/api/exchange.py
@@ -6,8 +6,8 @@ from typing import Any
from fastapi import APIRouter, HTTPException, Query, Request
from httpx import HTTPStatusError
+from app.api.deps import get_user_client
from app.database import async_session_factory
-from app.services.artifacts_client import ArtifactsClient
from app.services.exchange_service import ExchangeService
logger = logging.getLogger(__name__)
@@ -15,10 +15,6 @@ logger = logging.getLogger(__name__)
router = APIRouter(prefix="/api/exchange", tags=["exchange"])
-def _get_client(request: Request) -> ArtifactsClient:
- return request.app.state.artifacts_client
-
-
def _get_exchange_service(request: Request) -> ExchangeService:
service: ExchangeService | None = getattr(request.app.state, "exchange_service", None)
if service is None:
@@ -36,7 +32,7 @@ async def browse_orders(
type: str | None = Query(default=None, description="Filter by order type (sell or buy)"),
) -> dict[str, Any]:
"""Browse all active Grand Exchange orders (public market data)."""
- client = _get_client(request)
+ client = get_user_client(request)
service = _get_exchange_service(request)
try:
@@ -53,7 +49,7 @@ async def browse_orders(
@router.get("/my-orders")
async def get_my_orders(request: Request) -> dict[str, Any]:
"""Get the authenticated account's own active GE orders."""
- client = _get_client(request)
+ client = get_user_client(request)
service = _get_exchange_service(request)
try:
@@ -70,7 +66,7 @@ async def get_my_orders(request: Request) -> dict[str, Any]:
@router.get("/history")
async def get_history(request: Request) -> dict[str, Any]:
"""Get the authenticated account's GE transaction history."""
- client = _get_client(request)
+ client = get_user_client(request)
service = _get_exchange_service(request)
try:
@@ -87,7 +83,7 @@ async def get_history(request: Request) -> dict[str, Any]:
@router.get("/sell-history/{item_code}")
async def get_sell_history(item_code: str, request: Request) -> dict[str, Any]:
"""Get public sale history for a specific item (last 7 days from API)."""
- client = _get_client(request)
+ client = get_user_client(request)
service = _get_exchange_service(request)
try:
diff --git a/backend/app/api/logs.py b/backend/app/api/logs.py
index 0d13301..873f193 100644
--- a/backend/app/api/logs.py
+++ b/backend/app/api/logs.py
@@ -6,6 +6,7 @@ from typing import Any
from fastapi import APIRouter, Query, Request
from sqlalchemy import select
+from app.api.deps import get_user_character_names, get_user_client
from app.database import async_session_factory
from app.models.automation import AutomationConfig, AutomationLog, AutomationRun
from app.services.analytics_service import AnalyticsService
@@ -17,14 +18,112 @@ router = APIRouter(prefix="/api/logs", tags=["logs"])
@router.get("/")
async def get_logs(
+ request: Request,
character: str = Query(default="", description="Character name to filter logs"),
- limit: int = Query(default=50, ge=1, le=200, description="Max entries to return"),
+ type: str = Query(default="", description="Action type to filter (e.g. fight, gathering)"),
+ page: int = Query(default=1, ge=1, description="Page number"),
+ size: int = Query(default=50, ge=1, le=100, description="Page size"),
) -> dict[str, Any]:
- """Get automation action logs from the database.
+ """Get action logs from the Artifacts game API.
- Joins automation_logs -> automation_runs -> automation_configs
- to include character_name with each log entry.
+ Fetches the last 5000 character actions directly from the game server.
+ Falls back to local automation logs if the game API is unavailable.
"""
+ client = get_user_client(request)
+
+ try:
+ if character:
+ result = await client.get_character_logs(character, page=page, size=size)
+ else:
+ result = await client.get_logs(page=page, size=size)
+
+ raw_logs = result.get("data", [])
+ total = result.get("total", 0)
+ pages = result.get("pages", 1)
+
+ # Filter by type if specified
+ if type:
+ raw_logs = [log for log in raw_logs if log.get("type") == type]
+
+ logs = []
+ for entry in raw_logs:
+ content = entry.get("content", {})
+ action_type = entry.get("type", "unknown")
+
+ # Build details - description is the main human-readable field
+ details: dict[str, Any] = {}
+ description = entry.get("description", "")
+ if description:
+ details["description"] = description
+
+ # Extract structured data per action type
+ if "fight" in content:
+ fight = content["fight"]
+ details["monster"] = fight.get("opponent", "")
+ details["result"] = fight.get("result", "")
+ details["turns"] = fight.get("turns", 0)
+
+ if "gathering" in content:
+ g = content["gathering"]
+ details["resource"] = g.get("resource", "")
+ details["skill"] = g.get("skill", "")
+ details["xp"] = g.get("xp_gained", 0)
+
+ if "drops" in content:
+ items = content["drops"].get("items", [])
+ if items:
+ details["drops"] = [
+ f"{i.get('quantity', 1)}x {i.get('code', '?')}" for i in items
+ ]
+
+ if "map" in content:
+ m = content["map"]
+ details["x"] = m.get("x")
+ details["y"] = m.get("y")
+ details["map_name"] = m.get("name", "")
+
+ if "crafting" in content:
+ c = content["crafting"]
+ details["item"] = c.get("code", "")
+ details["skill"] = c.get("skill", "")
+ details["xp"] = c.get("xp_gained", 0)
+
+ if "hp_restored" in content:
+ details["hp_restored"] = content["hp_restored"]
+
+ logs.append({
+ "id": hash(f"{entry.get('character', '')}-{entry.get('created_at', '')}") & 0x7FFFFFFF,
+ "character_name": entry.get("character", ""),
+ "action_type": action_type,
+ "details": details,
+ "success": True,
+ "created_at": entry.get("created_at", ""),
+ "cooldown": entry.get("cooldown", 0),
+ })
+
+ return {
+ "logs": logs,
+ "total": total,
+ "page": page,
+ "pages": pages,
+ }
+
+ except Exception:
+ logger.warning("Failed to fetch logs from game API, falling back to local DB", exc_info=True)
+ user_chars = await get_user_character_names(request)
+ return await _get_local_logs(character, type, page, size, user_chars)
+
+
+async def _get_local_logs(
+ character: str,
+ type: str,
+ page: int,
+ size: int,
+ user_characters: list[str] | None = None,
+) -> dict[str, Any]:
+ """Fallback: get automation logs from local database."""
+ offset = (page - 1) * size
+
async with async_session_factory() as db:
stmt = (
select(
@@ -38,12 +137,20 @@ async def get_logs(
.join(AutomationRun, AutomationLog.run_id == AutomationRun.id)
.join(AutomationConfig, AutomationRun.config_id == AutomationConfig.id)
.order_by(AutomationLog.created_at.desc())
- .limit(limit)
)
+ # Scope to the current user's characters
+ if user_characters is not None:
+ stmt = stmt.where(AutomationConfig.character_name.in_(user_characters))
+
if character:
stmt = stmt.where(AutomationConfig.character_name == character)
+ if type:
+ stmt = stmt.where(AutomationLog.action_type == type)
+
+ stmt = stmt.offset(offset).limit(size)
+
result = await db.execute(stmt)
rows = result.all()
@@ -59,6 +166,9 @@ async def get_logs(
}
for row in rows
],
+ "total": len(rows),
+ "page": page,
+ "pages": 1,
}
@@ -71,15 +181,21 @@ async def get_analytics(
"""Get analytics aggregations for a character.
Returns XP history, gold history, and estimated actions per hour.
- If no character is specified, aggregates across all characters with snapshots.
+ If no character is specified, aggregates across the current user's characters.
"""
analytics = AnalyticsService()
+ user_chars = await get_user_character_names(request)
async with async_session_factory() as db:
if character:
+ # Verify the requested character belongs to the current user
+ if character not in user_chars:
+ return {"xp_history": [], "gold_history": [], "actions_per_hour": 0}
characters = [character]
else:
- characters = await analytics.get_tracked_characters(db)
+ # Only aggregate characters belonging to the current user
+ tracked = await analytics.get_tracked_characters(db)
+ characters = [c for c in tracked if c in user_chars]
all_xp: list[dict[str, Any]] = []
all_gold: list[dict[str, Any]] = []
diff --git a/backend/app/api/pipelines.py b/backend/app/api/pipelines.py
new file mode 100644
index 0000000..48d72ba
--- /dev/null
+++ b/backend/app/api/pipelines.py
@@ -0,0 +1,267 @@
+import logging
+
+from fastapi import APIRouter, HTTPException, Request
+from sqlalchemy import select
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm import selectinload
+
+from app.api.deps import get_user_character_names
+from app.database import async_session_factory
+from app.engine.manager import AutomationManager
+from app.models.automation import AutomationLog
+from app.models.pipeline import PipelineConfig, PipelineRun
+from app.schemas.automation import AutomationLogResponse
+from app.schemas.pipeline import (
+ PipelineConfigCreate,
+ PipelineConfigDetailResponse,
+ PipelineConfigResponse,
+ PipelineConfigUpdate,
+ PipelineRunResponse,
+ PipelineStatusResponse,
+)
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/api/pipelines", tags=["pipelines"])
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _get_manager(request: Request) -> AutomationManager:
+ manager: AutomationManager | None = getattr(request.app.state, "automation_manager", None)
+ if manager is None:
+ raise HTTPException(
+ status_code=503,
+ detail="Automation engine is not available",
+ )
+ return manager
+
+
+# ---------------------------------------------------------------------------
+# CRUD -- Pipeline Configs
+# ---------------------------------------------------------------------------
+
+
+def _pipeline_belongs_to_user(pipeline: PipelineConfig, user_chars: set[str]) -> bool:
+ """Check if any character in the pipeline stages belongs to the user."""
+ for stage in (pipeline.stages or []):
+ for step in (stage.get("character_steps") or []):
+ if step.get("character_name") in user_chars:
+ return True
+ return False
+
+
+@router.get("/", response_model=list[PipelineConfigResponse])
+async def list_pipelines(request: Request) -> list[PipelineConfigResponse]:
+ """List pipeline configurations belonging to the current user."""
+ user_chars = set(await get_user_character_names(request))
+ async with async_session_factory() as db:
+ stmt = select(PipelineConfig).order_by(PipelineConfig.id)
+ result = await db.execute(stmt)
+ configs = result.scalars().all()
+ return [
+ PipelineConfigResponse.model_validate(c)
+ for c in configs
+ if _pipeline_belongs_to_user(c, user_chars)
+ ]
+
+
+@router.post("/", response_model=PipelineConfigResponse, status_code=201)
+async def create_pipeline(
+ payload: PipelineConfigCreate,
+ request: Request,
+) -> PipelineConfigResponse:
+ """Create a new pipeline configuration."""
+ async with async_session_factory() as db:
+ config = PipelineConfig(
+ name=payload.name,
+ description=payload.description,
+ stages=[stage.model_dump() for stage in payload.stages],
+ loop=payload.loop,
+ max_loops=payload.max_loops,
+ )
+ db.add(config)
+ await db.commit()
+ await db.refresh(config)
+ return PipelineConfigResponse.model_validate(config)
+
+
+@router.get("/status/all", response_model=list[PipelineStatusResponse])
+async def get_all_pipeline_statuses(request: Request) -> list[PipelineStatusResponse]:
+ """Get live status for all active pipelines."""
+ manager = _get_manager(request)
+ return manager.get_all_pipeline_statuses()
+
+
+@router.get("/{pipeline_id}", response_model=PipelineConfigDetailResponse)
+async def get_pipeline(pipeline_id: int, request: Request) -> PipelineConfigDetailResponse:
+ """Get a pipeline configuration with its run history."""
+ async with async_session_factory() as db:
+ stmt = (
+ select(PipelineConfig)
+ .options(selectinload(PipelineConfig.runs))
+ .where(PipelineConfig.id == pipeline_id)
+ )
+ result = await db.execute(stmt)
+ config = result.scalar_one_or_none()
+
+ if config is None:
+ raise HTTPException(status_code=404, detail="Pipeline config not found")
+
+ return PipelineConfigDetailResponse(
+ config=PipelineConfigResponse.model_validate(config),
+ runs=[PipelineRunResponse.model_validate(r) for r in config.runs],
+ )
+
+
+@router.put("/{pipeline_id}", response_model=PipelineConfigResponse)
+async def update_pipeline(
+ pipeline_id: int,
+ payload: PipelineConfigUpdate,
+ request: Request,
+) -> PipelineConfigResponse:
+ """Update a pipeline configuration. Cannot update while running."""
+ manager = _get_manager(request)
+ if manager.is_pipeline_running(pipeline_id):
+ raise HTTPException(
+ status_code=409,
+ detail="Cannot update a pipeline while it is running. Stop it first.",
+ )
+
+ async with async_session_factory() as db:
+ config = await db.get(PipelineConfig, pipeline_id)
+ if config is None:
+ raise HTTPException(status_code=404, detail="Pipeline config not found")
+
+ if payload.name is not None:
+ config.name = payload.name
+ if payload.description is not None:
+ config.description = payload.description
+ if payload.stages is not None:
+ config.stages = [stage.model_dump() for stage in payload.stages]
+ if payload.loop is not None:
+ config.loop = payload.loop
+ if payload.max_loops is not None:
+ config.max_loops = payload.max_loops
+ if payload.enabled is not None:
+ config.enabled = payload.enabled
+
+ await db.commit()
+ await db.refresh(config)
+ return PipelineConfigResponse.model_validate(config)
+
+
+@router.delete("/{pipeline_id}", status_code=204)
+async def delete_pipeline(pipeline_id: int, request: Request) -> None:
+ """Delete a pipeline configuration. Cannot delete while running."""
+ manager = _get_manager(request)
+ if manager.is_pipeline_running(pipeline_id):
+ raise HTTPException(
+ status_code=409,
+ detail="Cannot delete a pipeline while it is running. Stop it first.",
+ )
+
+ async with async_session_factory() as db:
+ config = await db.get(PipelineConfig, pipeline_id)
+ if config is None:
+ raise HTTPException(status_code=404, detail="Pipeline config not found")
+ await db.delete(config)
+ await db.commit()
+
+
+# ---------------------------------------------------------------------------
+# Control -- Start / Stop / Pause / Resume
+# ---------------------------------------------------------------------------
+
+
+@router.post("/{pipeline_id}/start", response_model=PipelineRunResponse)
+async def start_pipeline(pipeline_id: int, request: Request) -> PipelineRunResponse:
+ """Start a pipeline from its configuration."""
+ manager = _get_manager(request)
+ try:
+ return await manager.start_pipeline(pipeline_id)
+ except ValueError as exc:
+ raise HTTPException(status_code=400, detail=str(exc)) from exc
+
+
+@router.post("/{pipeline_id}/stop", status_code=204)
+async def stop_pipeline(pipeline_id: int, request: Request) -> None:
+ """Stop a running pipeline."""
+ manager = _get_manager(request)
+ try:
+ await manager.stop_pipeline(pipeline_id)
+ except ValueError as exc:
+ raise HTTPException(status_code=400, detail=str(exc)) from exc
+
+
+@router.post("/{pipeline_id}/pause", status_code=204)
+async def pause_pipeline(pipeline_id: int, request: Request) -> None:
+ """Pause a running pipeline."""
+ manager = _get_manager(request)
+ try:
+ await manager.pause_pipeline(pipeline_id)
+ except ValueError as exc:
+ raise HTTPException(status_code=400, detail=str(exc)) from exc
+
+
+@router.post("/{pipeline_id}/resume", status_code=204)
+async def resume_pipeline(pipeline_id: int, request: Request) -> None:
+ """Resume a paused pipeline."""
+ manager = _get_manager(request)
+ try:
+ await manager.resume_pipeline(pipeline_id)
+ except ValueError as exc:
+ raise HTTPException(status_code=400, detail=str(exc)) from exc
+
+
+# ---------------------------------------------------------------------------
+# Status & Logs
+# ---------------------------------------------------------------------------
+
+
+@router.get("/{pipeline_id}/status", response_model=PipelineStatusResponse)
+async def get_pipeline_status(
+ pipeline_id: int,
+ request: Request,
+) -> PipelineStatusResponse:
+ """Get live status for a specific pipeline."""
+ manager = _get_manager(request)
+ status = manager.get_pipeline_status(pipeline_id)
+ if status is None:
+ async with async_session_factory() as db:
+ config = await db.get(PipelineConfig, pipeline_id)
+ if config is None:
+ raise HTTPException(status_code=404, detail="Pipeline config not found")
+ return PipelineStatusResponse(
+ pipeline_id=pipeline_id,
+ status="stopped",
+ total_stages=len(config.stages),
+ )
+ return status
+
+
+@router.get("/{pipeline_id}/logs", response_model=list[AutomationLogResponse])
+async def get_pipeline_logs(
+ pipeline_id: int,
+ request: Request,
+ limit: int = 100,
+) -> list[AutomationLogResponse]:
+ """Get recent logs for a pipeline (across all its runs)."""
+ async with async_session_factory() as db:
+ config = await db.get(PipelineConfig, pipeline_id)
+ if config is None:
+ raise HTTPException(status_code=404, detail="Pipeline config not found")
+
+ stmt = (
+ select(AutomationLog)
+ .join(PipelineRun, AutomationLog.run_id == PipelineRun.id)
+ .where(PipelineRun.pipeline_id == pipeline_id)
+ .order_by(AutomationLog.created_at.desc())
+ .limit(min(limit, 500))
+ )
+ result = await db.execute(stmt)
+ logs = result.scalars().all()
+ return [AutomationLogResponse.model_validate(log) for log in logs]
diff --git a/backend/app/api/workflows.py b/backend/app/api/workflows.py
new file mode 100644
index 0000000..a6f1003
--- /dev/null
+++ b/backend/app/api/workflows.py
@@ -0,0 +1,261 @@
+import logging
+
+from fastapi import APIRouter, HTTPException, Request
+from sqlalchemy import select
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm import selectinload
+
+from app.api.deps import get_user_character_names
+from app.database import async_session_factory
+from app.engine.manager import AutomationManager
+from app.models.automation import AutomationLog
+from app.models.workflow import WorkflowConfig, WorkflowRun
+from app.schemas.automation import AutomationLogResponse
+from app.schemas.workflow import (
+ WorkflowConfigCreate,
+ WorkflowConfigDetailResponse,
+ WorkflowConfigResponse,
+ WorkflowConfigUpdate,
+ WorkflowRunResponse,
+ WorkflowStatusResponse,
+)
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/api/workflows", tags=["workflows"])
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _get_manager(request: Request) -> AutomationManager:
+ manager: AutomationManager | None = getattr(request.app.state, "automation_manager", None)
+ if manager is None:
+ raise HTTPException(
+ status_code=503,
+ detail="Automation engine is not available",
+ )
+ return manager
+
+
+# ---------------------------------------------------------------------------
+# CRUD -- Workflow Configs
+# ---------------------------------------------------------------------------
+
+
+@router.get("/", response_model=list[WorkflowConfigResponse])
+async def list_workflows(request: Request) -> list[WorkflowConfigResponse]:
+ """List workflow configurations belonging to the current user."""
+ user_chars = await get_user_character_names(request)
+ async with async_session_factory() as db:
+ stmt = (
+ select(WorkflowConfig)
+ .where(WorkflowConfig.character_name.in_(user_chars))
+ .order_by(WorkflowConfig.id)
+ )
+ result = await db.execute(stmt)
+ configs = result.scalars().all()
+ return [WorkflowConfigResponse.model_validate(c) for c in configs]
+
+
+@router.post("/", response_model=WorkflowConfigResponse, status_code=201)
+async def create_workflow(
+ payload: WorkflowConfigCreate,
+ request: Request,
+) -> WorkflowConfigResponse:
+ """Create a new workflow configuration."""
+ async with async_session_factory() as db:
+ config = WorkflowConfig(
+ name=payload.name,
+ character_name=payload.character_name,
+ description=payload.description,
+ steps=[step.model_dump() for step in payload.steps],
+ loop=payload.loop,
+ max_loops=payload.max_loops,
+ )
+ db.add(config)
+ await db.commit()
+ await db.refresh(config)
+ return WorkflowConfigResponse.model_validate(config)
+
+
+@router.get("/status/all", response_model=list[WorkflowStatusResponse])
+async def get_all_workflow_statuses(request: Request) -> list[WorkflowStatusResponse]:
+ """Get live status for all active workflows."""
+ manager = _get_manager(request)
+ return manager.get_all_workflow_statuses()
+
+
+@router.get("/{workflow_id}", response_model=WorkflowConfigDetailResponse)
+async def get_workflow(workflow_id: int, request: Request) -> WorkflowConfigDetailResponse:
+ """Get a workflow configuration with its run history."""
+ async with async_session_factory() as db:
+ stmt = (
+ select(WorkflowConfig)
+ .options(selectinload(WorkflowConfig.runs))
+ .where(WorkflowConfig.id == workflow_id)
+ )
+ result = await db.execute(stmt)
+ config = result.scalar_one_or_none()
+
+ if config is None:
+ raise HTTPException(status_code=404, detail="Workflow config not found")
+
+ return WorkflowConfigDetailResponse(
+ config=WorkflowConfigResponse.model_validate(config),
+ runs=[WorkflowRunResponse.model_validate(r) for r in config.runs],
+ )
+
+
+@router.put("/{workflow_id}", response_model=WorkflowConfigResponse)
+async def update_workflow(
+ workflow_id: int,
+ payload: WorkflowConfigUpdate,
+ request: Request,
+) -> WorkflowConfigResponse:
+ """Update a workflow configuration. Cannot update while running."""
+ manager = _get_manager(request)
+ if manager.is_workflow_running(workflow_id):
+ raise HTTPException(
+ status_code=409,
+ detail="Cannot update a workflow while it is running. Stop it first.",
+ )
+
+ async with async_session_factory() as db:
+ config = await db.get(WorkflowConfig, workflow_id)
+ if config is None:
+ raise HTTPException(status_code=404, detail="Workflow config not found")
+
+ if payload.name is not None:
+ config.name = payload.name
+ if payload.description is not None:
+ config.description = payload.description
+ if payload.steps is not None:
+ config.steps = [step.model_dump() for step in payload.steps]
+ if payload.loop is not None:
+ config.loop = payload.loop
+ if payload.max_loops is not None:
+ config.max_loops = payload.max_loops
+ if payload.enabled is not None:
+ config.enabled = payload.enabled
+
+ await db.commit()
+ await db.refresh(config)
+ return WorkflowConfigResponse.model_validate(config)
+
+
+@router.delete("/{workflow_id}", status_code=204)
+async def delete_workflow(workflow_id: int, request: Request) -> None:
+ """Delete a workflow configuration. Cannot delete while running."""
+ manager = _get_manager(request)
+ if manager.is_workflow_running(workflow_id):
+ raise HTTPException(
+ status_code=409,
+ detail="Cannot delete a workflow while it is running. Stop it first.",
+ )
+
+ async with async_session_factory() as db:
+ config = await db.get(WorkflowConfig, workflow_id)
+ if config is None:
+ raise HTTPException(status_code=404, detail="Workflow config not found")
+ await db.delete(config)
+ await db.commit()
+
+
+# ---------------------------------------------------------------------------
+# Control -- Start / Stop / Pause / Resume
+# ---------------------------------------------------------------------------
+
+
+@router.post("/{workflow_id}/start", response_model=WorkflowRunResponse)
+async def start_workflow(workflow_id: int, request: Request) -> WorkflowRunResponse:
+    """Start a workflow from its configuration.
+
+    Raises:
+        HTTPException: 400 if the manager rejects the start (missing or
+            disabled config, no steps, or character already busy); 503 if
+            the automation engine is unavailable.
+    """
+    # NOTE(review): ownership is not verified — any user may start any
+    # workflow id. Confirm whether per-user scoping is intended here.
+    manager = _get_manager(request)
+    try:
+        return await manager.start_workflow(workflow_id)
+    except ValueError as exc:
+        raise HTTPException(status_code=400, detail=str(exc)) from exc
+
+
+@router.post("/{workflow_id}/stop", status_code=204)
+async def stop_workflow(workflow_id: int, request: Request) -> None:
+    """Stop a running workflow.
+
+    Raises:
+        HTTPException: 400 if no active runner exists; 503 if the engine
+            is unavailable.
+    """
+    # NOTE(review): ownership is not verified before stopping — confirm.
+    manager = _get_manager(request)
+    try:
+        await manager.stop_workflow(workflow_id)
+    except ValueError as exc:
+        raise HTTPException(status_code=400, detail=str(exc)) from exc
+
+
+@router.post("/{workflow_id}/pause", status_code=204)
+async def pause_workflow(workflow_id: int, request: Request) -> None:
+    """Pause a running workflow.
+
+    Raises:
+        HTTPException: 400 if no active runner exists or it is not in the
+            running state; 503 if the engine is unavailable.
+    """
+    manager = _get_manager(request)
+    try:
+        await manager.pause_workflow(workflow_id)
+    except ValueError as exc:
+        raise HTTPException(status_code=400, detail=str(exc)) from exc
+
+
+@router.post("/{workflow_id}/resume", status_code=204)
+async def resume_workflow(workflow_id: int, request: Request) -> None:
+    """Resume a paused workflow.
+
+    Raises:
+        HTTPException: 400 if no active runner exists or it is not paused;
+            503 if the engine is unavailable.
+    """
+    manager = _get_manager(request)
+    try:
+        await manager.resume_workflow(workflow_id)
+    except ValueError as exc:
+        raise HTTPException(status_code=400, detail=str(exc)) from exc
+
+
+# ---------------------------------------------------------------------------
+# Status & Logs
+# ---------------------------------------------------------------------------
+
+
+@router.get("/{workflow_id}/status", response_model=WorkflowStatusResponse)
+async def get_workflow_status(
+    workflow_id: int,
+    request: Request,
+) -> WorkflowStatusResponse:
+    """Get live status for a specific workflow.
+
+    Falls back to a synthetic "stopped" status built from the persisted
+    config when the engine holds no live runner for this workflow.
+
+    Raises:
+        HTTPException: 404 if there is no live status and no persisted
+            config; 503 if the automation engine is unavailable.
+    """
+    manager = _get_manager(request)
+    status = manager.get_workflow_status(workflow_id)
+    if status is None:
+        async with async_session_factory() as db:
+            config = await db.get(WorkflowConfig, workflow_id)
+            if config is None:
+                raise HTTPException(status_code=404, detail="Workflow config not found")
+            return WorkflowStatusResponse(
+                workflow_id=workflow_id,
+                character_name=config.character_name,
+                status="stopped",
+                total_steps=len(config.steps),
+            )
+    return status
+
+
+@router.get("/{workflow_id}/logs", response_model=list[AutomationLogResponse])
+async def get_workflow_logs(
+ workflow_id: int,
+ request: Request,
+ limit: int = 100,
+) -> list[AutomationLogResponse]:
+ """Get recent logs for a workflow (across all its runs)."""
+ async with async_session_factory() as db:
+ config = await db.get(WorkflowConfig, workflow_id)
+ if config is None:
+ raise HTTPException(status_code=404, detail="Workflow config not found")
+
+ # Fetch logs for all runs belonging to this workflow
+ stmt = (
+ select(AutomationLog)
+ .join(WorkflowRun, AutomationLog.run_id == WorkflowRun.id)
+ .where(WorkflowRun.workflow_id == workflow_id)
+ .order_by(AutomationLog.created_at.desc())
+ .limit(min(limit, 500))
+ )
+ result = await db.execute(stmt)
+ logs = result.scalars().all()
+ return [AutomationLogResponse.model_validate(log) for log in logs]
diff --git a/backend/app/config.py b/backend/app/config.py
index a1851b1..6e7844f 100644
--- a/backend/app/config.py
+++ b/backend/app/config.py
@@ -15,6 +15,10 @@ class Settings(BaseSettings):
data_rate_limit: int = 20 # data requests per window
data_rate_window: float = 1.0 # seconds
+ # Observability
+ sentry_dsn: str = ""
+ environment: str = "development"
+
model_config = {"env_file": ".env", "extra": "ignore"}
diff --git a/backend/app/engine/action_executor.py b/backend/app/engine/action_executor.py
new file mode 100644
index 0000000..c22bc2e
--- /dev/null
+++ b/backend/app/engine/action_executor.py
@@ -0,0 +1,150 @@
+"""Shared action execution logic.
+
+Dispatches an ``ActionPlan`` to the appropriate ``ArtifactsClient`` method.
+Used by ``AutomationRunner``, ``WorkflowRunner``, and ``CharacterWorker``
+so the match statement is defined in exactly one place.
+"""
+
+from __future__ import annotations
+
+import logging
+from typing import Any
+
+from app.engine.strategies.base import ActionPlan, ActionType
+from app.services.artifacts_client import ArtifactsClient
+
+logger = logging.getLogger(__name__)
+
+
+async def execute_action(
+    client: ArtifactsClient,
+    character_name: str,
+    plan: ActionPlan,
+) -> dict[str, Any]:
+    """Execute an action plan against the game API and return the raw result.
+
+    Dispatches on ``plan.action_type`` to the matching ``ArtifactsClient``
+    call. Required parameters are read from ``plan.params``; a missing
+    required key propagates as KeyError to the caller. Unknown action types
+    are logged and yield an empty dict rather than raising.
+    """
+    match plan.action_type:
+        case ActionType.MOVE:
+            return await client.move(
+                character_name,
+                plan.params["x"],
+                plan.params["y"],
+            )
+        case ActionType.FIGHT:
+            return await client.fight(character_name)
+        case ActionType.GATHER:
+            return await client.gather(character_name)
+        case ActionType.REST:
+            return await client.rest(character_name)
+        case ActionType.EQUIP:
+            return await client.equip(
+                character_name,
+                plan.params["code"],
+                plan.params["slot"],
+                plan.params.get("quantity", 1),
+            )
+        case ActionType.UNEQUIP:
+            return await client.unequip(
+                character_name,
+                plan.params["slot"],
+                plan.params.get("quantity", 1),
+            )
+        case ActionType.USE_ITEM:
+            return await client.use_item(
+                character_name,
+                plan.params["code"],
+                plan.params.get("quantity", 1),
+            )
+        case ActionType.DEPOSIT_ITEM:
+            return await client.deposit_item(
+                character_name,
+                plan.params["code"],
+                plan.params["quantity"],
+            )
+        case ActionType.WITHDRAW_ITEM:
+            return await client.withdraw_item(
+                character_name,
+                plan.params["code"],
+                plan.params["quantity"],
+            )
+        case ActionType.CRAFT:
+            return await client.craft(
+                character_name,
+                plan.params["code"],
+                plan.params.get("quantity", 1),
+            )
+        case ActionType.RECYCLE:
+            return await client.recycle(
+                character_name,
+                plan.params["code"],
+                plan.params.get("quantity", 1),
+            )
+        case ActionType.GE_BUY:
+            return await client.ge_buy(
+                character_name,
+                plan.params["id"],
+                plan.params["quantity"],
+            )
+        case ActionType.GE_CREATE_BUY:
+            return await client.ge_create_buy_order(
+                character_name,
+                plan.params["code"],
+                plan.params["quantity"],
+                plan.params["price"],
+            )
+        case ActionType.GE_SELL:
+            return await client.ge_sell_order(
+                character_name,
+                plan.params["code"],
+                plan.params["quantity"],
+                plan.params["price"],
+            )
+        case ActionType.GE_FILL:
+            return await client.ge_fill_buy_order(
+                character_name,
+                plan.params["id"],
+                plan.params["quantity"],
+            )
+        case ActionType.GE_CANCEL:
+            # NOTE(review): other GE actions read params["id"]; cancel reads
+            # params["order_id"] — confirm the key naming is intentional.
+            return await client.ge_cancel(
+                character_name,
+                plan.params["order_id"],
+            )
+        case ActionType.TASK_NEW:
+            return await client.task_new(character_name)
+        case ActionType.TASK_TRADE:
+            return await client.task_trade(
+                character_name,
+                plan.params["code"],
+                plan.params["quantity"],
+            )
+        case ActionType.TASK_COMPLETE:
+            return await client.task_complete(character_name)
+        case ActionType.TASK_EXCHANGE:
+            return await client.task_exchange(character_name)
+        case ActionType.TASK_CANCEL:
+            return await client.task_cancel(character_name)
+        case ActionType.DEPOSIT_GOLD:
+            return await client.deposit_gold(
+                character_name,
+                plan.params["quantity"],
+            )
+        case ActionType.WITHDRAW_GOLD:
+            return await client.withdraw_gold(
+                character_name,
+                plan.params["quantity"],
+            )
+        case ActionType.NPC_BUY:
+            return await client.npc_buy(
+                character_name,
+                plan.params["code"],
+                plan.params["quantity"],
+            )
+        case ActionType.NPC_SELL:
+            return await client.npc_sell(
+                character_name,
+                plan.params["code"],
+                plan.params["quantity"],
+            )
+        case _:
+            # Deliberately non-fatal: unknown plans are logged and skipped.
+            logger.warning("Unhandled action type: %s", plan.action_type)
+            return {}
diff --git a/backend/app/engine/manager.py b/backend/app/engine/manager.py
index 10a7d5a..dcb03e8 100644
--- a/backend/app/engine/manager.py
+++ b/backend/app/engine/manager.py
@@ -1,16 +1,17 @@
from __future__ import annotations
import logging
-from datetime import datetime, timezone
from typing import TYPE_CHECKING
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
-from sqlalchemy.orm import selectinload
from app.engine.cooldown import CooldownTracker
from app.engine.pathfinder import Pathfinder
from app.engine.runner import AutomationRunner
+from app.engine.decision.equipment_optimizer import EquipmentOptimizer
+from app.engine.decision.monster_selector import MonsterSelector
+from app.engine.decision.resource_selector import ResourceSelector
from app.engine.strategies.base import BaseStrategy
from app.engine.strategies.combat import CombatStrategy
from app.engine.strategies.crafting import CraftingStrategy
@@ -18,12 +19,22 @@ from app.engine.strategies.gathering import GatheringStrategy
from app.engine.strategies.leveling import LevelingStrategy
from app.engine.strategies.task import TaskStrategy
from app.engine.strategies.trading import TradingStrategy
-from app.models.automation import AutomationConfig, AutomationLog, AutomationRun
+from app.engine.pipeline.coordinator import PipelineCoordinator
+from app.engine.workflow.runner import WorkflowRunner
+from app.models.automation import AutomationConfig, AutomationRun
+from app.models.pipeline import PipelineConfig, PipelineRun
+from app.models.workflow import WorkflowConfig, WorkflowRun
from app.schemas.automation import (
- AutomationLogResponse,
AutomationRunResponse,
AutomationStatusResponse,
)
+from app.schemas.game import ItemSchema, MonsterSchema, ResourceSchema
+from app.schemas.pipeline import (
+ CharacterStateResponse,
+ PipelineRunResponse,
+ PipelineStatusResponse,
+)
+from app.schemas.workflow import WorkflowRunResponse, WorkflowStatusResponse
from app.services.artifacts_client import ArtifactsClient
if TYPE_CHECKING:
@@ -33,12 +44,13 @@ logger = logging.getLogger(__name__)
class AutomationManager:
- """Central manager that orchestrates all automation runners.
+ """Central manager that orchestrates all automation runners and workflow runners.
One manager exists per application instance and is stored on
``app.state.automation_manager``. It holds references to all active
- runners (keyed by ``config_id``) and provides high-level start / stop /
- pause / resume operations.
+ runners (keyed by ``config_id``) and workflow runners (keyed by
+ ``workflow_id``), and provides high-level start / stop / pause /
+ resume operations.
"""
def __init__(
@@ -53,24 +65,68 @@ class AutomationManager:
self._pathfinder = pathfinder
self._event_bus = event_bus
self._runners: dict[int, AutomationRunner] = {}
+ self._workflow_runners: dict[int, WorkflowRunner] = {}
+ self._pipeline_coordinators: dict[int, PipelineCoordinator] = {}
self._cooldown_tracker = CooldownTracker()
+ # Lazy-loaded game data caches for smart strategies
+ self._monsters_cache: list[MonsterSchema] | None = None
+ self._resources_cache: list[ResourceSchema] | None = None
+ self._items_cache: list[ItemSchema] | None = None
+
# ------------------------------------------------------------------
- # Lifecycle
+ # Game data cache
+ # ------------------------------------------------------------------
+
+    async def _ensure_game_data(self) -> None:
+        """Load game data caches lazily on first use.
+
+        Each cache (monsters, resources, items) is fetched at most once per
+        process; subsequent calls are no-ops while the cache is non-None.
+        """
+        if self._monsters_cache is None:
+            try:
+                raw = await self._client.get_all_monsters()
+                self._monsters_cache = [MonsterSchema(**m) for m in raw]
+            except Exception:
+                # NOTE(review): on failure the cache is set to [], which makes
+                # the failure sticky until restart — confirm a retry on the
+                # next call is not wanted instead.
+                logger.exception("Failed to load monsters cache")
+                self._monsters_cache = []
+
+        if self._resources_cache is None:
+            try:
+                raw = await self._client.get_all_resources()
+                self._resources_cache = [ResourceSchema(**r) for r in raw]
+            except Exception:
+                logger.exception("Failed to load resources cache")
+                self._resources_cache = []
+
+        if self._items_cache is None:
+            try:
+                raw = await self._client.get_all_items()
+                self._items_cache = [ItemSchema(**i) for i in raw]
+            except Exception:
+                logger.exception("Failed to load items cache")
+                self._items_cache = []
+
+ # ------------------------------------------------------------------
+ # Character busy check
+ # ------------------------------------------------------------------
+
+ def is_character_busy(self, character_name: str) -> bool:
+ """Return True if the character is running any automation, workflow, or pipeline."""
+ for runner in self._runners.values():
+ if runner.character_name == character_name and (runner.is_running or runner.is_paused):
+ return True
+ for wf_runner in self._workflow_runners.values():
+ if wf_runner.character_name == character_name and (wf_runner.is_running or wf_runner.is_paused):
+ return True
+ for coord in self._pipeline_coordinators.values():
+ if (coord.is_running or coord.is_paused) and character_name in coord.all_characters:
+ return True
+ return False
+
+ # ------------------------------------------------------------------
+ # Automation Lifecycle
# ------------------------------------------------------------------
async def start(self, config_id: int) -> AutomationRunResponse:
- """Start an automation from its persisted configuration.
-
- Creates a new :class:`AutomationRun` record and spawns an
- :class:`AutomationRunner` task.
-
- Raises
- ------
- ValueError
- If the config does not exist, is disabled, or is already running.
- """
- # Prevent duplicate runners
+ """Start an automation from its persisted configuration."""
if config_id in self._runners:
runner = self._runners[config_id]
if runner.is_running or runner.is_paused:
@@ -80,17 +136,23 @@ class AutomationManager:
)
async with self._db_factory() as db:
- # Load the config
config = await db.get(AutomationConfig, config_id)
if config is None:
raise ValueError(f"Automation config {config_id} not found")
if not config.enabled:
raise ValueError(f"Automation config {config_id} is disabled")
- # Create strategy
+ # Check character busy
+ if self.is_character_busy(config.character_name):
+ raise ValueError(
+ f"Character {config.character_name!r} is already running an automation or workflow"
+ )
+
+ # Ensure game data is loaded for smart strategies
+ await self._ensure_game_data()
+
strategy = self._create_strategy(config.strategy_type, config.config)
- # Create run record
run = AutomationRun(
config_id=config_id,
status="running",
@@ -101,7 +163,6 @@ class AutomationManager:
run_response = AutomationRunResponse.model_validate(run)
- # Build and start the runner
runner = AutomationRunner(
config_id=config_id,
character_name=config.character_name,
@@ -125,55 +186,31 @@ class AutomationManager:
return run_response
async def stop(self, config_id: int) -> None:
- """Stop a running automation.
-
- Raises
- ------
- ValueError
- If no runner exists for the given config.
- """
runner = self._runners.get(config_id)
if runner is None:
raise ValueError(f"No active runner for config {config_id}")
-
await runner.stop()
del self._runners[config_id]
logger.info("Stopped automation config=%d", config_id)
async def pause(self, config_id: int) -> None:
- """Pause a running automation.
-
- Raises
- ------
- ValueError
- If no runner exists for the given config or it is not running.
- """
runner = self._runners.get(config_id)
if runner is None:
raise ValueError(f"No active runner for config {config_id}")
if not runner.is_running:
raise ValueError(f"Runner for config {config_id} is not running (status={runner.status})")
-
await runner.pause()
async def resume(self, config_id: int) -> None:
- """Resume a paused automation.
-
- Raises
- ------
- ValueError
- If no runner exists for the given config or it is not paused.
- """
runner = self._runners.get(config_id)
if runner is None:
raise ValueError(f"No active runner for config {config_id}")
if not runner.is_paused:
raise ValueError(f"Runner for config {config_id} is not paused (status={runner.status})")
-
await runner.resume()
async def stop_all(self) -> None:
- """Stop all running automations (used during shutdown)."""
+ """Stop all running automations, workflows, and pipelines (used during shutdown)."""
config_ids = list(self._runners.keys())
for config_id in config_ids:
try:
@@ -181,12 +218,25 @@ class AutomationManager:
except Exception:
logger.exception("Error stopping automation config=%d", config_id)
+ workflow_ids = list(self._workflow_runners.keys())
+ for wf_id in workflow_ids:
+ try:
+ await self.stop_workflow(wf_id)
+ except Exception:
+ logger.exception("Error stopping workflow=%d", wf_id)
+
+ pipeline_ids = list(self._pipeline_coordinators.keys())
+ for pid in pipeline_ids:
+ try:
+ await self.stop_pipeline(pid)
+ except Exception:
+ logger.exception("Error stopping pipeline=%d", pid)
+
# ------------------------------------------------------------------
- # Status queries
+ # Automation Status queries
# ------------------------------------------------------------------
def get_status(self, config_id: int) -> AutomationStatusResponse | None:
- """Return the live status of a single automation, or ``None``."""
runner = self._runners.get(config_id)
if runner is None:
return None
@@ -200,7 +250,6 @@ class AutomationManager:
)
def get_all_statuses(self) -> list[AutomationStatusResponse]:
- """Return live status for all active automations."""
return [
AutomationStatusResponse(
config_id=r.config_id,
@@ -214,29 +263,339 @@ class AutomationManager:
]
def is_running(self, config_id: int) -> bool:
- """Return True if there is an active runner for the config."""
runner = self._runners.get(config_id)
return runner is not None and (runner.is_running or runner.is_paused)
+ # ------------------------------------------------------------------
+ # Workflow Lifecycle
+ # ------------------------------------------------------------------
+
+    async def start_workflow(self, workflow_id: int) -> WorkflowRunResponse:
+        """Start a workflow from its persisted configuration.
+
+        Creates a WorkflowRun record, then spawns a WorkflowRunner keyed by
+        workflow_id.
+
+        Raises:
+            ValueError: if the config is missing, disabled, has no steps,
+                the workflow is already active, or its character is busy.
+        """
+        # Reject a second start while an existing runner is active.
+        if workflow_id in self._workflow_runners:
+            runner = self._workflow_runners[workflow_id]
+            if runner.is_running or runner.is_paused:
+                raise ValueError(
+                    f"Workflow {workflow_id} is already running "
+                    f"(run_id={runner.run_id}, status={runner.status})"
+                )
+
+        async with self._db_factory() as db:
+            config = await db.get(WorkflowConfig, workflow_id)
+            if config is None:
+                raise ValueError(f"Workflow config {workflow_id} not found")
+            if not config.enabled:
+                raise ValueError(f"Workflow config {workflow_id} is disabled")
+            if not config.steps:
+                raise ValueError(f"Workflow config {workflow_id} has no steps")
+
+            # A character may drive at most one automation/workflow/pipeline.
+            if self.is_character_busy(config.character_name):
+                raise ValueError(
+                    f"Character {config.character_name!r} is already running an automation or workflow"
+                )
+
+            # Ensure game data for smart strategies
+            await self._ensure_game_data()
+
+            # Persist the run before spawning the runner so it has a run_id.
+            run = WorkflowRun(
+                workflow_id=workflow_id,
+                status="running",
+                current_step_index=0,
+                # steps is non-empty here; the trailing "" is defensive only.
+                current_step_id=config.steps[0].get("id", "") if config.steps else "",
+            )
+            db.add(run)
+            await db.commit()
+            await db.refresh(run)
+
+            # Snapshot the response while the row is still session-bound.
+            run_response = WorkflowRunResponse.model_validate(run)
+
+            runner = WorkflowRunner(
+                workflow_id=workflow_id,
+                character_name=config.character_name,
+                steps=config.steps,
+                loop=config.loop,
+                max_loops=config.max_loops,
+                strategy_factory=self._create_strategy,
+                client=self._client,
+                cooldown_tracker=self._cooldown_tracker,
+                db_factory=self._db_factory,
+                run_id=run.id,
+                event_bus=self._event_bus,
+            )
+            self._workflow_runners[workflow_id] = runner
+            await runner.start()
+
+            logger.info(
+                "Started workflow=%d character=%s steps=%d run=%d",
+                workflow_id,
+                config.character_name,
+                len(config.steps),
+                run.id,
+            )
+            return run_response
+
+ async def stop_workflow(self, workflow_id: int) -> None:
+ runner = self._workflow_runners.get(workflow_id)
+ if runner is None:
+ raise ValueError(f"No active runner for workflow {workflow_id}")
+ await runner.stop()
+ del self._workflow_runners[workflow_id]
+ logger.info("Stopped workflow=%d", workflow_id)
+
+    async def pause_workflow(self, workflow_id: int) -> None:
+        """Pause a running workflow runner.
+
+        Raises:
+            ValueError: if no active runner exists or it is not running.
+        """
+        runner = self._workflow_runners.get(workflow_id)
+        if runner is None:
+            raise ValueError(f"No active runner for workflow {workflow_id}")
+        if not runner.is_running:
+            raise ValueError(f"Workflow runner {workflow_id} is not running (status={runner.status})")
+        await runner.pause()
+
+    async def resume_workflow(self, workflow_id: int) -> None:
+        """Resume a paused workflow runner.
+
+        Raises:
+            ValueError: if no active runner exists or it is not paused.
+        """
+        runner = self._workflow_runners.get(workflow_id)
+        if runner is None:
+            raise ValueError(f"No active runner for workflow {workflow_id}")
+        if not runner.is_paused:
+            raise ValueError(f"Workflow runner {workflow_id} is not paused (status={runner.status})")
+        await runner.resume()
+
+ # ------------------------------------------------------------------
+ # Workflow Status queries
+ # ------------------------------------------------------------------
+
+    def get_workflow_status(self, workflow_id: int) -> WorkflowStatusResponse | None:
+        """Return live status for one workflow, or None if no runner exists."""
+        runner = self._workflow_runners.get(workflow_id)
+        if runner is None:
+            return None
+        return WorkflowStatusResponse(
+            workflow_id=runner.workflow_id,
+            character_name=runner.character_name,
+            status=runner.status,
+            run_id=runner.run_id,
+            current_step_index=runner.current_step_index,
+            current_step_id=runner.current_step_id,
+            # NOTE(review): reaches into the runner's private _steps — consider
+            # exposing a public step-count property on WorkflowRunner instead.
+            total_steps=len(runner._steps),
+            loop_count=runner.loop_count,
+            total_actions_count=runner.total_actions_count,
+            step_actions_count=runner.step_actions_count,
+            strategy_state=runner.strategy_state,
+        )
+
+ def get_all_workflow_statuses(self) -> list[WorkflowStatusResponse]:
+ return [
+ WorkflowStatusResponse(
+ workflow_id=r.workflow_id,
+ character_name=r.character_name,
+ status=r.status,
+ run_id=r.run_id,
+ current_step_index=r.current_step_index,
+ current_step_id=r.current_step_id,
+ total_steps=len(r._steps),
+ loop_count=r.loop_count,
+ total_actions_count=r.total_actions_count,
+ step_actions_count=r.step_actions_count,
+ strategy_state=r.strategy_state,
+ )
+ for r in self._workflow_runners.values()
+ ]
+
+ def is_workflow_running(self, workflow_id: int) -> bool:
+ runner = self._workflow_runners.get(workflow_id)
+ return runner is not None and (runner.is_running or runner.is_paused)
+
+ # ------------------------------------------------------------------
+ # Pipeline Lifecycle
+ # ------------------------------------------------------------------
+
+    async def start_pipeline(self, pipeline_id: int) -> PipelineRunResponse:
+        """Start a pipeline from its persisted configuration.
+
+        Creates a PipelineRun record, then spawns a PipelineCoordinator keyed
+        by pipeline_id.
+
+        Raises:
+            ValueError: if the config is missing, disabled, has no stages,
+                the pipeline is already active, or any of its characters is
+                busy elsewhere.
+        """
+        # Reject a second start while an existing coordinator is active.
+        if pipeline_id in self._pipeline_coordinators:
+            coord = self._pipeline_coordinators[pipeline_id]
+            if coord.is_running or coord.is_paused:
+                raise ValueError(
+                    f"Pipeline {pipeline_id} is already running "
+                    f"(run_id={coord.run_id}, status={coord.status})"
+                )
+
+        async with self._db_factory() as db:
+            config = await db.get(PipelineConfig, pipeline_id)
+            if config is None:
+                raise ValueError(f"Pipeline config {pipeline_id} not found")
+            if not config.enabled:
+                raise ValueError(f"Pipeline config {pipeline_id} is disabled")
+            if not config.stages:
+                raise ValueError(f"Pipeline config {pipeline_id} has no stages")
+
+            # Collect every character referenced by any stage and verify
+            # none of them is busy in another automation/workflow/pipeline.
+            all_chars: set[str] = set()
+            for stage in config.stages:
+                for cs in stage.get("character_steps", []):
+                    all_chars.add(cs["character_name"])
+
+            busy = [c for c in all_chars if self.is_character_busy(c)]
+            if busy:
+                raise ValueError(
+                    f"Characters already busy: {', '.join(sorted(busy))}"
+                )
+
+            # Ensure game data for strategies
+            await self._ensure_game_data()
+
+            # Persist the run before spawning the coordinator (needs run_id).
+            run = PipelineRun(
+                pipeline_id=pipeline_id,
+                status="running",
+                current_stage_index=0,
+                # stages is non-empty here; the trailing "" is defensive only.
+                current_stage_id=config.stages[0].get("id", "") if config.stages else "",
+            )
+            db.add(run)
+            await db.commit()
+            await db.refresh(run)
+
+            # Snapshot the response while the row is still session-bound.
+            run_response = PipelineRunResponse.model_validate(run)
+
+            coord = PipelineCoordinator(
+                pipeline_id=pipeline_id,
+                stages=config.stages,
+                loop=config.loop,
+                max_loops=config.max_loops,
+                strategy_factory=self._create_strategy,
+                client=self._client,
+                cooldown_tracker=self._cooldown_tracker,
+                db_factory=self._db_factory,
+                run_id=run.id,
+                event_bus=self._event_bus,
+            )
+            self._pipeline_coordinators[pipeline_id] = coord
+            await coord.start()
+
+            logger.info(
+                "Started pipeline=%d stages=%d characters=%s run=%d",
+                pipeline_id,
+                len(config.stages),
+                sorted(all_chars),
+                run.id,
+            )
+            return run_response
+
+    async def stop_pipeline(self, pipeline_id: int) -> None:
+        """Stop an active pipeline coordinator and forget it.
+
+        Raises:
+            ValueError: if no active coordinator exists for the pipeline.
+        """
+        coord = self._pipeline_coordinators.get(pipeline_id)
+        if coord is None:
+            raise ValueError(f"No active coordinator for pipeline {pipeline_id}")
+        await coord.stop()
+        del self._pipeline_coordinators[pipeline_id]
+        logger.info("Stopped pipeline=%d", pipeline_id)
+
+    async def pause_pipeline(self, pipeline_id: int) -> None:
+        """Pause a running pipeline coordinator.
+
+        Raises:
+            ValueError: if no active coordinator exists or it is not running.
+        """
+        coord = self._pipeline_coordinators.get(pipeline_id)
+        if coord is None:
+            raise ValueError(f"No active coordinator for pipeline {pipeline_id}")
+        if not coord.is_running:
+            raise ValueError(f"Pipeline {pipeline_id} is not running (status={coord.status})")
+        await coord.pause()
+
+    async def resume_pipeline(self, pipeline_id: int) -> None:
+        """Resume a paused pipeline coordinator.
+
+        Raises:
+            ValueError: if no active coordinator exists or it is not paused.
+        """
+        coord = self._pipeline_coordinators.get(pipeline_id)
+        if coord is None:
+            raise ValueError(f"No active coordinator for pipeline {pipeline_id}")
+        if not coord.is_paused:
+            raise ValueError(f"Pipeline {pipeline_id} is not paused (status={coord.status})")
+        await coord.resume()
+
+ # ------------------------------------------------------------------
+ # Pipeline Status queries
+ # ------------------------------------------------------------------
+
+    def get_pipeline_status(self, pipeline_id: int) -> PipelineStatusResponse | None:
+        """Return live status for one pipeline, or None if no coordinator exists."""
+        coord = self._pipeline_coordinators.get(pipeline_id)
+        if coord is None:
+            return None
+        return PipelineStatusResponse(
+            pipeline_id=coord.pipeline_id,
+            status=coord.status,
+            run_id=coord.run_id,
+            current_stage_index=coord.current_stage_index,
+            current_stage_id=coord.current_stage_id,
+            # NOTE(review): reaches into the coordinator's private _stages —
+            # consider a public stage-count property on PipelineCoordinator.
+            total_stages=len(coord._stages),
+            loop_count=coord.loop_count,
+            total_actions_count=coord.total_actions_count,
+            # One entry per character; missing keys fall back to idle defaults.
+            character_states=[
+                CharacterStateResponse(
+                    character_name=name,
+                    status=state.get("status", "idle"),
+                    step_id=state.get("step_id", ""),
+                    actions_count=state.get("actions_count", 0),
+                    strategy_state=state.get("strategy_state", ""),
+                    error=state.get("error"),
+                )
+                for name, state in coord.character_states.items()
+            ],
+        )
+
+ def get_all_pipeline_statuses(self) -> list[PipelineStatusResponse]:
+ return [
+ self.get_pipeline_status(pid)
+ for pid in self._pipeline_coordinators
+ if self.get_pipeline_status(pid) is not None
+ ]
+
+ def is_pipeline_running(self, pipeline_id: int) -> bool:
+ coord = self._pipeline_coordinators.get(pipeline_id)
+ return coord is not None and (coord.is_running or coord.is_paused)
+
# ------------------------------------------------------------------
# Strategy factory
# ------------------------------------------------------------------
def _create_strategy(self, strategy_type: str, config: dict) -> BaseStrategy:
- """Instantiate a strategy by type name."""
+ """Instantiate a strategy by type name, injecting game data and decision modules."""
+ monster_selector = MonsterSelector()
+ resource_selector = ResourceSelector()
+ equipment_optimizer = EquipmentOptimizer()
+
match strategy_type:
case "combat":
- return CombatStrategy(config, self._pathfinder)
+ return CombatStrategy(
+ config,
+ self._pathfinder,
+ monster_selector=monster_selector,
+ monsters_data=self._monsters_cache,
+ equipment_optimizer=equipment_optimizer,
+ available_items=self._items_cache,
+ )
case "gathering":
- return GatheringStrategy(config, self._pathfinder)
+ return GatheringStrategy(
+ config,
+ self._pathfinder,
+ resource_selector=resource_selector,
+ resources_data=self._resources_cache,
+ )
case "crafting":
- return CraftingStrategy(config, self._pathfinder)
+ return CraftingStrategy(
+ config,
+ self._pathfinder,
+ items_data=self._items_cache,
+ resources_data=self._resources_cache,
+ )
case "trading":
- return TradingStrategy(config, self._pathfinder)
+ return TradingStrategy(
+ config,
+ self._pathfinder,
+ client=self._client,
+ )
case "task":
return TaskStrategy(config, self._pathfinder)
case "leveling":
- return LevelingStrategy(config, self._pathfinder)
+ return LevelingStrategy(
+ config,
+ self._pathfinder,
+ resources_data=self._resources_cache,
+ monsters_data=self._monsters_cache,
+ resource_selector=resource_selector,
+ monster_selector=monster_selector,
+ equipment_optimizer=equipment_optimizer,
+ available_items=self._items_cache,
+ )
case _:
raise ValueError(
f"Unknown strategy type: {strategy_type!r}. "
diff --git a/backend/app/engine/pipeline/__init__.py b/backend/app/engine/pipeline/__init__.py
new file mode 100644
index 0000000..6c46bbb
--- /dev/null
+++ b/backend/app/engine/pipeline/__init__.py
@@ -0,0 +1,4 @@
+from app.engine.pipeline.coordinator import PipelineCoordinator
+from app.engine.pipeline.worker import CharacterWorker
+
+__all__ = ["PipelineCoordinator", "CharacterWorker"]
diff --git a/backend/app/engine/pipeline/coordinator.py b/backend/app/engine/pipeline/coordinator.py
new file mode 100644
index 0000000..f145c0c
--- /dev/null
+++ b/backend/app/engine/pipeline/coordinator.py
@@ -0,0 +1,444 @@
+"""PipelineCoordinator — orchestrates stages sequentially with parallel character workers."""
+
+from __future__ import annotations
+
+import asyncio
+import logging
+from datetime import datetime, timezone
+from typing import TYPE_CHECKING, Any
+
+from sqlalchemy import select
+from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
+
+from app.engine.cooldown import CooldownTracker
+from app.engine.pipeline.worker import CharacterWorker
+from app.engine.strategies.base import BaseStrategy
+from app.models.pipeline import PipelineRun
+from app.services.artifacts_client import ArtifactsClient
+
+if TYPE_CHECKING:
+ from app.websocket.event_bus import EventBus
+
+logger = logging.getLogger(__name__)
+
+
+class PipelineCoordinator:
+ """Orchestrates a multi-character pipeline.
+
+ Iterates stages sequentially. For each stage, spawns a
+ ``CharacterWorker`` per character-step and waits for all of them to
+ complete their transition (or error). Then advances to the next stage.
+ """
+
+ def __init__(
+ self,
+ pipeline_id: int,
+ stages: list[dict],
+ loop: bool,
+ max_loops: int,
+ strategy_factory: Any, # callable(strategy_type, config) -> BaseStrategy
+ client: ArtifactsClient,
+ cooldown_tracker: CooldownTracker,
+ db_factory: async_sessionmaker[AsyncSession],
+ run_id: int,
+ event_bus: EventBus | None = None,
+ ) -> None:
+ self._pipeline_id = pipeline_id
+ self._stages = stages
+ self._loop = loop
+ self._max_loops = max_loops
+ self._strategy_factory = strategy_factory
+ self._client = client
+ self._cooldown = cooldown_tracker
+ self._db_factory = db_factory
+ self._run_id = run_id
+ self._event_bus = event_bus
+
+ self._running = False
+ self._paused = False
+ self._task: asyncio.Task[None] | None = None
+
+ # Runtime state
+ self._current_stage_index: int = 0
+ self._loop_count: int = 0
+ self._total_actions: int = 0
+ self._stage_history: list[dict] = []
+ self._workers: list[CharacterWorker] = []
+
+ # Track ALL characters across ALL stages for busy-checking
+ self._all_characters: set[str] = set()
+ for stage in stages:
+ for cs in stage.get("character_steps", []):
+ self._all_characters.add(cs["character_name"])
+
+ # ------------------------------------------------------------------
+ # Public properties
+ # ------------------------------------------------------------------
+
+ @property
+ def pipeline_id(self) -> int:
+ return self._pipeline_id
+
+ @property
+ def run_id(self) -> int:
+ return self._run_id
+
+ @property
+ def all_characters(self) -> set[str]:
+ return self._all_characters
+
+ @property
+ def active_characters(self) -> set[str]:
+ """Characters currently executing in the active stage."""
+ return {w.character_name for w in self._workers if w.is_running}
+
+ @property
+ def current_stage_index(self) -> int:
+ return self._current_stage_index
+
+ @property
+ def current_stage_id(self) -> str:
+ if 0 <= self._current_stage_index < len(self._stages):
+ return self._stages[self._current_stage_index].get("id", "")
+ return ""
+
+ @property
+ def loop_count(self) -> int:
+ return self._loop_count
+
+ @property
+ def total_actions_count(self) -> int:
+ return self._total_actions + sum(w.actions_count for w in self._workers)
+
+ @property
+ def is_running(self) -> bool:
+ return self._running and not self._paused
+
+ @property
+ def is_paused(self) -> bool:
+ return self._running and self._paused
+
+ @property
+ def status(self) -> str:
+ if not self._running:
+ return "stopped"
+ if self._paused:
+ return "paused"
+ return "running"
+
+ @property
+ def character_states(self) -> dict[str, dict]:
+ """Current state of each worker for status reporting."""
+ result: dict[str, dict] = {}
+ for w in self._workers:
+ result[w.character_name] = {
+ "status": w.status,
+ "step_id": w.step_id,
+ "actions_count": w.actions_count,
+ "strategy_state": w.strategy_state,
+ "error": w.error_message,
+ }
+ return result
+
+ # ------------------------------------------------------------------
+ # Event bus helpers
+ # ------------------------------------------------------------------
+
+ async def _publish(self, event_type: str, data: dict) -> None:
+ if self._event_bus is not None:
+ try:
+ await self._event_bus.publish(event_type, data)
+ except Exception:
+ logger.exception("Failed to publish event %s", event_type)
+
+ async def _publish_status(self, status: str) -> None:
+ await self._publish(
+ "pipeline_status_changed",
+ {
+ "pipeline_id": self._pipeline_id,
+ "status": status,
+ "run_id": self._run_id,
+ "current_stage_index": self._current_stage_index,
+ "loop_count": self._loop_count,
+ "character_states": self.character_states,
+ },
+ )
+
+ # ------------------------------------------------------------------
+ # Lifecycle
+ # ------------------------------------------------------------------
+
+ async def start(self) -> None:
+ if self._running:
+ return
+ self._running = True
+ self._paused = False
+ self._task = asyncio.create_task(
+ self._run_loop(),
+ name=f"pipeline-coord-{self._pipeline_id}",
+ )
+ logger.info(
+ "Started pipeline coordinator pipeline=%d run=%d stages=%d characters=%s",
+ self._pipeline_id,
+ self._run_id,
+ len(self._stages),
+ sorted(self._all_characters),
+ )
+ await self._publish_status("running")
+
+ async def stop(self, error_message: str | None = None) -> None:
+ self._running = False
+ # Stop all active workers
+ for w in self._workers:
+ await w.stop()
+ if self._task is not None and not self._task.done():
+ self._task.cancel()
+ try:
+ await self._task
+ except asyncio.CancelledError:
+ pass
+ self._task = None
+
+ final_status = "error" if error_message else "stopped"
+ await self._finalize_run(status=final_status, error_message=error_message)
+ logger.info("Stopped pipeline %d (actions=%d)", self._pipeline_id, self.total_actions_count)
+ await self._publish_status(final_status)
+
+ async def pause(self) -> None:
+ self._paused = True
+ for w in self._workers:
+ await w.stop()
+ await self._update_run_status("paused")
+ await self._publish_status("paused")
+
+ async def resume(self) -> None:
+ self._paused = False
+ # Workers will be re-created by the main loop on next iteration
+ await self._update_run_status("running")
+ await self._publish_status("running")
+
+ # ------------------------------------------------------------------
+ # Main loop
+ # ------------------------------------------------------------------
+
+ async def _run_loop(self) -> None:
+ try:
+ while self._running:
+ if self._paused:
+ await asyncio.sleep(1)
+ continue
+
+ try:
+ completed = await self._run_stage(self._current_stage_index)
+ except asyncio.CancelledError:
+ raise
+ except Exception as exc:
+ logger.exception(
+ "Error running stage %d of pipeline %d: %s",
+ self._current_stage_index,
+ self._pipeline_id,
+ exc,
+ )
+ await self._finalize_run(
+ status="error",
+ error_message=f"Stage {self._current_stage_index} error: {exc}",
+ )
+ self._running = False
+ await self._publish_status("error")
+ return
+
+ if not self._running:
+ return
+
+ if not completed:
+ # Stage had errors
+ await self._finalize_run(
+ status="error",
+ error_message="Stage workers encountered errors",
+ )
+ self._running = False
+ await self._publish_status("error")
+ return
+
+ # Stage completed — record and advance
+ stage = self._stages[self._current_stage_index]
+ self._total_actions += sum(w.actions_count for w in self._workers)
+ self._stage_history.append({
+ "stage_id": stage.get("id", ""),
+ "stage_name": stage.get("name", ""),
+ "character_actions": {
+ w.character_name: w.actions_count for w in self._workers
+ },
+ "completed_at": datetime.now(timezone.utc).isoformat(),
+ })
+ self._workers = []
+
+ logger.info(
+ "Pipeline %d stage %d/%d completed (%s)",
+ self._pipeline_id,
+ self._current_stage_index + 1,
+ len(self._stages),
+ stage.get("name", ""),
+ )
+
+ next_index = self._current_stage_index + 1
+ if next_index >= len(self._stages):
+ # End of pipeline
+ if self._loop:
+ self._loop_count += 1
+ if self._max_loops > 0 and self._loop_count >= self._max_loops:
+ await self._finalize_run(status="completed")
+ self._running = False
+ await self._publish_status("completed")
+ return
+ # Loop back to stage 0
+ logger.info("Pipeline %d looping (loop %d)", self._pipeline_id, self._loop_count)
+ self._current_stage_index = 0
+ else:
+ await self._finalize_run(status="completed")
+ self._running = False
+ await self._publish_status("completed")
+ return
+ else:
+ self._current_stage_index = next_index
+
+ await self._update_run_progress()
+ await self._publish_status("running")
+
+ except asyncio.CancelledError:
+ logger.info("Pipeline coordinator %d cancelled", self._pipeline_id)
+
+ async def _run_stage(self, stage_index: int) -> bool:
+ """Run all character-steps in a stage in parallel.
+
+ Returns True if all workers completed successfully, False if any errored.
+ """
+ if stage_index < 0 or stage_index >= len(self._stages):
+ return False
+
+ stage = self._stages[stage_index]
+ character_steps = stage.get("character_steps", [])
+
+ logger.info(
+ "Pipeline %d starting stage %d/%d: %s (%d workers)",
+ self._pipeline_id,
+ stage_index + 1,
+ len(self._stages),
+ stage.get("name", ""),
+ len(character_steps),
+ )
+
+ # Create workers for each character-step
+ self._workers = []
+ for cs in character_steps:
+ try:
+ strategy = self._strategy_factory(
+ cs["strategy_type"],
+ cs.get("config", {}),
+ )
+ except Exception:
+ logger.exception(
+ "Failed to create strategy for pipeline %d character %s",
+ self._pipeline_id,
+ cs.get("character_name", "?"),
+ )
+ return False
+
+ worker = CharacterWorker(
+ pipeline_id=self._pipeline_id,
+ stage_id=stage.get("id", ""),
+ step=cs,
+ strategy=strategy,
+ client=self._client,
+ cooldown_tracker=self._cooldown,
+ event_bus=self._event_bus,
+ )
+ self._workers.append(worker)
+
+ # Start all workers in parallel
+ for w in self._workers:
+ await w.start()
+
+ # Wait for all workers to complete or error
+ while self._running and not self._paused:
+ all_done = all(w.is_completed or w.is_errored for w in self._workers)
+ if all_done:
+ break
+ await asyncio.sleep(0.5)
+
+ if self._paused or not self._running:
+ return False
+
+ # Check if any worker errored
+ errored = [w for w in self._workers if w.is_errored]
+ if errored:
+ error_msgs = "; ".join(
+ f"{w.character_name}: {w.error_message}" for w in errored
+ )
+ logger.error(
+ "Pipeline %d stage %d had worker errors: %s",
+ self._pipeline_id,
+ stage_index,
+ error_msgs,
+ )
+ return False
+
+ return True
+
+ # ------------------------------------------------------------------
+ # Database helpers
+ # ------------------------------------------------------------------
+
+ async def _update_run_status(self, status: str) -> None:
+ try:
+ async with self._db_factory() as db:
+ stmt = select(PipelineRun).where(PipelineRun.id == self._run_id)
+ result = await db.execute(stmt)
+ run = result.scalar_one_or_none()
+ if run is not None:
+ run.status = status
+ await db.commit()
+ except Exception:
+ logger.exception("Failed to update pipeline run %d status", self._run_id)
+
+ async def _update_run_progress(self) -> None:
+ try:
+ async with self._db_factory() as db:
+ stmt = select(PipelineRun).where(PipelineRun.id == self._run_id)
+ result = await db.execute(stmt)
+ run = result.scalar_one_or_none()
+ if run is not None:
+ run.current_stage_index = self._current_stage_index
+ run.current_stage_id = self.current_stage_id
+ run.loop_count = self._loop_count
+ run.total_actions_count = self.total_actions_count
+ run.character_states = self.character_states
+ run.stage_history = self._stage_history
+ await db.commit()
+ except Exception:
+ logger.exception("Failed to update pipeline run %d progress", self._run_id)
+
+ async def _finalize_run(
+ self,
+ status: str,
+ error_message: str | None = None,
+ ) -> None:
+ try:
+ async with self._db_factory() as db:
+ stmt = select(PipelineRun).where(PipelineRun.id == self._run_id)
+ result = await db.execute(stmt)
+ run = result.scalar_one_or_none()
+ if run is not None:
+ run.status = status
+ run.stopped_at = datetime.now(timezone.utc)
+ run.current_stage_index = self._current_stage_index
+ run.current_stage_id = self.current_stage_id
+ run.loop_count = self._loop_count
+ run.total_actions_count = self.total_actions_count
+ run.character_states = self.character_states
+ run.stage_history = self._stage_history
+ if error_message:
+ run.error_message = error_message
+ await db.commit()
+ except Exception:
+ logger.exception("Failed to finalize pipeline run %d", self._run_id)
diff --git a/backend/app/engine/pipeline/worker.py b/backend/app/engine/pipeline/worker.py
new file mode 100644
index 0000000..d247cd4
--- /dev/null
+++ b/backend/app/engine/pipeline/worker.py
@@ -0,0 +1,241 @@
+"""CharacterWorker — runs one character's strategy within a pipeline stage.
+
+Same tick loop pattern as WorkflowRunner._tick(): wait cooldown -> get
+character -> get action -> check transition -> execute action. Reuses the
+shared ``execute_action`` helper and the existing ``TransitionEvaluator``.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import logging
+import time
+from typing import TYPE_CHECKING, Any
+
+from app.engine.action_executor import execute_action
+from app.engine.cooldown import CooldownTracker
+from app.engine.strategies.base import ActionPlan, ActionType, BaseStrategy
+from app.engine.workflow.conditions import TransitionEvaluator
+from app.services.artifacts_client import ArtifactsClient
+
+if TYPE_CHECKING:
+ from app.websocket.event_bus import EventBus
+
+logger = logging.getLogger(__name__)
+
+_ERROR_RETRY_DELAY: float = 2.0
+_MAX_CONSECUTIVE_ERRORS: int = 10
+
+
+class CharacterWorker:
+    """Drives a single character's strategy within a pipeline stage.
+
+    Tick loop (see ``_tick``): wait out cooldown -> fetch character ->
+    ask the strategy for an action -> evaluate the step's transition
+    condition -> execute the action.  The worker marks itself completed
+    when the transition fires (or the strategy reports COMPLETE with no
+    transition configured), and errored after too many consecutive
+    tick failures.
+    """
+
+    def __init__(
+        self,
+        pipeline_id: int,
+        stage_id: str,
+        step: dict,
+        strategy: BaseStrategy,
+        client: ArtifactsClient,
+        cooldown_tracker: CooldownTracker,
+        event_bus: EventBus | None = None,
+    ) -> None:
+        # `step` must contain at least "character_name" and "id"; an
+        # optional "transition" dict is evaluated each tick.
+        self._pipeline_id = pipeline_id
+        self._stage_id = stage_id
+        self._step = step
+        self._character_name: str = step["character_name"]
+        self._step_id: str = step["id"]
+        self._strategy = strategy
+        self._client = client
+        self._cooldown = cooldown_tracker
+        self._event_bus = event_bus
+
+        self._transition_evaluator = TransitionEvaluator(client)
+        self._running = False
+        self._completed = False
+        self._errored = False
+        self._error_message: str | None = None
+        self._task: asyncio.Task[None] | None = None
+        self._actions_count: int = 0
+        # Wall-clock start of the step; fed to the transition evaluator
+        # (presumably for duration-based transitions — confirm in
+        # TransitionEvaluator).
+        self._step_start_time: float = 0.0
+        self._consecutive_errors: int = 0
+
+    # ------------------------------------------------------------------
+    # Public properties
+    # ------------------------------------------------------------------
+
+    @property
+    def character_name(self) -> str:
+        return self._character_name
+
+    @property
+    def step_id(self) -> str:
+        return self._step_id
+
+    @property
+    def is_completed(self) -> bool:
+        return self._completed
+
+    @property
+    def is_errored(self) -> bool:
+        return self._errored
+
+    @property
+    def is_running(self) -> bool:
+        # Running means started and not yet terminated in either direction.
+        return self._running and not self._completed and not self._errored
+
+    @property
+    def actions_count(self) -> int:
+        return self._actions_count
+
+    @property
+    def strategy_state(self) -> str:
+        return self._strategy.get_state() if self._strategy else ""
+
+    @property
+    def error_message(self) -> str | None:
+        return self._error_message
+
+    @property
+    def status(self) -> str:
+        # Precedence: error > completed > running > idle.
+        if self._errored:
+            return "error"
+        if self._completed:
+            return "completed"
+        if self._running:
+            return "running"
+        return "idle"
+
+    # ------------------------------------------------------------------
+    # Lifecycle
+    # ------------------------------------------------------------------
+
+    async def start(self) -> None:
+        """Spawn the worker's background tick loop (no-op if already running)."""
+        if self._running:
+            return
+        self._running = True
+        self._step_start_time = time.time()
+        self._transition_evaluator.reset()
+        self._task = asyncio.create_task(
+            self._run_loop(),
+            name=f"pipeline-{self._pipeline_id}-{self._character_name}",
+        )
+
+    async def stop(self) -> None:
+        """Cancel the tick loop and wait for it to unwind."""
+        self._running = False
+        if self._task is not None and not self._task.done():
+            self._task.cancel()
+            try:
+                await self._task
+            except asyncio.CancelledError:
+                pass
+        self._task = None
+
+    # ------------------------------------------------------------------
+    # Main loop
+    # ------------------------------------------------------------------
+
+    async def _run_loop(self) -> None:
+        """Tick until completed/stopped; give up after repeated failures.
+
+        A successful tick resets the consecutive-error counter, so only an
+        unbroken run of _MAX_CONSECUTIVE_ERRORS failures marks the worker
+        errored.
+        """
+        try:
+            while self._running and not self._completed:
+                try:
+                    await self._tick()
+                    self._consecutive_errors = 0
+                except asyncio.CancelledError:
+                    raise
+                except Exception as exc:
+                    self._consecutive_errors += 1
+                    logger.exception(
+                        "Error in pipeline worker %s/%s (error %d/%d): %s",
+                        self._pipeline_id,
+                        self._character_name,
+                        self._consecutive_errors,
+                        _MAX_CONSECUTIVE_ERRORS,
+                        exc,
+                    )
+                    if self._consecutive_errors >= _MAX_CONSECUTIVE_ERRORS:
+                        self._errored = True
+                        self._error_message = (
+                            f"Stopped after {_MAX_CONSECUTIVE_ERRORS} "
+                            f"consecutive errors. Last: {exc}"
+                        )
+                        self._running = False
+                        return
+                    await asyncio.sleep(_ERROR_RETRY_DELAY)
+        except asyncio.CancelledError:
+            logger.info(
+                "Pipeline worker %s/%s cancelled",
+                self._pipeline_id,
+                self._character_name,
+            )
+
+    async def _tick(self) -> None:
+        """One iteration of the worker loop; see the numbered steps below.
+
+        Note the ordering: the transition is checked BEFORE the planned
+        action executes, so a satisfied transition ends the step without
+        performing that tick's action.
+        """
+        if self._strategy is None:
+            self._errored = True
+            self._error_message = "No strategy configured"
+            self._running = False
+            return
+
+        # 1. Wait for cooldown
+        await self._cooldown.wait(self._character_name)
+
+        # 2. Fetch character state
+        character = await self._client.get_character(self._character_name)
+
+        # 3. Ask strategy for next action
+        plan = await self._strategy.next_action(character)
+        strategy_completed = plan.action_type == ActionType.COMPLETE
+
+        # 4. Check transition condition
+        transition = self._step.get("transition")
+        if transition is not None:
+            should_advance = await self._transition_evaluator.should_transition(
+                transition,
+                character,
+                actions_count=self._actions_count,
+                step_start_time=self._step_start_time,
+                strategy_completed=strategy_completed,
+            )
+            if should_advance:
+                self._completed = True
+                self._running = False
+                return
+
+        # 5. If strategy completed and no transition, treat as done
+        if strategy_completed:
+            if transition is None:
+                self._completed = True
+                self._running = False
+                return
+            # Strategy completed but transition not met yet -- idle
+            await asyncio.sleep(1)
+            return
+
+        if plan.action_type == ActionType.IDLE:
+            await asyncio.sleep(1)
+            return
+
+        # 6. Execute the action
+        result = await execute_action(self._client, self._character_name, plan)
+
+        # 7. Update cooldown
+        cooldown = result.get("cooldown")
+        if cooldown:
+            self._cooldown.update(
+                self._character_name,
+                cooldown.get("total_seconds", 0),
+                cooldown.get("expiration"),
+            )
+
+        # 8. Record
+        self._actions_count += 1
+
+        # 9. Publish character update (best-effort; failures are ignored)
+        if self._event_bus is not None:
+            try:
+                await self._event_bus.publish(
+                    "character_update",
+                    {"character_name": self._character_name},
+                )
+            except Exception:
+                pass
diff --git a/backend/app/engine/runner.py b/backend/app/engine/runner.py
index 4221210..96a53d7 100644
--- a/backend/app/engine/runner.py
+++ b/backend/app/engine/runner.py
@@ -5,13 +5,16 @@ import logging
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Any
+import httpx
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
+from app.engine.action_executor import execute_action
from app.engine.cooldown import CooldownTracker
from app.engine.strategies.base import ActionPlan, ActionType, BaseStrategy
from app.models.automation import AutomationLog, AutomationRun
from app.services.artifacts_client import ArtifactsClient
+from app.services.error_service import hash_token, log_error
if TYPE_CHECKING:
from app.websocket.event_bus import EventBus
@@ -235,6 +238,62 @@ class AutomationRunner:
self._consecutive_errors = 0
except asyncio.CancelledError:
raise
+ except httpx.HTTPStatusError as exc:
+ status = exc.response.status_code
+ # 498 = character in cooldown – not a real error,
+ # just wait and retry without incrementing the counter.
+ if status == 498:
+ logger.info(
+ "Cooldown error for config %d, will retry",
+ self._config_id,
+ )
+ await asyncio.sleep(_ERROR_RETRY_DELAY)
+ continue
+ # Other HTTP errors – treat as real failures
+ self._consecutive_errors += 1
+ logger.exception(
+ "HTTP %d in automation loop for config %d (error %d/%d): %s",
+ status,
+ self._config_id,
+ self._consecutive_errors,
+ _MAX_CONSECUTIVE_ERRORS,
+ exc,
+ )
+ await log_error(
+ self._db_factory,
+ severity="error",
+ source="automation",
+ exc=exc,
+ context={
+ "config_id": self._config_id,
+ "character": self._character_name,
+ "run_id": self._run_id,
+ "consecutive_errors": self._consecutive_errors,
+ "http_status": status,
+ },
+ )
+ await self._log_action(
+ ActionPlan(ActionType.IDLE, reason=str(exc)),
+ success=False,
+ )
+ await self._publish_action(
+ "error",
+ success=False,
+ details={"error": str(exc)},
+ )
+ if self._consecutive_errors >= _MAX_CONSECUTIVE_ERRORS:
+ logger.error(
+ "Too many consecutive errors for config %d, stopping",
+ self._config_id,
+ )
+ await self._finalize_run(
+ status="error",
+ error_message=f"Stopped after {_MAX_CONSECUTIVE_ERRORS} consecutive errors. Last: {exc}",
+ )
+ self._running = False
+ await self._publish_status("error")
+ return
+ await asyncio.sleep(_ERROR_RETRY_DELAY)
except Exception as exc:
self._consecutive_errors += 1
logger.exception(
@@ -244,6 +303,20 @@ class AutomationRunner:
_MAX_CONSECUTIVE_ERRORS,
exc,
)
+ token_hash = hash_token(self._client._token) if self._client._token else None
+ await log_error(
+ self._db_factory,
+ severity="error",
+ source="automation",
+ exc=exc,
+ context={
+ "config_id": self._config_id,
+ "character": self._character_name,
+ "run_id": self._run_id,
+ "consecutive_errors": self._consecutive_errors,
+ },
+ user_token_hash=token_hash,
+ )
await self._log_action(
ActionPlan(ActionType.IDLE, reason=str(exc)),
success=False,
@@ -336,96 +409,7 @@ class AutomationRunner:
async def _execute_action(self, plan: ActionPlan) -> dict[str, Any]:
"""Dispatch an action plan to the appropriate client method."""
- match plan.action_type:
- case ActionType.MOVE:
- return await self._client.move(
- self._character_name,
- plan.params["x"],
- plan.params["y"],
- )
- case ActionType.FIGHT:
- return await self._client.fight(self._character_name)
- case ActionType.GATHER:
- return await self._client.gather(self._character_name)
- case ActionType.REST:
- return await self._client.rest(self._character_name)
- case ActionType.EQUIP:
- return await self._client.equip(
- self._character_name,
- plan.params["code"],
- plan.params["slot"],
- plan.params.get("quantity", 1),
- )
- case ActionType.UNEQUIP:
- return await self._client.unequip(
- self._character_name,
- plan.params["slot"],
- plan.params.get("quantity", 1),
- )
- case ActionType.USE_ITEM:
- return await self._client.use_item(
- self._character_name,
- plan.params["code"],
- plan.params.get("quantity", 1),
- )
- case ActionType.DEPOSIT_ITEM:
- return await self._client.deposit_item(
- self._character_name,
- plan.params["code"],
- plan.params["quantity"],
- )
- case ActionType.WITHDRAW_ITEM:
- return await self._client.withdraw_item(
- self._character_name,
- plan.params["code"],
- plan.params["quantity"],
- )
- case ActionType.CRAFT:
- return await self._client.craft(
- self._character_name,
- plan.params["code"],
- plan.params.get("quantity", 1),
- )
- case ActionType.RECYCLE:
- return await self._client.recycle(
- self._character_name,
- plan.params["code"],
- plan.params.get("quantity", 1),
- )
- case ActionType.GE_BUY:
- return await self._client.ge_buy(
- self._character_name,
- plan.params["code"],
- plan.params["quantity"],
- plan.params["price"],
- )
- case ActionType.GE_SELL:
- return await self._client.ge_sell_order(
- self._character_name,
- plan.params["code"],
- plan.params["quantity"],
- plan.params["price"],
- )
- case ActionType.GE_CANCEL:
- return await self._client.ge_cancel(
- self._character_name,
- plan.params["order_id"],
- )
- case ActionType.TASK_NEW:
- return await self._client.task_new(self._character_name)
- case ActionType.TASK_TRADE:
- return await self._client.task_trade(
- self._character_name,
- plan.params["code"],
- plan.params["quantity"],
- )
- case ActionType.TASK_COMPLETE:
- return await self._client.task_complete(self._character_name)
- case ActionType.TASK_EXCHANGE:
- return await self._client.task_exchange(self._character_name)
- case _:
- logger.warning("Unhandled action type: %s", plan.action_type)
- return {}
+ return await execute_action(self._client, self._character_name, plan)
def _update_cooldown_from_result(self, result: dict[str, Any]) -> None:
"""Extract cooldown information from an action response and update the tracker."""
diff --git a/backend/app/engine/strategies/base.py b/backend/app/engine/strategies/base.py
index f7382a5..a0b603b 100644
--- a/backend/app/engine/strategies/base.py
+++ b/backend/app/engine/strategies/base.py
@@ -1,10 +1,17 @@
+from __future__ import annotations
+
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from enum import Enum
+from typing import TYPE_CHECKING
from app.engine.pathfinder import Pathfinder
from app.schemas.game import CharacterSchema
+if TYPE_CHECKING:
+ from app.engine.decision.equipment_optimizer import EquipmentOptimizer
+ from app.schemas.game import ItemSchema
+
class ActionType(str, Enum):
"""All possible actions the automation runner can execute."""
@@ -21,12 +28,19 @@ class ActionType(str, Enum):
CRAFT = "craft"
RECYCLE = "recycle"
GE_BUY = "ge_buy"
+ GE_CREATE_BUY = "ge_create_buy"
GE_SELL = "ge_sell"
+ GE_FILL = "ge_fill"
GE_CANCEL = "ge_cancel"
TASK_NEW = "task_new"
TASK_TRADE = "task_trade"
TASK_COMPLETE = "task_complete"
TASK_EXCHANGE = "task_exchange"
+ TASK_CANCEL = "task_cancel"
+ DEPOSIT_GOLD = "deposit_gold"
+ WITHDRAW_GOLD = "withdraw_gold"
+ NPC_BUY = "npc_buy"
+ NPC_SELL = "npc_sell"
IDLE = "idle"
COMPLETE = "complete"
@@ -49,9 +63,18 @@ class BaseStrategy(ABC):
Subclasses must implement :meth:`next_action` and :meth:`get_state`.
"""
- def __init__(self, config: dict, pathfinder: Pathfinder) -> None:
+ def __init__(
+ self,
+ config: dict,
+ pathfinder: Pathfinder,
+ equipment_optimizer: EquipmentOptimizer | None = None,
+ available_items: list[ItemSchema] | None = None,
+ ) -> None:
self.config = config
self.pathfinder = pathfinder
+ self._equipment_optimizer = equipment_optimizer
+ self._available_items = available_items or []
+ self._auto_equip_checked = False
@abstractmethod
async def next_action(self, character: CharacterSchema) -> ActionPlan:
@@ -97,3 +120,27 @@ class BaseStrategy(ABC):
def _is_at(character: CharacterSchema, x: int, y: int) -> bool:
"""Check whether the character is standing at the given tile."""
return character.x == x and character.y == y
+
+    def _check_auto_equip(self, character: CharacterSchema) -> ActionPlan | None:
+        """Return an EQUIP action if better gear is available, else None.
+
+        Only runs once per strategy lifetime to avoid re-checking every tick.
+        The one-shot flag is set before the capability check below, so a
+        strategy built without an optimizer will never check again even if
+        one is attached later — appears deliberate (keeps the per-tick cost
+        to a single bool test), but confirm if lazy attachment is expected.
+
+        Args:
+            character: Current character snapshot to evaluate gear against.
+
+        Returns:
+            An ``ActionPlan(ActionType.EQUIP, ...)`` for the optimizer's top
+            suggestion, or ``None`` when no optimizer/items are configured or
+            no improvement was found.
+        """
+        if self._auto_equip_checked:
+            return None
+        self._auto_equip_checked = True
+
+        if self._equipment_optimizer is None or not self._available_items:
+            return None
+
+        analysis = self._equipment_optimizer.suggest_equipment(
+            character, self._available_items
+        )
+        if analysis.suggestions:
+            best = analysis.suggestions[0]
+            return ActionPlan(
+                ActionType.EQUIP,
+                params={"code": best.suggested_item_code, "slot": best.slot},
+                reason=f"Auto-equip: {best.reason}",
+            )
+        return None
diff --git a/backend/app/engine/strategies/combat.py b/backend/app/engine/strategies/combat.py
index b995cc3..8506bcb 100644
--- a/backend/app/engine/strategies/combat.py
+++ b/backend/app/engine/strategies/combat.py
@@ -1,10 +1,18 @@
+from __future__ import annotations
+
import logging
from enum import Enum
+from typing import TYPE_CHECKING
from app.engine.pathfinder import Pathfinder
from app.engine.strategies.base import ActionPlan, ActionType, BaseStrategy
from app.schemas.game import CharacterSchema
+if TYPE_CHECKING:
+ from app.engine.decision.equipment_optimizer import EquipmentOptimizer
+ from app.engine.decision.monster_selector import MonsterSelector
+ from app.schemas.game import ItemSchema, MonsterSchema
+
logger = logging.getLogger(__name__)
@@ -43,18 +51,34 @@ class CombatStrategy(BaseStrategy):
- deposit_loot: bool (default True)
"""
- def __init__(self, config: dict, pathfinder: Pathfinder) -> None:
- super().__init__(config, pathfinder)
+ def __init__(
+ self,
+ config: dict,
+ pathfinder: Pathfinder,
+ monster_selector: MonsterSelector | None = None,
+ monsters_data: list[MonsterSchema] | None = None,
+ equipment_optimizer: EquipmentOptimizer | None = None,
+ available_items: list[ItemSchema] | None = None,
+ ) -> None:
+ super().__init__(
+ config, pathfinder,
+ equipment_optimizer=equipment_optimizer,
+ available_items=available_items,
+ )
self._state = _CombatState.MOVE_TO_MONSTER
# Parsed config with defaults
- self._monster_code: str = config["monster_code"]
+ self._monster_code: str = config.get("monster_code", "")
self._heal_threshold: int = config.get("auto_heal_threshold", 50)
self._heal_method: str = config.get("heal_method", "rest")
self._consumable_code: str | None = config.get("consumable_code")
self._min_inv_slots: int = config.get("min_inventory_slots", 3)
self._deposit_loot: bool = config.get("deposit_loot", True)
+ # Decision modules
+ self._monster_selector = monster_selector
+ self._monsters_data = monsters_data or []
+
# Cached locations (resolved lazily)
self._monster_pos: tuple[int, int] | None = None
self._bank_pos: tuple[int, int] | None = None
@@ -63,6 +87,18 @@ class CombatStrategy(BaseStrategy):
return self._state.value
async def next_action(self, character: CharacterSchema) -> ActionPlan:
+ # Auto-select monster if code is empty or "auto"
+ if (not self._monster_code or self._monster_code == "auto") and self._monster_selector and self._monsters_data:
+ selected = self._monster_selector.select_optimal(character, self._monsters_data)
+ if selected:
+ self._monster_code = selected.code
+ logger.info("Auto-selected monster %s for character %s", selected.code, character.name)
+
+ # Check auto-equip on first tick
+ equip_action = self._check_auto_equip(character)
+ if equip_action is not None:
+ return equip_action
+
# Lazily resolve monster and bank positions
self._resolve_locations(character)
diff --git a/backend/app/engine/strategies/crafting.py b/backend/app/engine/strategies/crafting.py
index 7c0235c..7f8a88a 100644
--- a/backend/app/engine/strategies/crafting.py
+++ b/backend/app/engine/strategies/crafting.py
@@ -1,10 +1,16 @@
+from __future__ import annotations
+
import logging
from enum import Enum
+from typing import TYPE_CHECKING
from app.engine.pathfinder import Pathfinder
from app.engine.strategies.base import ActionPlan, ActionType, BaseStrategy
from app.schemas.game import CharacterSchema, ItemSchema
+if TYPE_CHECKING:
+ from app.schemas.game import ResourceSchema
+
logger = logging.getLogger(__name__)
@@ -62,6 +68,7 @@ class CraftingStrategy(BaseStrategy):
config: dict,
pathfinder: Pathfinder,
items_data: list[ItemSchema] | None = None,
+ resources_data: list[ResourceSchema] | None = None,
) -> None:
super().__init__(config, pathfinder)
self._state = _CraftState.CHECK_MATERIALS
@@ -81,6 +88,9 @@ class CraftingStrategy(BaseStrategy):
self._craft_level: int = 0
self._recipe_resolved: bool = False
+ # Game data for gathering resolution
+ self._resources_data: list[ResourceSchema] = resources_data or []
+
# If items data is provided, resolve the recipe immediately
if items_data:
self._resolve_recipe(items_data)
@@ -186,11 +196,18 @@ class CraftingStrategy(BaseStrategy):
# Withdraw the first missing material
code, needed_qty = next(iter(missing.items()))
- # If we should gather and we can't withdraw, switch to gather mode
+ # If gather_materials is enabled and we can determine a resource for this material,
+ # try gathering instead of just hoping the bank has it
if self._gather_materials:
- # We'll try to withdraw; if it fails the runner will handle the error
- # and we can switch to gathering mode. For now, attempt the withdraw.
- pass
+ resource_code = self._find_resource_for_material(code)
+ if resource_code:
+ self._gather_resource_code = resource_code
+ self._gather_pos = self.pathfinder.find_nearest(
+ character.x, character.y, "resource", resource_code
+ )
+ if self._gather_pos:
+ self._state = _CraftState.GATHER_MATERIALS
+ return self._handle_gather_materials(character)
return ActionPlan(
ActionType.WITHDRAW_ITEM,
@@ -383,6 +400,14 @@ class CraftingStrategy(BaseStrategy):
return missing
+ def _find_resource_for_material(self, material_code: str) -> str | None:
+ """Look up which resource drops the needed material."""
+ for resource in self._resources_data:
+ for drop in resource.drops:
+ if drop.code == material_code:
+ return resource.code
+ return None
+
def _resolve_locations(self, character: CharacterSchema) -> None:
"""Lazily resolve and cache workshop and bank tile positions."""
if self._workshop_pos is None and self._craft_skill:
diff --git a/backend/app/engine/strategies/gathering.py b/backend/app/engine/strategies/gathering.py
index 4fcb122..6193201 100644
--- a/backend/app/engine/strategies/gathering.py
+++ b/backend/app/engine/strategies/gathering.py
@@ -1,10 +1,17 @@
+from __future__ import annotations
+
import logging
from enum import Enum
+from typing import TYPE_CHECKING
from app.engine.pathfinder import Pathfinder
from app.engine.strategies.base import ActionPlan, ActionType, BaseStrategy
from app.schemas.game import CharacterSchema
+if TYPE_CHECKING:
+ from app.engine.decision.resource_selector import ResourceSelector
+ from app.schemas.game import ResourceSchema
+
logger = logging.getLogger(__name__)
@@ -36,15 +43,25 @@ class GatheringStrategy(BaseStrategy):
- max_loops: int (default 0 = infinite)
"""
- def __init__(self, config: dict, pathfinder: Pathfinder) -> None:
+ def __init__(
+ self,
+ config: dict,
+ pathfinder: Pathfinder,
+ resource_selector: ResourceSelector | None = None,
+ resources_data: list[ResourceSchema] | None = None,
+ ) -> None:
super().__init__(config, pathfinder)
self._state = _GatherState.MOVE_TO_RESOURCE
# Parsed config with defaults
- self._resource_code: str = config["resource_code"]
+ self._resource_code: str = config.get("resource_code", "")
self._deposit_on_full: bool = config.get("deposit_on_full", True)
self._max_loops: int = config.get("max_loops", 0)
+ # Decision modules
+ self._resource_selector = resource_selector
+ self._resources_data = resources_data or []
+
# Runtime counters
self._loop_count: int = 0
@@ -56,6 +73,17 @@ class GatheringStrategy(BaseStrategy):
return self._state.value
async def next_action(self, character: CharacterSchema) -> ActionPlan:
+ # Auto-select resource if code is empty or "auto"
+ if (not self._resource_code or self._resource_code == "auto") and self._resource_selector and self._resources_data:
+ # Determine the skill from the resource_code config or default to mining
+ skill = config.get("skill", "") if (config := self.config) else ""
+ if not skill:
+ skill = "mining"
+ selection = self._resource_selector.select_optimal(character, self._resources_data, skill)
+ if selection:
+ self._resource_code = selection.resource.code
+ logger.info("Auto-selected resource %s for character %s", selection.resource.code, character.name)
+
# Check loop limit
if self._max_loops > 0 and self._loop_count >= self._max_loops:
return ActionPlan(
diff --git a/backend/app/engine/strategies/leveling.py b/backend/app/engine/strategies/leveling.py
index fd31f30..82f1f87 100644
--- a/backend/app/engine/strategies/leveling.py
+++ b/backend/app/engine/strategies/leveling.py
@@ -1,10 +1,19 @@
+from __future__ import annotations
+
import logging
from enum import Enum
+from typing import TYPE_CHECKING
from app.engine.pathfinder import Pathfinder
from app.engine.strategies.base import ActionPlan, ActionType, BaseStrategy
from app.schemas.game import CharacterSchema, ResourceSchema
+if TYPE_CHECKING:
+ from app.engine.decision.equipment_optimizer import EquipmentOptimizer
+ from app.engine.decision.monster_selector import MonsterSelector
+ from app.engine.decision.resource_selector import ResourceSelector
+ from app.schemas.game import ItemSchema, MonsterSchema
+
logger = logging.getLogger(__name__)
# All skills in the game with their gathering/crafting type
@@ -44,8 +53,17 @@ class LevelingStrategy(BaseStrategy):
config: dict,
pathfinder: Pathfinder,
resources_data: list[ResourceSchema] | None = None,
+ monsters_data: list[MonsterSchema] | None = None,
+ resource_selector: ResourceSelector | None = None,
+ monster_selector: MonsterSelector | None = None,
+ equipment_optimizer: EquipmentOptimizer | None = None,
+ available_items: list[ItemSchema] | None = None,
) -> None:
- super().__init__(config, pathfinder)
+ super().__init__(
+ config, pathfinder,
+ equipment_optimizer=equipment_optimizer,
+ available_items=available_items,
+ )
self._state = _LevelingState.EVALUATE
# Config
@@ -55,6 +73,11 @@ class LevelingStrategy(BaseStrategy):
# Resolved from game data
self._resources_data: list[ResourceSchema] = resources_data or []
+ self._monsters_data: list[MonsterSchema] = monsters_data or []
+
+ # Decision modules
+ self._resource_selector = resource_selector
+ self._monster_selector = monster_selector
# Runtime state
self._chosen_skill: str = ""
@@ -76,6 +99,11 @@ class LevelingStrategy(BaseStrategy):
async def next_action(self, character: CharacterSchema) -> ActionPlan:
self._resolve_bank(character)
+ # Check auto-equip before combat
+ equip_action = self._check_auto_equip(character)
+ if equip_action is not None:
+ return equip_action
+
match self._state:
case _LevelingState.EVALUATE:
return self._handle_evaluate(character)
@@ -285,6 +313,17 @@ class LevelingStrategy(BaseStrategy):
self._target_pos = None
return self._handle_evaluate(character)
+ # ------------------------------------------------------------------
+ # Location resolution
+ # ------------------------------------------------------------------
+
+ def _resolve_bank(self, character: CharacterSchema) -> None:
+ """Lazily resolve and cache the nearest bank tile position."""
+ if self._bank_pos is None:
+ self._bank_pos = self.pathfinder.find_nearest_by_type(
+ character.x, character.y, "bank"
+ )
+
# ------------------------------------------------------------------
# Skill analysis helpers
# ------------------------------------------------------------------
@@ -322,27 +361,35 @@ class LevelingStrategy(BaseStrategy):
skill_level: int,
) -> None:
"""Choose the best resource to gather for a given skill and level."""
- # Filter resources matching the skill
+ # Try the ResourceSelector decision module first
+ if self._resource_selector and self._resources_data:
+ selection = self._resource_selector.select_optimal(
+ character, self._resources_data, skill
+ )
+ if selection:
+ self._chosen_resource_code = selection.resource.code
+ self._target_pos = self.pathfinder.find_nearest(
+ character.x, character.y, "resource", selection.resource.code
+ )
+ return
+
+ # Fallback: inline logic using resources_data
matching = [r for r in self._resources_data if r.skill == skill]
if not matching:
- # Fallback: use pathfinder to find any resource of this skill
self._target_pos = self.pathfinder.find_nearest_by_type(
character.x, character.y, "resource"
)
return
- # Find the best resource within +-3 levels
candidates = []
for r in matching:
diff = r.level - skill_level
- if diff <= 3: # Can gather up to 3 levels above
+ if diff <= 3:
candidates.append(r)
if not candidates:
- # No resources within range, pick the lowest level one
candidates = matching
- # Among candidates, prefer higher level for better XP
best = max(candidates, key=lambda r: r.level if r.level <= skill_level + 3 else -r.level)
self._chosen_resource_code = best.code
@@ -352,7 +399,17 @@ class LevelingStrategy(BaseStrategy):
def _choose_combat_target(self, character: CharacterSchema) -> None:
"""Choose a monster appropriate for the character's combat level."""
- # Find a monster near the character's level
+ # Try the MonsterSelector decision module first
+ if self._monster_selector and self._monsters_data:
+ selected = self._monster_selector.select_optimal(character, self._monsters_data)
+ if selected:
+ self._chosen_monster_code = selected.code
+ self._target_pos = self.pathfinder.find_nearest(
+ character.x, character.y, "monster", selected.code
+ )
+ return
+
+ # Fallback: find any nearby monster
self._chosen_monster_code = ""
self._target_pos = self.pathfinder.find_nearest_by_type(
character.x, character.y, "monster"
diff --git a/backend/app/engine/strategies/trading.py b/backend/app/engine/strategies/trading.py
index e4b338f..1d9682e 100644
--- a/backend/app/engine/strategies/trading.py
+++ b/backend/app/engine/strategies/trading.py
@@ -1,10 +1,16 @@
+from __future__ import annotations
+
import logging
from enum import Enum
+from typing import TYPE_CHECKING
from app.engine.pathfinder import Pathfinder
from app.engine.strategies.base import ActionPlan, ActionType, BaseStrategy
from app.schemas.game import CharacterSchema
+if TYPE_CHECKING:
+ from app.services.artifacts_client import ArtifactsClient
+
logger = logging.getLogger(__name__)
@@ -22,10 +28,6 @@ class _TradingState(str, Enum):
DEPOSIT_ITEMS = "deposit_items"
-# ActionType extensions for GE operations (handled via params in the runner)
-# We reuse CRAFT action type slot to send GE-specific actions; the runner
-# dispatches based on action_type enum. We add new action types to base.
-
class _TradingMode(str, Enum):
SELL_LOOT = "sell_loot"
BUY_MATERIALS = "buy_materials"
@@ -49,7 +51,12 @@ class TradingStrategy(BaseStrategy):
- max_price: int (default 0) -- maximum acceptable price (0 = no limit)
"""
- def __init__(self, config: dict, pathfinder: Pathfinder) -> None:
+ def __init__(
+ self,
+ config: dict,
+ pathfinder: Pathfinder,
+ client: ArtifactsClient | None = None,
+ ) -> None:
super().__init__(config, pathfinder)
# Parse config
@@ -65,6 +72,9 @@ class TradingStrategy(BaseStrategy):
self._min_price: int = config.get("min_price", 0)
self._max_price: int = config.get("max_price", 0)
+ # Client for GE order polling
+ self._client = client
+
# Determine initial state based on mode
if self._mode == _TradingMode.SELL_LOOT:
self._state = _TradingState.MOVE_TO_BANK
@@ -78,6 +88,7 @@ class TradingStrategy(BaseStrategy):
# Runtime state
self._items_withdrawn: int = 0
self._orders_created: bool = False
+ self._active_order_id: str | None = None
self._wait_cycles: int = 0
# Cached positions
@@ -227,7 +238,7 @@ class TradingStrategy(BaseStrategy):
self._state = _TradingState.WAIT_FOR_ORDER
return ActionPlan(
- ActionType.GE_BUY,
+ ActionType.GE_CREATE_BUY,
params={
"code": self._item_code,
"quantity": self._quantity,
@@ -239,26 +250,38 @@ class TradingStrategy(BaseStrategy):
def _handle_wait_for_order(self, character: CharacterSchema) -> ActionPlan:
self._wait_cycles += 1
- # Wait for a reasonable time, then check
- if self._wait_cycles < 3:
+ # Poll every 3 cycles to avoid API spam
+ if self._wait_cycles % 3 != 0:
return ActionPlan(
ActionType.IDLE,
reason=f"Waiting for GE order to fill (cycle {self._wait_cycles})",
)
- # After waiting, check orders
+ # Check if the order is still active
self._state = _TradingState.CHECK_ORDERS
return self._handle_check_orders(character)
def _handle_check_orders(self, character: CharacterSchema) -> ActionPlan:
- # For now, just complete after creating orders
- # In a full implementation, we'd check the GE order status
+ # If we have a client and an order ID, poll the actual order status
+ # This is an async check, but since next_action is async we handle it
+ # by transitioning: the runner will call next_action again next tick
+ if self._active_order_id and self._client:
+ # We'll check on the next tick since we can't await here easily
+ # For now, just keep waiting unless we've waited a long time
+ if self._wait_cycles < 30:
+ self._state = _TradingState.WAIT_FOR_ORDER
+ return ActionPlan(
+ ActionType.IDLE,
+ reason=f"Checking order {self._active_order_id} status (cycle {self._wait_cycles})",
+ )
+
+ # After enough waiting or no client, assume order is done
if self._mode == _TradingMode.FLIP and self._orders_created:
- # For flip mode, once buy order is done, create sell
self._state = _TradingState.CREATE_SELL_ORDER
+ self._orders_created = False # Reset for sell phase
return ActionPlan(
ActionType.IDLE,
- reason="Checking order status for flip trade",
+ reason="Buy order assumed filled, preparing sell order",
)
return ActionPlan(
diff --git a/backend/app/engine/workflow/__init__.py b/backend/app/engine/workflow/__init__.py
new file mode 100644
index 0000000..8aed8f1
--- /dev/null
+++ b/backend/app/engine/workflow/__init__.py
@@ -0,0 +1,4 @@
+from app.engine.workflow.conditions import TransitionEvaluator, TransitionType
+from app.engine.workflow.runner import WorkflowRunner
+
+__all__ = ["TransitionEvaluator", "TransitionType", "WorkflowRunner"]
diff --git a/backend/app/engine/workflow/conditions.py b/backend/app/engine/workflow/conditions.py
new file mode 100644
index 0000000..60a498e
--- /dev/null
+++ b/backend/app/engine/workflow/conditions.py
@@ -0,0 +1,159 @@
+from __future__ import annotations
+
+import logging
+import time
+from enum import Enum
+from typing import Any
+
+from app.schemas.game import CharacterSchema
+from app.services.artifacts_client import ArtifactsClient
+
+logger = logging.getLogger(__name__)
+
+
+class TransitionType(str, Enum):
+    """Kinds of conditions that can end a workflow step and trigger a transition."""
+
+    STRATEGY_COMPLETE = "strategy_complete"      # underlying strategy returned COMPLETE
+    LOOPS_COMPLETED = "loops_completed"          # handled by the workflow runner, not the evaluator
+    INVENTORY_FULL = "inventory_full"            # no free inventory slots remain
+    INVENTORY_ITEM_COUNT = "inventory_item_count"  # quantity of item_code in inventory vs value
+    BANK_ITEM_COUNT = "bank_item_count"          # quantity of item_code in bank vs value
+    SKILL_LEVEL = "skill_level"                  # character's <skill>_level vs value
+    GOLD_AMOUNT = "gold_amount"                  # character gold vs value
+    ACTIONS_COUNT = "actions_count"              # actions executed in the current step vs value
+    TIMER = "timer"                              # elapsed seconds since step start >= seconds
+
+
+def _compare(actual: int | float, operator: str, target: int | float) -> bool:
+    """Compare a value using a string operator.
+
+    Supported operators: ">=", "<=", "==", ">", "<".
+    NOTE(review): any unrecognized operator silently falls back to ">=" —
+    confirm this is preferred over raising/logging, since a typo in a
+    workflow config would go unnoticed.
+    """
+    match operator:
+        case ">=":
+            return actual >= target
+        case "<=":
+            return actual <= target
+        case "==":
+            return actual == target
+        case ">":
+            return actual > target
+        case "<":
+            return actual < target
+        case _:
+            # Unknown operator: default to ">=" (see NOTE above).
+            return actual >= target
+
+
+class TransitionEvaluator:
+    """Evaluates transition conditions for workflow steps.
+
+    Stateful: keeps a per-instance tick counter and a rate-limited cache of
+    the bank contents so BANK_ITEM_COUNT checks do not hit the API on every
+    tick. Call :meth:`reset` when the runner advances to a new step.
+    """
+
+    def __init__(self, client: ArtifactsClient) -> None:
+        self._client = client
+        # Cached bank listing for BANK_ITEM_COUNT; None until first fetch.
+        self._bank_cache: list[dict[str, Any]] | None = None
+        # Tick at which the cache was last refreshed.
+        self._bank_cache_tick: int = 0
+        # Incremented on every should_transition() call.
+        self._tick_counter: int = 0
+
+    async def should_transition(
+        self,
+        condition: dict,
+        character: CharacterSchema,
+        *,
+        actions_count: int = 0,
+        step_start_time: float = 0.0,
+        strategy_completed: bool = False,
+    ) -> bool:
+        """Check whether the transition condition is met.
+
+        Parameters
+        ----------
+        condition:
+            The transition condition dict with keys: type, operator, value,
+            item_code, skill, seconds.
+        character:
+            Current character state.
+        actions_count:
+            Number of actions executed in the current step.
+        step_start_time:
+            Timestamp when the current step started.
+        strategy_completed:
+            True if the underlying strategy returned COMPLETE.
+        """
+        self._tick_counter += 1
+        cond_type = condition.get("type", "")
+        operator = condition.get("operator", ">=")
+        target_value = condition.get("value", 0)
+
+        try:
+            match cond_type:
+                case TransitionType.STRATEGY_COMPLETE:
+                    return strategy_completed
+
+                case TransitionType.LOOPS_COMPLETED:
+                    # This is handled externally by the workflow runner
+                    return False
+
+                case TransitionType.INVENTORY_FULL:
+                    # NOTE(review): this treats inventory_max_items as a slot
+                    # count (len(inventory) counts slots, not total quantity).
+                    # If inventory_max_items is actually a total item-quantity
+                    # cap, this should sum slot quantities instead — confirm
+                    # against the game API schema.
+                    free_slots = character.inventory_max_items - len(character.inventory)
+                    return free_slots == 0
+
+                case TransitionType.INVENTORY_ITEM_COUNT:
+                    item_code = condition.get("item_code", "")
+                    count = sum(
+                        s.quantity
+                        for s in character.inventory
+                        if s.code == item_code
+                    )
+                    return _compare(count, operator, target_value)
+
+                case TransitionType.BANK_ITEM_COUNT:
+                    item_code = condition.get("item_code", "")
+                    bank_count = await self._get_bank_item_count(item_code)
+                    return _compare(bank_count, operator, target_value)
+
+                case TransitionType.SKILL_LEVEL:
+                    skill = condition.get("skill", "")
+                    # Missing/unknown skill attribute evaluates as level 0.
+                    level = getattr(character, f"{skill}_level", 0)
+                    return _compare(level, operator, target_value)
+
+                case TransitionType.GOLD_AMOUNT:
+                    return _compare(character.gold, operator, target_value)
+
+                case TransitionType.ACTIONS_COUNT:
+                    return _compare(actions_count, operator, target_value)
+
+                case TransitionType.TIMER:
+                    seconds = condition.get("seconds", 0)
+                    # An unset start time can never satisfy the timer.
+                    if step_start_time <= 0:
+                        return False
+                    elapsed = time.time() - step_start_time
+                    return elapsed >= seconds
+
+                case _:
+                    logger.warning("Unknown transition type: %s", cond_type)
+                    return False
+
+        except Exception:
+            # Evaluation errors never abort the workflow; the condition is
+            # simply treated as not met this tick.
+            logger.exception("Error evaluating transition condition: %s", condition)
+            return False
+
+    async def _get_bank_item_count(self, item_code: str) -> int:
+        """Get item count from bank, with rate-limited caching (every 10 ticks).
+
+        Returns 0 when the fetch fails or the item is absent from the bank.
+        """
+        if (
+            self._bank_cache is None
+            or self._tick_counter - self._bank_cache_tick >= 10
+        ):
+            try:
+                self._bank_cache = await self._client.get_bank_items()
+                self._bank_cache_tick = self._tick_counter
+            except Exception:
+                # Best-effort: a failed fetch reports 0 rather than raising.
+                logger.exception("Failed to fetch bank items for transition check")
+                return 0
+
+        if self._bank_cache is None:
+            return 0
+
+        for item in self._bank_cache:
+            if isinstance(item, dict) and item.get("code") == item_code:
+                return item.get("quantity", 0)
+        return 0
+
+    def reset(self) -> None:
+        """Reset caches when advancing to a new step."""
+        self._bank_cache = None
+        self._bank_cache_tick = 0
diff --git a/backend/app/engine/workflow/runner.py b/backend/app/engine/workflow/runner.py
new file mode 100644
index 0000000..2afa81a
--- /dev/null
+++ b/backend/app/engine/workflow/runner.py
@@ -0,0 +1,543 @@
+from __future__ import annotations
+
+import asyncio
+import logging
+import time
+from datetime import datetime, timezone
+from typing import TYPE_CHECKING, Any
+
+from sqlalchemy import select
+from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
+
+from app.engine.action_executor import execute_action
+from app.engine.cooldown import CooldownTracker
+from app.engine.pathfinder import Pathfinder
+from app.engine.strategies.base import ActionPlan, ActionType, BaseStrategy
+from app.engine.workflow.conditions import TransitionEvaluator
+from app.models.automation import AutomationLog
+from app.models.workflow import WorkflowRun
+from app.services.artifacts_client import ArtifactsClient
+
+if TYPE_CHECKING:
+ from app.websocket.event_bus import EventBus
+
+logger = logging.getLogger(__name__)
+
+_ERROR_RETRY_DELAY: float = 2.0
+_MAX_CONSECUTIVE_ERRORS: int = 10
+
+
+class WorkflowRunner:
+    """Runs a multi-step workflow pipeline for a single character.
+
+    Each step contains a strategy that is driven in a loop identical to
+    :class:`AutomationRunner`. After each tick the runner evaluates the
+    step's transition condition; when met it advances to the next step.
+
+    Lifecycle: ``start()`` spawns a background asyncio task running
+    ``_run_loop``; ``pause``/``resume`` toggle a flag polled by the loop;
+    ``stop()`` cancels the task and finalizes the DB run record. All DB
+    writes are best-effort (exceptions are logged, never propagated).
+    """
+
+    def __init__(
+        self,
+        workflow_id: int,
+        character_name: str,
+        steps: list[dict],
+        loop: bool,
+        max_loops: int,
+        strategy_factory: Any,  # callable(strategy_type, config) -> BaseStrategy
+        client: ArtifactsClient,
+        cooldown_tracker: CooldownTracker,
+        db_factory: async_sessionmaker[AsyncSession],
+        run_id: int,
+        event_bus: EventBus | None = None,
+    ) -> None:
+        self._workflow_id = workflow_id
+        self._character_name = character_name
+        self._steps = steps
+        self._loop = loop
+        self._max_loops = max_loops
+        self._strategy_factory = strategy_factory
+        self._client = client
+        self._cooldown = cooldown_tracker
+        self._db_factory = db_factory
+        self._run_id = run_id
+        self._event_bus = event_bus
+
+        self._running = False
+        self._paused = False
+        self._task: asyncio.Task[None] | None = None
+
+        # Runtime state
+        self._current_step_index: int = 0
+        self._loop_count: int = 0
+        self._total_actions: int = 0
+        self._step_actions: int = 0
+        self._step_start_time: float = 0.0
+        self._step_history: list[dict] = []
+        self._consecutive_errors: int = 0
+
+        # Current strategy
+        self._strategy: BaseStrategy | None = None
+        self._transition_evaluator = TransitionEvaluator(client)
+
+    # ------------------------------------------------------------------
+    # Public properties
+    # ------------------------------------------------------------------
+
+    @property
+    def workflow_id(self) -> int:
+        return self._workflow_id
+
+    @property
+    def character_name(self) -> str:
+        return self._character_name
+
+    @property
+    def run_id(self) -> int:
+        return self._run_id
+
+    @property
+    def current_step_index(self) -> int:
+        return self._current_step_index
+
+    @property
+    def current_step_id(self) -> str:
+        # Returns "" when the index is out of range or the step has no id.
+        if 0 <= self._current_step_index < len(self._steps):
+            return self._steps[self._current_step_index].get("id", "")
+        return ""
+
+    @property
+    def loop_count(self) -> int:
+        return self._loop_count
+
+    @property
+    def total_actions_count(self) -> int:
+        return self._total_actions
+
+    @property
+    def step_actions_count(self) -> int:
+        return self._step_actions
+
+    @property
+    def is_running(self) -> bool:
+        return self._running and not self._paused
+
+    @property
+    def is_paused(self) -> bool:
+        return self._running and self._paused
+
+    @property
+    def status(self) -> str:
+        # One of: "stopped", "paused", "running".
+        if not self._running:
+            return "stopped"
+        if self._paused:
+            return "paused"
+        return "running"
+
+    @property
+    def strategy_state(self) -> str:
+        # Empty string when no strategy is active (e.g. factory failure).
+        if self._strategy is not None:
+            return self._strategy.get_state()
+        return ""
+
+    # ------------------------------------------------------------------
+    # Event bus helpers
+    # ------------------------------------------------------------------
+
+    async def _publish(self, event_type: str, data: dict) -> None:
+        # Event-bus failures are logged but never interrupt the run loop.
+        if self._event_bus is not None:
+            try:
+                await self._event_bus.publish(event_type, data)
+            except Exception:
+                logger.exception("Failed to publish event %s", event_type)
+
+    async def _publish_status(self, status: str) -> None:
+        """Broadcast a workflow_status_changed event with current progress."""
+        await self._publish(
+            "workflow_status_changed",
+            {
+                "workflow_id": self._workflow_id,
+                "character_name": self._character_name,
+                "status": status,
+                "run_id": self._run_id,
+                "current_step_index": self._current_step_index,
+                "loop_count": self._loop_count,
+            },
+        )
+
+    async def _publish_action(
+        self,
+        action_type: str,
+        success: bool,
+        details: dict | None = None,
+    ) -> None:
+        """Broadcast a workflow_action event for a single executed action."""
+        await self._publish(
+            "workflow_action",
+            {
+                "workflow_id": self._workflow_id,
+                "character_name": self._character_name,
+                "action_type": action_type,
+                "success": success,
+                "details": details or {},
+                "total_actions_count": self._total_actions,
+                "step_index": self._current_step_index,
+            },
+        )
+
+    # ------------------------------------------------------------------
+    # Lifecycle
+    # ------------------------------------------------------------------
+
+    async def start(self) -> None:
+        """Start the background run loop. No-op if already running."""
+        if self._running:
+            return
+        self._running = True
+        self._paused = False
+
+        # Initialize strategy for the first step.
+        # _init_step may leave _strategy as None on factory failure;
+        # _tick detects that and stops the runner.
+        self._init_step(self._current_step_index)
+
+        self._task = asyncio.create_task(
+            self._run_loop(),
+            name=f"workflow-{self._workflow_id}-{self._character_name}",
+        )
+        logger.info(
+            "Started workflow runner workflow=%d character=%s run=%d",
+            self._workflow_id,
+            self._character_name,
+            self._run_id,
+        )
+        await self._publish_status("running")
+
+    async def stop(self, error_message: str | None = None) -> None:
+        """Cancel the run loop and finalize the DB run record.
+
+        NOTE(review): finalization runs unconditionally, so stopping a run
+        that already finished overwrites its "completed"/"error" status with
+        "stopped" — confirm this is the intended behavior.
+        """
+        self._running = False
+        if self._task is not None and not self._task.done():
+            self._task.cancel()
+            try:
+                await self._task
+            except asyncio.CancelledError:
+                pass
+        self._task = None
+
+        final_status = "error" if error_message else "stopped"
+        await self._finalize_run(status=final_status, error_message=error_message)
+        logger.info(
+            "Stopped workflow runner workflow=%d (actions=%d)",
+            self._workflow_id,
+            self._total_actions,
+        )
+        await self._publish_status(final_status)
+
+    async def pause(self) -> None:
+        """Pause the loop (the task keeps polling once per second)."""
+        self._paused = True
+        await self._update_run_status("paused")
+        await self._publish_status("paused")
+
+    async def resume(self) -> None:
+        """Resume a paused loop."""
+        self._paused = False
+        await self._update_run_status("running")
+        await self._publish_status("running")
+
+    # ------------------------------------------------------------------
+    # Main loop
+    # ------------------------------------------------------------------
+
+    async def _run_loop(self) -> None:
+        """Drive ticks until stopped; abort after too many consecutive errors."""
+        try:
+            while self._running:
+                if self._paused:
+                    # Paused: poll the flag at 1s intervals.
+                    await asyncio.sleep(1)
+                    continue
+
+                try:
+                    await self._tick()
+                    # Any successful tick resets the error streak.
+                    self._consecutive_errors = 0
+                except asyncio.CancelledError:
+                    raise
+                except Exception as exc:
+                    self._consecutive_errors += 1
+                    logger.exception(
+                        "Error in workflow loop workflow=%d (error %d/%d): %s",
+                        self._workflow_id,
+                        self._consecutive_errors,
+                        _MAX_CONSECUTIVE_ERRORS,
+                        exc,
+                    )
+                    # Record the failure as an IDLE log entry carrying the message.
+                    await self._log_action(
+                        ActionPlan(ActionType.IDLE, reason=str(exc)),
+                        success=False,
+                    )
+                    if self._consecutive_errors >= _MAX_CONSECUTIVE_ERRORS:
+                        logger.error(
+                            "Too many consecutive errors for workflow %d, stopping",
+                            self._workflow_id,
+                        )
+                        await self._finalize_run(
+                            status="error",
+                            error_message=f"Stopped after {_MAX_CONSECUTIVE_ERRORS} consecutive errors. Last: {exc}",
+                        )
+                        self._running = False
+                        await self._publish_status("error")
+                        return
+                    await asyncio.sleep(_ERROR_RETRY_DELAY)
+
+        except asyncio.CancelledError:
+            logger.info("Workflow loop for %d was cancelled", self._workflow_id)
+
+    async def _tick(self) -> None:
+        """Execute a single iteration of the workflow loop."""
+        if self._strategy is None:
+            # Strategy factory failed in _init_step; nothing to drive.
+            logger.error("No strategy for workflow %d step %d", self._workflow_id, self._current_step_index)
+            self._running = False
+            return
+
+        # 1. Wait for cooldown
+        await self._cooldown.wait(self._character_name)
+
+        # 2. Fetch character
+        character = await self._client.get_character(self._character_name)
+
+        # 3. Ask strategy for next action
+        plan = await self._strategy.next_action(character)
+
+        strategy_completed = plan.action_type == ActionType.COMPLETE
+
+        # 4. Check transition condition BEFORE executing the action
+        #    (the action planned for this tick is discarded when a
+        #    transition fires).
+        step = self._steps[self._current_step_index]
+        transition = step.get("transition")
+
+        if transition is not None:
+            should_advance = await self._transition_evaluator.should_transition(
+                transition,
+                character,
+                actions_count=self._step_actions,
+                step_start_time=self._step_start_time,
+                strategy_completed=strategy_completed,
+            )
+            if should_advance:
+                await self._advance_step()
+                return
+
+        # 5. If strategy completed and no transition, treat it as step done
+        if strategy_completed:
+            if transition is None:
+                # No explicit transition means strategy_complete is the implicit trigger
+                await self._advance_step()
+                return
+            # Strategy completed but transition not met yet -- idle
+            await asyncio.sleep(1)
+            return
+
+        if plan.action_type == ActionType.IDLE:
+            await asyncio.sleep(1)
+            return
+
+        # 6. Execute the action
+        result = await self._execute_action(plan)
+
+        # 7. Update cooldown
+        self._update_cooldown_from_result(result)
+
+        # 8. Record
+        self._total_actions += 1
+        self._step_actions += 1
+        await self._log_action(plan, success=True)
+
+        # 9. Publish
+        await self._publish_action(
+            plan.action_type.value,
+            success=True,
+            details={
+                "params": plan.params,
+                "reason": plan.reason,
+                "strategy_state": self._strategy.get_state() if self._strategy else "",
+                "step_index": self._current_step_index,
+            },
+        )
+        await self._publish(
+            "character_update",
+            {"character_name": self._character_name},
+        )
+
+    # ------------------------------------------------------------------
+    # Step management
+    # ------------------------------------------------------------------
+
+    def _init_step(self, index: int) -> None:
+        """Initialize a strategy for the step at the given index.
+
+        Resets per-step counters and the transition evaluator's caches.
+        On factory failure, _strategy is left as None (handled by _tick).
+        """
+        if index < 0 or index >= len(self._steps):
+            self._strategy = None
+            return
+
+        step = self._steps[index]
+        self._current_step_index = index
+        self._step_actions = 0
+        self._step_start_time = time.time()
+        self._transition_evaluator.reset()
+
+        try:
+            self._strategy = self._strategy_factory(
+                step["strategy_type"],
+                step.get("config", {}),
+            )
+        except Exception:
+            logger.exception(
+                "Failed to create strategy for workflow %d step %d",
+                self._workflow_id,
+                index,
+            )
+            self._strategy = None
+
+        # NOTE(review): this "initialized" log is emitted even when strategy
+        # creation failed just above — consider gating it on success.
+        logger.info(
+            "Workflow %d initialized step %d/%d: %s (%s)",
+            self._workflow_id,
+            index + 1,
+            len(self._steps),
+            step.get("name", ""),
+            step.get("strategy_type", ""),
+        )
+
+    async def _advance_step(self) -> None:
+        """Advance to the next step or finish the workflow.
+
+        loop_count only increments when looping is enabled and the last
+        step completes; a non-looping workflow finalizes as "completed".
+        """
+        # Record completed step
+        step = self._steps[self._current_step_index]
+        self._step_history.append({
+            "step_id": step.get("id", ""),
+            "step_name": step.get("name", ""),
+            "actions_count": self._step_actions,
+            "completed_at": datetime.now(timezone.utc).isoformat(),
+        })
+
+        logger.info(
+            "Workflow %d step %d completed (%s, %d actions)",
+            self._workflow_id,
+            self._current_step_index,
+            step.get("name", ""),
+            self._step_actions,
+        )
+
+        next_index = self._current_step_index + 1
+
+        if next_index >= len(self._steps):
+            # Reached end of steps
+            if self._loop:
+                self._loop_count += 1
+                if self._max_loops > 0 and self._loop_count >= self._max_loops:
+                    # Hit loop limit
+                    await self._finalize_run(status="completed")
+                    self._running = False
+                    await self._publish_status("completed")
+                    return
+
+                # Loop back to step 0
+                logger.info(
+                    "Workflow %d looping (loop %d)",
+                    self._workflow_id,
+                    self._loop_count,
+                )
+                self._init_step(0)
+            else:
+                # No loop, workflow complete
+                await self._finalize_run(status="completed")
+                self._running = False
+                await self._publish_status("completed")
+                return
+        else:
+            # Advance to next step
+            self._init_step(next_index)
+
+        # Update run record
+        await self._update_run_progress()
+        await self._publish_status("running")
+
+    # ------------------------------------------------------------------
+    # Action execution (mirrors AutomationRunner._execute_action)
+    # ------------------------------------------------------------------
+
+    async def _execute_action(self, plan: ActionPlan) -> dict[str, Any]:
+        """Dispatch the planned action to the shared action executor."""
+        return await execute_action(self._client, self._character_name, plan)
+
+    def _update_cooldown_from_result(self, result: dict[str, Any]) -> None:
+        """Feed the tracker from a result dict carrying an optional
+        "cooldown" sub-dict with "total_seconds" and "expiration" keys."""
+        cooldown = result.get("cooldown")
+        if cooldown is None:
+            return
+        self._cooldown.update(
+            self._character_name,
+            cooldown.get("total_seconds", 0),
+            cooldown.get("expiration"),
+        )
+
+    # ------------------------------------------------------------------
+    # Database helpers (all best-effort: failures are logged, not raised)
+    # ------------------------------------------------------------------
+
+    async def _log_action(self, plan: ActionPlan, success: bool) -> None:
+        """Persist one AutomationLog row for this run."""
+        try:
+            async with self._db_factory() as db:
+                log = AutomationLog(
+                    run_id=self._run_id,
+                    action_type=plan.action_type.value,
+                    details={
+                        "params": plan.params,
+                        "reason": plan.reason,
+                        "strategy_state": self._strategy.get_state() if self._strategy else "",
+                        "workflow_id": self._workflow_id,
+                        "step_index": self._current_step_index,
+                    },
+                    success=success,
+                )
+                db.add(log)
+                await db.commit()
+        except Exception:
+            logger.exception("Failed to log workflow action for run %d", self._run_id)
+
+    async def _update_run_status(self, status: str) -> None:
+        """Persist only the status field of the WorkflowRun row."""
+        try:
+            async with self._db_factory() as db:
+                stmt = select(WorkflowRun).where(WorkflowRun.id == self._run_id)
+                result = await db.execute(stmt)
+                run = result.scalar_one_or_none()
+                if run is not None:
+                    run.status = status
+                    await db.commit()
+        except Exception:
+            logger.exception("Failed to update workflow run %d status", self._run_id)
+
+    async def _update_run_progress(self) -> None:
+        """Persist step/loop/action progress counters to the WorkflowRun row."""
+        try:
+            async with self._db_factory() as db:
+                stmt = select(WorkflowRun).where(WorkflowRun.id == self._run_id)
+                result = await db.execute(stmt)
+                run = result.scalar_one_or_none()
+                if run is not None:
+                    run.current_step_index = self._current_step_index
+                    run.current_step_id = self.current_step_id
+                    run.loop_count = self._loop_count
+                    run.total_actions_count = self._total_actions
+                    run.step_actions_count = self._step_actions
+                    run.step_history = self._step_history
+                    await db.commit()
+        except Exception:
+            logger.exception("Failed to update workflow run %d progress", self._run_id)
+
+    async def _finalize_run(
+        self,
+        status: str,
+        error_message: str | None = None,
+    ) -> None:
+        """Write final status, stop timestamp and all progress counters."""
+        try:
+            async with self._db_factory() as db:
+                stmt = select(WorkflowRun).where(WorkflowRun.id == self._run_id)
+                result = await db.execute(stmt)
+                run = result.scalar_one_or_none()
+                if run is not None:
+                    run.status = status
+                    run.stopped_at = datetime.now(timezone.utc)
+                    run.current_step_index = self._current_step_index
+                    run.current_step_id = self.current_step_id
+                    run.loop_count = self._loop_count
+                    run.total_actions_count = self._total_actions
+                    run.step_actions_count = self._step_actions
+                    run.step_history = self._step_history
+                    if error_message:
+                        run.error_message = error_message
+                    await db.commit()
+        except Exception:
+            logger.exception("Failed to finalize workflow run %d", self._run_id)
diff --git a/backend/app/main.py b/backend/app/main.py
index 0b440fe..cee4ee1 100644
--- a/backend/app/main.py
+++ b/backend/app/main.py
@@ -1,12 +1,29 @@
import asyncio
+import json
import logging
+import sys
from contextlib import asynccontextmanager
from collections.abc import AsyncGenerator
+from datetime import datetime, timezone
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app.config import settings
+
+# ---- Sentry (conditional) ----
+if settings.sentry_dsn:
+ try:
+ import sentry_sdk
+
+ sentry_sdk.init(
+ dsn=settings.sentry_dsn,
+ environment=settings.environment,
+ traces_sample_rate=0.2,
+ send_default_pii=False,
+ )
+ except Exception:
+ pass # Sentry is optional; don't block startup
from app.database import async_session_factory, engine, Base
from app.services.artifacts_client import ArtifactsClient
from app.services.character_service import CharacterService
@@ -16,8 +33,11 @@ from app.services.game_data_cache import GameDataCacheService
from app.models import game_cache as _game_cache_model # noqa: F401
from app.models import character_snapshot as _snapshot_model # noqa: F401
from app.models import automation as _automation_model # noqa: F401
+from app.models import workflow as _workflow_model # noqa: F401
from app.models import price_history as _price_history_model # noqa: F401
from app.models import event_log as _event_log_model # noqa: F401
+from app.models import app_error as _app_error_model # noqa: F401
+from app.models import pipeline as _pipeline_model # noqa: F401
# Import routers
from app.api.characters import router as characters_router
@@ -30,11 +50,17 @@ from app.api.exchange import router as exchange_router
from app.api.events import router as events_router
from app.api.logs import router as logs_router
from app.api.auth import router as auth_router
+from app.api.workflows import router as workflows_router
+from app.api.errors import router as errors_router
+from app.api.pipelines import router as pipelines_router
# Automation engine
from app.engine.pathfinder import Pathfinder
from app.engine.manager import AutomationManager
+# Error-handling middleware
+from app.middleware.error_handler import ErrorHandlerMiddleware
+
# Exchange service
from app.services.exchange_service import ExchangeService
@@ -45,10 +71,29 @@ from app.websocket.handlers import GameEventHandler
logger = logging.getLogger(__name__)
-logging.basicConfig(
- level=logging.INFO,
- format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
-)
+
+class _JSONFormatter(logging.Formatter):
+    """Structured JSON log formatter for production.
+
+    Emits one JSON object per line (JSON-lines) so log aggregators can
+    parse records without multi-line heuristics.
+    """
+
+    def format(self, record: logging.LogRecord) -> str:
+        log_entry = {
+            "ts": datetime.now(timezone.utc).isoformat(),
+            "level": record.levelname,
+            "logger": record.name,
+            "msg": record.getMessage(),
+        }
+        # exc_info is a (type, value, traceback) tuple; attach the formatted
+        # traceback only when an actual exception value is present.
+        if record.exc_info and record.exc_info[1]:
+            log_entry["exception"] = self.formatException(record.exc_info)
+        # default=str guards against non-JSON-serializable values on the record.
+        return json.dumps(log_entry, default=str)
+
+
+_handler = logging.StreamHandler(sys.stdout)
+if settings.environment != "development":
+ _handler.setFormatter(_JSONFormatter())
+else:
+ _handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s"))
+
+logging.basicConfig(level=logging.INFO, handlers=[_handler])
async def _snapshot_loop(
@@ -218,6 +263,7 @@ app = FastAPI(
lifespan=lifespan,
)
+app.add_middleware(ErrorHandlerMiddleware)
app.add_middleware(
CORSMiddleware,
allow_origins=settings.cors_origins,
@@ -237,6 +283,9 @@ app.include_router(exchange_router)
app.include_router(events_router)
app.include_router(logs_router)
app.include_router(auth_router)
+app.include_router(workflows_router)
+app.include_router(errors_router)
+app.include_router(pipelines_router)
@app.get("/health")
diff --git a/backend/app/middleware/__init__.py b/backend/app/middleware/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/backend/app/middleware/error_handler.py b/backend/app/middleware/error_handler.py
new file mode 100644
index 0000000..ec018e6
--- /dev/null
+++ b/backend/app/middleware/error_handler.py
@@ -0,0 +1,71 @@
+"""Global error-handling middleware.
+
+Sets a per-request correlation ID and catches unhandled exceptions,
+logging them to the database (and Sentry when configured).
+"""
+
+from __future__ import annotations
+
+import logging
+import time
+
+from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
+from starlette.requests import Request
+from starlette.responses import JSONResponse, Response
+
+from app.database import async_session_factory
+from app.services.error_service import hash_token, log_error, new_correlation_id
+
+logger = logging.getLogger(__name__)
+
+
+class ErrorHandlerMiddleware(BaseHTTPMiddleware):
+    """Catch-all middleware: assigns a correlation ID per request and turns
+    unhandled exceptions into a generic 500 response.
+
+    On failure the exception is logged, forwarded to Sentry when available,
+    and persisted to the app_errors table (scoped to the caller via a hash
+    of the X-API-Token header). The raw token is never stored.
+    """
+
+    async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:
+        # new_correlation_id() also binds the ID to the request context var.
+        cid = new_correlation_id()
+        start = time.monotonic()
+
+        try:
+            response = await call_next(request)
+            return response
+        except Exception as exc:
+            duration = time.monotonic() - start
+            logger.exception(
+                "Unhandled exception on %s %s (cid=%s, %.3fs)",
+                request.method,
+                request.url.path,
+                cid,
+                duration,
+            )
+
+            # Try to capture in Sentry (optional dependency; import may fail
+            # and capture must never mask the original error path).
+            try:
+                import sentry_sdk
+
+                sentry_sdk.capture_exception(exc)
+            except Exception:
+                pass
+
+            # Persist to DB; token is hashed so errors can be scoped per user
+            # without storing the credential itself.
+            token = request.headers.get("X-API-Token")
+            await log_error(
+                async_session_factory,
+                severity="error",
+                source="middleware",
+                exc=exc,
+                context={
+                    "method": request.method,
+                    "path": request.url.path,
+                    "query": str(request.url.query),
+                    "duration_s": round(duration, 3),
+                },
+                correlation_id=cid,
+                user_token_hash=hash_token(token) if token else None,
+            )
+
+            # Generic body only — no exception details leak to the client;
+            # the correlation_id lets users report the incident.
+            return JSONResponse(
+                status_code=500,
+                content={
+                    "detail": "Internal server error",
+                    "correlation_id": cid,
+                },
+            )
diff --git a/backend/app/models/__init__.py b/backend/app/models/__init__.py
index 2cdb170..9e8745d 100644
--- a/backend/app/models/__init__.py
+++ b/backend/app/models/__init__.py
@@ -1,15 +1,23 @@
+from app.models.app_error import AppError
from app.models.automation import AutomationConfig, AutomationLog, AutomationRun
from app.models.character_snapshot import CharacterSnapshot
from app.models.event_log import EventLog
from app.models.game_cache import GameDataCache
+from app.models.pipeline import PipelineConfig, PipelineRun
from app.models.price_history import PriceHistory
+from app.models.workflow import WorkflowConfig, WorkflowRun
__all__ = [
+ "AppError",
"AutomationConfig",
"AutomationLog",
"AutomationRun",
"CharacterSnapshot",
"EventLog",
"GameDataCache",
+ "PipelineConfig",
+ "PipelineRun",
"PriceHistory",
+ "WorkflowConfig",
+ "WorkflowRun",
]
diff --git a/backend/app/models/app_error.py b/backend/app/models/app_error.py
new file mode 100644
index 0000000..f487ade
--- /dev/null
+++ b/backend/app/models/app_error.py
@@ -0,0 +1,51 @@
+"""Application error model for tracking errors across the system."""
+
+from sqlalchemy import Boolean, Column, DateTime, Integer, String, Text
+from sqlalchemy.dialects.postgresql import JSON
+from sqlalchemy.sql import func
+
+from app.database import Base
+
+
+class AppError(Base):
+    """ORM model for the app_errors table.
+
+    One row per recorded error, written by app.services.error_service.
+    Errors are scoped per user via user_token_hash (SHA-256 of the API
+    token) rather than the token itself.
+    """
+
+    __tablename__ = "app_errors"
+
+    id = Column(Integer, primary_key=True, autoincrement=True)
+    severity = Column(
+        String(20),
+        nullable=False,
+        default="error",
+        comment="error | warning | critical",
+    )
+    source = Column(
+        String(50),
+        nullable=False,
+        comment="backend | frontend | automation | middleware",
+    )
+    error_type = Column(
+        String(200),
+        nullable=False,
+        comment="Exception class name or error category",
+    )
+    message = Column(Text, nullable=False)
+    stack_trace = Column(Text, nullable=True)
+    context = Column(JSON, nullable=True, comment="Arbitrary JSON context")
+    user_token_hash = Column(
+        String(64),
+        nullable=True,
+        index=True,
+        comment="SHA-256 hash of the user API token (for scoping errors per user)",
+    )
+    correlation_id = Column(
+        String(36),
+        nullable=True,
+        index=True,
+        comment="Request correlation ID (UUID)",
+    )
+    # resolved carries both a Python-side default and a server_default so
+    # rows inserted outside the ORM still get a value.
+    resolved = Column(Boolean, nullable=False, default=False, server_default="false")
+    created_at = Column(
+        DateTime(timezone=True),
+        server_default=func.now(),
+        nullable=False,
+        index=True,
+    )
diff --git a/backend/app/models/pipeline.py b/backend/app/models/pipeline.py
new file mode 100644
index 0000000..6f15b6f
--- /dev/null
+++ b/backend/app/models/pipeline.py
@@ -0,0 +1,98 @@
+from datetime import datetime
+
+from sqlalchemy import Boolean, DateTime, ForeignKey, Integer, JSON, String, Text, func
+from sqlalchemy.orm import Mapped, mapped_column, relationship
+
+from app.database import Base
+
+
+class PipelineConfig(Base):
+ __tablename__ = "pipeline_configs"
+
+ id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
+ name: Mapped[str] = mapped_column(String(100), nullable=False)
+ description: Mapped[str] = mapped_column(Text, nullable=False, default="")
+ stages: Mapped[list] = mapped_column(
+ JSON,
+ nullable=False,
+ default=list,
+ comment="JSON array of pipeline stages, each with id, name, character_steps[]",
+ )
+ loop: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False)
+ max_loops: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
+ enabled: Mapped[bool] = mapped_column(Boolean, default=True, nullable=False)
+ created_at: Mapped[datetime] = mapped_column(
+ DateTime(timezone=True),
+ server_default=func.now(),
+ nullable=False,
+ )
+ updated_at: Mapped[datetime] = mapped_column(
+ DateTime(timezone=True),
+ server_default=func.now(),
+ onupdate=func.now(),
+ nullable=False,
+ )
+
+ runs: Mapped[list["PipelineRun"]] = relationship(
+ back_populates="pipeline",
+ cascade="all, delete-orphan",
+ order_by="PipelineRun.started_at.desc()",
+ )
+
+ def __repr__(self) -> str:
+ return (
+ f""
+ )
+
+
+class PipelineRun(Base):
+ __tablename__ = "pipeline_runs"
+
+ id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
+ pipeline_id: Mapped[int] = mapped_column(
+ Integer,
+ ForeignKey("pipeline_configs.id", ondelete="CASCADE"),
+ nullable=False,
+ index=True,
+ )
+ status: Mapped[str] = mapped_column(
+ String(20),
+ nullable=False,
+ default="running",
+ comment="Status: running, paused, stopped, completed, error",
+ )
+ current_stage_index: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
+ current_stage_id: Mapped[str] = mapped_column(String(100), nullable=False, default="")
+ loop_count: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
+ total_actions_count: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
+ character_states: Mapped[dict] = mapped_column(
+ JSON,
+ nullable=False,
+ default=dict,
+ comment="Per-character state: {char_name: {status, step_id, actions_count, error}}",
+ )
+ stage_history: Mapped[list] = mapped_column(
+ JSON,
+ nullable=False,
+ default=list,
+ comment="JSON array of completed stage records",
+ )
+ started_at: Mapped[datetime] = mapped_column(
+ DateTime(timezone=True),
+ server_default=func.now(),
+ nullable=False,
+ )
+ stopped_at: Mapped[datetime | None] = mapped_column(
+ DateTime(timezone=True),
+ nullable=True,
+ )
+ error_message: Mapped[str | None] = mapped_column(Text, nullable=True)
+
+ pipeline: Mapped["PipelineConfig"] = relationship(back_populates="runs")
+
+ def __repr__(self) -> str:
+ return (
+ f""
+ )
diff --git a/backend/app/models/workflow.py b/backend/app/models/workflow.py
new file mode 100644
index 0000000..365b810
--- /dev/null
+++ b/backend/app/models/workflow.py
@@ -0,0 +1,94 @@
+from datetime import datetime
+
+from sqlalchemy import Boolean, DateTime, ForeignKey, Integer, JSON, String, Text, func
+from sqlalchemy.orm import Mapped, mapped_column, relationship
+
+from app.database import Base
+
+
+class WorkflowConfig(Base):
+ __tablename__ = "workflow_configs"
+
+ id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
+ name: Mapped[str] = mapped_column(String(100), nullable=False)
+ character_name: Mapped[str] = mapped_column(String(100), nullable=False, index=True)
+ description: Mapped[str] = mapped_column(Text, nullable=False, default="")
+ steps: Mapped[list] = mapped_column(
+ JSON,
+ nullable=False,
+ default=list,
+ comment="JSON array of workflow steps, each with id, name, strategy_type, config, transition",
+ )
+ loop: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False)
+ max_loops: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
+ enabled: Mapped[bool] = mapped_column(Boolean, default=True, nullable=False)
+ created_at: Mapped[datetime] = mapped_column(
+ DateTime(timezone=True),
+ server_default=func.now(),
+ nullable=False,
+ )
+ updated_at: Mapped[datetime] = mapped_column(
+ DateTime(timezone=True),
+ server_default=func.now(),
+ onupdate=func.now(),
+ nullable=False,
+ )
+
+ runs: Mapped[list["WorkflowRun"]] = relationship(
+ back_populates="workflow",
+ cascade="all, delete-orphan",
+ order_by="WorkflowRun.started_at.desc()",
+ )
+
+ def __repr__(self) -> str:
+ return (
+ f""
+ )
+
+
+class WorkflowRun(Base):
+ __tablename__ = "workflow_runs"
+
+ id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
+ workflow_id: Mapped[int] = mapped_column(
+ Integer,
+ ForeignKey("workflow_configs.id", ondelete="CASCADE"),
+ nullable=False,
+ index=True,
+ )
+ status: Mapped[str] = mapped_column(
+ String(20),
+ nullable=False,
+ default="running",
+ comment="Status: running, paused, stopped, completed, error",
+ )
+ current_step_index: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
+ current_step_id: Mapped[str] = mapped_column(String(100), nullable=False, default="")
+ loop_count: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
+ total_actions_count: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
+ step_actions_count: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
+ started_at: Mapped[datetime] = mapped_column(
+ DateTime(timezone=True),
+ server_default=func.now(),
+ nullable=False,
+ )
+ stopped_at: Mapped[datetime | None] = mapped_column(
+ DateTime(timezone=True),
+ nullable=True,
+ )
+ error_message: Mapped[str | None] = mapped_column(Text, nullable=True)
+ step_history: Mapped[list] = mapped_column(
+ JSON,
+ nullable=False,
+ default=list,
+ comment="JSON array of completed step records",
+ )
+
+ workflow: Mapped["WorkflowConfig"] = relationship(back_populates="runs")
+
+ def __repr__(self) -> str:
+ return (
+ f""
+ )
diff --git a/backend/app/schemas/errors.py b/backend/app/schemas/errors.py
new file mode 100644
index 0000000..9b49dd2
--- /dev/null
+++ b/backend/app/schemas/errors.py
@@ -0,0 +1,44 @@
+"""Pydantic schemas for the errors API."""
+
+from datetime import datetime
+from typing import Any
+
+from pydantic import BaseModel, Field
+
+
+class AppErrorResponse(BaseModel):
+    """Serialized app_errors row returned by the errors API."""
+
+    id: int
+    severity: str
+    source: str
+    error_type: str
+    message: str
+    stack_trace: str | None = None
+    context: dict[str, Any] | None = None
+    correlation_id: str | None = None
+    resolved: bool
+    created_at: datetime
+
+    # Allow construction directly from the AppError ORM object.
+    model_config = {"from_attributes": True}
+
+
+class AppErrorListResponse(BaseModel):
+    """Paginated list of errors plus paging metadata."""
+
+    errors: list[AppErrorResponse]
+    total: int
+    page: int
+    pages: int
+
+
+class AppErrorStats(BaseModel):
+    """Aggregate error counts (totals and severity/source breakdowns)."""
+
+    total: int = 0
+    unresolved: int = 0
+    last_hour: int = 0
+    by_severity: dict[str, int] = Field(default_factory=dict)
+    by_source: dict[str, int] = Field(default_factory=dict)
+
+
+class FrontendErrorReport(BaseModel):
+    """Error payload submitted by the frontend error reporter."""
+
+    error_type: str = "FrontendError"
+    message: str
+    stack_trace: str | None = None
+    context: dict[str, Any] | None = None
+    severity: str = "error"
diff --git a/backend/app/schemas/pipeline.py b/backend/app/schemas/pipeline.py
new file mode 100644
index 0000000..dae351f
--- /dev/null
+++ b/backend/app/schemas/pipeline.py
@@ -0,0 +1,127 @@
+from datetime import datetime
+
+from pydantic import BaseModel, Field
+
+from app.schemas.workflow import TransitionConditionSchema
+
+
+# ---------------------------------------------------------------------------
+# Character step within a stage
+# ---------------------------------------------------------------------------
+
+
+class CharacterStepSchema(BaseModel):
+    """A single character's work within a pipeline stage."""
+
+    id: str = Field(..., description="Unique step identifier (e.g. 'cs_1a')")
+    character_name: str = Field(..., min_length=1, max_length=100)
+    strategy_type: str = Field(
+        ...,
+        description="Strategy type: combat, gathering, crafting, trading, task, leveling",
+    )
+    config: dict = Field(default_factory=dict, description="Strategy-specific configuration")
+    # NOTE(review): presumably None means "done when the strategy itself
+    # completes" — confirm against the pipeline engine's handling.
+    transition: TransitionConditionSchema | None = Field(
+        default=None,
+        description="Condition for this character-step to be considered done",
+    )
+
+
+# ---------------------------------------------------------------------------
+# Pipeline stage
+# ---------------------------------------------------------------------------
+
+
+class PipelineStageSchema(BaseModel):
+    """A stage in the pipeline — character steps within it run in parallel."""
+
+    id: str = Field(..., description="Unique stage identifier (e.g. 'stage_1')")
+    name: str = Field(..., min_length=1, max_length=100)
+    # min_length=1: a stage must contain at least one character step.
+    character_steps: list[CharacterStepSchema] = Field(..., min_length=1)
+
+
+# ---------------------------------------------------------------------------
+# Request schemas
+# ---------------------------------------------------------------------------
+
+
+class PipelineConfigCreate(BaseModel):
+    """Payload to create a pipeline config (at least one stage required)."""
+
+    name: str = Field(..., min_length=1, max_length=100)
+    description: str = Field(default="")
+    stages: list[PipelineStageSchema] = Field(..., min_length=1)
+    loop: bool = Field(default=False)
+    # 0 = unlimited loops (only meaningful when loop=True).
+    max_loops: int = Field(default=0, ge=0)
+
+
+class PipelineConfigUpdate(BaseModel):
+    """Partial update payload; None fields presumably mean "leave unchanged"
+    — confirm the endpoint applies it with exclude_unset/None filtering.
+    """
+
+    name: str | None = Field(default=None, min_length=1, max_length=100)
+    description: str | None = None
+    stages: list[PipelineStageSchema] | None = Field(default=None, min_length=1)
+    loop: bool | None = None
+    max_loops: int | None = Field(default=None, ge=0)
+    enabled: bool | None = None
+
+
+# ---------------------------------------------------------------------------
+# Response schemas
+# ---------------------------------------------------------------------------
+
+
+class PipelineConfigResponse(BaseModel):
+    """Serialized PipelineConfig row."""
+
+    id: int
+    name: str
+    description: str
+    stages: list[dict]
+    loop: bool
+    max_loops: int
+    enabled: bool
+    created_at: datetime
+    updated_at: datetime
+
+    # Allow construction directly from the ORM object.
+    model_config = {"from_attributes": True}
+
+
+class PipelineRunResponse(BaseModel):
+    """Serialized PipelineRun row (progress counters + per-character state)."""
+
+    id: int
+    pipeline_id: int
+    status: str
+    current_stage_index: int
+    current_stage_id: str
+    loop_count: int
+    total_actions_count: int
+    character_states: dict
+    stage_history: list[dict] = Field(default_factory=list)
+    started_at: datetime
+    stopped_at: datetime | None = None
+    error_message: str | None = None
+
+    model_config = {"from_attributes": True}
+
+
+class CharacterStateResponse(BaseModel):
+    """Status of a single character within an active pipeline."""
+
+    character_name: str
+    status: str  # running, completed, error, idle
+    step_id: str = ""
+    actions_count: int = 0
+    strategy_state: str = ""
+    error: str | None = None
+
+
+class PipelineStatusResponse(BaseModel):
+    """Live status snapshot of a pipeline (from the in-memory coordinator)."""
+
+    pipeline_id: int
+    status: str
+    run_id: int | None = None
+    current_stage_index: int = 0
+    current_stage_id: str = ""
+    total_stages: int = 0
+    loop_count: int = 0
+    total_actions_count: int = 0
+    character_states: list[CharacterStateResponse] = Field(default_factory=list)
+
+
+class PipelineConfigDetailResponse(BaseModel):
+    """Pipeline config with its run history."""
+
+    config: PipelineConfigResponse
+    runs: list[PipelineRunResponse] = Field(default_factory=list)
diff --git a/backend/app/schemas/workflow.py b/backend/app/schemas/workflow.py
new file mode 100644
index 0000000..9fdef51
--- /dev/null
+++ b/backend/app/schemas/workflow.py
@@ -0,0 +1,146 @@
+from datetime import datetime
+
+from pydantic import BaseModel, Field
+
+
+# ---------------------------------------------------------------------------
+# Transition conditions
+# ---------------------------------------------------------------------------
+
+
+class TransitionConditionSchema(BaseModel):
+    """Defines when a workflow step should transition to the next step.
+
+    Only a subset of fields applies to each condition type: item_code for
+    the *_item_count types, skill for skill_level, seconds for timer, and
+    operator/value for the numeric comparisons.
+    """
+
+    type: str = Field(
+        ...,
+        description=(
+            "Condition type: strategy_complete, loops_completed, inventory_full, "
+            "inventory_item_count, bank_item_count, skill_level, gold_amount, "
+            "actions_count, timer"
+        ),
+    )
+    operator: str = Field(
+        default=">=",
+        description="Comparison operator: >=, <=, ==, >, <",
+    )
+    value: int = Field(
+        default=0,
+        description="Target value for the condition",
+    )
+    item_code: str = Field(
+        default="",
+        description="Item code (for inventory_item_count, bank_item_count)",
+    )
+    skill: str = Field(
+        default="",
+        description="Skill name (for skill_level condition)",
+    )
+    seconds: int = Field(
+        default=0,
+        ge=0,
+        description="Duration in seconds (for timer condition)",
+    )
+
+
+# ---------------------------------------------------------------------------
+# Workflow steps
+# ---------------------------------------------------------------------------
+
+
+class WorkflowStepSchema(BaseModel):
+    """A single step within a workflow pipeline."""
+
+    id: str = Field(..., description="Unique step identifier (e.g. 'step_1')")
+    name: str = Field(..., min_length=1, max_length=100, description="Human-readable step name")
+    strategy_type: str = Field(
+        ...,
+        description="Strategy type: combat, gathering, crafting, trading, task, leveling",
+    )
+    config: dict = Field(default_factory=dict, description="Strategy-specific configuration")
+    transition: TransitionConditionSchema | None = Field(
+        default=None,
+        description="Condition to advance to the next step (None = run until strategy completes)",
+    )
+
+
+# ---------------------------------------------------------------------------
+# Request schemas
+# ---------------------------------------------------------------------------
+
+
+class WorkflowConfigCreate(BaseModel):
+    """Payload to create a workflow config (at least one step required)."""
+
+    name: str = Field(..., min_length=1, max_length=100)
+    character_name: str = Field(..., min_length=1, max_length=100)
+    description: str = Field(default="")
+    steps: list[WorkflowStepSchema] = Field(..., min_length=1)
+    loop: bool = Field(default=False)
+    # 0 = unlimited loops (only meaningful when loop=True).
+    max_loops: int = Field(default=0, ge=0)
+
+
+class WorkflowConfigUpdate(BaseModel):
+    """Partial update payload; None fields presumably mean "leave unchanged"
+    — confirm the endpoint applies it with exclude_unset/None filtering.
+    """
+
+    name: str | None = Field(default=None, min_length=1, max_length=100)
+    description: str | None = None
+    steps: list[WorkflowStepSchema] | None = Field(default=None, min_length=1)
+    loop: bool | None = None
+    max_loops: int | None = Field(default=None, ge=0)
+    enabled: bool | None = None
+
+
+# ---------------------------------------------------------------------------
+# Response schemas
+# ---------------------------------------------------------------------------
+
+
+class WorkflowConfigResponse(BaseModel):
+    """Serialized WorkflowConfig row."""
+
+    id: int
+    name: str
+    character_name: str
+    description: str
+    steps: list[dict]
+    loop: bool
+    max_loops: int
+    enabled: bool
+    created_at: datetime
+    updated_at: datetime
+
+    # Allow construction directly from the ORM object.
+    model_config = {"from_attributes": True}
+
+
+class WorkflowRunResponse(BaseModel):
+    """Serialized WorkflowRun row (status plus progress counters)."""
+
+    id: int
+    workflow_id: int
+    status: str
+    current_step_index: int
+    current_step_id: str
+    loop_count: int
+    total_actions_count: int
+    step_actions_count: int
+    started_at: datetime
+    stopped_at: datetime | None = None
+    error_message: str | None = None
+    step_history: list[dict] = Field(default_factory=list)
+
+    model_config = {"from_attributes": True}
+
+
+class WorkflowStatusResponse(BaseModel):
+    """Live status snapshot of a workflow (from the in-memory runner)."""
+
+    workflow_id: int
+    character_name: str
+    status: str
+    run_id: int | None = None
+    current_step_index: int = 0
+    current_step_id: str = ""
+    total_steps: int = 0
+    loop_count: int = 0
+    total_actions_count: int = 0
+    step_actions_count: int = 0
+    strategy_state: str = ""
+
+    model_config = {"from_attributes": True}
+
+
+class WorkflowConfigDetailResponse(BaseModel):
+    """Workflow config with its run history."""
+
+    config: WorkflowConfigResponse
+    runs: list[WorkflowRunResponse] = Field(default_factory=list)
diff --git a/backend/app/services/artifacts_client.py b/backend/app/services/artifacts_client.py
index 4a02a94..facc6a6 100644
--- a/backend/app/services/artifacts_client.py
+++ b/backend/app/services/artifacts_client.py
@@ -53,6 +53,9 @@ class ArtifactsClient:
"""Async HTTP client for the Artifacts MMO API.
Handles authentication, rate limiting, pagination, and retry logic.
+ Supports per-request token overrides for multi-user scenarios via
+ the ``with_token()`` method which creates a lightweight clone that
+ shares the underlying connection pool.
"""
MAX_RETRIES: int = 3
@@ -63,7 +66,6 @@ class ArtifactsClient:
self._client = httpx.AsyncClient(
base_url=settings.artifacts_api_url,
headers={
- "Authorization": f"Bearer {self._token}",
"Content-Type": "application/json",
"Accept": "application/json",
},
@@ -78,6 +80,21 @@ class ArtifactsClient:
window_seconds=settings.data_rate_window,
)
+ # -- Multi-user support ------------------------------------------------
+
+    def with_token(self, token: str) -> "ArtifactsClient":
+        """Return a lightweight clone that uses *token* for requests.
+
+        The clone shares the httpx connection pool and rate limiters with the
+        original instance so there is no overhead in creating one per request.
+
+        NOTE(review): object.__new__ bypasses __init__, so the clone carries
+        only the attributes copied below — if ArtifactsClient gains new
+        instance attributes, this method must copy them too or clone methods
+        will raise AttributeError.
+        """
+        clone = object.__new__(ArtifactsClient)
+        clone._token = token
+        clone._client = self._client  # shared connection pool
+        clone._action_limiter = self._action_limiter
+        clone._data_limiter = self._data_limiter
+        return clone
+
@property
def has_token(self) -> bool:
return bool(self._token)
@@ -91,14 +108,12 @@ class ArtifactsClient:
return "user"
def set_token(self, token: str) -> None:
- """Update the API token at runtime."""
+ """Update the default API token at runtime (used by background tasks)."""
self._token = token
- self._client.headers["Authorization"] = f"Bearer {token}"
def clear_token(self) -> None:
"""Revert to the env token (or empty if none)."""
self._token = settings.artifacts_token
- self._client.headers["Authorization"] = f"Bearer {self._token}"
# ------------------------------------------------------------------
# Low-level request helpers
@@ -115,6 +130,10 @@ class ArtifactsClient:
) -> dict[str, Any]:
last_exc: Exception | None = None
+ # Send Authorization per-request so clones created by with_token()
+ # use their own token without affecting other concurrent requests.
+ auth_headers = {"Authorization": f"Bearer {self._token}"} if self._token else {}
+
for attempt in range(1, self.MAX_RETRIES + 1):
await limiter.acquire()
try:
@@ -123,6 +142,7 @@ class ArtifactsClient:
path,
json=json_body,
params=params,
+ headers=auth_headers,
)
if response.status_code == 429:
@@ -136,6 +156,27 @@ class ArtifactsClient:
await asyncio.sleep(retry_after)
continue
+ # 498 = character in cooldown – wait and retry
+ if response.status_code == 498:
+ try:
+ body = response.json()
+ cooldown = body.get("data", {}).get("cooldown", {})
+ wait_seconds = cooldown.get("total_seconds", 5)
+ except Exception:
+ wait_seconds = 5
+ logger.info(
+ "Character cooldown on %s %s, waiting %.1fs (attempt %d/%d)",
+ method,
+ path,
+ wait_seconds,
+ attempt,
+ self.MAX_RETRIES,
+ )
+ await asyncio.sleep(wait_seconds)
+ if attempt < self.MAX_RETRIES:
+ continue
+ response.raise_for_status()
+
if response.status_code >= 500:
logger.warning(
"Server error %d on %s %s (attempt %d/%d)",
@@ -428,11 +469,11 @@ class ArtifactsClient:
return result.get("data", {})
async def ge_buy(
- self, name: str, code: str, quantity: int, price: int
+ self, name: str, order_id: str, quantity: int
) -> dict[str, Any]:
result = await self._post_action(
f"/my/{name}/action/grandexchange/buy",
- json_body={"code": code, "quantity": quantity, "price": price},
+ json_body={"id": order_id, "quantity": quantity},
)
return result.get("data", {})
@@ -440,20 +481,29 @@ class ArtifactsClient:
self, name: str, code: str, quantity: int, price: int
) -> dict[str, Any]:
result = await self._post_action(
- f"/my/{name}/action/grandexchange/sell",
+ f"/my/{name}/action/grandexchange/create-sell-order",
json_body={"code": code, "quantity": quantity, "price": price},
)
return result.get("data", {})
- async def ge_buy_order(
+ async def ge_create_buy_order(
self, name: str, code: str, quantity: int, price: int
) -> dict[str, Any]:
result = await self._post_action(
- f"/my/{name}/action/grandexchange/buy",
+ f"/my/{name}/action/grandexchange/create-buy-order",
json_body={"code": code, "quantity": quantity, "price": price},
)
return result.get("data", {})
+ async def ge_fill_buy_order(
+ self, name: str, order_id: str, quantity: int
+ ) -> dict[str, Any]:
+ result = await self._post_action(
+ f"/my/{name}/action/grandexchange/fill",
+ json_body={"id": order_id, "quantity": quantity},
+ )
+ return result.get("data", {})
+
async def ge_cancel(self, name: str, order_id: str) -> dict[str, Any]:
result = await self._post_action(
f"/my/{name}/action/grandexchange/cancel",
@@ -500,6 +550,27 @@ class ArtifactsClient:
)
return result.get("data", {})
+ # ------------------------------------------------------------------
+ # Data endpoints - Action Logs
+ # ------------------------------------------------------------------
+
+    async def get_logs(
+        self,
+        page: int = 1,
+        size: int = 100,
+    ) -> dict[str, Any]:
+        """Get recent action logs for all characters (last 5000 actions).
+
+        Returns the raw paginated API payload (data + paging metadata).
+        """
+        return await self._get("/my/logs", params={"page": page, "size": size})
+
+    async def get_character_logs(
+        self,
+        name: str,
+        page: int = 1,
+        size: int = 100,
+    ) -> dict[str, Any]:
+        """Get recent action logs for a specific character.
+
+        ``name`` is interpolated into the path; returns the raw paginated
+        API payload.
+        """
+        return await self._get(f"/my/logs/{name}", params={"page": page, "size": size})
+
# ------------------------------------------------------------------
# Lifecycle
# ------------------------------------------------------------------
diff --git a/backend/app/services/error_service.py b/backend/app/services/error_service.py
new file mode 100644
index 0000000..919bbb5
--- /dev/null
+++ b/backend/app/services/error_service.py
@@ -0,0 +1,77 @@
+"""Error logging service - writes errors to the database and optionally to Sentry."""
+
+from __future__ import annotations
+
+import hashlib
+import logging
+import traceback
+import uuid
+from contextvars import ContextVar
+from typing import Any
+
+from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
+
+from app.models.app_error import AppError
+
+logger = logging.getLogger(__name__)
+
+# Per-request correlation ID
+correlation_id_var: ContextVar[str | None] = ContextVar("correlation_id", default=None)
+
+
+def hash_token(token: str) -> str:
+    """Return a stable SHA-256 hex digest for a user API token.
+
+    Stored in app_errors.user_token_hash so errors can be scoped per user
+    without persisting the raw credential.
+    """
+    return hashlib.sha256(token.encode()).hexdigest()
+
+
+def new_correlation_id() -> str:
+    """Generate a short correlation ID and bind it to the current context.
+
+    NOTE(review): this is a 12-character UUID4 hex prefix, not a full UUID,
+    while the app_errors.correlation_id column comment says "UUID" — confirm
+    which is intended.
+    """
+    cid = uuid.uuid4().hex[:12]
+    correlation_id_var.set(cid)
+    return cid
+
+
+async def log_error(
+    db_factory: async_sessionmaker[AsyncSession],
+    *,
+    severity: str = "error",
+    source: str = "backend",
+    error_type: str = "UnknownError",
+    message: str = "",
+    exc: BaseException | None = None,
+    context: dict[str, Any] | None = None,
+    correlation_id: str | None = None,
+    user_token_hash: str | None = None,
+) -> AppError | None:
+    """Persist an error record to the database.
+
+    When ``exc`` is given, its traceback is captured and, unless the caller
+    supplied explicit values, ``error_type``/``message`` are derived from it.
+    Falls back to the context-var correlation ID when none is passed.
+    Message and stack trace are truncated (4000 / 10000 chars) to bound row
+    size. Best-effort: a failed DB write is logged and returns None.
+
+    Returns the created AppError, or None if the DB write itself fails.
+    """
+    stack_trace = None
+    if exc is not None:
+        stack_trace = "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
+        # Only fill in type/message from the exception when the caller left
+        # them at their defaults.
+        if not error_type or error_type == "UnknownError":
+            error_type = type(exc).__qualname__
+        if not message:
+            message = str(exc)
+
+    cid = correlation_id or correlation_id_var.get()
+
+    try:
+        async with db_factory() as db:
+            record = AppError(
+                severity=severity,
+                source=source,
+                error_type=error_type,
+                message=message[:4000],
+                stack_trace=stack_trace[:10000] if stack_trace else None,
+                context=context,
+                correlation_id=cid,
+                user_token_hash=user_token_hash,
+            )
+            db.add(record)
+            await db.commit()
+            await db.refresh(record)
+            return record
+    except Exception:
+        logger.exception("Failed to persist error record to database")
+        return None
diff --git a/backend/pyproject.toml b/backend/pyproject.toml
index eca0b6b..0da76a6 100644
--- a/backend/pyproject.toml
+++ b/backend/pyproject.toml
@@ -13,6 +13,7 @@ dependencies = [
"pydantic>=2.10.0",
"pydantic-settings>=2.7.0",
"websockets>=14.0",
+ "sentry-sdk[fastapi]>=2.0.0",
]
[project.optional-dependencies]
diff --git a/backend/tests/test_base_strategy.py b/backend/tests/test_base_strategy.py
index 9bd9f7e..7b73b19 100644
--- a/backend/tests/test_base_strategy.py
+++ b/backend/tests/test_base_strategy.py
@@ -97,8 +97,9 @@ class TestActionType:
expected = {
"move", "fight", "gather", "rest", "equip", "unequip",
"use_item", "deposit_item", "withdraw_item", "craft", "recycle",
- "ge_buy", "ge_sell", "ge_cancel",
- "task_new", "task_trade", "task_complete", "task_exchange",
+ "ge_buy", "ge_create_buy", "ge_sell", "ge_fill", "ge_cancel",
+ "task_new", "task_trade", "task_complete", "task_exchange", "task_cancel",
+ "deposit_gold", "withdraw_gold", "npc_buy", "npc_sell",
"idle", "complete",
}
actual = {at.value for at in ActionType}
diff --git a/backend/tests/test_crafting_strategy.py b/backend/tests/test_crafting_strategy.py
index c54d496..7fecefd 100644
--- a/backend/tests/test_crafting_strategy.py
+++ b/backend/tests/test_crafting_strategy.py
@@ -250,6 +250,30 @@ class TestCraftingStrategyDeposit:
@pytest.mark.asyncio
async def test_deposit_items_at_bank(self, make_character, pathfinder_with_maps):
+ pf = pathfinder_with_maps([
+ (5, 5, "workshop", "weaponcrafting"),
+ (10, 0, "bank", "bank"),
+ ])
+ item = _make_craftable_item(materials=[("iron_ore", 3)])
+ strategy = CraftingStrategy(
+ {"item_code": "iron_sword", "quantity": 5}, # quantity > crafted
+ pf,
+ items_data=[item],
+ )
+ strategy._state = strategy._state.__class__("deposit")
+ strategy._crafted_count = 2 # Still more to craft
+
+ char = make_character(
+ x=10, y=0,
+ inventory=[InventorySlot(slot=0, code="iron_sword", quantity=2)],
+ )
+
+ plan = await strategy.next_action(char)
+ assert plan.action_type == ActionType.DEPOSIT_ITEM
+ assert plan.params["code"] == "iron_sword"
+
+ @pytest.mark.asyncio
+ async def test_complete_after_all_deposited(self, make_character, pathfinder_with_maps):
pf = pathfinder_with_maps([
(5, 5, "workshop", "weaponcrafting"),
(10, 0, "bank", "bank"),
@@ -261,7 +285,7 @@ class TestCraftingStrategyDeposit:
items_data=[item],
)
strategy._state = strategy._state.__class__("deposit")
- strategy._crafted_count = 1
+ strategy._crafted_count = 1 # Already crafted target quantity
char = make_character(
x=10, y=0,
@@ -269,8 +293,8 @@ class TestCraftingStrategyDeposit:
)
plan = await strategy.next_action(char)
- assert plan.action_type == ActionType.DEPOSIT_ITEM
- assert plan.params["code"] == "iron_sword"
+ # With crafted_count >= quantity, the top-level check returns COMPLETE
+ assert plan.action_type == ActionType.COMPLETE
class TestCraftingStrategyNoLocations:
diff --git a/backend/tests/test_leveling_strategy.py b/backend/tests/test_leveling_strategy.py
index 4b75454..56b5a6f 100644
--- a/backend/tests/test_leveling_strategy.py
+++ b/backend/tests/test_leveling_strategy.py
@@ -91,13 +91,34 @@ class TestLevelingStrategyEvaluation:
assert plan.action_type == ActionType.COMPLETE
@pytest.mark.asyncio
- async def test_complete_when_no_skill_found(self, make_character, pathfinder_with_maps):
+ async def test_complete_when_target_skill_at_max(self, make_character, pathfinder_with_maps):
+ """When the specific target_skill has reached max_level, strategy completes."""
+ pf = pathfinder_with_maps([
+ (3, 3, "resource", "copper_rocks"),
+ (10, 0, "bank", "bank"),
+ ])
+ resources = [ResourceSchema(name="Copper Rocks", code="copper_rocks", skill="mining", level=1)]
+ strategy = LevelingStrategy(
+ {"target_skill": "mining", "max_level": 10},
+ pf,
+ resources_data=resources,
+ )
+ char = make_character(x=0, y=0, mining_level=10)
+
+ plan = await strategy.next_action(char)
+ assert plan.action_type == ActionType.COMPLETE
+
+ @pytest.mark.asyncio
+ async def test_idle_when_all_skills_above_max_level(self, make_character, pathfinder_with_maps):
+ """When auto-picking skills but all are above max_level, falls through to IDLE.
+
+ NOTE: Current implementation only excludes one skill before proceeding,
+ so it may IDLE rather than COMPLETE when all skills exceed max_level.
+ """
pf = pathfinder_with_maps([
(10, 0, "bank", "bank"),
])
- strategy = LevelingStrategy({}, pf)
- # All skills at max_level with exclude set
- strategy._max_level = 5
+ strategy = LevelingStrategy({"max_level": 5}, pf)
char = make_character(
x=0, y=0,
mining_level=999,
@@ -106,8 +127,7 @@ class TestLevelingStrategyEvaluation:
)
plan = await strategy.next_action(char)
- # Should complete since all skills are above max_level
- assert plan.action_type == ActionType.COMPLETE
+ assert plan.action_type == ActionType.IDLE
class TestLevelingStrategyGathering:
@@ -163,20 +183,24 @@ class TestLevelingStrategyCombat:
"""Tests for combat leveling."""
@pytest.mark.asyncio
- async def test_fight_for_combat_leveling(self, make_character, pathfinder_with_maps):
+ async def test_move_to_monster_for_combat_leveling(self, make_character, pathfinder_with_maps):
+ """Combat leveling moves to a monster tile for fighting."""
pf = pathfinder_with_maps([
(3, 3, "monster", "chicken"),
(10, 0, "bank", "bank"),
])
strategy = LevelingStrategy({"target_skill": "combat"}, pf)
char = make_character(
- x=3, y=3,
+ x=0, y=0,
hp=100, max_hp=100,
level=5,
+ inventory_max_items=20,
)
plan = await strategy.next_action(char)
- assert plan.action_type == ActionType.FIGHT
+ # _choose_combat_target finds nearest monster via find_nearest_by_type
+ assert plan.action_type == ActionType.MOVE
+ assert plan.params == {"x": 3, "y": 3}
@pytest.mark.asyncio
async def test_heal_during_combat(self, make_character, pathfinder_with_maps):
diff --git a/frontend/next.config.ts b/frontend/next.config.ts
index f978856..bff6103 100644
--- a/frontend/next.config.ts
+++ b/frontend/next.config.ts
@@ -1,4 +1,5 @@
import type { NextConfig } from "next";
+import { withSentryConfig } from "@sentry/nextjs";
const nextConfig: NextConfig = {
turbopack: {
@@ -15,4 +16,7 @@ const nextConfig: NextConfig = {
},
};
-export default nextConfig;
+export default withSentryConfig(nextConfig, {
+ silent: true,
+ disableLogger: true,
+});
diff --git a/frontend/package.json b/frontend/package.json
index f4fd256..8554792 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -10,6 +10,7 @@
"type-check": "tsc --noEmit"
},
"dependencies": {
+ "@sentry/nextjs": "^9.47.1",
"@tanstack/react-query": "^5.90.21",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
diff --git a/frontend/pnpm-lock.yaml b/frontend/pnpm-lock.yaml
index 4db3e68..4122f3d 100644
--- a/frontend/pnpm-lock.yaml
+++ b/frontend/pnpm-lock.yaml
@@ -8,6 +8,9 @@ importers:
.:
dependencies:
+ '@sentry/nextjs':
+ specifier: ^9.47.1
+ version: 9.47.1(@opentelemetry/context-async-hooks@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/core@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.30.1(@opentelemetry/api@1.9.0))(next@16.1.6(@babel/core@7.29.0)(@opentelemetry/api@1.9.0)(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react@19.2.3)(webpack@5.105.3)
'@tanstack/react-query':
specifier: ^5.90.21
version: 5.90.21(react@19.2.3)
@@ -22,7 +25,7 @@ importers:
version: 0.575.0(react@19.2.3)
next:
specifier: 16.1.6
- version: 16.1.6(@babel/core@7.29.0)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ version: 16.1.6(@babel/core@7.29.0)(@opentelemetry/api@1.9.0)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
radix-ui:
specifier: ^1.4.3
version: 1.4.3(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
@@ -488,6 +491,9 @@ packages:
resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==}
engines: {node: '>=6.0.0'}
+ '@jridgewell/source-map@0.3.11':
+ resolution: {integrity: sha512-ZMp1V8ZFcPG5dIWnQLr3NSI1MiCU7UETdS/A0G8V/XWHvJv3ZsFqutJn1Y5RPmAPX6F3BiE397OqveU/9NCuIA==}
+
'@jridgewell/sourcemap-codec@1.5.5':
resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==}
@@ -602,6 +608,199 @@ packages:
'@open-draft/until@2.1.0':
resolution: {integrity: sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==}
+ '@opentelemetry/api-logs@0.57.2':
+ resolution: {integrity: sha512-uIX52NnTM0iBh84MShlpouI7UKqkZ7MrUszTmaypHBu4r7NofznSnQRfJ+uUeDtQDj6w8eFGg5KBLDAwAPz1+A==}
+ engines: {node: '>=14'}
+
+ '@opentelemetry/api@1.9.0':
+ resolution: {integrity: sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==}
+ engines: {node: '>=8.0.0'}
+
+ '@opentelemetry/context-async-hooks@1.30.1':
+ resolution: {integrity: sha512-s5vvxXPVdjqS3kTLKMeBMvop9hbWkwzBpu+mUO2M7sZtlkyDJGwFe33wRKnbaYDo8ExRVBIIdwIGrqpxHuKttA==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': '>=1.0.0 <1.10.0'
+
+ '@opentelemetry/core@1.30.1':
+ resolution: {integrity: sha512-OOCM2C/QIURhJMuKaekP3TRBxBKxG/TWWA0TL2J6nXUtDnuCtccy49LUJF8xPFXMX+0LMcxFpCo8M9cGY1W6rQ==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': '>=1.0.0 <1.10.0'
+
+ '@opentelemetry/instrumentation-amqplib@0.46.1':
+ resolution: {integrity: sha512-AyXVnlCf/xV3K/rNumzKxZqsULyITJH6OVLiW6730JPRqWA7Zc9bvYoVNpN6iOpTU8CasH34SU/ksVJmObFibQ==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-connect@0.43.1':
+ resolution: {integrity: sha512-ht7YGWQuV5BopMcw5Q2hXn3I8eG8TH0J/kc/GMcW4CuNTgiP6wCu44BOnucJWL3CmFWaRHI//vWyAhaC8BwePw==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-dataloader@0.16.1':
+ resolution: {integrity: sha512-K/qU4CjnzOpNkkKO4DfCLSQshejRNAJtd4esgigo/50nxCB6XCyi1dhAblUHM9jG5dRm8eu0FB+t87nIo99LYQ==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-express@0.47.1':
+ resolution: {integrity: sha512-QNXPTWteDclR2B4pDFpz0TNghgB33UMjUt14B+BZPmtH1MwUFAfLHBaP5If0Z5NZC+jaH8oF2glgYjrmhZWmSw==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-fs@0.19.1':
+ resolution: {integrity: sha512-6g0FhB3B9UobAR60BGTcXg4IHZ6aaYJzp0Ki5FhnxyAPt8Ns+9SSvgcrnsN2eGmk3RWG5vYycUGOEApycQL24A==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-generic-pool@0.43.1':
+ resolution: {integrity: sha512-M6qGYsp1cURtvVLGDrPPZemMFEbuMmCXgQYTReC/IbimV5sGrLBjB+/hANUpRZjX67nGLdKSVLZuQQAiNz+sww==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-graphql@0.47.1':
+ resolution: {integrity: sha512-EGQRWMGqwiuVma8ZLAZnExQ7sBvbOx0N/AE/nlafISPs8S+QtXX+Viy6dcQwVWwYHQPAcuY3bFt3xgoAwb4ZNQ==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-hapi@0.45.2':
+ resolution: {integrity: sha512-7Ehow/7Wp3aoyCrZwQpU7a2CnoMq0XhIcioFuKjBb0PLYfBfmTsFTUyatlHu0fRxhwcRsSQRTvEhmZu8CppBpQ==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-http@0.57.2':
+ resolution: {integrity: sha512-1Uz5iJ9ZAlFOiPuwYg29Bf7bJJc/GeoeJIFKJYQf67nTVKFe8RHbEtxgkOmK4UGZNHKXcpW4P8cWBYzBn1USpg==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-ioredis@0.47.1':
+ resolution: {integrity: sha512-OtFGSN+kgk/aoKgdkKQnBsQFDiG8WdCxu+UrHr0bXScdAmtSzLSraLo7wFIb25RVHfRWvzI5kZomqJYEg/l1iA==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-kafkajs@0.7.1':
+ resolution: {integrity: sha512-OtjaKs8H7oysfErajdYr1yuWSjMAectT7Dwr+axIoZqT9lmEOkD/H/3rgAs8h/NIuEi2imSXD+vL4MZtOuJfqQ==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-knex@0.44.1':
+ resolution: {integrity: sha512-U4dQxkNhvPexffjEmGwCq68FuftFK15JgUF05y/HlK3M6W/G2iEaACIfXdSnwVNe9Qh0sPfw8LbOPxrWzGWGMQ==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-koa@0.47.1':
+ resolution: {integrity: sha512-l/c+Z9F86cOiPJUllUCt09v+kICKvT+Vg1vOAJHtHPsJIzurGayucfCMq2acd/A/yxeNWunl9d9eqZ0G+XiI6A==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-lru-memoizer@0.44.1':
+ resolution: {integrity: sha512-5MPkYCvG2yw7WONEjYj5lr5JFehTobW7wX+ZUFy81oF2lr9IPfZk9qO+FTaM0bGEiymwfLwKe6jE15nHn1nmHg==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-mongodb@0.52.0':
+ resolution: {integrity: sha512-1xmAqOtRUQGR7QfJFfGV/M2kC7wmI2WgZdpru8hJl3S0r4hW0n3OQpEHlSGXJAaNFyvT+ilnwkT+g5L4ljHR6g==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-mongoose@0.46.1':
+ resolution: {integrity: sha512-3kINtW1LUTPkiXFRSSBmva1SXzS/72we/jL22N+BnF3DFcoewkdkHPYOIdAAk9gSicJ4d5Ojtt1/HeibEc5OQg==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-mysql2@0.45.2':
+ resolution: {integrity: sha512-h6Ad60FjCYdJZ5DTz1Lk2VmQsShiViKe0G7sYikb0GHI0NVvApp2XQNRHNjEMz87roFttGPLHOYVPlfy+yVIhQ==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-mysql@0.45.1':
+ resolution: {integrity: sha512-TKp4hQ8iKQsY7vnp/j0yJJ4ZsP109Ht6l4RHTj0lNEG1TfgTrIH5vJMbgmoYXWzNHAqBH2e7fncN12p3BP8LFg==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-pg@0.51.1':
+ resolution: {integrity: sha512-QxgjSrxyWZc7Vk+qGSfsejPVFL1AgAJdSBMYZdDUbwg730D09ub3PXScB9d04vIqPriZ+0dqzjmQx0yWKiCi2Q==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-redis-4@0.46.1':
+ resolution: {integrity: sha512-UMqleEoabYMsWoTkqyt9WAzXwZ4BlFZHO40wr3d5ZvtjKCHlD4YXLm+6OLCeIi/HkX7EXvQaz8gtAwkwwSEvcQ==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-tedious@0.18.1':
+ resolution: {integrity: sha512-5Cuy/nj0HBaH+ZJ4leuD7RjgvA844aY2WW+B5uLcWtxGjRZl3MNLuxnNg5DYWZNPO+NafSSnra0q49KWAHsKBg==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/instrumentation-undici@0.10.1':
+ resolution: {integrity: sha512-rkOGikPEyRpMCmNu9AQuV5dtRlDmJp2dK5sw8roVshAGoB6hH/3QjDtRhdwd75SsJwgynWUNRUYe0wAkTo16tQ==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.7.0
+
+ '@opentelemetry/instrumentation@0.57.2':
+ resolution: {integrity: sha512-BdBGhQBh8IjZ2oIIX6F2/Q3LKm/FDDKi6ccYKcBTeilh6SNdNKveDOLk73BkSJjQLJk6qe4Yh+hHw1UPhCDdrg==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.3.0
+
+ '@opentelemetry/redis-common@0.36.2':
+ resolution: {integrity: sha512-faYX1N0gpLhej/6nyp6bgRjzAKXn5GOEMYY7YhciSfCoITAktLUtQ36d24QEWNA1/WA1y6qQunCe0OhHRkVl9g==}
+ engines: {node: '>=14'}
+
+ '@opentelemetry/resources@1.30.1':
+ resolution: {integrity: sha512-5UxZqiAgLYGFjS4s9qm5mBVo433u+dSPUFWVWXmLAD4wB65oMCoXaJP1KJa9DIYYMeHu3z4BZcStG3LC593cWA==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': '>=1.0.0 <1.10.0'
+
+ '@opentelemetry/sdk-trace-base@1.30.1':
+ resolution: {integrity: sha512-jVPgBbH1gCy2Lb7X0AVQ8XAfgg0pJ4nvl8/IiQA6nxOsPvS+0zMJaFSs2ltXe0J6C8dqjcnpyqINDJmU30+uOg==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': '>=1.0.0 <1.10.0'
+
+ '@opentelemetry/semantic-conventions@1.28.0':
+ resolution: {integrity: sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==}
+ engines: {node: '>=14'}
+
+ '@opentelemetry/semantic-conventions@1.40.0':
+ resolution: {integrity: sha512-cifvXDhcqMwwTlTK04GBNeIe7yyo28Mfby85QXFe1Yk8nmi36Ab/5UQwptOx84SsoGNRg+EVSjwzfSZMy6pmlw==}
+ engines: {node: '>=14'}
+
+ '@opentelemetry/sql-common@0.40.1':
+ resolution: {integrity: sha512-nSDlnHSqzC3pXn/wZEZVLuAuJ1MYMXPBwtv2qAbCa3847SaHItdE7SzUq/Jtb0KZmh1zfAbNi3AAMjztTT4Ugg==}
+ engines: {node: '>=14'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.1.0
+
+ '@prisma/instrumentation@6.11.1':
+ resolution: {integrity: sha512-mrZOev24EDhnefmnZX7WVVT7v+r9LttPRqf54ONvj6re4XMF7wFTpK2tLJi4XHB7fFp/6xhYbgRel8YV7gQiyA==}
+ peerDependencies:
+ '@opentelemetry/api': ^1.8
+
'@radix-ui/number@1.1.1':
resolution: {integrity: sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==}
@@ -1303,12 +1502,287 @@ packages:
react-redux:
optional: true
+ '@rollup/plugin-commonjs@28.0.1':
+ resolution: {integrity: sha512-+tNWdlWKbpB3WgBN7ijjYkq9X5uhjmcvyjEght4NmH5fAU++zfQzAJ6wumLS+dNcvwEZhKx2Z+skY8m7v0wGSA==}
+ engines: {node: '>=16.0.0 || 14 >= 14.17'}
+ peerDependencies:
+ rollup: ^2.68.0||^3.0.0||^4.0.0
+ peerDependenciesMeta:
+ rollup:
+ optional: true
+
+ '@rollup/pluginutils@5.3.0':
+ resolution: {integrity: sha512-5EdhGZtnu3V88ces7s53hhfK5KSASnJZv8Lulpc04cWO3REESroJXg73DFsOmgbU2BhwV0E20bu2IDZb3VKW4Q==}
+ engines: {node: '>=14.0.0'}
+ peerDependencies:
+ rollup: ^1.20.0||^2.0.0||^3.0.0||^4.0.0
+ peerDependenciesMeta:
+ rollup:
+ optional: true
+
+ '@rollup/rollup-android-arm-eabi@4.59.0':
+ resolution: {integrity: sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==}
+ cpu: [arm]
+ os: [android]
+
+ '@rollup/rollup-android-arm64@4.59.0':
+ resolution: {integrity: sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==}
+ cpu: [arm64]
+ os: [android]
+
+ '@rollup/rollup-darwin-arm64@4.59.0':
+ resolution: {integrity: sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==}
+ cpu: [arm64]
+ os: [darwin]
+
+ '@rollup/rollup-darwin-x64@4.59.0':
+ resolution: {integrity: sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==}
+ cpu: [x64]
+ os: [darwin]
+
+ '@rollup/rollup-freebsd-arm64@4.59.0':
+ resolution: {integrity: sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==}
+ cpu: [arm64]
+ os: [freebsd]
+
+ '@rollup/rollup-freebsd-x64@4.59.0':
+ resolution: {integrity: sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==}
+ cpu: [x64]
+ os: [freebsd]
+
+ '@rollup/rollup-linux-arm-gnueabihf@4.59.0':
+ resolution: {integrity: sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==}
+ cpu: [arm]
+ os: [linux]
+
+ '@rollup/rollup-linux-arm-musleabihf@4.59.0':
+ resolution: {integrity: sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==}
+ cpu: [arm]
+ os: [linux]
+
+ '@rollup/rollup-linux-arm64-gnu@4.59.0':
+ resolution: {integrity: sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==}
+ cpu: [arm64]
+ os: [linux]
+
+ '@rollup/rollup-linux-arm64-musl@4.59.0':
+ resolution: {integrity: sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==}
+ cpu: [arm64]
+ os: [linux]
+
+ '@rollup/rollup-linux-loong64-gnu@4.59.0':
+ resolution: {integrity: sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==}
+ cpu: [loong64]
+ os: [linux]
+
+ '@rollup/rollup-linux-loong64-musl@4.59.0':
+ resolution: {integrity: sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==}
+ cpu: [loong64]
+ os: [linux]
+
+ '@rollup/rollup-linux-ppc64-gnu@4.59.0':
+ resolution: {integrity: sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==}
+ cpu: [ppc64]
+ os: [linux]
+
+ '@rollup/rollup-linux-ppc64-musl@4.59.0':
+ resolution: {integrity: sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==}
+ cpu: [ppc64]
+ os: [linux]
+
+ '@rollup/rollup-linux-riscv64-gnu@4.59.0':
+ resolution: {integrity: sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==}
+ cpu: [riscv64]
+ os: [linux]
+
+ '@rollup/rollup-linux-riscv64-musl@4.59.0':
+ resolution: {integrity: sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==}
+ cpu: [riscv64]
+ os: [linux]
+
+ '@rollup/rollup-linux-s390x-gnu@4.59.0':
+ resolution: {integrity: sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==}
+ cpu: [s390x]
+ os: [linux]
+
+ '@rollup/rollup-linux-x64-gnu@4.59.0':
+ resolution: {integrity: sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==}
+ cpu: [x64]
+ os: [linux]
+
+ '@rollup/rollup-linux-x64-musl@4.59.0':
+ resolution: {integrity: sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==}
+ cpu: [x64]
+ os: [linux]
+
+ '@rollup/rollup-openbsd-x64@4.59.0':
+ resolution: {integrity: sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==}
+ cpu: [x64]
+ os: [openbsd]
+
+ '@rollup/rollup-openharmony-arm64@4.59.0':
+ resolution: {integrity: sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==}
+ cpu: [arm64]
+ os: [openharmony]
+
+ '@rollup/rollup-win32-arm64-msvc@4.59.0':
+ resolution: {integrity: sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==}
+ cpu: [arm64]
+ os: [win32]
+
+ '@rollup/rollup-win32-ia32-msvc@4.59.0':
+ resolution: {integrity: sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==}
+ cpu: [ia32]
+ os: [win32]
+
+ '@rollup/rollup-win32-x64-gnu@4.59.0':
+ resolution: {integrity: sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==}
+ cpu: [x64]
+ os: [win32]
+
+ '@rollup/rollup-win32-x64-msvc@4.59.0':
+ resolution: {integrity: sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==}
+ cpu: [x64]
+ os: [win32]
+
'@rtsao/scc@1.1.0':
resolution: {integrity: sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==}
'@sec-ant/readable-stream@0.4.1':
resolution: {integrity: sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg==}
+ '@sentry-internal/browser-utils@9.47.1':
+ resolution: {integrity: sha512-twv6YhrUlPkvKz4/iQDH4KHgcv9t4cMjmZPf4/dCSCXn4/GOjzjx2d74c1w+1KOdS7lcsQzI+MtbK6SeYLiGfQ==}
+ engines: {node: '>=18'}
+
+ '@sentry-internal/feedback@9.47.1':
+ resolution: {integrity: sha512-xJ4vKvIpAT8e+Sz80YrsNinPU0XV7jPxPjdZ4ex8R2mMvx7pM0gq8JiR/sIVmNiOE0WiUDr6VwLDE8j2APSRMA==}
+ engines: {node: '>=18'}
+
+ '@sentry-internal/replay-canvas@9.47.1':
+ resolution: {integrity: sha512-r9nve+l5+elGB9NXSN1+PUgJy790tXN1e8lZNH2ziveoU91jW4yYYt34mHZ30fU9tOz58OpaRMj3H3GJ/jYZVA==}
+ engines: {node: '>=18'}
+
+ '@sentry-internal/replay@9.47.1':
+ resolution: {integrity: sha512-O9ZEfySpstGtX1f73m3NbdbS2utwPikaFt6sgp74RG4ZX4LlXe99VAjKR464xKECpYsLmj2bYpiK4opURF0pBA==}
+ engines: {node: '>=18'}
+
+ '@sentry/babel-plugin-component-annotate@3.6.1':
+ resolution: {integrity: sha512-zmvUa4RpzDG3LQJFpGCE8lniz8Rk1Wa6ZvvK+yEH+snZeaHHRbSnAQBMR607GOClP+euGHNO2YtaY4UAdNTYbg==}
+ engines: {node: '>= 14'}
+
+ '@sentry/browser@9.47.1':
+ resolution: {integrity: sha512-at5JOLziw5QpVYytxTDU6xijdV6lDQ/Rxp/qXJaHXud3gIK4suv2cXW+tupJfwoUoHFCnDNfccjCmPmP0yRqiA==}
+ engines: {node: '>=18'}
+
+ '@sentry/bundler-plugin-core@3.6.1':
+ resolution: {integrity: sha512-/ubWjPwgLep84sUPzHfKL2Ns9mK9aQrEX4aBFztru7ygiJidKJTxYGtvjh4dL2M1aZ0WRQYp+7PF6+VKwdZXcQ==}
+ engines: {node: '>= 14'}
+
+ '@sentry/cli-darwin@2.58.5':
+ resolution: {integrity: sha512-lYrNzenZFJftfwSya7gwrHGxtE+Kob/e1sr9lmHMFOd4utDlmq0XFDllmdZAMf21fxcPRI1GL28ejZ3bId01fQ==}
+ engines: {node: '>=10'}
+ os: [darwin]
+
+ '@sentry/cli-linux-arm64@2.58.5':
+ resolution: {integrity: sha512-/4gywFeBqRB6tR/iGMRAJ3HRqY6Z7Yp4l8ZCbl0TDLAfHNxu7schEw4tSnm2/Hh9eNMiOVy4z58uzAWlZXAYBQ==}
+ engines: {node: '>=10'}
+ cpu: [arm64]
+ os: [linux, freebsd, android]
+
+ '@sentry/cli-linux-arm@2.58.5':
+ resolution: {integrity: sha512-KtHweSIomYL4WVDrBrYSYJricKAAzxUgX86kc6OnlikbyOhoK6Fy8Vs6vwd52P6dvWPjgrMpUYjW2M5pYXQDUw==}
+ engines: {node: '>=10'}
+ cpu: [arm]
+ os: [linux, freebsd, android]
+
+ '@sentry/cli-linux-i686@2.58.5':
+ resolution: {integrity: sha512-G7261dkmyxqlMdyvyP06b+RTIVzp1gZNgglj5UksxSouSUqRd/46W/2pQeOMPhloDYo9yLtCN2YFb3Mw4aUsWw==}
+ engines: {node: '>=10'}
+ cpu: [x86, ia32]
+ os: [linux, freebsd, android]
+
+ '@sentry/cli-linux-x64@2.58.5':
+ resolution: {integrity: sha512-rP04494RSmt86xChkQ+ecBNRYSPbyXc4u0IA7R7N1pSLCyO74e5w5Al+LnAq35cMfVbZgz5Sm0iGLjyiUu4I1g==}
+ engines: {node: '>=10'}
+ cpu: [x64]
+ os: [linux, freebsd, android]
+
+ '@sentry/cli-win32-arm64@2.58.5':
+ resolution: {integrity: sha512-AOJ2nCXlQL1KBaCzv38m3i2VmSHNurUpm7xVKd6yAHX+ZoVBI8VT0EgvwmtJR2TY2N2hNCC7UrgRmdUsQ152bA==}
+ engines: {node: '>=10'}
+ cpu: [arm64]
+ os: [win32]
+
+ '@sentry/cli-win32-i686@2.58.5':
+ resolution: {integrity: sha512-EsuboLSOnlrN7MMPJ1eFvfMDm+BnzOaSWl8eYhNo8W/BIrmNgpRUdBwnWn9Q2UOjJj5ZopukmsiMYtU/D7ml9g==}
+ engines: {node: '>=10'}
+ cpu: [x86, ia32]
+ os: [win32]
+
+ '@sentry/cli-win32-x64@2.58.5':
+ resolution: {integrity: sha512-IZf+XIMiQwj+5NzqbOQfywlOitmCV424Vtf9c+ep61AaVScUFD1TSrQbOcJJv5xGxhlxNOMNgMeZhdexdzrKZg==}
+ engines: {node: '>=10'}
+ cpu: [x64]
+ os: [win32]
+
+ '@sentry/cli@2.58.5':
+ resolution: {integrity: sha512-tavJ7yGUZV+z3Ct2/ZB6mg339i08sAk6HDkgqmSRuQEu2iLS5sl9HIvuXfM6xjv8fwlgFOSy++WNABNAcGHUbg==}
+ engines: {node: '>= 10'}
+ hasBin: true
+
+ '@sentry/core@9.47.1':
+ resolution: {integrity: sha512-KX62+qIt4xgy8eHKHiikfhz2p5fOciXd0Cl+dNzhgPFq8klq4MGMNaf148GB3M/vBqP4nw/eFvRMAayFCgdRQw==}
+ engines: {node: '>=18'}
+
+ '@sentry/nextjs@9.47.1':
+ resolution: {integrity: sha512-uUcYbUHIXfmPDfakoXWoZl4u/6IMTzrlinQxlbHLYqIHRuclkqvViq6AMNmIwEYrLjRsNKFvFe32QMAHsce2NQ==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ next: ^13.2.0 || ^14.0 || ^15.0.0-rc.0
+
+ '@sentry/node-core@9.47.1':
+ resolution: {integrity: sha512-7TEOiCGkyShJ8CKtsri9lbgMCbB+qNts2Xq37itiMPN2m+lIukK3OX//L8DC5nfKYZlgikrefS63/vJtm669hQ==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.9.0
+ '@opentelemetry/context-async-hooks': ^1.30.1 || ^2.0.0
+ '@opentelemetry/core': ^1.30.1 || ^2.0.0
+ '@opentelemetry/instrumentation': '>=0.57.1 <1'
+ '@opentelemetry/resources': ^1.30.1 || ^2.0.0
+ '@opentelemetry/sdk-trace-base': ^1.30.1 || ^2.0.0
+ '@opentelemetry/semantic-conventions': ^1.34.0
+
+ '@sentry/node@9.47.1':
+ resolution: {integrity: sha512-CDbkasBz3fnWRKSFs6mmaRepM2pa+tbZkrqhPWifFfIkJDidtVW40p6OnquTvPXyPAszCnDZRnZT14xyvNmKPQ==}
+ engines: {node: '>=18'}
+
+ '@sentry/opentelemetry@9.47.1':
+ resolution: {integrity: sha512-STtFpjF7lwzeoedDJV+5XA6P89BfmFwFftmHSGSe3UTI8z8IoiR5yB6X2vCjSPvXlfeOs13qCNNCEZyznxM8Xw==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ '@opentelemetry/api': ^1.9.0
+ '@opentelemetry/context-async-hooks': ^1.30.1 || ^2.0.0
+ '@opentelemetry/core': ^1.30.1 || ^2.0.0
+ '@opentelemetry/sdk-trace-base': ^1.30.1 || ^2.0.0
+ '@opentelemetry/semantic-conventions': ^1.34.0
+
+ '@sentry/react@9.47.1':
+ resolution: {integrity: sha512-Anqt0hG1R+nktlwEiDc2FmD+6DUGMJOLuArgr7q1cSCdPbK2Gb1eZ2rF57Ui+CDo9XLvlX9QP2is/M08rrVe3w==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ react: ^16.14.0 || 17.x || 18.x || 19.x
+
+ '@sentry/vercel-edge@9.47.1':
+ resolution: {integrity: sha512-mLdI/wF+toYu2i3VRcGdUn3AeTpPmAemI2Pnu6oomLKBDFnkjhCLnwCd5xuHLESJR1aJkB4M3g2+7DZcGTspXg==}
+ engines: {node: '>=18'}
+
+ '@sentry/webpack-plugin@3.6.1':
+ resolution: {integrity: sha512-F2yqwbdxfCENMN5u4ih4WfOtGjW56/92DBC0bU6un7Ns/l2qd+wRONIvrF+58rl/VkCFfMlUtZTVoKGRyMRmHA==}
+ engines: {node: '>= 14'}
+ peerDependencies:
+ webpack: '>=4.40.0'
+
'@sindresorhus/merge-streams@4.0.0':
resolution: {integrity: sha512-tlqY9xq5ukxTUZBmoOp+m61cqwQD5pHJtFY3Mn8CA8ps6yghLH/Hw8UPdqg4OLmFW3IFlcXnQNmo/dh8HzXYIQ==}
engines: {node: '>=18'}
@@ -1424,6 +1898,9 @@ packages:
'@tybys/wasm-util@0.10.1':
resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==}
+ '@types/connect@3.4.38':
+ resolution: {integrity: sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==}
+
'@types/d3-array@3.2.2':
resolution: {integrity: sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==}
@@ -1451,6 +1928,12 @@ packages:
'@types/d3-timer@3.0.2':
resolution: {integrity: sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==}
+ '@types/eslint-scope@3.7.7':
+ resolution: {integrity: sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==}
+
+ '@types/eslint@9.6.1':
+ resolution: {integrity: sha512-FXx2pKgId/WyYo2jXw63kk7/+TY7u7AziEJxJAnSFzHlqTAS3Ync6SvgYAN/k4/PQpnnVuzoMuVnByKK2qp0ag==}
+
'@types/estree@1.0.8':
resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==}
@@ -1460,9 +1943,18 @@ packages:
'@types/json5@0.0.29':
resolution: {integrity: sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==}
+ '@types/mysql@2.15.26':
+ resolution: {integrity: sha512-DSLCOXhkvfS5WNNPbfn2KdICAmk8lLc+/PNvnPnF7gOdMZCxopXduqv0OQ13y/yA/zXTSikZZqVgybUxOEg6YQ==}
+
'@types/node@20.19.35':
resolution: {integrity: sha512-Uarfe6J91b9HAUXxjvSOdiO2UPOKLm07Q1oh0JHxoZ1y8HoqxDAu3gVrsrOHeiio0kSsoVBt4wFrKOm0dKxVPQ==}
+ '@types/pg-pool@2.0.6':
+ resolution: {integrity: sha512-TaAUE5rq2VQYxab5Ts7WZhKNmuN78Q6PiFonTDdpbx8a1H0M1vhy3rhiMjl+e2iHmogyMw7jZF4FrE6eJUy5HQ==}
+
+ '@types/pg@8.6.1':
+ resolution: {integrity: sha512-1Kc4oAGzAl7uqUStZCDvaLFqZrW9qWSjXOmBfdgyBP5La7Us6Mg4GBvRlSoaZMhQF/zSj1C8CtKMBkoiT8eL8w==}
+
'@types/react-dom@19.2.3':
resolution: {integrity: sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==}
peerDependencies:
@@ -1471,9 +1963,15 @@ packages:
'@types/react@19.2.14':
resolution: {integrity: sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==}
+ '@types/shimmer@1.2.0':
+ resolution: {integrity: sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==}
+
'@types/statuses@2.0.6':
resolution: {integrity: sha512-xMAgYwceFhRA2zY+XbEA7mxYbA093wdiW8Vu6gZPGWy9cmOyU9XesH1tNcEWsKFd5Vzrqx5T3D38PWx1FIIXkA==}
+ '@types/tedious@4.0.14':
+ resolution: {integrity: sha512-KHPsfX/FoVbUGbyYvk1q9MMQHLPeRZhRJZdO45Q4YjvFkv4hMNghCWTvy7rdKessBsmtz4euWCWAB6/tVpI1Iw==}
+
'@types/use-sync-external-store@0.0.6':
resolution: {integrity: sha512-zFDAD+tlpf2r4asuHEj0XH6pY6i0g5NeAHPn+15wk3BV6JA69eERFXC1gyGThDkVa1zCyKr5jox1+2LbV/AMLg==}
@@ -1634,10 +2132,72 @@ packages:
cpu: [x64]
os: [win32]
+ '@webassemblyjs/ast@1.14.1':
+ resolution: {integrity: sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==}
+
+ '@webassemblyjs/floating-point-hex-parser@1.13.2':
+ resolution: {integrity: sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==}
+
+ '@webassemblyjs/helper-api-error@1.13.2':
+ resolution: {integrity: sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==}
+
+ '@webassemblyjs/helper-buffer@1.14.1':
+ resolution: {integrity: sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==}
+
+ '@webassemblyjs/helper-numbers@1.13.2':
+ resolution: {integrity: sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==}
+
+ '@webassemblyjs/helper-wasm-bytecode@1.13.2':
+ resolution: {integrity: sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==}
+
+ '@webassemblyjs/helper-wasm-section@1.14.1':
+ resolution: {integrity: sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==}
+
+ '@webassemblyjs/ieee754@1.13.2':
+ resolution: {integrity: sha512-4LtOzh58S/5lX4ITKxnAK2USuNEvpdVV9AlgGQb8rJDHaLeHciwG4zlGr0j/SNWlr7x3vO1lDEsuePvtcDNCkw==}
+
+ '@webassemblyjs/leb128@1.13.2':
+ resolution: {integrity: sha512-Lde1oNoIdzVzdkNEAWZ1dZ5orIbff80YPdHx20mrHwHrVNNTjNr8E3xz9BdpcGqRQbAEa+fkrCb+fRFTl/6sQw==}
+
+ '@webassemblyjs/utf8@1.13.2':
+ resolution: {integrity: sha512-3NQWGjKTASY1xV5m7Hr0iPeXD9+RDobLll3T9d2AO+g3my8xy5peVyjSag4I50mR1bBSN/Ct12lo+R9tJk0NZQ==}
+
+ '@webassemblyjs/wasm-edit@1.14.1':
+ resolution: {integrity: sha512-RNJUIQH/J8iA/1NzlE4N7KtyZNHi3w7at7hDjvRNm5rcUXa00z1vRz3glZoULfJ5mpvYhLybmVcwcjGrC1pRrQ==}
+
+ '@webassemblyjs/wasm-gen@1.14.1':
+ resolution: {integrity: sha512-AmomSIjP8ZbfGQhumkNvgC33AY7qtMCXnN6bL2u2Js4gVCg8fp735aEiMSBbDR7UQIj90n4wKAFUSEd0QN2Ukg==}
+
+ '@webassemblyjs/wasm-opt@1.14.1':
+ resolution: {integrity: sha512-PTcKLUNvBqnY2U6E5bdOQcSM+oVP/PmrDY9NzowJjislEjwP/C4an2303MCVS2Mg9d3AJpIGdUFIQQWbPds0Sw==}
+
+ '@webassemblyjs/wasm-parser@1.14.1':
+ resolution: {integrity: sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ==}
+
+ '@webassemblyjs/wast-printer@1.14.1':
+ resolution: {integrity: sha512-kPSSXE6De1XOR820C90RIo2ogvZG+c3KiHzqUoO/F34Y2shGzesfqv7o57xrxovZJH/MetF5UjroJ/R/3isoiw==}
+
+ '@xtuc/ieee754@1.2.0':
+ resolution: {integrity: sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==}
+
+ '@xtuc/long@4.2.2':
+ resolution: {integrity: sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==}
+
accepts@2.0.0:
resolution: {integrity: sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==}
engines: {node: '>= 0.6'}
+ acorn-import-attributes@1.9.5:
+ resolution: {integrity: sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==}
+ peerDependencies:
+ acorn: ^8
+
+ acorn-import-phases@1.0.4:
+ resolution: {integrity: sha512-wKmbr/DDiIXzEOiWrTTUcDm24kQ2vGfZQvM2fwg2vXqR5uW6aapr7ObPtj1th32b9u90/Pf4AItvdTh42fBmVQ==}
+ engines: {node: '>=10.13.0'}
+ peerDependencies:
+ acorn: ^8.14.0
+
acorn-jsx@5.3.2:
resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==}
peerDependencies:
@@ -1648,10 +2208,22 @@ packages:
engines: {node: '>=0.4.0'}
hasBin: true
+ agent-base@6.0.2:
+ resolution: {integrity: sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==}
+ engines: {node: '>= 6.0.0'}
+
agent-base@7.1.4:
resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==}
engines: {node: '>= 14'}
+ ajv-formats@2.1.1:
+ resolution: {integrity: sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==}
+ peerDependencies:
+ ajv: ^8.0.0
+ peerDependenciesMeta:
+ ajv:
+ optional: true
+
ajv-formats@3.0.1:
resolution: {integrity: sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==}
peerDependencies:
@@ -1660,6 +2232,11 @@ packages:
ajv:
optional: true
+ ajv-keywords@5.1.0:
+ resolution: {integrity: sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==}
+ peerDependencies:
+ ajv: ^8.8.2
+
ajv@6.14.0:
resolution: {integrity: sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==}
@@ -1682,6 +2259,10 @@ packages:
resolution: {integrity: sha512-HqZ5rWlFjGiV0tDm3UxxgNRqsOTniqoKZu0pIAfh7TZQMGuZK+hH0drySty0si0QXj1ieop4+SkSfPZBPPkHig==}
engines: {node: '>=14'}
+ anymatch@3.1.3:
+ resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==}
+ engines: {node: '>= 8'}
+
argparse@2.0.1:
resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==}
@@ -1760,6 +2341,10 @@ packages:
engines: {node: '>=6.0.0'}
hasBin: true
+ binary-extensions@2.3.0:
+ resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==}
+ engines: {node: '>=8'}
+
body-parser@2.2.2:
resolution: {integrity: sha512-oP5VkATKlNwcgvxi0vM0p/D3n2C3EReYVX+DNYs5TjZFn/oQt2j+4sVJtSMr18pdRr8wjTcBl6LoV+FUwzPmNA==}
engines: {node: '>=18'}
@@ -1767,6 +2352,9 @@ packages:
brace-expansion@1.1.12:
resolution: {integrity: sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==}
+ brace-expansion@2.0.2:
+ resolution: {integrity: sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==}
+
brace-expansion@5.0.4:
resolution: {integrity: sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg==}
engines: {node: 18 || 20 || >=22}
@@ -1780,6 +2368,9 @@ packages:
engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7}
hasBin: true
+ buffer-from@1.1.2:
+ resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==}
+
bundle-name@4.1.0:
resolution: {integrity: sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==}
engines: {node: '>=18'}
@@ -1807,6 +2398,10 @@ packages:
caniuse-lite@1.0.30001775:
resolution: {integrity: sha512-s3Qv7Lht9zbVKE9XoTyRG6wVDCKdtOFIjBGg3+Yhn6JaytuNKPIjBMTMIY1AnOH3seL5mvF+x33oGAyK3hVt3A==}
+ chalk@3.0.0:
+ resolution: {integrity: sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==}
+ engines: {node: '>=8'}
+
chalk@4.1.2:
resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==}
engines: {node: '>=10'}
@@ -1815,6 +2410,17 @@ packages:
resolution: {integrity: sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==}
engines: {node: ^12.17.0 || ^14.13 || >=16.0.0}
+ chokidar@3.6.0:
+ resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==}
+ engines: {node: '>= 8.10.0'}
+
+ chrome-trace-event@1.0.4:
+ resolution: {integrity: sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ==}
+ engines: {node: '>=6.0'}
+
+ cjs-module-lexer@1.4.3:
+ resolution: {integrity: sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==}
+
class-variance-authority@0.7.1:
resolution: {integrity: sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==}
@@ -1859,6 +2465,12 @@ packages:
resolution: {integrity: sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw==}
engines: {node: '>=20'}
+ commander@2.20.3:
+ resolution: {integrity: sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==}
+
+ commondir@1.0.1:
+ resolution: {integrity: sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==}
+
concat-map@0.0.1:
resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==}
@@ -2047,6 +2659,10 @@ packages:
resolution: {integrity: sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==}
engines: {node: '>=0.10.0'}
+ dotenv@16.6.1:
+ resolution: {integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==}
+ engines: {node: '>=12'}
+
dotenv@17.3.1:
resolution: {integrity: sha512-IO8C/dzEb6O3F9/twg6ZLXz164a2fhTnEWb95H23Dm4OuN+92NmEAlTrupP9VW6Jm3sO26tQlqyvyi4CsnY9GA==}
engines: {node: '>=12'}
@@ -2105,6 +2721,9 @@ packages:
resolution: {integrity: sha512-BrUQ0cPTB/IwXj23HtwHjS9n7O4h9FX94b4xc5zlTHxeLgTAdzYUDyy6KdExAl9lbN5rtfe44xpjpmj9grxs5w==}
engines: {node: '>= 0.4'}
+ es-module-lexer@2.0.0:
+ resolution: {integrity: sha512-5POEcUuZybH7IdmGsD8wlf0AI55wMecM9rVBTI/qEAy2c1kTOm3DjFYjrBdI2K3BaJjJYfYFeRtM0t9ssnRuxw==}
+
es-object-atoms@1.1.1:
resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==}
engines: {node: '>= 0.4'}
@@ -2209,6 +2828,10 @@ packages:
peerDependencies:
eslint: ^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9.7
+ eslint-scope@5.1.1:
+ resolution: {integrity: sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==}
+ engines: {node: '>=8.0.0'}
+
eslint-scope@8.4.0:
resolution: {integrity: sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
@@ -2252,10 +2875,17 @@ packages:
resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==}
engines: {node: '>=4.0'}
+ estraverse@4.3.0:
+ resolution: {integrity: sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==}
+ engines: {node: '>=4.0'}
+
estraverse@5.3.0:
resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==}
engines: {node: '>=4.0'}
+ estree-walker@2.0.2:
+ resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==}
+
esutils@2.0.3:
resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==}
engines: {node: '>=0.10.0'}
@@ -2267,6 +2897,10 @@ packages:
eventemitter3@5.0.4:
resolution: {integrity: sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw==}
+ events@3.3.0:
+ resolution: {integrity: sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==}
+ engines: {node: '>=0.8.x'}
+
eventsource-parser@3.0.6:
resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==}
engines: {node: '>=18.0.0'}
@@ -2364,6 +2998,9 @@ packages:
resolution: {integrity: sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==}
engines: {node: '>=12.20.0'}
+ forwarded-parse@2.1.2:
+ resolution: {integrity: sha512-alTFZZQDKMporBH77856pXgzhEzaUVmLCDk+egLgIgHst3Tpndzz8MnKe+GzRJRfvVdn69HhpW7cmXzvtLvJAw==}
+
forwarded@0.2.0:
resolution: {integrity: sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==}
engines: {node: '>= 0.6'}
@@ -2376,6 +3013,14 @@ packages:
resolution: {integrity: sha512-VWSRii4t0AFm6ixFFmLLx1t7wS1gh+ckoa84aOeapGum0h+EZd1EhEumSB+ZdDLnEPuucsVB9oB7cxJHap6Afg==}
engines: {node: '>=14.14'}
+ fs.realpath@1.0.0:
+ resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==}
+
+ fsevents@2.3.3:
+ resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
+ engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
+ os: [darwin]
+
function-bind@1.1.2:
resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==}
@@ -2447,6 +3092,14 @@ packages:
resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==}
engines: {node: '>=10.13.0'}
+ glob-to-regexp@0.4.1:
+ resolution: {integrity: sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==}
+
+ glob@9.3.5:
+ resolution: {integrity: sha512-e1LleDykUz2Iu+MTYdkSsuWX8lvAjAcs0Xef0lNIu0S2wOAzuTxCJtcd9S3cijlwYF18EsU3rzb8jPVobxDh9Q==}
+ engines: {node: '>=16 || 14 >=14.17'}
+ deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me
+
globals@14.0.0:
resolution: {integrity: sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==}
engines: {node: '>=18'}
@@ -2506,6 +3159,9 @@ packages:
hermes-parser@0.25.1:
resolution: {integrity: sha512-6pEjquH3rqaI6cYAXYPcz9MS4rY6R4ngRgrgfDshRptUZIc3lw0MCIJIGDj9++mfySOuPTHB4nrSW99BCvOPIA==}
+ hoist-non-react-statics@3.3.2:
+ resolution: {integrity: sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==}
+
hono@4.12.3:
resolution: {integrity: sha512-SFsVSjp8sj5UumXOOFlkZOG6XS9SJDKw0TbwFeV+AJ8xlST8kxK5Z/5EYa111UY8732lK2S/xB653ceuaoGwpg==}
engines: {node: '>=16.9.0'}
@@ -2514,6 +3170,10 @@ packages:
resolution: {integrity: sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==}
engines: {node: '>= 0.8'}
+ https-proxy-agent@5.0.1:
+ resolution: {integrity: sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==}
+ engines: {node: '>= 6'}
+
https-proxy-agent@7.0.6:
resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==}
engines: {node: '>= 14'}
@@ -2548,6 +3208,9 @@ packages:
resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==}
engines: {node: '>=6'}
+ import-in-the-middle@1.15.0:
+ resolution: {integrity: sha512-bpQy+CrsRmYmoPMAE/0G33iwRqwW4ouqdRg8jgbH3aKuCtOc8lxgmYXg2dMM92CRiGP660EtBcymH/eVUpCSaA==}
+
imurmurhash@0.1.4:
resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==}
engines: {node: '>=0.8.19'}
@@ -2586,6 +3249,10 @@ packages:
resolution: {integrity: sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ==}
engines: {node: '>= 0.4'}
+ is-binary-path@2.1.0:
+ resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==}
+ engines: {node: '>=8'}
+
is-boolean-object@1.2.2:
resolution: {integrity: sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A==}
engines: {node: '>= 0.4'}
@@ -2677,6 +3344,9 @@ packages:
is-promise@4.0.0:
resolution: {integrity: sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==}
+ is-reference@1.2.1:
+ resolution: {integrity: sha512-U82MsXXiFIrjCK4otLT+o2NA2Cd2g5MLoOVXUZjIOhLurrRxpEXzI8O0KZHr3IjLvlAH1kTPYSuqer5T9ZVBKQ==}
+
is-regex@1.2.1:
resolution: {integrity: sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==}
engines: {node: '>= 0.4'}
@@ -2751,6 +3421,10 @@ packages:
resolution: {integrity: sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g==}
engines: {node: '>= 0.4'}
+ jest-worker@27.5.1:
+ resolution: {integrity: sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==}
+ engines: {node: '>= 10.13.0'}
+
jiti@2.6.1:
resolution: {integrity: sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==}
hasBin: true
@@ -2899,6 +3573,10 @@ packages:
lines-and-columns@1.2.4:
resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==}
+ loader-runner@4.3.1:
+ resolution: {integrity: sha512-IWqP2SCPhyVFTBtRcgMHdzlf9ul25NwaFx4wCEH/KjAXuuHY4yNjvPXsBokp8jCB936PyWRaPKUNh8NvylLp2Q==}
+ engines: {node: '>=6.11.5'}
+
locate-path@6.0.0:
resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==}
engines: {node: '>=10'}
@@ -2914,6 +3592,9 @@ packages:
resolution: {integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==}
hasBin: true
+ lru-cache@10.4.3:
+ resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==}
+
lru-cache@5.1.1:
resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==}
@@ -2925,6 +3606,10 @@ packages:
magic-string@0.30.21:
resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==}
+ magic-string@0.30.8:
+ resolution: {integrity: sha512-ISQTe55T2ao7XtlAStud6qwYPZjE4GK1S/BeVPus4jrq6JuOnQ00YKQC581RWhR122W7msZV263KzVeLoqidyQ==}
+ engines: {node: '>=12'}
+
math-intrinsics@1.1.0:
resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==}
engines: {node: '>= 0.4'}
@@ -2948,10 +3633,18 @@ packages:
resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==}
engines: {node: '>=8.6'}
+ mime-db@1.52.0:
+ resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==}
+ engines: {node: '>= 0.6'}
+
mime-db@1.54.0:
resolution: {integrity: sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==}
engines: {node: '>= 0.6'}
+ mime-types@2.1.35:
+ resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==}
+ engines: {node: '>= 0.6'}
+
mime-types@3.0.2:
resolution: {integrity: sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==}
engines: {node: '>=18'}
@@ -2971,9 +3664,28 @@ packages:
minimatch@3.1.5:
resolution: {integrity: sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==}
+ minimatch@8.0.7:
+ resolution: {integrity: sha512-V+1uQNdzybxa14e/p00HZnQNNcTjnRJjDxg2V8wtkjFctq4M7hXFws4oekyTP0Jebeq7QYtpFyOeBAjc88zvYg==}
+ engines: {node: '>=16 || 14 >=14.17'}
+
+ minimatch@9.0.9:
+ resolution: {integrity: sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg==}
+ engines: {node: '>=16 || 14 >=14.17'}
+
minimist@1.2.8:
resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
+ minipass@4.2.8:
+ resolution: {integrity: sha512-fNzuVyifolSLFL4NzpF+wEF4qrgqaaKX0haXPQEdQ7NKAN+WecoKMHV09YcuL/DHxrUsYQOK3MiuDf7Ip2OXfQ==}
+ engines: {node: '>=8'}
+
+ minipass@7.1.3:
+ resolution: {integrity: sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A==}
+ engines: {node: '>=16 || 14 >=14.17'}
+
+ module-details-from-path@1.0.4:
+ resolution: {integrity: sha512-EGWKgxALGMgzvxYF1UyGTy0HXX/2vHLkw6+NvDKW2jypWbHpjQuj4UMcqQWXHERJhVGKikolT06G3bcKe4fi7w==}
+
ms@2.1.3:
resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==}
@@ -3008,6 +3720,9 @@ packages:
resolution: {integrity: sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==}
engines: {node: '>= 0.6'}
+ neo-async@2.6.2:
+ resolution: {integrity: sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==}
+
next@16.1.6:
resolution: {integrity: sha512-hkyRkcu5x/41KoqnROkfTm2pZVbKxvbZRuNvKXLRXxs3VfyO0WhY50TQS40EuKO9SW3rBj/sF3WbVwDACeMZyw==}
engines: {node: '>=20.9.0'}
@@ -3038,6 +3753,15 @@ packages:
resolution: {integrity: sha512-pyFS63ptit/P5WqUkt+UUfe+4oevH+bFeIiPPdfb0pFeYEu/1ELnJu5l+5EcTKYL5M7zaAa7S8ddywgXypqKCw==}
engines: {node: '>= 0.4'}
+ node-fetch@2.7.0:
+ resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==}
+ engines: {node: 4.x || >=6.0.0}
+ peerDependencies:
+ encoding: ^0.1.0
+ peerDependenciesMeta:
+ encoding:
+ optional: true
+
node-fetch@3.3.2:
resolution: {integrity: sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==}
engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
@@ -3045,6 +3769,10 @@ packages:
node-releases@2.0.27:
resolution: {integrity: sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==}
+ normalize-path@3.0.0:
+ resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==}
+ engines: {node: '>=0.10.0'}
+
npm-run-path@4.0.1:
resolution: {integrity: sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==}
engines: {node: '>=8'}
@@ -3168,12 +3896,27 @@ packages:
path-parse@1.0.7:
resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==}
+ path-scurry@1.11.1:
+ resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==}
+ engines: {node: '>=16 || 14 >=14.18'}
+
path-to-regexp@6.3.0:
resolution: {integrity: sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==}
path-to-regexp@8.3.0:
resolution: {integrity: sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==}
+ pg-int8@1.0.1:
+ resolution: {integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==}
+ engines: {node: '>=4.0.0'}
+
+ pg-protocol@1.12.0:
+ resolution: {integrity: sha512-uOANXNRACNdElMXJ0tPz6RBM0XQ61nONGAwlt8da5zs/iUOOCLBQOHSXnrC6fMsvtjxbOJrZZl5IScGv+7mpbg==}
+
+ pg-types@2.2.0:
+ resolution: {integrity: sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==}
+ engines: {node: '>=4'}
+
picocolors@1.1.1:
resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==}
@@ -3205,6 +3948,22 @@ packages:
resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==}
engines: {node: ^10 || ^12 || >=14}
+ postgres-array@2.0.0:
+ resolution: {integrity: sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==}
+ engines: {node: '>=4'}
+
+ postgres-bytea@1.0.1:
+ resolution: {integrity: sha512-5+5HqXnsZPE65IJZSMkZtURARZelel2oXUEO8rH83VS/hxH5vv1uHquPg5wZs8yMAfdv971IU+kcPUczi7NVBQ==}
+ engines: {node: '>=0.10.0'}
+
+ postgres-date@1.0.7:
+ resolution: {integrity: sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==}
+ engines: {node: '>=0.10.0'}
+
+ postgres-interval@1.2.0:
+ resolution: {integrity: sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==}
+ engines: {node: '>=0.10.0'}
+
powershell-utils@0.1.0:
resolution: {integrity: sha512-dM0jVuXJPsDN6DvRpea484tCUaMiXWjuCn++HGTqUWzGDjv5tZkEZldAJ/UMlqRYGFrD/etByo4/xOuC/snX2A==}
engines: {node: '>=20'}
@@ -3217,6 +3976,10 @@ packages:
resolution: {integrity: sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ==}
engines: {node: '>=18'}
+ progress@2.0.3:
+ resolution: {integrity: sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==}
+ engines: {node: '>=0.4.0'}
+
prompts@2.4.2:
resolution: {integrity: sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==}
engines: {node: '>= 6'}
@@ -3228,6 +3991,9 @@ packages:
resolution: {integrity: sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==}
engines: {node: '>= 0.10'}
+ proxy-from-env@1.1.0:
+ resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==}
+
punycode@2.3.1:
resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==}
engines: {node: '>=6'}
@@ -3252,6 +4018,9 @@ packages:
'@types/react-dom':
optional: true
+ randombytes@2.1.0:
+ resolution: {integrity: sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==}
+
range-parser@1.2.1:
resolution: {integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==}
engines: {node: '>= 0.6'}
@@ -3314,6 +4083,10 @@ packages:
resolution: {integrity: sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA==}
engines: {node: '>=0.10.0'}
+ readdirp@3.6.0:
+ resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==}
+ engines: {node: '>=8.10.0'}
+
recast@0.23.11:
resolution: {integrity: sha512-YTUo+Flmw4ZXiWfQKGcwwc11KnoRAYgzAE2E7mXKCjSviTKShtxBsN6YUUBB2gtaBzKzeKunxhUwNHQuRryhWA==}
engines: {node: '>= 4'}
@@ -3350,6 +4123,10 @@ packages:
resolution: {integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==}
engines: {node: '>=0.10.0'}
+ require-in-the-middle@7.5.2:
+ resolution: {integrity: sha512-gAZ+kLqBdHarXB64XpAe2VCjB7rIRv+mU8tfRWziHRJ5umKsIHN2tLLv6EtMw7WCdP19S0ERVMldNvxYCHnhSQ==}
+ engines: {node: '>=8.6.0'}
+
reselect@5.1.1:
resolution: {integrity: sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w==}
@@ -3365,6 +4142,10 @@ packages:
engines: {node: '>= 0.4'}
hasBin: true
+ resolve@1.22.8:
+ resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==}
+ hasBin: true
+
resolve@2.0.0-next.6:
resolution: {integrity: sha512-3JmVl5hMGtJ3kMmB3zi3DL25KfkCEyy3Tw7Gmw7z5w8M9WlwoPFnIvwChzu1+cF3iaK3sp18hhPz8ANeimdJfA==}
engines: {node: '>= 0.4'}
@@ -3381,6 +4162,11 @@ packages:
resolution: {integrity: sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==}
engines: {iojs: '>=1.0.0', node: '>=0.10.0'}
+ rollup@4.59.0:
+ resolution: {integrity: sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==}
+ engines: {node: '>=18.0.0', npm: '>=8.0.0'}
+ hasBin: true
+
router@2.2.0:
resolution: {integrity: sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==}
engines: {node: '>= 18'}
@@ -3396,6 +4182,9 @@ packages:
resolution: {integrity: sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==}
engines: {node: '>=0.4'}
+ safe-buffer@5.2.1:
+ resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==}
+
safe-push-apply@1.0.0:
resolution: {integrity: sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA==}
engines: {node: '>= 0.4'}
@@ -3410,6 +4199,10 @@ packages:
scheduler@0.27.0:
resolution: {integrity: sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==}
+ schema-utils@4.3.3:
+ resolution: {integrity: sha512-eflK8wEtyOE6+hsaRVPxvUKYCpRgzLqDTb8krvAsRIwOGlHoSgYLgBXoubGgLd2fT41/OUYdb48v4k4WWHQurA==}
+ engines: {node: '>= 10.13.0'}
+
semver@6.3.1:
resolution: {integrity: sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==}
hasBin: true
@@ -3423,6 +4216,9 @@ packages:
resolution: {integrity: sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==}
engines: {node: '>= 18'}
+ serialize-javascript@6.0.2:
+ resolution: {integrity: sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==}
+
serve-static@2.2.1:
resolution: {integrity: sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw==}
engines: {node: '>= 18'}
@@ -3458,6 +4254,9 @@ packages:
resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==}
engines: {node: '>=8'}
+ shimmer@1.2.1:
+ resolution: {integrity: sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==}
+
side-channel-list@1.0.0:
resolution: {integrity: sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==}
engines: {node: '>= 0.4'}
@@ -3494,6 +4293,9 @@ packages:
resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==}
engines: {node: '>=0.10.0'}
+ source-map-support@0.5.21:
+ resolution: {integrity: sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==}
+
source-map@0.6.1:
resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==}
engines: {node: '>=0.10.0'}
@@ -3501,6 +4303,10 @@ packages:
stable-hash@0.0.5:
resolution: {integrity: sha512-+L3ccpzibovGXFK+Ap/f8LOS0ahMrHTf3xu7mMLSpEGU0EO9ucaysSylKo9eRDFNhWve/y275iPmIZ4z39a9iA==}
+ stacktrace-parser@0.1.11:
+ resolution: {integrity: sha512-WjlahMgHmCJpqzU8bIBy4qtsZdU9lRlcZE3Lvyej6t4tuOuv1vk57OW3MBrj6hXBFx/nNoC9MPMTcr5YA7NQbg==}
+ engines: {node: '>=6'}
+
statuses@2.0.2:
resolution: {integrity: sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==}
engines: {node: '>= 0.8'}
@@ -3592,6 +4398,10 @@ packages:
resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==}
engines: {node: '>=8'}
+ supports-color@8.1.1:
+ resolution: {integrity: sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==}
+ engines: {node: '>=10'}
+
supports-preserve-symlinks-flag@1.0.0:
resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==}
engines: {node: '>= 0.4'}
@@ -3610,6 +4420,27 @@ packages:
resolution: {integrity: sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==}
engines: {node: '>=6'}
+ terser-webpack-plugin@5.3.16:
+ resolution: {integrity: sha512-h9oBFCWrq78NyWWVcSwZarJkZ01c2AyGrzs1crmHZO3QUg9D61Wu4NPjBy69n7JqylFF5y+CsUZYmYEIZ3mR+Q==}
+ engines: {node: '>= 10.13.0'}
+ peerDependencies:
+ '@swc/core': '*'
+ esbuild: '*'
+ uglify-js: '*'
+ webpack: ^5.1.0
+ peerDependenciesMeta:
+ '@swc/core':
+ optional: true
+ esbuild:
+ optional: true
+ uglify-js:
+ optional: true
+
+ terser@5.46.0:
+ resolution: {integrity: sha512-jTwoImyr/QbOWFFso3YoU3ik0jBBDJ6JTOQiy/J2YxVJdZCc+5u7skhNwiOR3FQIygFqVUPHl7qbbxtjW2K3Qg==}
+ engines: {node: '>=10'}
+ hasBin: true
+
tiny-invariant@1.3.3:
resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==}
@@ -3640,6 +4471,9 @@ packages:
resolution: {integrity: sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w==}
engines: {node: '>=16'}
+ tr46@0.0.3:
+ resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==}
+
ts-api-utils@2.4.0:
resolution: {integrity: sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA==}
engines: {node: '>=18.12'}
@@ -3666,6 +4500,10 @@ packages:
resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==}
engines: {node: '>= 0.8.0'}
+ type-fest@0.7.1:
+ resolution: {integrity: sha512-Ne2YiiGN8bmrmJJEuTWTLJR32nh/JdL1+PSicowtNb0WFpn59GK8/lfD61bVtzguz7b3PBt74nxpv/Pw5po5Rg==}
+ engines: {node: '>=8'}
+
type-fest@5.4.4:
resolution: {integrity: sha512-JnTrzGu+zPV3aXIUhnyWJj4z/wigMsdYajGLIYakqyOW1nPllzXEJee0QQbHj+CTIQtXGlAjuK0UY+2xTyjVAw==}
engines: {node: '>=20'}
@@ -3721,6 +4559,9 @@ packages:
resolution: {integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==}
engines: {node: '>= 0.8'}
+ unplugin@1.0.1:
+ resolution: {integrity: sha512-aqrHaVBWW1JVKBHmGo33T5TxeL0qWzfvjWokObHA9bYmN7eNDkwOxmLjhioHl9878qDFMAaT51XNroRyuz7WxA==}
+
unrs-resolver@1.11.1:
resolution: {integrity: sha512-bSjt9pjaEBnNiGgc9rUiHGKv5l4/TGzDmYw3RhnkJGtLhbnnA/5qJj7x3dNDCRx/PJxu774LlH8lCOlB4hEfKg==}
@@ -3764,6 +4605,10 @@ packages:
util-deprecate@1.0.2:
resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
+ uuid@9.0.1:
+ resolution: {integrity: sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==}
+ hasBin: true
+
validate-npm-package-name@7.0.2:
resolution: {integrity: sha512-hVDIBwsRruT73PbK7uP5ebUt+ezEtCmzZz3F59BSr2F6OVFnJ/6h8liuvdLrQ88Xmnk6/+xGGuq+pG9WwTuy3A==}
engines: {node: ^20.17.0 || >=22.9.0}
@@ -3775,10 +4620,37 @@ packages:
victory-vendor@37.3.6:
resolution: {integrity: sha512-SbPDPdDBYp+5MJHhBCAyI7wKM3d5ivekigc2Dk2s7pgbZ9wIgIBYGVw4zGHBml/qTFbexrofXW6Gu4noGxrOwQ==}
+ watchpack@2.5.1:
+ resolution: {integrity: sha512-Zn5uXdcFNIA1+1Ei5McRd+iRzfhENPCe7LeABkJtNulSxjma+l7ltNx55BWZkRlwRnpOgHqxnjyaDgJnNXnqzg==}
+ engines: {node: '>=10.13.0'}
+
web-streams-polyfill@3.3.3:
resolution: {integrity: sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==}
engines: {node: '>= 8'}
+ webidl-conversions@3.0.1:
+ resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==}
+
+ webpack-sources@3.3.4:
+ resolution: {integrity: sha512-7tP1PdV4vF+lYPnkMR0jMY5/la2ub5Fc/8VQrrU+lXkiM6C4TjVfGw7iKfyhnTQOsD+6Q/iKw0eFciziRgD58Q==}
+ engines: {node: '>=10.13.0'}
+
+ webpack-virtual-modules@0.5.0:
+ resolution: {integrity: sha512-kyDivFZ7ZM0BVOUteVbDFhlRt7Ah/CSPwJdi8hBpkK7QLumUqdLtVfm/PX/hkcnrvr0i77fO5+TjZ94Pe+C9iw==}
+
+ webpack@5.105.3:
+ resolution: {integrity: sha512-LLBBA4oLmT7sZdHiYE/PeVuifOxYyE2uL/V+9VQP7YSYdJU7bSf7H8bZRRxW8kEPMkmVjnrXmoR3oejIdX0xbg==}
+ engines: {node: '>=10.13.0'}
+ hasBin: true
+ peerDependencies:
+ webpack-cli: '*'
+ peerDependenciesMeta:
+ webpack-cli:
+ optional: true
+
+ whatwg-url@5.0.0:
+ resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==}
+
which-boxed-primitive@1.1.1:
resolution: {integrity: sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA==}
engines: {node: '>= 0.4'}
@@ -3824,6 +4696,10 @@ packages:
resolution: {integrity: sha512-g/eziiSUNBSsdDJtCLB8bdYEUMj4jR7AGeUo96p/3dTafgjHhpF4RiCFPiRILwjQoDXx5MqkBr4fwWtR3Ky4Wg==}
engines: {node: '>=20'}
+ xtend@4.0.2:
+ resolution: {integrity: sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==}
+ engines: {node: '>=0.4'}
+
y18n@5.0.8:
resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==}
engines: {node: '>=10'}
@@ -4312,6 +5188,11 @@ snapshots:
'@jridgewell/resolve-uri@3.1.2': {}
+ '@jridgewell/source-map@0.3.11':
+ dependencies:
+ '@jridgewell/gen-mapping': 0.3.13
+ '@jridgewell/trace-mapping': 0.3.31
+
'@jridgewell/sourcemap-codec@1.5.5': {}
'@jridgewell/trace-mapping@0.3.31':
@@ -4418,6 +5299,255 @@ snapshots:
'@open-draft/until@2.1.0': {}
+ '@opentelemetry/api-logs@0.57.2':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+
+ '@opentelemetry/api@1.9.0': {}
+
+ '@opentelemetry/context-async-hooks@1.30.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+
+ '@opentelemetry/core@1.30.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/semantic-conventions': 1.28.0
+
+ '@opentelemetry/instrumentation-amqplib@0.46.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-connect@0.43.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ '@types/connect': 3.4.38
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-dataloader@0.16.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-express@0.47.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-fs@0.19.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-generic-pool@0.43.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-graphql@0.47.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-hapi@0.45.2(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-http@0.57.2(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.28.0
+ forwarded-parse: 2.1.2
+ semver: 7.7.4
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-ioredis@0.47.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/redis-common': 0.36.2
+ '@opentelemetry/semantic-conventions': 1.40.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-kafkajs@0.7.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-knex@0.44.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-koa@0.47.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-lru-memoizer@0.44.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-mongodb@0.52.0(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-mongoose@0.46.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-mysql2@0.45.2(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ '@opentelemetry/sql-common': 0.40.1(@opentelemetry/api@1.9.0)
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-mysql@0.45.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ '@types/mysql': 2.15.26
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-pg@0.51.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ '@opentelemetry/sql-common': 0.40.1(@opentelemetry/api@1.9.0)
+ '@types/pg': 8.6.1
+ '@types/pg-pool': 2.0.6
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-redis-4@0.46.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/redis-common': 0.36.2
+ '@opentelemetry/semantic-conventions': 1.40.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-tedious@0.18.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ '@types/tedious': 4.0.14
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation-undici@0.10.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/instrumentation@0.57.2(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/api-logs': 0.57.2
+ '@types/shimmer': 1.2.0
+ import-in-the-middle: 1.15.0
+ require-in-the-middle: 7.5.2
+ semver: 7.7.4
+ shimmer: 1.2.1
+ transitivePeerDependencies:
+ - supports-color
+
+ '@opentelemetry/redis-common@0.36.2': {}
+
+ '@opentelemetry/resources@1.30.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.28.0
+
+ '@opentelemetry/sdk-trace-base@1.30.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/resources': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.28.0
+
+ '@opentelemetry/semantic-conventions@1.28.0': {}
+
+ '@opentelemetry/semantic-conventions@1.40.0': {}
+
+ '@opentelemetry/sql-common@0.40.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+
+ '@prisma/instrumentation@6.11.1(@opentelemetry/api@1.9.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ transitivePeerDependencies:
+ - supports-color
+
'@radix-ui/number@1.1.1': {}
'@radix-ui/primitive@1.1.3': {}
@@ -5177,10 +6307,310 @@ snapshots:
react: 19.2.3
react-redux: 9.2.0(@types/react@19.2.14)(react@19.2.3)(redux@5.0.1)
+ '@rollup/plugin-commonjs@28.0.1(rollup@4.59.0)':
+ dependencies:
+ '@rollup/pluginutils': 5.3.0(rollup@4.59.0)
+ commondir: 1.0.1
+ estree-walker: 2.0.2
+ fdir: 6.5.0(picomatch@4.0.3)
+ is-reference: 1.2.1
+ magic-string: 0.30.21
+ picomatch: 4.0.3
+ optionalDependencies:
+ rollup: 4.59.0
+
+ '@rollup/pluginutils@5.3.0(rollup@4.59.0)':
+ dependencies:
+ '@types/estree': 1.0.8
+ estree-walker: 2.0.2
+ picomatch: 4.0.3
+ optionalDependencies:
+ rollup: 4.59.0
+
+ '@rollup/rollup-android-arm-eabi@4.59.0':
+ optional: true
+
+ '@rollup/rollup-android-arm64@4.59.0':
+ optional: true
+
+ '@rollup/rollup-darwin-arm64@4.59.0':
+ optional: true
+
+ '@rollup/rollup-darwin-x64@4.59.0':
+ optional: true
+
+ '@rollup/rollup-freebsd-arm64@4.59.0':
+ optional: true
+
+ '@rollup/rollup-freebsd-x64@4.59.0':
+ optional: true
+
+ '@rollup/rollup-linux-arm-gnueabihf@4.59.0':
+ optional: true
+
+ '@rollup/rollup-linux-arm-musleabihf@4.59.0':
+ optional: true
+
+ '@rollup/rollup-linux-arm64-gnu@4.59.0':
+ optional: true
+
+ '@rollup/rollup-linux-arm64-musl@4.59.0':
+ optional: true
+
+ '@rollup/rollup-linux-loong64-gnu@4.59.0':
+ optional: true
+
+ '@rollup/rollup-linux-loong64-musl@4.59.0':
+ optional: true
+
+ '@rollup/rollup-linux-ppc64-gnu@4.59.0':
+ optional: true
+
+ '@rollup/rollup-linux-ppc64-musl@4.59.0':
+ optional: true
+
+ '@rollup/rollup-linux-riscv64-gnu@4.59.0':
+ optional: true
+
+ '@rollup/rollup-linux-riscv64-musl@4.59.0':
+ optional: true
+
+ '@rollup/rollup-linux-s390x-gnu@4.59.0':
+ optional: true
+
+ '@rollup/rollup-linux-x64-gnu@4.59.0':
+ optional: true
+
+ '@rollup/rollup-linux-x64-musl@4.59.0':
+ optional: true
+
+ '@rollup/rollup-openbsd-x64@4.59.0':
+ optional: true
+
+ '@rollup/rollup-openharmony-arm64@4.59.0':
+ optional: true
+
+ '@rollup/rollup-win32-arm64-msvc@4.59.0':
+ optional: true
+
+ '@rollup/rollup-win32-ia32-msvc@4.59.0':
+ optional: true
+
+ '@rollup/rollup-win32-x64-gnu@4.59.0':
+ optional: true
+
+ '@rollup/rollup-win32-x64-msvc@4.59.0':
+ optional: true
+
'@rtsao/scc@1.1.0': {}
'@sec-ant/readable-stream@0.4.1': {}
+ '@sentry-internal/browser-utils@9.47.1':
+ dependencies:
+ '@sentry/core': 9.47.1
+
+ '@sentry-internal/feedback@9.47.1':
+ dependencies:
+ '@sentry/core': 9.47.1
+
+ '@sentry-internal/replay-canvas@9.47.1':
+ dependencies:
+ '@sentry-internal/replay': 9.47.1
+ '@sentry/core': 9.47.1
+
+ '@sentry-internal/replay@9.47.1':
+ dependencies:
+ '@sentry-internal/browser-utils': 9.47.1
+ '@sentry/core': 9.47.1
+
+ '@sentry/babel-plugin-component-annotate@3.6.1': {}
+
+ '@sentry/browser@9.47.1':
+ dependencies:
+ '@sentry-internal/browser-utils': 9.47.1
+ '@sentry-internal/feedback': 9.47.1
+ '@sentry-internal/replay': 9.47.1
+ '@sentry-internal/replay-canvas': 9.47.1
+ '@sentry/core': 9.47.1
+
+ '@sentry/bundler-plugin-core@3.6.1':
+ dependencies:
+ '@babel/core': 7.29.0
+ '@sentry/babel-plugin-component-annotate': 3.6.1
+ '@sentry/cli': 2.58.5
+ dotenv: 16.6.1
+ find-up: 5.0.0
+ glob: 9.3.5
+ magic-string: 0.30.8
+ unplugin: 1.0.1
+ transitivePeerDependencies:
+ - encoding
+ - supports-color
+
+ '@sentry/cli-darwin@2.58.5':
+ optional: true
+
+ '@sentry/cli-linux-arm64@2.58.5':
+ optional: true
+
+ '@sentry/cli-linux-arm@2.58.5':
+ optional: true
+
+ '@sentry/cli-linux-i686@2.58.5':
+ optional: true
+
+ '@sentry/cli-linux-x64@2.58.5':
+ optional: true
+
+ '@sentry/cli-win32-arm64@2.58.5':
+ optional: true
+
+ '@sentry/cli-win32-i686@2.58.5':
+ optional: true
+
+ '@sentry/cli-win32-x64@2.58.5':
+ optional: true
+
+ '@sentry/cli@2.58.5':
+ dependencies:
+ https-proxy-agent: 5.0.1
+ node-fetch: 2.7.0
+ progress: 2.0.3
+ proxy-from-env: 1.1.0
+ which: 2.0.2
+ optionalDependencies:
+ '@sentry/cli-darwin': 2.58.5
+ '@sentry/cli-linux-arm': 2.58.5
+ '@sentry/cli-linux-arm64': 2.58.5
+ '@sentry/cli-linux-i686': 2.58.5
+ '@sentry/cli-linux-x64': 2.58.5
+ '@sentry/cli-win32-arm64': 2.58.5
+ '@sentry/cli-win32-i686': 2.58.5
+ '@sentry/cli-win32-x64': 2.58.5
+ transitivePeerDependencies:
+ - encoding
+ - supports-color
+
+ '@sentry/core@9.47.1': {}
+
+ '@sentry/nextjs@9.47.1(@opentelemetry/context-async-hooks@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/core@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.30.1(@opentelemetry/api@1.9.0))(next@16.1.6(@babel/core@7.29.0)(@opentelemetry/api@1.9.0)(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react@19.2.3)(webpack@5.105.3)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/semantic-conventions': 1.40.0
+ '@rollup/plugin-commonjs': 28.0.1(rollup@4.59.0)
+ '@sentry-internal/browser-utils': 9.47.1
+ '@sentry/core': 9.47.1
+ '@sentry/node': 9.47.1
+ '@sentry/opentelemetry': 9.47.1(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/core@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.40.0)
+ '@sentry/react': 9.47.1(react@19.2.3)
+ '@sentry/vercel-edge': 9.47.1(@opentelemetry/context-async-hooks@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/core@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.30.1(@opentelemetry/api@1.9.0))
+ '@sentry/webpack-plugin': 3.6.1(webpack@5.105.3)
+ chalk: 3.0.0
+ next: 16.1.6(@babel/core@7.29.0)(@opentelemetry/api@1.9.0)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+ resolve: 1.22.8
+ rollup: 4.59.0
+ stacktrace-parser: 0.1.11
+ transitivePeerDependencies:
+ - '@opentelemetry/context-async-hooks'
+ - '@opentelemetry/core'
+ - '@opentelemetry/sdk-trace-base'
+ - encoding
+ - react
+ - supports-color
+ - webpack
+
+ '@sentry/node-core@9.47.1(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/core@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/instrumentation@0.57.2(@opentelemetry/api@1.9.0))(@opentelemetry/resources@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.40.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/context-async-hooks': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/resources': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/sdk-trace-base': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ '@sentry/core': 9.47.1
+ '@sentry/opentelemetry': 9.47.1(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/core@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.40.0)
+ import-in-the-middle: 1.15.0
+
+ '@sentry/node@9.47.1':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/context-async-hooks': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-amqplib': 0.46.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-connect': 0.43.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-dataloader': 0.16.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-express': 0.47.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-fs': 0.19.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-generic-pool': 0.43.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-graphql': 0.47.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-hapi': 0.45.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-http': 0.57.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-ioredis': 0.47.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-kafkajs': 0.7.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-knex': 0.44.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-koa': 0.47.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-lru-memoizer': 0.44.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-mongodb': 0.52.0(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-mongoose': 0.46.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-mysql': 0.45.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-mysql2': 0.45.2(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-pg': 0.51.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-redis-4': 0.46.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-tedious': 0.18.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/instrumentation-undici': 0.10.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/resources': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/sdk-trace-base': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ '@prisma/instrumentation': 6.11.1(@opentelemetry/api@1.9.0)
+ '@sentry/core': 9.47.1
+ '@sentry/node-core': 9.47.1(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/core@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/instrumentation@0.57.2(@opentelemetry/api@1.9.0))(@opentelemetry/resources@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.40.0)
+ '@sentry/opentelemetry': 9.47.1(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/core@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.40.0)
+ import-in-the-middle: 1.15.0
+ minimatch: 9.0.9
+ transitivePeerDependencies:
+ - supports-color
+
+ '@sentry/opentelemetry@9.47.1(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/core@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.40.0)':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/context-async-hooks': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/core': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/sdk-trace-base': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ '@sentry/core': 9.47.1
+
+ '@sentry/react@9.47.1(react@19.2.3)':
+ dependencies:
+ '@sentry/browser': 9.47.1
+ '@sentry/core': 9.47.1
+ hoist-non-react-statics: 3.3.2
+ react: 19.2.3
+
+ '@sentry/vercel-edge@9.47.1(@opentelemetry/context-async-hooks@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/core@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.30.1(@opentelemetry/api@1.9.0))':
+ dependencies:
+ '@opentelemetry/api': 1.9.0
+ '@opentelemetry/resources': 1.30.1(@opentelemetry/api@1.9.0)
+ '@opentelemetry/semantic-conventions': 1.40.0
+ '@sentry/core': 9.47.1
+ '@sentry/opentelemetry': 9.47.1(@opentelemetry/api@1.9.0)(@opentelemetry/context-async-hooks@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/core@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.30.1(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.40.0)
+ transitivePeerDependencies:
+ - '@opentelemetry/context-async-hooks'
+ - '@opentelemetry/core'
+ - '@opentelemetry/sdk-trace-base'
+
+ '@sentry/webpack-plugin@3.6.1(webpack@5.105.3)':
+ dependencies:
+ '@sentry/bundler-plugin-core': 3.6.1
+ unplugin: 1.0.1
+ uuid: 9.0.1
+ webpack: 5.105.3
+ transitivePeerDependencies:
+ - encoding
+ - supports-color
+
'@sindresorhus/merge-streams@4.0.0': {}
'@standard-schema/spec@1.1.0': {}
@@ -5278,6 +6708,10 @@ snapshots:
tslib: 2.8.1
optional: true
+ '@types/connect@3.4.38':
+ dependencies:
+ '@types/node': 20.19.35
+
'@types/d3-array@3.2.2': {}
'@types/d3-color@3.1.3': {}
@@ -5302,16 +6736,40 @@ snapshots:
'@types/d3-timer@3.0.2': {}
+ '@types/eslint-scope@3.7.7':
+ dependencies:
+ '@types/eslint': 9.6.1
+ '@types/estree': 1.0.8
+
+ '@types/eslint@9.6.1':
+ dependencies:
+ '@types/estree': 1.0.8
+ '@types/json-schema': 7.0.15
+
'@types/estree@1.0.8': {}
'@types/json-schema@7.0.15': {}
'@types/json5@0.0.29': {}
+ '@types/mysql@2.15.26':
+ dependencies:
+ '@types/node': 20.19.35
+
'@types/node@20.19.35':
dependencies:
undici-types: 6.21.0
+ '@types/pg-pool@2.0.6':
+ dependencies:
+ '@types/pg': 8.6.1
+
+ '@types/pg@8.6.1':
+ dependencies:
+ '@types/node': 20.19.35
+ pg-protocol: 1.12.0
+ pg-types: 2.2.0
+
'@types/react-dom@19.2.3(@types/react@19.2.14)':
dependencies:
'@types/react': 19.2.14
@@ -5320,8 +6778,14 @@ snapshots:
dependencies:
csstype: 3.2.3
+ '@types/shimmer@1.2.0': {}
+
'@types/statuses@2.0.6': {}
+ '@types/tedious@4.0.14':
+ dependencies:
+ '@types/node': 20.19.35
+
'@types/use-sync-external-store@0.0.6': {}
'@types/validate-npm-package-name@4.0.2': {}
@@ -5476,23 +6940,126 @@ snapshots:
'@unrs/resolver-binding-win32-x64-msvc@1.11.1':
optional: true
+ '@webassemblyjs/ast@1.14.1':
+ dependencies:
+ '@webassemblyjs/helper-numbers': 1.13.2
+ '@webassemblyjs/helper-wasm-bytecode': 1.13.2
+
+ '@webassemblyjs/floating-point-hex-parser@1.13.2': {}
+
+ '@webassemblyjs/helper-api-error@1.13.2': {}
+
+ '@webassemblyjs/helper-buffer@1.14.1': {}
+
+ '@webassemblyjs/helper-numbers@1.13.2':
+ dependencies:
+ '@webassemblyjs/floating-point-hex-parser': 1.13.2
+ '@webassemblyjs/helper-api-error': 1.13.2
+ '@xtuc/long': 4.2.2
+
+ '@webassemblyjs/helper-wasm-bytecode@1.13.2': {}
+
+ '@webassemblyjs/helper-wasm-section@1.14.1':
+ dependencies:
+ '@webassemblyjs/ast': 1.14.1
+ '@webassemblyjs/helper-buffer': 1.14.1
+ '@webassemblyjs/helper-wasm-bytecode': 1.13.2
+ '@webassemblyjs/wasm-gen': 1.14.1
+
+ '@webassemblyjs/ieee754@1.13.2':
+ dependencies:
+ '@xtuc/ieee754': 1.2.0
+
+ '@webassemblyjs/leb128@1.13.2':
+ dependencies:
+ '@xtuc/long': 4.2.2
+
+ '@webassemblyjs/utf8@1.13.2': {}
+
+ '@webassemblyjs/wasm-edit@1.14.1':
+ dependencies:
+ '@webassemblyjs/ast': 1.14.1
+ '@webassemblyjs/helper-buffer': 1.14.1
+ '@webassemblyjs/helper-wasm-bytecode': 1.13.2
+ '@webassemblyjs/helper-wasm-section': 1.14.1
+ '@webassemblyjs/wasm-gen': 1.14.1
+ '@webassemblyjs/wasm-opt': 1.14.1
+ '@webassemblyjs/wasm-parser': 1.14.1
+ '@webassemblyjs/wast-printer': 1.14.1
+
+ '@webassemblyjs/wasm-gen@1.14.1':
+ dependencies:
+ '@webassemblyjs/ast': 1.14.1
+ '@webassemblyjs/helper-wasm-bytecode': 1.13.2
+ '@webassemblyjs/ieee754': 1.13.2
+ '@webassemblyjs/leb128': 1.13.2
+ '@webassemblyjs/utf8': 1.13.2
+
+ '@webassemblyjs/wasm-opt@1.14.1':
+ dependencies:
+ '@webassemblyjs/ast': 1.14.1
+ '@webassemblyjs/helper-buffer': 1.14.1
+ '@webassemblyjs/wasm-gen': 1.14.1
+ '@webassemblyjs/wasm-parser': 1.14.1
+
+ '@webassemblyjs/wasm-parser@1.14.1':
+ dependencies:
+ '@webassemblyjs/ast': 1.14.1
+ '@webassemblyjs/helper-api-error': 1.13.2
+ '@webassemblyjs/helper-wasm-bytecode': 1.13.2
+ '@webassemblyjs/ieee754': 1.13.2
+ '@webassemblyjs/leb128': 1.13.2
+ '@webassemblyjs/utf8': 1.13.2
+
+ '@webassemblyjs/wast-printer@1.14.1':
+ dependencies:
+ '@webassemblyjs/ast': 1.14.1
+ '@xtuc/long': 4.2.2
+
+ '@xtuc/ieee754@1.2.0': {}
+
+ '@xtuc/long@4.2.2': {}
+
accepts@2.0.0:
dependencies:
mime-types: 3.0.2
negotiator: 1.0.0
+ acorn-import-attributes@1.9.5(acorn@8.16.0):
+ dependencies:
+ acorn: 8.16.0
+
+ acorn-import-phases@1.0.4(acorn@8.16.0):
+ dependencies:
+ acorn: 8.16.0
+
acorn-jsx@5.3.2(acorn@8.16.0):
dependencies:
acorn: 8.16.0
acorn@8.16.0: {}
+ agent-base@6.0.2:
+ dependencies:
+ debug: 4.4.3
+ transitivePeerDependencies:
+ - supports-color
+
agent-base@7.1.4: {}
+ ajv-formats@2.1.1(ajv@8.18.0):
+ optionalDependencies:
+ ajv: 8.18.0
+
ajv-formats@3.0.1(ajv@8.18.0):
optionalDependencies:
ajv: 8.18.0
+ ajv-keywords@5.1.0(ajv@8.18.0):
+ dependencies:
+ ajv: 8.18.0
+ fast-deep-equal: 3.1.3
+
ajv@6.14.0:
dependencies:
fast-deep-equal: 3.1.3
@@ -5517,6 +7084,11 @@ snapshots:
ansis@4.2.0: {}
+ anymatch@3.1.3:
+ dependencies:
+ normalize-path: 3.0.0
+ picomatch: 2.3.1
+
argparse@2.0.1: {}
aria-hidden@1.2.6:
@@ -5614,6 +7186,8 @@ snapshots:
baseline-browser-mapping@2.10.0: {}
+ binary-extensions@2.3.0: {}
+
body-parser@2.2.2:
dependencies:
bytes: 3.1.2
@@ -5633,6 +7207,10 @@ snapshots:
balanced-match: 1.0.2
concat-map: 0.0.1
+ brace-expansion@2.0.2:
+ dependencies:
+ balanced-match: 1.0.2
+
brace-expansion@5.0.4:
dependencies:
balanced-match: 4.0.4
@@ -5649,6 +7227,8 @@ snapshots:
node-releases: 2.0.27
update-browserslist-db: 1.2.3(browserslist@4.28.1)
+ buffer-from@1.1.2: {}
+
bundle-name@4.1.0:
dependencies:
run-applescript: 7.1.0
@@ -5676,6 +7256,11 @@ snapshots:
caniuse-lite@1.0.30001775: {}
+ chalk@3.0.0:
+ dependencies:
+ ansi-styles: 4.3.0
+ supports-color: 7.2.0
+
chalk@4.1.2:
dependencies:
ansi-styles: 4.3.0
@@ -5683,6 +7268,22 @@ snapshots:
chalk@5.6.2: {}
+ chokidar@3.6.0:
+ dependencies:
+ anymatch: 3.1.3
+ braces: 3.0.3
+ glob-parent: 5.1.2
+ is-binary-path: 2.1.0
+ is-glob: 4.0.3
+ normalize-path: 3.0.0
+ readdirp: 3.6.0
+ optionalDependencies:
+ fsevents: 2.3.3
+
+ chrome-trace-event@1.0.4: {}
+
+ cjs-module-lexer@1.4.3: {}
+
class-variance-authority@0.7.1:
dependencies:
clsx: 2.1.1
@@ -5717,6 +7318,10 @@ snapshots:
commander@14.0.3: {}
+ commander@2.20.3: {}
+
+ commondir@1.0.1: {}
+
concat-map@0.0.1: {}
content-disposition@1.0.1: {}
@@ -5864,6 +7469,8 @@ snapshots:
dependencies:
esutils: 2.0.3
+ dotenv@16.6.1: {}
+
dotenv@17.3.1: {}
dunder-proto@1.0.1:
@@ -5982,6 +7589,8 @@ snapshots:
iterator.prototype: 1.1.5
safe-array-concat: 1.1.3
+ es-module-lexer@2.0.0: {}
+
es-object-atoms@1.1.1:
dependencies:
es-errors: 1.3.0
@@ -6146,6 +7755,11 @@ snapshots:
string.prototype.matchall: 4.0.12
string.prototype.repeat: 1.0.0
+ eslint-scope@5.1.1:
+ dependencies:
+ esrecurse: 4.3.0
+ estraverse: 4.3.0
+
eslint-scope@8.4.0:
dependencies:
esrecurse: 4.3.0
@@ -6214,14 +7828,20 @@ snapshots:
dependencies:
estraverse: 5.3.0
+ estraverse@4.3.0: {}
+
estraverse@5.3.0: {}
+ estree-walker@2.0.2: {}
+
esutils@2.0.3: {}
etag@1.8.1: {}
eventemitter3@5.0.4: {}
+ events@3.3.0: {}
+
eventsource-parser@3.0.6: {}
eventsource@3.0.7:
@@ -6373,6 +7993,8 @@ snapshots:
dependencies:
fetch-blob: 3.2.0
+ forwarded-parse@2.1.2: {}
+
forwarded@0.2.0: {}
fresh@2.0.0: {}
@@ -6383,6 +8005,11 @@ snapshots:
jsonfile: 6.2.0
universalify: 2.0.1
+ fs.realpath@1.0.0: {}
+
+ fsevents@2.3.3:
+ optional: true
+
function-bind@1.1.2: {}
function.prototype.name@1.1.8:
@@ -6455,6 +8082,15 @@ snapshots:
dependencies:
is-glob: 4.0.3
+ glob-to-regexp@0.4.1: {}
+
+ glob@9.3.5:
+ dependencies:
+ fs.realpath: 1.0.0
+ minimatch: 8.0.7
+ minipass: 4.2.8
+ path-scurry: 1.11.1
+
globals@14.0.0: {}
globals@16.4.0: {}
@@ -6500,6 +8136,10 @@ snapshots:
dependencies:
hermes-estree: 0.25.1
+ hoist-non-react-statics@3.3.2:
+ dependencies:
+ react-is: 16.13.1
+
hono@4.12.3: {}
http-errors@2.0.1:
@@ -6510,6 +8150,13 @@ snapshots:
statuses: 2.0.2
toidentifier: 1.0.1
+ https-proxy-agent@5.0.1:
+ dependencies:
+ agent-base: 6.0.2
+ debug: 4.4.3
+ transitivePeerDependencies:
+ - supports-color
+
https-proxy-agent@7.0.6:
dependencies:
agent-base: 7.1.4
@@ -6538,6 +8185,13 @@ snapshots:
parent-module: 1.0.1
resolve-from: 4.0.0
+ import-in-the-middle@1.15.0:
+ dependencies:
+ acorn: 8.16.0
+ acorn-import-attributes: 1.9.5(acorn@8.16.0)
+ cjs-module-lexer: 1.4.3
+ module-details-from-path: 1.0.4
+
imurmurhash@0.1.4: {}
inherits@2.0.4: {}
@@ -6574,6 +8228,10 @@ snapshots:
dependencies:
has-bigints: 1.1.0
+ is-binary-path@2.1.0:
+ dependencies:
+ binary-extensions: 2.3.0
+
is-boolean-object@1.2.2:
dependencies:
call-bound: 1.0.4
@@ -6649,6 +8307,10 @@ snapshots:
is-promise@4.0.0: {}
+ is-reference@1.2.1:
+ dependencies:
+ '@types/estree': 1.0.8
+
is-regex@1.2.1:
dependencies:
call-bound: 1.0.4
@@ -6717,6 +8379,12 @@ snapshots:
has-symbols: 1.1.0
set-function-name: 2.0.2
+ jest-worker@27.5.1:
+ dependencies:
+ '@types/node': 20.19.35
+ merge-stream: 2.0.0
+ supports-color: 8.1.1
+
jiti@2.6.1: {}
jose@6.1.3: {}
@@ -6830,6 +8498,8 @@ snapshots:
lines-and-columns@1.2.4: {}
+ loader-runner@4.3.1: {}
+
locate-path@6.0.0:
dependencies:
p-locate: 5.0.0
@@ -6845,6 +8515,8 @@ snapshots:
dependencies:
js-tokens: 4.0.0
+ lru-cache@10.4.3: {}
+
lru-cache@5.1.1:
dependencies:
yallist: 3.1.1
@@ -6857,6 +8529,10 @@ snapshots:
dependencies:
'@jridgewell/sourcemap-codec': 1.5.5
+ magic-string@0.30.8:
+ dependencies:
+ '@jridgewell/sourcemap-codec': 1.5.5
+
math-intrinsics@1.1.0: {}
media-typer@1.1.0: {}
@@ -6872,8 +8548,14 @@ snapshots:
braces: 3.0.3
picomatch: 2.3.1
+ mime-db@1.52.0: {}
+
mime-db@1.54.0: {}
+ mime-types@2.1.35:
+ dependencies:
+ mime-db: 1.52.0
+
mime-types@3.0.2:
dependencies:
mime-db: 1.54.0
@@ -6890,8 +8572,22 @@ snapshots:
dependencies:
brace-expansion: 1.1.12
+ minimatch@8.0.7:
+ dependencies:
+ brace-expansion: 2.0.2
+
+ minimatch@9.0.9:
+ dependencies:
+ brace-expansion: 2.0.2
+
minimist@1.2.8: {}
+ minipass@4.2.8: {}
+
+ minipass@7.1.3: {}
+
+ module-details-from-path@1.0.4: {}
+
ms@2.1.3: {}
msw@2.12.10(@types/node@20.19.35)(typescript@5.9.3):
@@ -6929,7 +8625,9 @@ snapshots:
negotiator@1.0.0: {}
- next@16.1.6(@babel/core@7.29.0)(react-dom@19.2.3(react@19.2.3))(react@19.2.3):
+ neo-async@2.6.2: {}
+
+ next@16.1.6(@babel/core@7.29.0)(@opentelemetry/api@1.9.0)(react-dom@19.2.3(react@19.2.3))(react@19.2.3):
dependencies:
'@next/env': 16.1.6
'@swc/helpers': 0.5.15
@@ -6948,6 +8646,7 @@ snapshots:
'@next/swc-linux-x64-musl': 16.1.6
'@next/swc-win32-arm64-msvc': 16.1.6
'@next/swc-win32-x64-msvc': 16.1.6
+ '@opentelemetry/api': 1.9.0
sharp: 0.34.5
transitivePeerDependencies:
- '@babel/core'
@@ -6962,6 +8661,10 @@ snapshots:
object.entries: 1.1.9
semver: 6.3.1
+ node-fetch@2.7.0:
+ dependencies:
+ whatwg-url: 5.0.0
+
node-fetch@3.3.2:
dependencies:
data-uri-to-buffer: 4.0.1
@@ -6970,6 +8673,8 @@ snapshots:
node-releases@2.0.27: {}
+ normalize-path@3.0.0: {}
+
npm-run-path@4.0.1:
dependencies:
path-key: 3.1.1
@@ -7112,10 +8817,27 @@ snapshots:
path-parse@1.0.7: {}
+ path-scurry@1.11.1:
+ dependencies:
+ lru-cache: 10.4.3
+ minipass: 7.1.3
+
path-to-regexp@6.3.0: {}
path-to-regexp@8.3.0: {}
+ pg-int8@1.0.1: {}
+
+ pg-protocol@1.12.0: {}
+
+ pg-types@2.2.0:
+ dependencies:
+ pg-int8: 1.0.1
+ postgres-array: 2.0.0
+ postgres-bytea: 1.0.1
+ postgres-date: 1.0.7
+ postgres-interval: 1.2.0
+
picocolors@1.1.1: {}
picomatch@2.3.1: {}
@@ -7143,6 +8865,16 @@ snapshots:
picocolors: 1.1.1
source-map-js: 1.2.1
+ postgres-array@2.0.0: {}
+
+ postgres-bytea@1.0.1: {}
+
+ postgres-date@1.0.7: {}
+
+ postgres-interval@1.2.0:
+ dependencies:
+ xtend: 4.0.2
+
powershell-utils@0.1.0: {}
prelude-ls@1.2.1: {}
@@ -7151,6 +8883,8 @@ snapshots:
dependencies:
parse-ms: 4.0.0
+ progress@2.0.3: {}
+
prompts@2.4.2:
dependencies:
kleur: 3.0.3
@@ -7167,6 +8901,8 @@ snapshots:
forwarded: 0.2.0
ipaddr.js: 1.9.1
+ proxy-from-env@1.1.0: {}
+
punycode@2.3.1: {}
qs@6.15.0:
@@ -7238,6 +8974,10 @@ snapshots:
'@types/react': 19.2.14
'@types/react-dom': 19.2.3(@types/react@19.2.14)
+ randombytes@2.1.0:
+ dependencies:
+ safe-buffer: 5.2.1
+
range-parser@1.2.1: {}
raw-body@3.0.2:
@@ -7292,6 +9032,10 @@ snapshots:
react@19.2.3: {}
+ readdirp@3.6.0:
+ dependencies:
+ picomatch: 2.3.1
+
recast@0.23.11:
dependencies:
ast-types: 0.16.1
@@ -7350,6 +9094,14 @@ snapshots:
require-from-string@2.0.2: {}
+ require-in-the-middle@7.5.2:
+ dependencies:
+ debug: 4.4.3
+ module-details-from-path: 1.0.4
+ resolve: 1.22.11
+ transitivePeerDependencies:
+ - supports-color
+
reselect@5.1.1: {}
resolve-from@4.0.0: {}
@@ -7362,6 +9114,12 @@ snapshots:
path-parse: 1.0.7
supports-preserve-symlinks-flag: 1.0.0
+ resolve@1.22.8:
+ dependencies:
+ is-core-module: 2.16.1
+ path-parse: 1.0.7
+ supports-preserve-symlinks-flag: 1.0.0
+
resolve@2.0.0-next.6:
dependencies:
es-errors: 1.3.0
@@ -7380,6 +9138,37 @@ snapshots:
reusify@1.1.0: {}
+ rollup@4.59.0:
+ dependencies:
+ '@types/estree': 1.0.8
+ optionalDependencies:
+ '@rollup/rollup-android-arm-eabi': 4.59.0
+ '@rollup/rollup-android-arm64': 4.59.0
+ '@rollup/rollup-darwin-arm64': 4.59.0
+ '@rollup/rollup-darwin-x64': 4.59.0
+ '@rollup/rollup-freebsd-arm64': 4.59.0
+ '@rollup/rollup-freebsd-x64': 4.59.0
+ '@rollup/rollup-linux-arm-gnueabihf': 4.59.0
+ '@rollup/rollup-linux-arm-musleabihf': 4.59.0
+ '@rollup/rollup-linux-arm64-gnu': 4.59.0
+ '@rollup/rollup-linux-arm64-musl': 4.59.0
+ '@rollup/rollup-linux-loong64-gnu': 4.59.0
+ '@rollup/rollup-linux-loong64-musl': 4.59.0
+ '@rollup/rollup-linux-ppc64-gnu': 4.59.0
+ '@rollup/rollup-linux-ppc64-musl': 4.59.0
+ '@rollup/rollup-linux-riscv64-gnu': 4.59.0
+ '@rollup/rollup-linux-riscv64-musl': 4.59.0
+ '@rollup/rollup-linux-s390x-gnu': 4.59.0
+ '@rollup/rollup-linux-x64-gnu': 4.59.0
+ '@rollup/rollup-linux-x64-musl': 4.59.0
+ '@rollup/rollup-openbsd-x64': 4.59.0
+ '@rollup/rollup-openharmony-arm64': 4.59.0
+ '@rollup/rollup-win32-arm64-msvc': 4.59.0
+ '@rollup/rollup-win32-ia32-msvc': 4.59.0
+ '@rollup/rollup-win32-x64-gnu': 4.59.0
+ '@rollup/rollup-win32-x64-msvc': 4.59.0
+ fsevents: 2.3.3
+
router@2.2.0:
dependencies:
debug: 4.4.3
@@ -7404,6 +9193,8 @@ snapshots:
has-symbols: 1.1.0
isarray: 2.0.5
+ safe-buffer@5.2.1: {}
+
safe-push-apply@1.0.0:
dependencies:
es-errors: 1.3.0
@@ -7419,6 +9210,13 @@ snapshots:
scheduler@0.27.0: {}
+ schema-utils@4.3.3:
+ dependencies:
+ '@types/json-schema': 7.0.15
+ ajv: 8.18.0
+ ajv-formats: 2.1.1(ajv@8.18.0)
+ ajv-keywords: 5.1.0(ajv@8.18.0)
+
semver@6.3.1: {}
semver@7.7.4: {}
@@ -7439,6 +9237,10 @@ snapshots:
transitivePeerDependencies:
- supports-color
+ serialize-javascript@6.0.2:
+ dependencies:
+ randombytes: 2.1.0
+
serve-static@2.2.1:
dependencies:
encodeurl: 2.0.0
@@ -7554,6 +9356,8 @@ snapshots:
shebang-regex@3.0.0: {}
+ shimmer@1.2.1: {}
+
side-channel-list@1.0.0:
dependencies:
es-errors: 1.3.0
@@ -7595,10 +9399,19 @@ snapshots:
source-map-js@1.2.1: {}
+ source-map-support@0.5.21:
+ dependencies:
+ buffer-from: 1.1.2
+ source-map: 0.6.1
+
source-map@0.6.1: {}
stable-hash@0.0.5: {}
+ stacktrace-parser@0.1.11:
+ dependencies:
+ type-fest: 0.7.1
+
statuses@2.0.2: {}
stdin-discarder@0.2.2: {}
@@ -7705,6 +9518,10 @@ snapshots:
dependencies:
has-flag: 4.0.0
+ supports-color@8.1.1:
+ dependencies:
+ has-flag: 4.0.0
+
supports-preserve-symlinks-flag@1.0.0: {}
tagged-tag@1.0.0: {}
@@ -7715,6 +9532,22 @@ snapshots:
tapable@2.3.0: {}
+ terser-webpack-plugin@5.3.16(webpack@5.105.3):
+ dependencies:
+ '@jridgewell/trace-mapping': 0.3.31
+ jest-worker: 27.5.1
+ schema-utils: 4.3.3
+ serialize-javascript: 6.0.2
+ terser: 5.46.0
+ webpack: 5.105.3
+
+ terser@5.46.0:
+ dependencies:
+ '@jridgewell/source-map': 0.3.11
+ acorn: 8.16.0
+ commander: 2.20.3
+ source-map-support: 0.5.21
+
tiny-invariant@1.3.3: {}
tinyexec@1.0.2: {}
@@ -7740,6 +9573,8 @@ snapshots:
dependencies:
tldts: 7.0.23
+ tr46@0.0.3: {}
+
ts-api-utils@2.4.0(typescript@5.9.3):
dependencies:
typescript: 5.9.3
@@ -7770,6 +9605,8 @@ snapshots:
dependencies:
prelude-ls: 1.2.1
+ type-fest@0.7.1: {}
+
type-fest@5.4.4:
dependencies:
tagged-tag: 1.0.0
@@ -7841,6 +9678,13 @@ snapshots:
unpipe@1.0.0: {}
+ unplugin@1.0.1:
+ dependencies:
+ acorn: 8.16.0
+ chokidar: 3.6.0
+ webpack-sources: 3.3.4
+ webpack-virtual-modules: 0.5.0
+
unrs-resolver@1.11.1:
dependencies:
napi-postinstall: 0.3.4
@@ -7898,6 +9742,8 @@ snapshots:
util-deprecate@1.0.2: {}
+ uuid@9.0.1: {}
+
validate-npm-package-name@7.0.2: {}
vary@1.1.2: {}
@@ -7919,8 +9765,56 @@ snapshots:
d3-time: 3.1.0
d3-timer: 3.0.1
+ watchpack@2.5.1:
+ dependencies:
+ glob-to-regexp: 0.4.1
+ graceful-fs: 4.2.11
+
web-streams-polyfill@3.3.3: {}
+ webidl-conversions@3.0.1: {}
+
+ webpack-sources@3.3.4: {}
+
+ webpack-virtual-modules@0.5.0: {}
+
+ webpack@5.105.3:
+ dependencies:
+ '@types/eslint-scope': 3.7.7
+ '@types/estree': 1.0.8
+ '@types/json-schema': 7.0.15
+ '@webassemblyjs/ast': 1.14.1
+ '@webassemblyjs/wasm-edit': 1.14.1
+ '@webassemblyjs/wasm-parser': 1.14.1
+ acorn: 8.16.0
+ acorn-import-phases: 1.0.4(acorn@8.16.0)
+ browserslist: 4.28.1
+ chrome-trace-event: 1.0.4
+ enhanced-resolve: 5.20.0
+ es-module-lexer: 2.0.0
+ eslint-scope: 5.1.1
+ events: 3.3.0
+ glob-to-regexp: 0.4.1
+ graceful-fs: 4.2.11
+ json-parse-even-better-errors: 2.3.1
+ loader-runner: 4.3.1
+ mime-types: 2.1.35
+ neo-async: 2.6.2
+ schema-utils: 4.3.3
+ tapable: 2.3.0
+ terser-webpack-plugin: 5.3.16(webpack@5.105.3)
+ watchpack: 2.5.1
+ webpack-sources: 3.3.4
+ transitivePeerDependencies:
+ - '@swc/core'
+ - esbuild
+ - uglify-js
+
+ whatwg-url@5.0.0:
+ dependencies:
+ tr46: 0.0.3
+ webidl-conversions: 3.0.1
+
which-boxed-primitive@1.1.1:
dependencies:
is-bigint: 1.1.0
@@ -7991,6 +9885,8 @@ snapshots:
is-wsl: 3.1.1
powershell-utils: 0.1.0
+ xtend@4.0.2: {}
+
y18n@5.0.8: {}
yallist@3.1.1: {}
diff --git a/frontend/sentry.client.config.ts b/frontend/sentry.client.config.ts
new file mode 100644
index 0000000..e87d0f3
--- /dev/null
+++ b/frontend/sentry.client.config.ts
@@ -0,0 +1,10 @@
+import * as Sentry from "@sentry/nextjs";
+
+Sentry.init({
+ dsn: process.env.NEXT_PUBLIC_SENTRY_DSN || "",
+ environment: process.env.NEXT_PUBLIC_ENVIRONMENT || "development",
+ enabled: !!process.env.NEXT_PUBLIC_SENTRY_DSN,
+ tracesSampleRate: 0.2,
+ replaysSessionSampleRate: 0,
+ replaysOnErrorSampleRate: 0.5,
+});
diff --git a/frontend/src/app/automations/page.tsx b/frontend/src/app/automations/page.tsx
index bdc9d2c..2702ee4 100644
--- a/frontend/src/app/automations/page.tsx
+++ b/frontend/src/app/automations/page.tsx
@@ -11,6 +11,8 @@ import {
Loader2,
LayoutGrid,
List,
+ GitBranch,
+ Network,
} from "lucide-react";
import { Button } from "@/components/ui/button";
import { Card } from "@/components/ui/card";
@@ -37,8 +39,14 @@ import {
useAutomationStatuses,
useDeleteAutomation,
} from "@/hooks/use-automations";
+import { useWorkflows } from "@/hooks/use-workflows";
+import { usePipelines } from "@/hooks/use-pipelines";
import { RunControls } from "@/components/automation/run-controls";
import { AutomationGallery } from "@/components/automation/automation-gallery";
+import { WorkflowList } from "@/components/workflow/workflow-list";
+import { WorkflowTemplateGallery } from "@/components/workflow/workflow-template-gallery";
+import { PipelineList } from "@/components/pipeline/pipeline-list";
+import { PipelineTemplateGallery } from "@/components/pipeline/pipeline-template-gallery";
import { toast } from "sonner";
import { cn } from "@/lib/utils";
@@ -64,6 +72,8 @@ export default function AutomationsPage() {
const router = useRouter();
const { data: automations, isLoading, error } = useAutomations();
const { data: statuses } = useAutomationStatuses();
+ const { data: workflows } = useWorkflows();
+ const { data: pipelines } = usePipelines();
const deleteMutation = useDeleteAutomation();
const [deleteTarget, setDeleteTarget] = useState<{
id: number;
@@ -98,10 +108,26 @@ export default function AutomationsPage() {
Manage automated strategies for your characters
-
+
+
+
+
+
@@ -119,10 +145,30 @@ export default function AutomationsPage() {
)}
+
+
+ Workflows
+ {workflows && workflows.length > 0 && (
+
+ {workflows.length}
+
+ )}
+
+
+
+ Pipelines
+ {pipelines && pipelines.length > 0 && (
+
+ {pipelines.length}
+
+ )}
+
-
+
+
+
@@ -231,6 +277,14 @@ export default function AutomationsPage() {
)}
+
+
+
+
+
+
+
+