# To enable / disable self-hosting features.
SELF_HOSTED = true

# SimpleFIN runtime flags (default-off)
# Accepted truthy values: 1, true, yes, on
# SIMPLEFIN_DEBUG_RAW: when truthy, logs the raw payload returned by SimpleFIN (debug-only; can be noisy)
SIMPLEFIN_DEBUG_RAW=false
# SIMPLEFIN_INCLUDE_PENDING: when truthy, forces `pending=1` on SimpleFIN fetches when caller doesn't specify `pending:`
SIMPLEFIN_INCLUDE_PENDING=false

# Controls onboarding flow (valid: open, closed, invite_only)
ONBOARDING_STATE = open

# Enable Twelve market data (careful, this will use your API credits)
TWELVE_DATA_API_KEY =

# OpenAI-compatible API endpoint config
OPENAI_ACCESS_TOKEN =
OPENAI_URI_BASE =
OPENAI_MODEL =

# OpenAI-compatible API endpoint config (example: LM Studio via Docker)
# OPENAI_URI_BASE = http://host.docker.internal:1234/
# OPENAI_MODEL = qwen/qwen3-vl-4b

# OpenID Connect for development
OIDC_CLIENT_ID=
OIDC_CLIENT_SECRET=
OIDC_ISSUER=
OIDC_REDIRECT_URI=http://localhost:3000/auth/openid_connect/callback

# Langfuse config
LANGFUSE_PUBLIC_KEY =
LANGFUSE_SECRET_KEY =
LANGFUSE_HOST = https://cloud.langfuse.com

# Set to `true` to get error messages rendered in the /chats UI
AI_DEBUG_MODE =