# Enable / disable self-hosting features.
SELF_HOSTED = true

# SimpleFIN runtime flags (default-off)
# Accepted truthy values: 1, true, yes, on
# SIMPLEFIN_DEBUG_RAW: when truthy, logs the raw payload returned by SimpleFIN (debug-only; can be noisy)
SIMPLEFIN_DEBUG_RAW=false
# SIMPLEFIN_INCLUDE_PENDING: when truthy, forces `pending=1` on SimpleFIN fetches when the caller doesn't specify `pending:`
SIMPLEFIN_INCLUDE_PENDING=false
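# Example (illustrative): temporarily enable both flags while debugging a SimpleFIN sync,
# then set them back to false when done:
# SIMPLEFIN_DEBUG_RAW=true
# SIMPLEFIN_INCLUDE_PENDING=true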

# Lunchflow runtime flags (default-off)
# LUNCHFLOW_DEBUG_RAW: when truthy, logs the raw payload returned by Lunchflow (debug-only; can be noisy)
LUNCHFLOW_DEBUG_RAW=false
# LUNCHFLOW_INCLUDE_PENDING: when truthy, adds `include_pending=true` to Lunchflow transaction fetch requests
LUNCHFLOW_INCLUDE_PENDING=false
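# Example (illustrative, assuming the same truthy values listed above also apply to these flags):
# LUNCHFLOW_INCLUDE_PENDING=1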

# Controls onboarding flow (valid: open, closed, invite_only)
ONBOARDING_STATE = open
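# Example (illustrative): restrict sign-ups to invited users only
# ONBOARDING_STATE = invite_only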

# Enable Twelve Data market data (careful, this will use your API credits)
TWELVE_DATA_API_KEY =

# OpenAI-compatible API endpoint config
OPENAI_ACCESS_TOKEN =
OPENAI_URI_BASE =
OPENAI_MODEL =

# Example OpenAI-compatible API endpoint config (LM Studio / Docker):
# OPENAI_URI_BASE = http://host.docker.internal:1234/
# OPENAI_MODEL = qwen/qwen3-vl-4b
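# Another illustrative local setup (assumes Ollama's OpenAI-compatible server and the same
# base-URL convention as the LM Studio example above; adjust host, port and model for your install):
# OPENAI_URI_BASE = http://host.docker.internal:11434/
# OPENAI_MODEL = llama3.1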

# OpenID Connect for development
OIDC_CLIENT_ID=
OIDC_CLIENT_SECRET=
OIDC_ISSUER=
OIDC_REDIRECT_URI=http://localhost:3000/auth/openid_connect/callback
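# Example with placeholder values (illustrative only; substitute the values issued by your identity provider):
# OIDC_CLIENT_ID=sure-dev
# OIDC_CLIENT_SECRET=change-me
# OIDC_ISSUER=https://id.example.com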

# Langfuse config
LANGFUSE_PUBLIC_KEY =
LANGFUSE_SECRET_KEY =
LANGFUSE_HOST = https://cloud.langfuse.com
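# Example (illustrative placeholders; Langfuse project keys typically start with pk-lf- / sk-lf-):
# LANGFUSE_PUBLIC_KEY = pk-lf-xxxxxxxx
# LANGFUSE_SECRET_KEY = sk-lf-xxxxxxxx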

# Set to `true` to get error messages rendered in the /chats UI
AI_DEBUG_MODE =