feat: add compose example with local LLM (#489)
* feat: add ai compose example
* Rename for consistency
* Small edits
* Update brakeman gem to 7.1.2
* Update volume and port configuration for ollama-webui

Signed-off-by: Blaž Dular <22869613+xBlaz3kx@users.noreply.github.com>

---------

Signed-off-by: Blaž Dular <22869613+xBlaz3kx@users.noreply.github.com>
Co-authored-by: Juan José Mata <juanjo.mata@gmail.com>
Co-authored-by: Juan José Mata <jjmata@jjmata.com>
compose.example.ai.yml (new file, 170 lines)

@@ -0,0 +1,170 @@
# ===========================================================================
# Example Docker Compose file with additional Ollama service for LLM tools
# ===========================================================================
#
# Purpose:
# --------
#
# This file is an example Docker Compose configuration for self-hosting
# Sure with Ollama on your local machine or on a cloud VPS.
#
# The configuration below is a "standard" setup that works out of the box,
# but if you're running this outside of a local network, it is recommended
# to override the default environment variables for extra security.
#
# Setup:
# ------
#
# To run this, you should read the setup guide:
#
# https://github.com/we-promise/sure/blob/main/docs/hosting/docker.md
#
# Troubleshooting:
# ----------------
#
# If you run into problems, you should open a Discussion here:
#
# https://github.com/we-promise/sure/discussions/categories/general
#
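# Quick start (a minimal sketch, assuming this file is saved as
# compose.example.ai.yml and Docker Compose v2 is available):
#
#   # generate a unique secret instead of the example default below
#   export SECRET_KEY_BASE=$(openssl rand -hex 64)
#   docker compose -f compose.example.ai.yml up -d
#
# Sure is then served on http://localhost:3000 and Open WebUI on
# http://localhost:8080 (per the published ports below).
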
x-db-env: &db_env
  POSTGRES_USER: ${POSTGRES_USER:-sure_user}
  POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-sure_password}
  POSTGRES_DB: ${POSTGRES_DB:-sure_production}

x-rails-env: &rails_env
  <<: *db_env
  SECRET_KEY_BASE: ${SECRET_KEY_BASE:-a7523c3d0ae56415046ad8abae168d71074a79534a7062258f8d1d51ac2f76d3c3bc86d86b6b0b307df30d9a6a90a2066a3fa9e67c5e6f374dbd7dd4e0778e13}
  SELF_HOSTED: "true"
  RAILS_FORCE_SSL: "false"
  RAILS_ASSUME_SSL: "false"
  DB_HOST: db
  DB_PORT: 5432
  REDIS_URL: redis://redis:6379/1
  AI_DEBUG_MODE: "true" # Useful for debugging; set to "false" in production
  # Ollama using OpenAI API-compatible endpoints
  OPENAI_ACCESS_TOKEN: token-can-be-any-value-for-ollama
  OPENAI_MODEL: llama3.1:8b # Note: use a tool-enabled model
  OPENAI_URI_BASE: http://ollama:11434/v1
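  # A quick way to verify the endpoint once the stack is up (a sketch;
  # Ollama serves OpenAI-compatible routes under /v1, and port 11434 is
  # published by the ollama service below):
  #   curl http://localhost:11434/v1/models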
  # NOTE: switching to the real OpenAI API (instead of Ollama) will incur
  # costs when you use AI-related features in the app (chat, rules). Make
  # sure you have set appropriate spend limits on your account before
  # enabling this.
  # OPENAI_ACCESS_TOKEN: ${OPENAI_ACCESS_TOKEN}

services:
  # Note: you still have to download models manually using the ollama CLI
  # or via Open WebUI (see the sketch below the ollama service)
  ollama:
    volumes:
      - ollama:/root/.ollama
    container_name: ollama
    hostname: ollama
    restart: unless-stopped
    image: docker.io/ollama/ollama:latest
    ports:
      - "11434:11434"
    environment:
      - OLLAMA_KEEP_ALIVE=1h
      # NOTE: upstream Ollama reads OLLAMA_MODELS as the models *directory*
      # path, not a list of models to pre-load, so the original line below is
      # left disabled; pull your preferred models manually instead
      # - OLLAMA_MODELS=deepseek-r1:8b,llama3.1:8b
    networks:
      - sure_net
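    # A minimal sketch of pulling the models referenced in this file once
    # the container is running:
    #   docker compose -f compose.example.ai.yml exec ollama ollama pull llama3.1:8b
    #   docker compose -f compose.example.ai.yml exec ollama ollama pull deepseek-r1:8b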
    # Recommended: enable GPU support
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [ gpu ]
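    # (note: the commented deploy block above assumes the NVIDIA Container
    # Toolkit is installed on the Docker host)
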
  ollama-webui:
    image: ghcr.io/open-webui/open-webui
    container_name: ollama-webui
    volumes:
      - ollama-webui:/app/backend/data
    depends_on:
      - ollama
    ports:
      - "8080:8080"
    environment: # https://docs.openwebui.com/getting-started/env-configuration#default_models
      - OLLAMA_BASE_URLS=http://host.docker.internal:11434 # comma-separated Ollama hosts
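      # host.docker.internal resolves to the Docker host through the
      # host-gateway mapping under extra_hosts below, so Open WebUI reaches
      # Ollama via the published 11434 port; since both services share
      # sure_net, http://ollama:11434 should work as well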
      - ENV=dev
      - WEBUI_AUTH=False
      - WEBUI_NAME=AI
      - WEBUI_URL=http://localhost:8080
      - WEBUI_SECRET_KEY=t0p-s3cr3t
      - NO_PROXY=host.docker.internal
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
    networks:
      - sure_net

  web:
    image: ghcr.io/we-promise/sure:latest
    volumes:
      - app-storage:/rails/storage
    ports:
      - "3000:3000"
    restart: unless-stopped
    environment:
      <<: *rails_env
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - sure_net

  worker:
    image: ghcr.io/we-promise/sure:latest
    command: bundle exec sidekiq
    volumes:
      - app-storage:/rails/storage
    restart: unless-stopped
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    environment:
      <<: *rails_env
    networks:
      - sure_net

  db:
    image: postgres:16
    restart: unless-stopped
    volumes:
      - postgres-data:/var/lib/postgresql/data
    environment:
      <<: *db_env
    healthcheck:
      test: [ "CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB" ]
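      # the doubled $$ escapes Compose interpolation so the container's shell
      # expands POSTGRES_USER / POSTGRES_DB at runtime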
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - sure_net

  redis:
    image: redis:latest
    restart: unless-stopped
    volumes:
      - redis-data:/data
    healthcheck:
      test: [ "CMD", "redis-cli", "ping" ]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - sure_net

volumes:
  app-storage:
  postgres-data:
  redis-data:
  ollama:
  ollama-webui:

networks:
  sure_net:
    driver: bridge