Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git (synced 2025-09-24 11:06:24 +02:00)
• Rename ports: web-app-ai_minio_* → web-app-minio_* in group_vars
• Remove MinIO from web-app-ai (service, volumes, ENV)
• Add new role web-app-minio (config, tasks, compose, env, vars) incl. front-proxy matrix
• AI role: front-proxy loop via matrix; unify domain/port vars (OPENWEBUI/Flowise *_PORT_PUBLIC/_PORT_INTERNAL, *_DOMAIN)
• Update compose templates accordingly

Ref: https://chatgpt.com/share/68d15cb8-cf18-800f-b853-78962f751f81
22 lines · 604 B · Django/Jinja
# Open WebUI
OLLAMA_BASE_URL={{ AI_OLLAMA_BASE_URL }}
OFFLINE_MODE={{ AI_OPENWEBUI_OFFLINE_MODE | ternary(1, 0) }}
HF_HUB_OFFLINE={{ AI_OPENWEBUI_HF_HUB_OFFLINE | ternary(1, 0) }}
ENABLE_PERSISTENT_CONFIG=False

# LiteLLM
LITELLM_MASTER_KEY=dummy-key
LITELLM_CONFIG=/etc/litellm/config.yaml

# Flowise
PORT={{ AI_FLOWISE_PORT_INTERNAL }}
FLOWISE_USERNAME=admin
FLOWISE_PASSWORD=admin
DATABASE_PATH=/root/.flowise
FLOWISE_FILE_STORAGE_PATH=/root/.flowise/storage

# Qdrant + LiteLLM/Ollama:
QDRANT_URL={{ AI_QDRANT_INTERNAL_URL }}
OPENAI_API_BASE={{ AI_LITELLM_INTERNAL_URL }}/v1
OPENAI_API_KEY=dummy-key
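
A quick way to sanity-check what the ternary(1, 0) lines render to is to push the template through Jinja2 locally. This is a minimal sketch, not part of the repo: "ternary" is re-implemented as a stand-in for Ansible's filter, and the variable values are made-up examples rather than values from the playbook's group_vars.

# Minimal local sketch: render a few lines of this .env.j2 to see what ternary() produces.
# The variable values below are illustrative only.
from jinja2 import Environment

env = Environment()
# Stand-in for Ansible's ternary filter: value | ternary(true_val, false_val)
env.filters["ternary"] = lambda value, true_val, false_val: true_val if value else false_val

template = env.from_string(
    "OFFLINE_MODE={{ AI_OPENWEBUI_OFFLINE_MODE | ternary(1, 0) }}\n"
    "HF_HUB_OFFLINE={{ AI_OPENWEBUI_HF_HUB_OFFLINE | ternary(1, 0) }}\n"
    "OLLAMA_BASE_URL={{ AI_OLLAMA_BASE_URL }}\n"
)

print(template.render(
    AI_OPENWEBUI_OFFLINE_MODE=True,             # -> OFFLINE_MODE=1
    AI_OPENWEBUI_HF_HUB_OFFLINE=False,          # -> HF_HUB_OFFLINE=0
    AI_OLLAMA_BASE_URL="http://ollama:11434",   # example value only
))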
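
OPENAI_API_BASE points Flowise at LiteLLM's OpenAI-compatible API (which in turn fronts Ollama), so any OpenAI-style client can reach it with the master key from this file. The sketch below is a hedged illustration, assuming the LiteLLM container is reachable as http://litellm:4000 and that a model alias like "ollama/llama3" is defined in /etc/litellm/config.yaml; both names are assumptions, not values taken from this repo.

# Illustrative only: exercising the OpenAI-compatible endpoint that OPENAI_API_BASE points at.
from openai import OpenAI

client = OpenAI(
    base_url="http://litellm:4000/v1",  # assumed value of AI_LITELLM_INTERNAL_URL + /v1
    api_key="dummy-key",                # LITELLM_MASTER_KEY from this env file
)

resp = client.chat.completions.create(
    model="ollama/llama3",              # hypothetical model alias from config.yaml
    messages=[{"role": "user", "content": "ping"}],
)
print(resp.choices[0].message.content)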