Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git (synced 2025-09-24 11:06:24 +02:00)
feat(ai): introduce dedicated AI roles and wiring; clean up legacy AI stack
• Add svc-ai category under roles and load it in the constructor stage
• Create new 'svc-ai-ollama' role (vars, tasks, compose, meta, README) and a dedicated network
• Refactor the former AI stack into separate app roles: web-app-flowise and web-app-openwebui
• Add web-app-minio role; adjust config (no central DB), meta (fa-database, run_after), compose networks include, volume key
• Provide user-focused READMEs for Flowise, Open WebUI, MinIO, and Ollama
• Networks: add subnets for web-app-openwebui, web-app-flowise, web-app-minio; rename web-app-ai → svc-ai-ollama
• Ports: rename ai_* keys to web-app-openwebui / web-app-flowise; keep minio_api / minio_console
• Add group_vars/all/17_ai.yml (OLLAMA_BASE_LOCAL_URL, OLLAMA_LOCAL_ENABLED)
• Replace hardcoded include paths with path_join in multiple roles (svc-db-postgres, sys-service, sys-stk-front-proxy, sys-stk-full-stateful, sys-svc-webserver, web-svc-cdn, web-app-keycloak)
• Remove obsolete web-app-ai templates/vars/env; split Flowise into its own role
• Minor config cleanups (CSP flags to {}, central_database=false)

https://chatgpt.com/share/68d15cb8-cf18-800f-b853-78962f751f81
```diff
@@ -104,6 +104,12 @@ defaults_networks:
       subnet: 192.168.103.224/28
     web-app-xwiki:
       subnet: 192.168.103.240/28
+    web-app-openwebui:
+      subnet: 192.168.104.0/28
+    web-app-flowise:
+      subnet: 192.168.104.16/28
+    web-app-minio:
+      subnet: 192.168.104.32/28

   # /24 Networks / 254 Usable Clients
   web-app-bigbluebutton:
@@ -116,5 +122,5 @@ defaults_networks:
       subnet: 192.168.201.0/24
     svc-db-openldap:
       subnet: 192.168.202.0/24
-    web-app-ai:
+    svc-ai-ollama:
       subnet: 192.168.203.0/24 # Big network to bridge applications into ai
```
```diff
@@ -76,8 +76,8 @@ ports:
     web-app-magento: 8052
     web-app-bridgy-fed: 8053
     web-app-xwiki: 8054
-    web-app-ai_openwebui: 8055
-    web-app-ai_flowise: 8056
+    web-app-openwebui: 8055
+    web-app-flowise: 8056
     web-app-minio_api: 8057
     web-app-minio_console: 8058
     web-app-bigbluebutton: 48087 # This port is predefined by bbb. @todo Try to change this to a 8XXX port
```
group_vars/all/17_ai.yml (new file, 3 lines)

```diff
@@ -0,0 +1,3 @@
+# URL of Local Ollama Container
+OLLAMA_BASE_LOCAL_URL: "http://{{ applications | get_app_conf('svc-ai-ollama', 'docker.services.ollama.name') }}:{{ applications | get_app_conf(application_id, 'docker.services.ollama.port') }}"
+OLLAMA_LOCAL_ENABLED: "{{ applications | get_app_conf(application_id, 'server.domains.canonical.flowise') }}"
```
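For orientation: with the `svc-ai-ollama` defaults introduced below (service name `ollama`, port `11434`), `OLLAMA_BASE_LOCAL_URL` should render to `http://ollama:11434`. A minimal sketch to check the rendered value; the assert task itself is illustrative and not part of this commit:

```yaml
# Illustrative only: confirm what OLLAMA_BASE_LOCAL_URL renders to,
# assuming the svc-ai-ollama config defaults (name: ollama, port: 11434).
- name: Verify rendered Ollama base URL
  ansible.builtin.assert:
    that:
      - OLLAMA_BASE_LOCAL_URL == "http://ollama:11434"
    success_msg: "OLLAMA_BASE_LOCAL_URL renders to {{ OLLAMA_BASE_LOCAL_URL }}"
```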
```diff
@@ -148,6 +148,11 @@ roles:
     description: "Network setup (DNS, Let's Encrypt HTTP, WireGuard, etc.)"
     icon: "fas fa-globe"
     invokable: true
+  ai:
+    title: "AI Services"
+    description: "Core AI building blocks—model serving, OpenAI-compatible gateways, vector databases, orchestration, and chat UIs."
+    icon: "fas fa-brain"
+    invokable: true
   user:
     title: "Users & Access"
     description: "User accounts & access control"
```
roles/svc-ai-ollama/README.md (new file, 23 lines)

```diff
@@ -0,0 +1,23 @@
+
+# Ollama
+
+## Description
+
+**Ollama** is a local model server that runs open LLMs on your hardware and exposes a simple HTTP API. It’s the backbone for privacy-first AI: prompts and data stay on your machines.
+
+## Overview
+
+After the first model pull, Ollama serves models to clients like Open WebUI (for chat) and Flowise (for workflows). Models are cached locally for quick reuse and can run fully offline when required.
+
+## Features
+
+* Run popular open models (chat, code, embeddings) locally
+* Simple, predictable HTTP API for developers
+* Local caching to avoid repeated downloads
+* Works seamlessly with Open WebUI and Flowise
+* Offline-capable for air-gapped deployments
+
+## Further Resources
+
+* Ollama — [https://ollama.com](https://ollama.com)
+* Ollama Model Library — [https://ollama.com/library](https://ollama.com/library)
```
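As a quick way to exercise the HTTP API the README describes, here is a hedged sketch using `ansible.builtin.uri`; it assumes the container is reachable under its compose service name `ollama` on the default port, and that a model such as `llama3` has already been pulled:

```yaml
# Illustrative smoke test against Ollama's HTTP API (not part of this commit).
# GET /api/tags lists locally cached models; POST /api/generate runs a prompt.
- name: List models cached by Ollama
  ansible.builtin.uri:
    url: "http://ollama:11434/api/tags"
  register: ollama_models

- name: Run a one-shot generation against a pulled model
  ansible.builtin.uri:
    url: "http://ollama:11434/api/generate"
    method: POST
    body_format: json
    body:
      model: "llama3"
      prompt: "Say hello"
      stream: false
  register: ollama_reply
```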
roles/svc-ai-ollama/config/main.yml (new file, 12 lines)

```diff
@@ -0,0 +1,12 @@
+docker:
+  services:
+    ollama:
+      backup:
+        no_stop_required: true
+      image: ollama/ollama
+      version: latest
+      name: ollama
+      port: 11434
+  volumes:
+    models: "ollama_models"
+  network: "ollama"
```
roles/svc-ai-ollama/meta/main.yml (new file, 25 lines)

```diff
@@ -0,0 +1,25 @@
+---
+galaxy_info:
+  author: "Kevin Veen-Birkenbach"
+  description: "Installs Ollama — a local model server for running open LLMs with a simple HTTP API."
+  license: "Infinito.Nexus NonCommercial License"
+  license_url: "https://s.infinito.nexus/license"
+  company: |
+    Kevin Veen-Birkenbach
+    Consulting & Coaching Solutions
+    https://www.veen.world
+  galaxy_tags:
+    - ai
+    - llm
+    - inference
+    - offline
+    - privacy
+    - self-hosted
+    - ollama
+  repository: "https://s.infinito.nexus/code"
+  issue_tracker_url: "https://s.infinito.nexus/issues"
+  documentation: "https://s.infinito.nexus/code/"
+  logo:
+    class: "fa-solid fa-microchip"
+  run_after: []
+dependencies: []
```
roles/svc-ai-ollama/tasks/01_core.yml (new file, 12 lines)

```diff
@@ -0,0 +1,12 @@
+- name: Include dependency 'sys-svc-docker'
+  include_role:
+    name: sys-svc-docker
+  when: run_once_sys_svc_docker is not defined
+
+- name: "include docker-compose role"
+  include_role:
+    name: docker-compose
+  vars:
+    docker_compose_flush_handlers: true
+
+- include_tasks: utils/run_once.yml
```
roles/svc-ai-ollama/tasks/main.yml (new file, 5 lines)

```diff
@@ -0,0 +1,5 @@
+- block:
+    - include_tasks: 01_core.yml
+  vars:
+    flush_handlers: true
+  when: run_once_svc_ai_ollama is not defined
```
roles/svc-ai-ollama/templates/docker-compose.yml.j2 (new file, 17 lines)

```diff
@@ -0,0 +1,17 @@
+{% include 'roles/docker-compose/templates/base.yml.j2' %}
+
+  ollama:
+{% include 'roles/docker-container/templates/base.yml.j2' %}
+    image: {{ OLLAMA_IMAGE }}:{{ OLLAMA_VERSION }}
+    container_name: {{ OLLAMA_CONTAINER }}
+    expose:
+      - "{{ OLLAMA_PORT }}"
+    volumes:
+      - ollama_models:/root/.ollama
+{% include 'roles/docker-container/templates/networks.yml.j2' %}
+
+{% include 'roles/docker-compose/templates/networks.yml.j2' %}
+
+{% include 'roles/docker-compose/templates/volumes.yml.j2' %}
+  ollama_models:
+    name: {{ OLLAMA_VOLUME }}
```
roles/svc-ai-ollama/vars/main.yml (new file, 13 lines)

```diff
@@ -0,0 +1,13 @@
+# General
+application_id: "svc-ai-ollama"
+
+# Docker
+docker_compose_flush_handlers: true
+
+# Ollama
+# https://ollama.com/
+OLLAMA_VERSION:   "{{ applications | get_app_conf(application_id, 'docker.services.ollama.version') }}"
+OLLAMA_IMAGE:     "{{ applications | get_app_conf(application_id, 'docker.services.ollama.image') }}"
+OLLAMA_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.ollama.name') }}"
+OLLAMA_PORT:      "{{ applications | get_app_conf(application_id, 'docker.services.ollama.port') }}"
+OLLAMA_VOLUME:    "{{ applications | get_app_conf(application_id, 'docker.volumes.models') }}"
```
```diff
@@ -5,7 +5,7 @@
     flush_handlers: true
   when: run_once_svc_db_postgres is not defined

-- include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml"
+- include_tasks: "{{ [ playbook_dir, 'tasks/utils/load_handlers.yml' ] | path_join }}"
   # Necessary because docker handlers are overwritten by condition
   vars:
     handler_role_name: "docker-compose"
```
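The rationale for this and the following `path_join` hunks: plain string concatenation yields a double or missing slash whenever `playbook_dir` carries (or lacks) a trailing separator, while the filter normalizes the join. A small illustrative sketch, not part of the commit:

```yaml
# Illustrative: path_join joins list elements with exactly one separator,
# regardless of whether the first element ends in a slash.
- name: Demonstrate path_join vs. naive concatenation
  ansible.builtin.debug:
    msg:
      - "{{ [ '/opt/playbook', 'tasks/utils/load_handlers.yml' ] | path_join }}"   # -> /opt/playbook/tasks/utils/load_handlers.yml
      - "{{ [ '/opt/playbook/', 'tasks/utils/load_handlers.yml' ] | path_join }}"  # -> same single-slash result
```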
```diff
@@ -1,5 +1,5 @@
 - name: "Reload sys-daemon handlers"
-  include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml"
+  include_tasks: "{{ [ playbook_dir, 'tasks/utils/load_handlers.yml' ] | path_join }}"
   vars:
     handler_role_name: "sys-daemon"
   when: run_once_sys_service is defined
```
```diff
@@ -9,7 +9,7 @@
 - include_tasks: "02_cloudflare.yml"
   when: DNS_PROVIDER == "cloudflare"

-- include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml"
+- include_tasks: "{{ [ playbook_dir, 'tasks/utils/load_handlers.yml' ] | path_join }}"
   vars:
     handler_role_name: "svc-prx-openresty"

```
```diff
@@ -1,6 +1,6 @@
 # run_once_sys_stk_full_stateful: deactivated

-- include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml"
+- include_tasks: "{{ [ playbook_dir, 'tasks/utils/load_handlers.yml' ] | path_join }}"
   vars:
     handler_role_name: "svc-prx-openresty"

```
```diff
@@ -12,7 +12,7 @@
     include_vars: "{{ DOCKER_VARS_FILE }}"

 - name: "Load docker compose handlers"
-  include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml"
+  include_tasks: "{{ [ playbook_dir, 'tasks/utils/load_handlers.yml' ] | path_join }}"
   vars:
     handler_role_name: "docker-compose"

```
Deleted (74 lines): former web-app-ai docker-compose template

```diff
@@ -1,74 +0,0 @@
-{% include 'roles/docker-compose/templates/base.yml.j2' %}
-  ollama:
-{% include 'roles/docker-container/templates/base.yml.j2' %}
-    image: {{ AI_OLLAMA_IMAGE }}:{{ AI_OLLAMA_VERSION }}
-    container_name: {{ AI_OLLAMA_CONTAINER }}
-    expose:
-      - "{{ AI_OLLAMA_PORT }}"
-    volumes:
-      - ollama_models:/root/.ollama
-{% include 'roles/docker-container/templates/networks.yml.j2' %}
-
-  openwebui:
-{% include 'roles/docker-container/templates/base.yml.j2' %}
-    image: {{ AI_OPENWEBUI_IMAGE }}:{{ AI_OPENWEBUI_VERSION }}
-    container_name: {{ AI_OPENWEBUI_CONTAINER }}
-    depends_on:
-      - ollama
-    ports:
-      - "127.0.0.1:{{ AI_OPENWEBUI_PORT_PUBLIC }}:8080"
-    volumes:
-      - openwebui_data:/app/backend/data
-{% include 'roles/docker-container/templates/networks.yml.j2' %}
-
-  litellm:
-{% include 'roles/docker-container/templates/base.yml.j2' %}
-    image: {{ AI_LITELLM_IMAGE }}:{{ AI_LITELLM_VERSION }}
-    container_name: {{ AI_LITELLM_CONTAINER }}
-    depends_on:
-      - ollama
-    expose:
-      - {{ AI_LITELLM_PORT }}
-    volumes:
-      - {{ AI_LITELLM_CONFIG_PATH_HOST }}:{{ AI_LITELLM_CONFIG_PATH_DOCKER }}:ro
-    command: >
-      --host 0.0.0.0
-      --port {{ AI_LITELLM_PORT }}
-      --config {{ AI_LITELLM_CONFIG_PATH_DOCKER }}
-{% include 'roles/docker-container/templates/networks.yml.j2' %}
-
-  qdrant:
-{% include 'roles/docker-container/templates/base.yml.j2' %}
-    image: {{ AI_QDRANT_IMAGE }}:{{ AI_QDRANT_VERSION }}
-    container_name: {{ AI_QDRANT_CONTAINER }}
-    ports:
-      - {{ AI_MINIO_HTTP_PORT }}
-      - {{ AI_MINIO_GRPC_PORT }}
-    volumes:
-      - qdrant_data:/qdrant/storage
-{% include 'roles/docker-container/templates/networks.yml.j2' %}
-
-  flowise:
-{% include 'roles/docker-container/templates/base.yml.j2' %}
-    image: {{ AI_FLOWISE_IMAGE }}:{{ AI_FLOWISE_VERSION }}
-    container_name: {{ AI_FLOWISE_CONTAINER }}
-    depends_on:
-      - qdrant
-      - litellm
-    ports:
-      - "127.0.0.1:{{ AI_FLOWISE_PORT_PUBLIC }}:{{ AI_FLOWISE_PORT_INTERNAL }}"
-    volumes:
-      - flowise_data:/root/.flowise
-{% include 'roles/docker-container/templates/networks.yml.j2' %}
-
-{% include 'roles/docker-compose/templates/networks.yml.j2' %}
-
-{% include 'roles/docker-compose/templates/volumes.yml.j2' %}
-  ollama_models:
-    name: {{ AI_OLLAMA_VOLUME }}
-  openwebui_data:
-    name: {{ AI_OPENWEBUI_VOLUME }}
-  qdrant_data:
-    name: {{ AI_QDRANT_VOLUME }}
-  flowise_data:
-    name: {{ AI_FLOWISE_VOLUME }}
```
Deleted (21 lines): former web-app-ai env template

```diff
@@ -1,21 +0,0 @@
-# Open WebUI
-OLLAMA_BASE_URL={{ AI_OLLAMA_BASE_URL }}
-OFFLINE_MODE={{ AI_OPENWEBUI_OFFLINE_MODE | ternary(1, 0) }}
-HF_HUB_OFFLINE={{ AI_OPENWEBUI_HF_HUB_OFFLINE | ternary(1, 0) }}
-ENABLE_PERSISTENT_CONFIG=False
-
-# LiteLLM
-LITELLM_MASTER_KEY=dummy-key
-LITELLM_CONFIG=/etc/litellm/config.yaml
-
-# Flowise
-PORT={{ AI_FLOWISE_PORT_INTERNAL }}
-FLOWISE_USERNAME=admin
-FLOWISE_PASSWORD=admin
-DATABASE_PATH=/root/.flowise
-FLOWISE_FILE_STORAGE_PATH=/root/.flowise/storage
-
-# Qdrant + LiteLLM/Ollama:
-QDRANT_URL={{ AI_QDRANT_INTERNAL_URL }}
-OPENAI_API_BASE={{ AI_LITELLM_INTERNAL_URL }}/v1
-OPENAI_API_KEY=dummy-key
```
Deleted (66 lines): former web-app-ai vars

```diff
@@ -1,66 +0,0 @@
-
-# General
-application_id: "web-app-ai"
-
-# Docker
-docker_pull_git_repository: false
-docker_compose_file_creation_enabled: true
-
-# Open WebUI
-# https://openwebui.com/
-AI_OPENWEBUI_VERSION:       "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.version') }}"
-AI_OPENWEBUI_IMAGE:         "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.image') }}"
-AI_OPENWEBUI_CONTAINER:     "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.name') }}"
-AI_OPENWEBUI_OFFLINE_MODE:  "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.offline_mode') }}"
-AI_OPENWEBUI_HF_HUB_OFFLINE: "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.hf_hub_offline') }}"
-AI_OPENWEBUI_VOLUME:        "{{ applications | get_app_conf(application_id, 'docker.volumes.openwebui') }}"
-AI_OPENWEBUI_PORT_PUBLIC:   "{{ ports.localhost.http[application_id ~ '_openwebui'] }}"
-AI_OPENWEBUI_DOMAIN:        "{{ applications | get_app_conf(application_id, 'server.domains.canonical.openwebui') }}"
-
-# Ollama
-# https://ollama.com/
-AI_OLLAMA_VERSION:    "{{ applications | get_app_conf(application_id, 'docker.services.ollama.version') }}"
-AI_OLLAMA_IMAGE:      "{{ applications | get_app_conf(application_id, 'docker.services.ollama.image') }}"
-AI_OLLAMA_CONTAINER:  "{{ applications | get_app_conf(application_id, 'docker.services.ollama.name') }}"
-AI_OLLAMA_VOLUME:     "{{ applications | get_app_conf(application_id, 'docker.volumes.ollama') }}"
-AI_OLLAMA_PORT:       11434
-AI_OLLAMA_BASE_URL:   "http://ollama:{{ AI_OLLAMA_PORT }}"
-
-# LiteLLM
-# https://www.litellm.ai/
-AI_LITELLM_VERSION:   "{{ applications | get_app_conf(application_id, 'docker.services.litellm.version') }}"
-AI_LITELLM_IMAGE:     "{{ applications | get_app_conf(application_id, 'docker.services.litellm.image') }}"
-AI_LITELLM_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.litellm.name') }}"
-AI_LITELLM_VOLUME:    "{{ applications | get_app_conf(application_id, 'docker.volumes.litellm') }}"
-AI_LITELLM_PORT:      4000
-AI_LITELLM_INTERNAL_URL: "http://litellm:{{ AI_LITELLM_PORT }}"
-AI_LITELLM_CONFIG_PATH_HOST: "{{ docker_compose.directories.config }}litellm.config.yaml"
-AI_LITELLM_CONFIG_PATH_DOCKER: "/etc/litellm/config.yaml"
-
-# Qdrant
-# https://qdrant.tech/
-AI_QDRANT_VERSION:    "{{ applications | get_app_conf(application_id, 'docker.services.qdrant.version') }}"
-AI_QDRANT_IMAGE:      "{{ applications | get_app_conf(application_id, 'docker.services.qdrant.image') }}"
-AI_QDRANT_CONTAINER:  "{{ applications | get_app_conf(application_id, 'docker.services.qdrant.name') }}"
-AI_QDRANT_VOLUME:     "{{ applications | get_app_conf(application_id, 'docker.volumes.qdrant') }}"
-AI_QDRANT_HTTP_PORT:  6333
-AI_QDRANT_GRPC_PORT:  6334
-AI_QDRANT_INTERNAL_URL: "http://qdrant:{{ AI_QDRANT_HTTP_PORT }}"
-
-# Flowise
-# https://flowiseai.com/
-AI_FLOWISE_VERSION:   "{{ applications | get_app_conf(application_id, 'docker.services.flowise.version') }}"
-AI_FLOWISE_IMAGE:     "{{ applications | get_app_conf(application_id, 'docker.services.flowise.image') }}"
-AI_FLOWISE_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.flowise.name') }}"
-AI_FLOWISE_VOLUME:    "{{ applications | get_app_conf(application_id, 'docker.volumes.flowise') }}"
-AI_FLOWISE_PORT_PUBLIC: "{{ ports.localhost.http[application_id ~ '_flowise'] }}"
-AI_FLOWISE_PORT_INTERNAL: 3000
-AI_FLOWISE_DOMAIN:    "{{ applications | get_app_conf(application_id, 'server.domains.canonical.flowise') }}"
-
-AI_FRONT_PROXY_MATRIX: >-
-  {{
-    [
-      { 'domain': AI_OPENWEBUI_DOMAIN, 'http_port': AI_OPENWEBUI_PORT_PUBLIC },
-      { 'domain': AI_FLOWISE_DOMAIN,   'http_port': AI_FLOWISE_PORT_PUBLIC }
-    ]
-  }}
```
roles/web-app-flowise/README.md (new file, 24 lines)

```diff
@@ -0,0 +1,24 @@
+# Flowise
+
+## Description
+
+**Flowise** is a visual builder for AI workflows. Create, test, and publish chains that combine LLMs, your documents, tools, and vector search—without writing glue code.
+
+## Overview
+
+Users design flows on a drag-and-drop canvas (LLM, RAG, tools, webhooks), test them interactively, and publish endpoints that applications or bots can call. Flowise works well with local backends such as **Ollama** (directly or via **LiteLLM**) and **Qdrant** for retrieval.
+
+## Features
+
+* No/low-code canvas to build assistants and pipelines
+* Publish flows as HTTP endpoints for easy integration
+* Retrieval-augmented generation (RAG) with vector DBs (e.g., Qdrant)
+* Pluggable model backends via OpenAI-compatible API or direct Ollama
+* Keep data and prompts on your own infrastructure
+
+## Further Resources
+
+* Flowise — [https://flowiseai.com](https://flowiseai.com)
+* Qdrant — [https://qdrant.tech](https://qdrant.tech)
+* LiteLLM — [https://www.litellm.ai](https://www.litellm.ai)
+* Ollama — [https://ollama.com](https://ollama.com)
```
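To illustrate the "publish endpoints" feature above: Flowise exposes published flows over a REST prediction endpoint. A hedged sketch, assuming the internal service name `flowise`, the internal port `3000` used by this role, and a placeholder flow ID taken from the Flowise UI:

```yaml
# Illustrative: call a published Flowise flow (not part of this commit).
# The flow ID is a placeholder; Flowise shows the real one per flow in its UI.
- name: Query a published Flowise flow
  ansible.builtin.uri:
    url: "http://flowise:3000/api/v1/prediction/<your-flow-id>"
    method: POST
    body_format: json
    body:
      question: "Summarize the uploaded document."
  register: flowise_answer
```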
```diff
@@ -2,17 +2,16 @@ features:
   matomo: true
   css: true
   desktop: true
-  central_database: true
+  central_database: false
   logout: true
   javascript: false
 server:
   domains:
     canonical:
-      openwebui: "chat.ai.{{ PRIMARY_DOMAIN }}"
       flowise: "flowise.ai.{{ PRIMARY_DOMAIN }}"
     aliases: []
   csp:
-    flags: []
+    flags: {}
     #script-src-elem:
     #  unsafe-inline: true
     #script-src:
```
```diff
@@ -25,20 +24,6 @@ server:
       connect-src: []
 docker:
   services:
-    ollama:
-      backup:
-        no_stop_required: true
-      image: ollama/ollama
-      version: latest
-      name: ollama
-    open-webui:
-      backup:
-        no_stop_required: true
-      image: ghcr.io/open-webui/open-webui
-      version: main
-      name: open-webui
-      offline_mode: false
-      hf_hub_offline: false
     litellm:
       backup:
         no_stop_required: true
@@ -61,10 +46,10 @@ docker:
       enabled: false
     database:
       enabled: false
-    ollama:
-      enabled: true
   volumes:
-    openwebui: ai_openwebui_data
-    ollama: ai_ollama_models
-    qdrant: ai_qdrant_data
-    flowise: ai_flowise_data
+    qdrant: qdrant_data
+    flowise: flowise_data
   credentials: {}
```
roles/web-app-flowise/meta/main.yml (new file, 30 lines)

```diff
@@ -0,0 +1,30 @@
+---
+galaxy_info:
+  author: "Kevin Veen-Birkenbach"
+  description: "Installs Flowise — a visual builder to create, test, and publish AI workflows (RAG, tools, webhooks)."
+  license: "Infinito.Nexus NonCommercial License"
+  license_url: "https://s.infinito.nexus/license"
+  company: |
+    Kevin Veen-Birkenbach
+    Consulting & Coaching Solutions
+    https://www.veen.world
+  galaxy_tags:
+    - ai
+    - llm
+    - rag
+    - workflow
+    - orchestration
+    - self-hosted
+    - qdrant
+    - litellm
+    - ollama
+    - flowise
+  repository: "https://s.infinito.nexus/code"
+  issue_tracker_url: "https://s.infinito.nexus/issues"
+  documentation: "https://s.infinito.nexus/code/"
+  logo:
+    class: "fa-solid fa-diagram-project"
+  run_after:
+    - web-app-keycloak
+    - web-app-matomo
+dependencies: []
```
```diff
@@ -1,13 +1,23 @@
 ---
+- name: "Install Ollama Dependency"
+  include_role:
+    name: svc-ai-ollama
+  vars:
+    flush_handlers: true
+  when:
+    - run_once_svc_ai_ollama is not defined
+    - OLLAMA_LOCAL_ENABLED | bool
+
 - name: "load docker and db for {{ application_id }}"
   include_role:
     name: sys-stk-back-stateless
   vars:
     docker_compose_flush_handlers: false

-- name: "create {{ AI_LITELLM_CONFIG_PATH_HOST }}"
-  template:
-    src: "litellm.config.yaml.j2"
-    dest: "{{ AI_LITELLM_CONFIG_PATH_HOST }}"
+- name: "create {{ FLOWISE_LITELLM_CONFIG_PATH_HOST }}"
+  template:
+    src: "litellm.config.yaml.j2"
+    dest: "{{ FLOWISE_LITELLM_CONFIG_PATH_HOST }}"
   notify: docker compose up

 - name: "flush handlers of docker compose"
@@ -19,6 +29,3 @@
   vars:
     domain: "{{ item.domain }}"
     http_port: "{{ item.http_port }}"
-  loop: "{{ AI_FRONT_PROXY_MATRIX }}"
-  loop_control:
-    label: "{{ item.domain }} -> {{ item.http_port }}"
```
roles/web-app-flowise/templates/docker-compose.yml.j2 (new file, 48 lines)

```diff
@@ -0,0 +1,48 @@
+{% include 'roles/docker-compose/templates/base.yml.j2' %}
+  litellm:
+{% include 'roles/docker-container/templates/base.yml.j2' %}
+    image: {{ FLOWISE_LITELLM_IMAGE }}:{{ FLOWISE_LITELLM_VERSION }}
+    container_name: {{ FLOWISE_LITELLM_CONTAINER }}
+    depends_on:
+      - ollama
+    expose:
+      - {{ FLOWISE_LITELLM_PORT }}
+    volumes:
+      - {{ FLOWISE_LITELLM_CONFIG_PATH_HOST }}:{{ FLOWISE_LITELLM_CONFIG_PATH_DOCKER }}:ro
+    command: >
+      --host 0.0.0.0
+      --port {{ FLOWISE_LITELLM_PORT }}
+      --config {{ FLOWISE_LITELLM_CONFIG_PATH_DOCKER }}
+{% include 'roles/docker-container/templates/networks.yml.j2' %}
+
+  qdrant:
+{% include 'roles/docker-container/templates/base.yml.j2' %}
+    image: {{ FLOWISE_QDRANT_IMAGE }}:{{ FLOWISE_QDRANT_VERSION }}
+    container_name: {{ FLOWISE_QDRANT_CONTAINER }}
+    ports:
+      - {{ FLOWISE_QDRANT_HTTP_PORT }}
+      - {{ FLOWISE_QDRANT_GRPC_PORT }}
+    volumes:
+      - qdrant_data:/qdrant/storage
+{% include 'roles/docker-container/templates/networks.yml.j2' %}
+
+  flowise:
+{% include 'roles/docker-container/templates/base.yml.j2' %}
+    image: {{ FLOWISE_IMAGE }}:{{ FLOWISE_VERSION }}
+    container_name: {{ FLOWISE_CONTAINER }}
+    depends_on:
+      - qdrant
+      - litellm
+    ports:
+      - "127.0.0.1:{{ FLOWISE_PORT_PUBLIC }}:{{ FLOWISE_PORT_INTERNAL }}"
+    volumes:
+      - flowise_data:/root/.flowise
+{% include 'roles/docker-container/templates/networks.yml.j2' %}
+
+{% include 'roles/docker-compose/templates/networks.yml.j2' %}
+
+{% include 'roles/docker-compose/templates/volumes.yml.j2' %}
+  qdrant_data:
+    name: {{ FLOWISE_QDRANT_VOLUME }}
+  flowise_data:
+    name: {{ FLOWISE_VOLUME }}
```
roles/web-app-flowise/templates/env.j2 (new file, 15 lines)

```diff
@@ -0,0 +1,15 @@
+# LiteLLM
+LITELLM_MASTER_KEY=dummy-key
+LITELLM_CONFIG=/etc/litellm/config.yaml
+
+# Flowise
+PORT={{ FLOWISE_PORT_INTERNAL }}
+FLOWISE_USERNAME=admin
+FLOWISE_PASSWORD=admin
+DATABASE_PATH=/root/.flowise
+FLOWISE_FILE_STORAGE_PATH=/root/.flowise/storage
+
+# Qdrant + LiteLLM/Ollama:
+QDRANT_URL={{ FLOWISE_QDRANT_INTERNAL_URL }}
+OPENAI_API_BASE={{ FLOWISE_LITELLM_INTERNAL_URL }}/v1
+OPENAI_API_KEY=dummy-key
```
```diff
@@ -2,12 +2,12 @@ model_list:
   - model_name: ollama/llama3
     litellm_params:
       model: ollama/llama3
-      api_base: {{ AI_OLLAMA_BASE_URL }}
+      api_base: {{ OLLAMA_BASE_LOCAL_URL }}
       rpm: 1000
   - model_name: ollama/nomic-embed-text
     litellm_params:
       model: ollama/nomic-embed-text
-      api_base: {{ AI_OLLAMA_BASE_URL }}
+      api_base: {{ OLLAMA_BASE_LOCAL_URL }}
      rpm: 1000

litellm_settings:
```
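Since LiteLLM fronts these Ollama models behind an OpenAI-compatible API, the entries above become addressable via `/v1/chat/completions`. A hedged sketch, assuming the service name `litellm`, port `4000`, and the `dummy-key` master key from this role's `env.j2`:

```yaml
# Illustrative: chat completion routed through LiteLLM to Ollama
# (not part of this commit).
- name: Chat completion via LiteLLM's OpenAI-compatible endpoint
  ansible.builtin.uri:
    url: "http://litellm:4000/v1/chat/completions"
    method: POST
    headers:
      Authorization: "Bearer dummy-key"
    body_format: json
    body:
      model: "ollama/llama3"
      messages:
        - role: user
          content: "Hello from the Flowise backend."
  register: litellm_reply
```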
roles/web-app-flowise/vars/main.yml (new file, 33 lines)

```diff
@@ -0,0 +1,33 @@
+# General
+application_id: "web-app-flowise"
+
+# Flowise
+# https://flowiseai.com/
+FLOWISE_VERSION:       "{{ applications | get_app_conf(application_id, 'docker.services.flowise.version') }}"
+FLOWISE_IMAGE:         "{{ applications | get_app_conf(application_id, 'docker.services.flowise.image') }}"
+FLOWISE_CONTAINER:     "{{ applications | get_app_conf(application_id, 'docker.services.flowise.name') }}"
+FLOWISE_VOLUME:        "{{ applications | get_app_conf(application_id, 'docker.volumes.flowise') }}"
+FLOWISE_PORT_PUBLIC:   "{{ ports.localhost.http[application_id] }}"
+FLOWISE_PORT_INTERNAL: 3000
+
+# Dependencies
+
+## LiteLLM
+# https://www.litellm.ai/
+FLOWISE_LITELLM_VERSION:   "{{ applications | get_app_conf(application_id, 'docker.services.litellm.version') }}"
+FLOWISE_LITELLM_IMAGE:     "{{ applications | get_app_conf(application_id, 'docker.services.litellm.image') }}"
+FLOWISE_LITELLM_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.litellm.name') }}"
+FLOWISE_LITELLM_PORT:      4000
+FLOWISE_LITELLM_INTERNAL_URL: "http://litellm:{{ FLOWISE_LITELLM_PORT }}"
+FLOWISE_LITELLM_CONFIG_PATH_HOST: "{{ [ docker_compose.directories.config, 'litellm.config.yaml' ] | path_join }}"
+FLOWISE_LITELLM_CONFIG_PATH_DOCKER: "/etc/litellm/config.yaml"
+
+## Qdrant
+# https://qdrant.tech/
+FLOWISE_QDRANT_VERSION:   "{{ applications | get_app_conf(application_id, 'docker.services.qdrant.version') }}"
+FLOWISE_QDRANT_IMAGE:     "{{ applications | get_app_conf(application_id, 'docker.services.qdrant.image') }}"
+FLOWISE_QDRANT_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.qdrant.name') }}"
+FLOWISE_QDRANT_VOLUME:    "{{ applications | get_app_conf(application_id, 'docker.volumes.qdrant') }}"
+FLOWISE_QDRANT_HTTP_PORT: 6333
+FLOWISE_QDRANT_GRPC_PORT: 6334
+FLOWISE_QDRANT_INTERNAL_URL: "http://qdrant:{{ FLOWISE_QDRANT_HTTP_PORT }}"
```
```diff
@@ -1,4 +1,4 @@
-- include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml"
+- include_tasks: "{{ [ playbook_dir, 'tasks/utils/load_handlers.yml' ] | path_join }}"
   vars:
     handler_role_name: "docker-compose"
 - ansible.builtin.include_vars:
```
roles/web-app-minio/README.md (new file, 25 lines)

```diff
@@ -0,0 +1,25 @@
+
+---
+
+# MinIO
+
+## Description
+
+**MinIO** is an S3-compatible object storage service for files, media, backups, and AI artifacts—self-hosted for performance and control.
+
+## Overview
+
+Applications that speak “S3” (Pixelfed, Mastodon, Nextcloud, Flowise, etc.) store and retrieve objects from MinIO buckets using familiar SDKs and CLIs. Admins manage buckets, users, and access policies through a browser console while keeping everything on-prem.
+
+## Features
+
+* S3-compatible API for broad app compatibility
+* Buckets, users, access keys, and fine-grained policies
+* Optional versioning, lifecycle rules, and object lock
+* Presigned URLs for secure, time-limited uploads/downloads
+* Ideal for AI stacks: datasets, embeddings, and artifacts
+
+## Further Resources
+
+* MinIO — [https://www.min.io](https://www.min.io)
+* AWS S3 (API background) — [https://aws.amazon.com/s3](https://aws.amazon.com/s3)
```
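Because MinIO speaks the S3 API, ordinary S3 tooling works against it. A hedged sketch using the `amazon.aws.s3_bucket` module pointed at the role's API domain; the credentials and domain below are placeholders, and `endpoint_url` support assumes a reasonably recent `amazon.aws` collection:

```yaml
# Illustrative: create a bucket on MinIO through the S3 API (not part of this commit).
- name: Create an artifacts bucket on MinIO
  amazon.aws.s3_bucket:
    name: ai-artifacts
    state: present
    endpoint_url: "https://api.s3.example.org"   # stands in for the role's MINIO_API_DOMAIN
    access_key: "<minio-access-key>"
    secret_key: "<minio-secret-key>"
```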
```diff
@@ -2,7 +2,7 @@ features:
   matomo: true
   css: true
   desktop: true
-  central_database: true
+  central_database: false
   logout: true
   javascript: false
 server:
@@ -12,7 +12,7 @@ server:
       api: "api.s3.{{ PRIMARY_DOMAIN }}"
     aliases: []
   csp:
-    flags: []
+    flags: {}
     #script-src-elem:
     #  unsafe-inline: true
     #script-src:
```
roles/web-app-minio/meta/main.yml (new file, 29 lines)

```diff
@@ -0,0 +1,29 @@
+---
+galaxy_info:
+  author: "Kevin Veen-Birkenbach"
+  description: "Installs MinIO — an S3-compatible object storage service for media, backups, and AI artifacts."
+  license: "Infinito.Nexus NonCommercial License"
+  license_url: "https://s.infinito.nexus/license"
+  company: |
+    Kevin Veen-Birkenbach
+    Consulting & Coaching Solutions
+    https://www.veen.world
+  galaxy_tags:
+    - s3
+    - object-storage
+    - storage
+    - buckets
+    - minio
+    - self-hosted
+    - privacy
+    - backup
+    - devops
+  repository: "https://s.infinito.nexus/code"
+  issue_tracker_url: "https://s.infinito.nexus/issues"
+  documentation: "https://s.infinito.nexus/code/"
+  logo:
+    class: "fa-solid fa-database"
+  run_after:
+    - web-app-keycloak
+    - web-app-matomo
+dependencies: []
```
```diff
@@ -1,4 +1,13 @@
 ---
+- name: "Install Ollama Dependency"
+  include_role:
+    name: svc-ai-ollama
+  vars:
+    flush_handlers: true
+  when:
+    - run_once_svc_ai_ollama is not defined
+    - OLLAMA_LOCAL_ENABLED | bool
+
 - name: "load docker and db for {{ application_id }}"
   include_role:
     name: sys-stk-back-stateless
```
```diff
@@ -12,6 +12,8 @@
       - data:/data
 {% include 'roles/docker-container/templates/networks.yml.j2' %}

 {% include 'roles/docker-compose/templates/networks.yml.j2' %}

 {% include 'roles/docker-compose/templates/volumes.yml.j2' %}
+  data:
+    name: {{ MINIO_VOLUME }}
```
```diff
@@ -10,7 +10,7 @@ docker_compose_file_creation_enabled: true
 MINIO_VERSION:   "{{ applications | get_app_conf(application_id, 'docker.services.minio.version') }}"
 MINIO_IMAGE:     "{{ applications | get_app_conf(application_id, 'docker.services.minio.image') }}"
 MINIO_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.minio.name') }}"
-MINIO_VOLUME:    "{{ applications | get_app_conf(application_id, 'docker.volumes.minio') }}"
+MINIO_VOLUME:    "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"

 ## Api
 MINIO_API_DOMAIN: "{{ applications | get_app_conf(application_id, 'server.domains.canonical.api') }}"
```
roles/web-app-openwebui/README.md (new file, 24 lines)

```diff
@@ -0,0 +1,24 @@
+Here are user-focused **README.md** drafts for the four roles, following your template structure and describing the **role** (what the installed software does for users), not the folder.
+
+# Open WebUI
+
+## Description
+
+**Open WebUI** provides a clean, fast chat interface for working with local AI models (e.g., via Ollama). It delivers a ChatGPT-like experience on your own infrastructure to keep prompts and data private.
+
+## Overview
+
+End users access a web page, pick a model, and start chatting. Conversations remain on your servers. Admins can enable strict offline behavior so no external network calls occur. The UI can also point at OpenAI-compatible endpoints if needed.
+
+## Features
+
+* Familiar multi-chat interface with quick model switching
+* Supports local backends (Ollama) and OpenAI-compatible APIs
+* Optional **offline mode** for air-gapped environments
+* File/paste input for summaries and extraction (model dependent)
+* Suitable for teams: predictable, private, reproducible
+
+## Further Resources
+
+* Open WebUI — [https://openwebui.com](https://openwebui.com)
+* Ollama — [https://ollama.com](https://ollama.com)
```
roles/web-app-openwebui/config/main.yml (new file, 41 lines)

```diff
@@ -0,0 +1,41 @@
+features:
+  matomo: true
+  css: true
+  desktop: true
+  central_database: false
+  logout: true
+  javascript: false
+server:
+  domains:
+    canonical:
+      openwebui: "chat.ai.{{ PRIMARY_DOMAIN }}"
+    aliases: []
+  csp:
+    flags: {}
+    #script-src-elem:
+    #  unsafe-inline: true
+    #script-src:
+    #  unsafe-inline: true
+    #  unsafe-eval: true
+    #style-src:
+    #  unsafe-inline: true
+    whitelist:
+      font-src: []
+      connect-src: []
+docker:
+  services:
+    openwebui:
+      backup:
+        no_stop_required: true
+      image: ghcr.io/open-webui/open-webui
+      version: main
+      name: open-webui
+      offline_mode: false
+      hf_hub_offline: false
+    redis:
+      enabled: false
+    database:
+      enabled: false
+  volumes:
+    openwebui: ai_openwebui_data
+  credentials: {}
```
roles/web-app-openwebui/meta/main.yml (new file, 28 lines)

```diff
@@ -0,0 +1,28 @@
+---
+galaxy_info:
+  author: "Kevin Veen-Birkenbach"
+  description: "Installs Open WebUI — a clean, fast chat interface for local/private AI models (e.g., via Ollama)."
+  license: "Infinito.Nexus NonCommercial License"
+  license_url: "https://s.infinito.nexus/license"
+  company: |
+    Kevin Veen-Birkenbach
+    Consulting & Coaching Solutions
+    https://www.veen.world
+  galaxy_tags:
+    - ai
+    - llm
+    - chat
+    - privacy
+    - self-hosted
+    - offline
+    - openwebui
+    - ollama
+  repository: "https://s.infinito.nexus/code"
+  issue_tracker_url: "https://s.infinito.nexus/issues"
+  documentation: "https://s.infinito.nexus/code/"
+  logo:
+    class: "fa-solid fa-comments"
+  run_after:
+    - web-app-keycloak
+    - web-app-matomo
+dependencies: []
```
roles/web-app-openwebui/tasks/main.yml (new file, 13 lines)

```diff
@@ -0,0 +1,13 @@
+---
+- name: "Install Ollama Dependency"
+  include_role:
+    name: svc-ai-ollama
+  vars:
+    flush_handlers: true
+  when:
+    - run_once_svc_ai_ollama is not defined
+    - OLLAMA_LOCAL_ENABLED | bool
+
+- name: "load docker, proxy for '{{ application_id }}'"
+  include_role:
+    name: sys-stk-full-stateless
```
roles/web-app-openwebui/templates/docker-compose.yml.j2 (new file, 30 lines)

```diff
@@ -0,0 +1,30 @@
+{% include 'roles/docker-compose/templates/base.yml.j2' %}
+  ollama:
+{% include 'roles/docker-container/templates/base.yml.j2' %}
+    image: {{ OLLAMA_IMAGE }}:{{ OLLAMA_VERSION }}
+    container_name: {{ OLLAMA_CONTAINER }}
+    expose:
+      - "{{ OLLAMA_PORT }}"
+    volumes:
+      - ollama_models:/root/.ollama
+{% include 'roles/docker-container/templates/networks.yml.j2' %}
+
+  openwebui:
+{% include 'roles/docker-container/templates/base.yml.j2' %}
+    image: {{ OPENWEBUI_IMAGE }}:{{ OPENWEBUI_VERSION }}
+    container_name: {{ OPENWEBUI_CONTAINER }}
+    depends_on:
+      - ollama
+    ports:
+      - "127.0.0.1:{{ OPENWEBUI_PORT_PUBLIC }}:8080"
+    volumes:
+      - openwebui_data:/app/backend/data
+{% include 'roles/docker-container/templates/networks.yml.j2' %}
+
+{% include 'roles/docker-compose/templates/networks.yml.j2' %}
+
+{% include 'roles/docker-compose/templates/volumes.yml.j2' %}
+  ollama_models:
+    name: {{ OLLAMA_VOLUME }}
+  openwebui_data:
+    name: {{ OPENWEBUI_VOLUME }}
```
roles/web-app-openwebui/templates/env.j2 (new file, 5 lines)

```diff
@@ -0,0 +1,5 @@
+# Open WebUI
+OLLAMA_BASE_URL={{ OLLAMA_BASE_LOCAL_URL }}
+OFFLINE_MODE={{ OPENWEBUI_OFFLINE_MODE | ternary(1, 0) }}
+HF_HUB_OFFLINE={{ OPENWEBUI_HF_HUB_OFFLINE | ternary(1, 0) }}
+ENABLE_PERSISTENT_CONFIG=False
```
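For clarity on the `ternary` lines above: with the role defaults (`offline_mode: false`, `hf_hub_offline: false`) both flags render to `0`; setting either to `true` in the config flips the rendered value to `1`. An illustrative check, not part of the commit:

```yaml
# Illustrative: how the ternary filters in env.j2 render, using the same
# role variables the template reads.
- name: Show rendered Open WebUI offline flags
  ansible.builtin.debug:
    msg: "OFFLINE_MODE={{ OPENWEBUI_OFFLINE_MODE | ternary(1, 0) }} HF_HUB_OFFLINE={{ OPENWEBUI_HF_HUB_OFFLINE | ternary(1, 0) }}"
```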
roles/web-app-openwebui/vars/main.yml (new file, 17 lines)

```diff
@@ -0,0 +1,17 @@
+
+# General
+application_id: "web-app-openwebui"
+
+# Docker
+docker_pull_git_repository: false
+docker_compose_file_creation_enabled: true
+
+# Open WebUI
+# https://openwebui.com/
+OPENWEBUI_VERSION:        "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.version') }}"
+OPENWEBUI_IMAGE:          "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.image') }}"
+OPENWEBUI_CONTAINER:      "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.name') }}"
+OPENWEBUI_OFFLINE_MODE:   "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.offline_mode') }}"
+OPENWEBUI_HF_HUB_OFFLINE: "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.hf_hub_offline') }}"
+OPENWEBUI_VOLUME:         "{{ applications | get_app_conf(application_id, 'docker.volumes.openwebui') }}"
+OPENWEBUI_PORT_PUBLIC:    "{{ ports.localhost.http[application_id] }}"
```
```diff
@@ -2,6 +2,6 @@
 - include_tasks: 01_core.yml
   when: run_once_web_svc_cdn is not defined

-- include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml"
+- include_tasks: "{{ [ playbook_dir, 'tasks/utils/load_handlers.yml' ] | path_join }}"
   vars:
     handler_role_name: "docker-compose"
```
```diff
@@ -128,5 +128,6 @@
       - svc-net # 3. Load network roles
       - svc-db  # 4. Load database roles
       - svc-prx # 5. Load proxy roles
+      - svc-ai  # 6. Load ai roles
     loop_control:
       label: "{{ item }}-roles.yml"
```