diff --git a/group_vars/all/00_general.yml b/group_vars/all/00_general.yml index 2789e592..1e2380ad 100644 --- a/group_vars/all/00_general.yml +++ b/group_vars/all/00_general.yml @@ -86,4 +86,4 @@ _applications_nextcloud_oidc_flavor: >- RBAC: GROUP: NAME: "/roles" # Name of the group which holds the RBAC roles - CLAIM: "groups" # Name of the claim containing the RBAC groups \ No newline at end of file + CLAIM: "groups" # Name of the claim containing the RBAC groups diff --git a/group_vars/all/09_networks.yml b/group_vars/all/09_networks.yml index cb88f7ca..f8f9a901 100644 --- a/group_vars/all/09_networks.yml +++ b/group_vars/all/09_networks.yml @@ -104,6 +104,12 @@ defaults_networks: subnet: 192.168.103.224/28 web-app-xwiki: subnet: 192.168.103.240/28 + web-app-openwebui: + subnet: 192.168.104.0/28 + web-app-flowise: + subnet: 192.168.104.16/28 + web-app-minio: + subnet: 192.168.104.32/28 # /24 Networks / 254 Usable Clients web-app-bigbluebutton: @@ -116,5 +122,5 @@ defaults_networks: subnet: 192.168.201.0/24 svc-db-openldap: subnet: 192.168.202.0/24 - web-app-ai: + svc-ai-ollama: subnet: 192.168.203.0/24 # Big network to bridge applications into ai diff --git a/group_vars/all/10_ports.yml b/group_vars/all/10_ports.yml index 82c94e25..6991c181 100644 --- a/group_vars/all/10_ports.yml +++ b/group_vars/all/10_ports.yml @@ -76,8 +76,8 @@ ports: web-app-magento: 8052 web-app-bridgy-fed: 8053 web-app-xwiki: 8054 - web-app-ai_openwebui: 8055 - web-app-ai_flowise: 8056 + web-app-openwebui: 8055 + web-app-flowise: 8056 web-app-minio_api: 8057 web-app-minio_console: 8058 web-app-bigbluebutton: 48087 # This port is predefined by bbb. @todo Try to change this to a 8XXX port diff --git a/group_vars/all/17_ai.yml b/group_vars/all/17_ai.yml new file mode 100644 index 00000000..53238b87 --- /dev/null +++ b/group_vars/all/17_ai.yml @@ -0,0 +1,3 @@ +# URL of Local Ollama Container +OLLAMA_BASE_LOCAL_URL: "http://{{ applications | get_app_conf('svc-ai-ollama', 'docker.services.ollama.name') }}:{{ applications | get_app_conf('svc-ai-ollama', 'docker.services.ollama.port') }}" +OLLAMA_LOCAL_ENABLED: "{{ applications | get_app_conf(application_id, 'docker.services.ollama.enabled') }}" # Whether the current app wants the local Ollama service \ No newline at end of file diff --git a/roles/categories.yml b/roles/categories.yml index ea2a4dac..3cb0f304 100644 --- a/roles/categories.yml +++ b/roles/categories.yml @@ -148,6 +148,11 @@ roles: description: "Network setup (DNS, Let's Encrypt HTTP, WireGuard, etc.)" icon: "fas fa-globe" invokable: true + ai: + title: "AI Services" + description: "Core AI building blocks—model serving, OpenAI-compatible gateways, vector databases, orchestration, and chat UIs." + icon: "fas fa-brain" + invokable: true user: title: "Users & Access" description: "User accounts & access control" diff --git a/roles/svc-ai-ollama/README.md b/roles/svc-ai-ollama/README.md new file mode 100644 index 00000000..3f5ea058 --- /dev/null +++ b/roles/svc-ai-ollama/README.md @@ -0,0 +1,39 @@ + +# Ollama + +## Description + +**Ollama** is a local model server that runs open LLMs on your hardware and exposes a simple HTTP API. It’s the backbone for privacy-first AI: prompts and data stay on your machines. + +## Overview + +After the first model pull, Ollama serves models to clients like Open WebUI (for chat) and Flowise (for workflows). Models are cached locally for quick reuse and can run fully offline when required.
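+
+A quick way to sanity-check the role after deployment is to call the HTTP API directly. The snippet below is a minimal sketch, assuming the defaults used in this role (container name `ollama`, port `11434`), an already-pulled `llama3` model, and the third-party `requests` package on the client; none of that is installed by this role.
+
+```python
+# Minimal sketch: one non-streaming completion via Ollama's /api/generate endpoint.
+# Assumes the hostname "ollama" resolves, e.g. from a container on the same network.
+import requests
+
+resp = requests.post(
+    "http://ollama:11434/api/generate",
+    json={"model": "llama3", "prompt": "Say hello in one sentence.", "stream": False},
+    timeout=120,
+)
+resp.raise_for_status()
+print(resp.json()["response"])  # completion text as a single string
+```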
+ +## Features + +* Run popular open models (chat, code, embeddings) locally +* Simple, predictable HTTP API for developers +* Local caching to avoid repeated downloads +* Works seamlessly with Open WebUI and Flowise +* Offline-capable for air-gapped deployments + +## Further Resources + +* Ollama — [https://ollama.com](https://ollama.com) +* Ollama Model Library — [https://ollama.com/library](https://ollama.com/library) diff --git a/roles/svc-ai-ollama/config/main.yml b/roles/svc-ai-ollama/config/main.yml new file mode 100644 index 00000000..95444995 --- /dev/null +++ b/roles/svc-ai-ollama/config/main.yml @@ -0,0 +1,12 @@ +docker: + services: + ollama: + backup: + no_stop_required: true + image: ollama/ollama + version: latest + name: ollama + port: 11434 + volumes: + models: "ollama_models" + network: "ollama" \ No newline at end of file diff --git a/roles/svc-ai-ollama/meta/main.yml b/roles/svc-ai-ollama/meta/main.yml new file mode 100644 index 00000000..f7480486 --- /dev/null +++ b/roles/svc-ai-ollama/meta/main.yml @@ -0,0 +1,25 @@ +--- +galaxy_info: + author: "Kevin Veen-Birkenbach" + description: "Installs Ollama — a local model server for running open LLMs with a simple HTTP API." + license: "Infinito.Nexus NonCommercial License" + license_url: "https://s.infinito.nexus/license" + company: | + Kevin Veen-Birkenbach + Consulting & Coaching Solutions + https://www.veen.world + galaxy_tags: + - ai + - llm + - inference + - offline + - privacy + - self-hosted + - ollama + repository: "https://s.infinito.nexus/code" + issue_tracker_url: "https://s.infinito.nexus/issues" + documentation: "https://s.infinito.nexus/code/" + logo: + class: "fa-solid fa-microchip" + run_after: [] +dependencies: [] diff --git a/roles/svc-ai-ollama/tasks/01_core.yml b/roles/svc-ai-ollama/tasks/01_core.yml new file mode 100644 index 00000000..f018cb44 --- /dev/null +++ b/roles/svc-ai-ollama/tasks/01_core.yml @@ -0,0 +1,12 @@ +- name: Include dependency 'sys-svc-docker' + include_role: + name: sys-svc-docker + when: run_once_sys_svc_docker is not defined + +- name: "include docker-compose role" + include_role: + name: docker-compose + vars: + docker_compose_flush_handlers: true + +- include_tasks: utils/run_once.yml \ No newline at end of file diff --git a/roles/svc-ai-ollama/tasks/main.yml b/roles/svc-ai-ollama/tasks/main.yml new file mode 100644 index 00000000..27226500 --- /dev/null +++ b/roles/svc-ai-ollama/tasks/main.yml @@ -0,0 +1,5 @@ +- block: + - include_tasks: 01_core.yml + vars: + flush_handlers: true + when: run_once_svc_ai_ollama is not defined \ No newline at end of file diff --git a/roles/svc-ai-ollama/templates/docker-compose.yml.j2 b/roles/svc-ai-ollama/templates/docker-compose.yml.j2 new file mode 100644 index 00000000..fb059d2a --- /dev/null +++ b/roles/svc-ai-ollama/templates/docker-compose.yml.j2 @@ -0,0 +1,17 @@ +{% include 'roles/docker-compose/templates/base.yml.j2' %} + + ollama: +{% include 'roles/docker-container/templates/base.yml.j2' %} + image: {{ OLLAMA_IMAGE }}:{{ OLLAMA_VERSION }} + container_name: {{ OLLAMA_CONTAINER }} + expose: + - "{{ OLLAMA_PORT }}" + volumes: + - ollama_models:/root/.ollama +{% include 'roles/docker-container/templates/networks.yml.j2' %} + +{% include 'roles/docker-compose/templates/networks.yml.j2' %} + +{% include 'roles/docker-compose/templates/volumes.yml.j2' %} + ollama_models: + name: {{ OLLAMA_VOLUME }} diff --git a/roles/svc-ai-ollama/vars/main.yml b/roles/svc-ai-ollama/vars/main.yml new file mode 100644 index 00000000..67d3388f --- /dev/null +++ 
b/roles/svc-ai-ollama/vars/main.yml @@ -0,0 +1,13 @@ +# General +application_id: "svc-ai-ollama" + +# Docker +docker_compose_flush_handlers: true + +# Ollama +# https://ollama.com/ +OLLAMA_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.ollama.version') }}" +OLLAMA_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.ollama.image') }}" +OLLAMA_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.ollama.name') }}" +OLLAMA_PORT: "{{ applications | get_app_conf(application_id, 'docker.services.ollama.port') }}" +OLLAMA_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.models') }}" diff --git a/roles/svc-db-postgres/tasks/main.yml b/roles/svc-db-postgres/tasks/main.yml index f4b6d572..e2063477 100644 --- a/roles/svc-db-postgres/tasks/main.yml +++ b/roles/svc-db-postgres/tasks/main.yml @@ -5,7 +5,7 @@ flush_handlers: true when: run_once_svc_db_postgres is not defined -- include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml" +- include_tasks: "{{ [ playbook_dir, 'tasks/utils/load_handlers.yml' ] | path_join }}" # Necessary because docker handlers are overwritten by condition vars: handler_role_name: "docker-compose" diff --git a/roles/sys-service/tasks/main.yml b/roles/sys-service/tasks/main.yml index 5f16e855..b224bac1 100644 --- a/roles/sys-service/tasks/main.yml +++ b/roles/sys-service/tasks/main.yml @@ -1,5 +1,5 @@ - name: "Reload sys-daemon handlers" - include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml" + include_tasks: "{{ [ playbook_dir, 'tasks/utils/load_handlers.yml' ] | path_join }}" vars: handler_role_name: "sys-daemon" when: run_once_sys_service is defined diff --git a/roles/sys-stk-front-proxy/tasks/01_base.yml b/roles/sys-stk-front-proxy/tasks/01_base.yml index bd94a39e..dec5b999 100644 --- a/roles/sys-stk-front-proxy/tasks/01_base.yml +++ b/roles/sys-stk-front-proxy/tasks/01_base.yml @@ -9,7 +9,7 @@ - include_tasks: "02_cloudflare.yml" when: DNS_PROVIDER == "cloudflare" -- include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml" +- include_tasks: "{{ [ playbook_dir, 'tasks/utils/load_handlers.yml' ] | path_join }}" vars: handler_role_name: "svc-prx-openresty" diff --git a/roles/sys-stk-full-stateful/tasks/main.yml b/roles/sys-stk-full-stateful/tasks/main.yml index e9d70260..f3bd3bf6 100644 --- a/roles/sys-stk-full-stateful/tasks/main.yml +++ b/roles/sys-stk-full-stateful/tasks/main.yml @@ -1,6 +1,6 @@ # run_once_sys_stk_full_stateful: deactivated -- include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml" +- include_tasks: "{{ [ playbook_dir, 'tasks/utils/load_handlers.yml' ] | path_join }}" vars: handler_role_name: "svc-prx-openresty" diff --git a/roles/sys-svc-webserver/tasks/01_core.yml b/roles/sys-svc-webserver/tasks/01_core.yml index f79e100f..a095a83a 100644 --- a/roles/sys-svc-webserver/tasks/01_core.yml +++ b/roles/sys-svc-webserver/tasks/01_core.yml @@ -12,7 +12,7 @@ include_vars: "{{ DOCKER_VARS_FILE }}" - name: "Load docker compose handlers" - include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml" + include_tasks: "{{ [ playbook_dir, 'tasks/utils/load_handlers.yml' ] | path_join }}" vars: handler_role_name: "docker-compose" diff --git a/roles/web-app-ai/templates/docker-compose.yml.j2 b/roles/web-app-ai/templates/docker-compose.yml.j2 deleted file mode 100644 index 846a8688..00000000 --- a/roles/web-app-ai/templates/docker-compose.yml.j2 +++ /dev/null @@ -1,74 +0,0 @@ -{% include 'roles/docker-compose/templates/base.yml.j2' %} - 
ollama: -{% include 'roles/docker-container/templates/base.yml.j2' %} - image: {{ AI_OLLAMA_IMAGE }}:{{ AI_OLLAMA_VERSION }} - container_name: {{ AI_OLLAMA_CONTAINER }} - expose: - - "{{ AI_OLLAMA_PORT }}" - volumes: - - ollama_models:/root/.ollama -{% include 'roles/docker-container/templates/networks.yml.j2' %} - - openwebui: -{% include 'roles/docker-container/templates/base.yml.j2' %} - image: {{ AI_OPENWEBUI_IMAGE }}:{{ AI_OPENWEBUI_VERSION }} - container_name: {{ AI_OPENWEBUI_CONTAINER }} - depends_on: - - ollama - ports: - - "127.0.0.1:{{ AI_OPENWEBUI_PORT_PUBLIC }}:8080" - volumes: - - openwebui_data:/app/backend/data -{% include 'roles/docker-container/templates/networks.yml.j2' %} - - litellm: -{% include 'roles/docker-container/templates/base.yml.j2' %} - image: {{ AI_LITELLM_IMAGE }}:{{ AI_LITELLM_VERSION }} - container_name: {{ AI_LITELLM_CONTAINER }} - depends_on: - - ollama - expose: - - {{ AI_LITELLM_PORT }} - volumes: - - {{ AI_LITELLM_CONFIG_PATH_HOST }}:{{ AI_LITELLM_CONFIG_PATH_DOCKER }}:ro - command: > - --host 0.0.0.0 - --port {{ AI_LITELLM_PORT }} - --config {{ AI_LITELLM_CONFIG_PATH_DOCKER }} -{% include 'roles/docker-container/templates/networks.yml.j2' %} - - qdrant: -{% include 'roles/docker-container/templates/base.yml.j2' %} - image: {{ AI_QDRANT_IMAGE }}:{{ AI_QDRANT_VERSION }} - container_name: {{ AI_QDRANT_CONTAINER }} - ports: - - {{ AI_MINIO_HTTP_PORT }} - - {{ AI_MINIO_GRPC_PORT }} - volumes: - - qdrant_data:/qdrant/storage -{% include 'roles/docker-container/templates/networks.yml.j2' %} - - flowise: -{% include 'roles/docker-container/templates/base.yml.j2' %} - image: {{ AI_FLOWISE_IMAGE }}:{{ AI_FLOWISE_VERSION }} - container_name: {{ AI_FLOWISE_CONTAINER }} - depends_on: - - qdrant - - litellm - ports: - - "127.0.0.1:{{ AI_FLOWISE_PORT_PUBLIC }}:{{ AI_FLOWISE_PORT_INTERNAL }}" - volumes: - - flowise_data:/root/.flowise -{% include 'roles/docker-container/templates/networks.yml.j2' %} - -{% include 'roles/docker-compose/templates/networks.yml.j2' %} - -{% include 'roles/docker-compose/templates/volumes.yml.j2' %} - ollama_models: - name: {{ AI_OLLAMA_VOLUME }} - openwebui_data: - name: {{ AI_OPENWEBUI_VOLUME }} - qdrant_data: - name: {{ AI_QDRANT_VOLUME }} - flowise_data: - name: {{ AI_FLOWISE_VOLUME }} diff --git a/roles/web-app-ai/templates/env.j2 b/roles/web-app-ai/templates/env.j2 deleted file mode 100644 index 0e811de8..00000000 --- a/roles/web-app-ai/templates/env.j2 +++ /dev/null @@ -1,21 +0,0 @@ -# Open WebUI -OLLAMA_BASE_URL={{ AI_OLLAMA_BASE_URL }} -OFFLINE_MODE={{ AI_OPENWEBUI_OFFLINE_MODE | ternary(1, 0) }} -HF_HUB_OFFLINE={{ AI_OPENWEBUI_HF_HUB_OFFLINE | ternary(1, 0) }} -ENABLE_PERSISTENT_CONFIG=False - -# LiteLLM -LITELLM_MASTER_KEY=dummy-key -LITELLM_CONFIG=/etc/litellm/config.yaml - -# Flowise -PORT={{ AI_FLOWISE_PORT_INTERNAL }} -FLOWISE_USERNAME=admin -FLOWISE_PASSWORD=admin -DATABASE_PATH=/root/.flowise -FLOWISE_FILE_STORAGE_PATH=/root/.flowise/storage - -# Qdrant + LiteLLM/Ollama: -QDRANT_URL={{ AI_QDRANT_INTERNAL_URL }} -OPENAI_API_BASE={{ AI_LITELLM_INTERNAL_URL }}/v1 -OPENAI_API_KEY=dummy-key diff --git a/roles/web-app-ai/vars/main.yml b/roles/web-app-ai/vars/main.yml deleted file mode 100644 index c47d9c91..00000000 --- a/roles/web-app-ai/vars/main.yml +++ /dev/null @@ -1,66 +0,0 @@ - -# General -application_id: "web-app-ai" - -# Docker -docker_pull_git_repository: false -docker_compose_file_creation_enabled: true - -# Open WebUI -# https://openwebui.com/ -AI_OPENWEBUI_VERSION: "{{ applications | get_app_conf(application_id, 
'docker.services.openwebui.version') }}" -AI_OPENWEBUI_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.image') }}" -AI_OPENWEBUI_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.name') }}" -AI_OPENWEBUI_OFFLINE_MODE: "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.offline_mode') }}" -AI_OPENWEBUI_HF_HUB_OFFLINE: "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.hf_hub_offline') }}" -AI_OPENWEBUI_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.openwebui') }}" -AI_OPENWEBUI_PORT_PUBLIC: "{{ ports.localhost.http[application_id ~ '_openwebui'] }}" -AI_OPENWEBUI_DOMAIN: "{{ applications | get_app_conf(application_id, 'server.domains.canonical.openwebui') }}" - -# Ollama -# https://ollama.com/ -AI_OLLAMA_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.ollama.version') }}" -AI_OLLAMA_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.ollama.image') }}" -AI_OLLAMA_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.ollama.name') }}" -AI_OLLAMA_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.ollama') }}" -AI_OLLAMA_PORT: 11434 -AI_OLLAMA_BASE_URL: "http://ollama:{{ AI_OLLAMA_PORT }}" - -# LiteLLM -# https://www.litellm.ai/ -AI_LITELLM_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.litellm.version') }}" -AI_LITELLM_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.litellm.image') }}" -AI_LITELLM_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.litellm.name') }}" -AI_LITELLM_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.litellm') }}" -AI_LITELLM_PORT: 4000 -AI_LITELLM_INTERNAL_URL: "http://litellm:{{ AI_LITELLM_PORT }}" -AI_LITELLM_CONFIG_PATH_HOST: "{{ docker_compose.directories.config }}litellm.config.yaml" -AI_LITELLM_CONFIG_PATH_DOCKER: "/etc/litellm/config.yaml" - -# Qdrant -# https://qdrant.tech/ -AI_QDRANT_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.qdrant.version') }}" -AI_QDRANT_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.qdrant.image') }}" -AI_QDRANT_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.qdrant.name') }}" -AI_QDRANT_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.qdrant') }}" -AI_QDRANT_HTTP_PORT: 6333 -AI_QDRANT_GRPC_PORT: 6334 -AI_QDRANT_INTERNAL_URL: "http://qdrant:{{ AI_QDRANT_HTTP_PORT }}" - -# Flowise -# https://flowiseai.com/ -AI_FLOWISE_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.flowise.version') }}" -AI_FLOWISE_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.flowise.image') }}" -AI_FLOWISE_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.flowise.name') }}" -AI_FLOWISE_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.flowise') }}" -AI_FLOWISE_PORT_PUBLIC: "{{ ports.localhost.http[application_id ~ '_flowise'] }}" -AI_FLOWISE_PORT_INTERNAL: 3000 -AI_FLOWISE_DOMAIN: "{{ applications | get_app_conf(application_id, 'server.domains.canonical.flowise') }}" - -AI_FRONT_PROXY_MATRIX: >- - {{ - [ - { 'domain': AI_OPENWEBUI_DOMAIN, 'http_port': AI_OPENWEBUI_PORT_PUBLIC }, - { 'domain': AI_FLOWISE_DOMAIN, 'http_port': AI_FLOWISE_PORT_PUBLIC } - ] - }} \ No newline at end of file diff --git a/roles/web-app-flowise/README.md 
b/roles/web-app-flowise/README.md new file mode 100644 index 00000000..9ddb3c7f --- /dev/null +++ b/roles/web-app-flowise/README.md @@ -0,0 +1,24 @@ +# Flowise + +## Description + +**Flowise** is a visual builder for AI workflows. Create, test, and publish chains that combine LLMs, your documents, tools, and vector search—without writing glue code. + +## Overview + +Users design flows on a drag-and-drop canvas (LLM, RAG, tools, webhooks), test them interactively, and publish endpoints that applications or bots can call. Flowise works well with local backends such as **Ollama** (directly or via **LiteLLM**) and **Qdrant** for retrieval. + +## Features + +* No/low-code canvas to build assistants and pipelines +* Publish flows as HTTP endpoints for easy integration +* Retrieval-augmented generation (RAG) with vector DBs (e.g., Qdrant) +* Pluggable model backends via OpenAI-compatible API or direct Ollama +* Keep data and prompts on your own infrastructure + +## Further Resources + +* Flowise — [https://flowiseai.com](https://flowiseai.com) +* Qdrant — [https://qdrant.tech](https://qdrant.tech) +* LiteLLM — [https://www.litellm.ai](https://www.litellm.ai) +* Ollama — [https://ollama.com](https://ollama.com) diff --git a/roles/web-app-ai/config/main.yml b/roles/web-app-flowise/config/main.yml similarity index 63% rename from roles/web-app-ai/config/main.yml rename to roles/web-app-flowise/config/main.yml index e389ef13..e9b908d1 100644 --- a/roles/web-app-ai/config/main.yml +++ b/roles/web-app-flowise/config/main.yml @@ -2,17 +2,16 @@ features: matomo: true css: true desktop: true - central_database: true + central_database: false logout: true javascript: false server: domains: canonical: - openwebui: "chat.ai.{{ PRIMARY_DOMAIN }}" flowise: "flowise.ai.{{ PRIMARY_DOMAIN }}" aliases: [] csp: - flags: [] + flags: {} #script-src-elem: # unsafe-inline: true #script-src: @@ -25,20 +24,6 @@ server: connect-src: [] docker: services: - ollama: - backup: - no_stop_required: true - image: ollama/ollama - version: latest - name: ollama - open-webui: - backup: - no_stop_required: true - image: ghcr.io/open-webui/open-webui - version: main - name: open-webui - offline_mode: false - hf_hub_offline: false litellm: backup: no_stop_required: true @@ -61,10 +46,10 @@ docker: enabled: false database: enabled: false + ollama: + enabled: true volumes: - openwebui: ai_openwebui_data - ollama: ai_ollama_models - qdrant: ai_qdrant_data - flowise: ai_flowise_data + qdrant: qdrant_data + flowise: flowise_data credentials: {} diff --git a/roles/web-app-flowise/meta/main.yml b/roles/web-app-flowise/meta/main.yml new file mode 100644 index 00000000..271cf5e3 --- /dev/null +++ b/roles/web-app-flowise/meta/main.yml @@ -0,0 +1,30 @@ +--- +galaxy_info: + author: "Kevin Veen-Birkenbach" + description: "Installs Flowise — a visual builder to create, test, and publish AI workflows (RAG, tools, webhooks)." 
+ license: "Infinito.Nexus NonCommercial License" + license_url: "https://s.infinito.nexus/license" + company: | + Kevin Veen-Birkenbach + Consulting & Coaching Solutions + https://www.veen.world + galaxy_tags: + - ai + - llm + - rag + - workflow + - orchestration + - self-hosted + - qdrant + - litellm + - ollama + - flowise + repository: "https://s.infinito.nexus/code" + issue_tracker_url: "https://s.infinito.nexus/issues" + documentation: "https://s.infinito.nexus/code/" + logo: + class: "fa-solid fa-diagram-project" + run_after: + - web-app-keycloak + - web-app-matomo +dependencies: [] diff --git a/roles/web-app-ai/tasks/main.yml b/roles/web-app-flowise/tasks/main.yml similarity index 55% rename from roles/web-app-ai/tasks/main.yml rename to roles/web-app-flowise/tasks/main.yml index 48f3040e..85df8f53 100644 --- a/roles/web-app-ai/tasks/main.yml +++ b/roles/web-app-flowise/tasks/main.yml @@ -1,13 +1,23 @@ --- +- name: "Install Ollama Dependency" + include_role: + name: svc-ai-ollama + vars: + flush_handlers: true + when: + - run_once_svc_ai_ollama is not defined + - OLLAMA_LOCAL_ENABLED | bool + - name: "load docker and db for {{ application_id }}" include_role: name: sys-stk-back-stateless vars: docker_compose_flush_handlers: false -- name: "create {{ AI_LITELLM_CONFIG_PATH_HOST }}" - template: src: "litellm.config.yaml.j2" - dest: "{{ AI_LITELLM_CONFIG_PATH_HOST }}" +- name: "create {{ FLOWISE_LITELLM_CONFIG_PATH_HOST }}" + template: + src: "litellm.config.yaml.j2" + dest: "{{ FLOWISE_LITELLM_CONFIG_PATH_HOST }}" notify: docker compose up - name: "flush handlers of docker compose" @@ -19,6 +29,3 @@ vars: domain: "{{ item.domain }}" http_port: "{{ item.http_port }}" - loop: "{{ AI_FRONT_PROXY_MATRIX }}" - loop_control: - label: "{{ item.domain }} -> {{ item.http_port }}" diff --git a/roles/web-app-flowise/templates/docker-compose.yml.j2 b/roles/web-app-flowise/templates/docker-compose.yml.j2 new file mode 100644 index 00000000..d0604629 --- /dev/null +++ b/roles/web-app-flowise/templates/docker-compose.yml.j2 @@ -0,0 +1,48 @@ +{% include 'roles/docker-compose/templates/base.yml.j2' %} + litellm: +{% include 'roles/docker-container/templates/base.yml.j2' %} + image: {{ FLOWISE_LITELLM_IMAGE }}:{{ FLOWISE_LITELLM_VERSION }} + container_name: {{ FLOWISE_LITELLM_CONTAINER }} + depends_on: + - ollama + expose: + - {{ FLOWISE_LITELLM_PORT }} + volumes: + - {{ FLOWISE_LITELLM_CONFIG_PATH_HOST }}:{{ FLOWISE_LITELLM_CONFIG_PATH_DOCKER }}:ro + command: > + --host 0.0.0.0 + --port {{ FLOWISE_LITELLM_PORT }} + --config {{ FLOWISE_LITELLM_CONFIG_PATH_DOCKER }} +{% include 'roles/docker-container/templates/networks.yml.j2' %} + + qdrant: +{% include 'roles/docker-container/templates/base.yml.j2' %} + image: {{ FLOWISE_QDRANT_IMAGE }}:{{ FLOWISE_QDRANT_VERSION }} + container_name: {{ FLOWISE_QDRANT_CONTAINER }} + ports: + - {{ FLOWISE_QDRANT_HTTP_PORT }} + - {{ FLOWISE_QDRANT_GRPC_PORT }} + volumes: + - qdrant_data:/qdrant/storage +{% include 'roles/docker-container/templates/networks.yml.j2' %} + + flowise: +{% include 'roles/docker-container/templates/base.yml.j2' %} + image: {{ FLOWISE_IMAGE }}:{{ FLOWISE_VERSION }} + container_name: {{ FLOWISE_CONTAINER }} + depends_on: + - qdrant + - litellm + ports: + - "127.0.0.1:{{ FLOWISE_PORT_PUBLIC }}:{{ FLOWISE_PORT_INTERNAL }}" + volumes: + - flowise_data:/root/.flowise +{% include 'roles/docker-container/templates/networks.yml.j2' %} + +{% include 'roles/docker-compose/templates/networks.yml.j2' %} + +{% include 
'roles/docker-compose/templates/volumes.yml.j2' %} + qdrant_data: + name: {{ FLOWISE_QDRANT_VOLUME }} + flowise_data: + name: {{ FLOWISE_VOLUME }} diff --git a/roles/web-app-flowise/templates/env.j2 b/roles/web-app-flowise/templates/env.j2 new file mode 100644 index 00000000..9ddf75b4 --- /dev/null +++ b/roles/web-app-flowise/templates/env.j2 @@ -0,0 +1,15 @@ +# LiteLLM +LITELLM_MASTER_KEY=dummy-key +LITELLM_CONFIG=/etc/litellm/config.yaml + +# Flowise +PORT={{ FLOWISE_PORT_INTERNAL }} +FLOWISE_USERNAME=admin +FLOWISE_PASSWORD=admin +DATABASE_PATH=/root/.flowise +FLOWISE_FILE_STORAGE_PATH=/root/.flowise/storage + +# Qdrant + LiteLLM/Ollama: +QDRANT_URL={{ FLOWISE_QDRANT_INTERNAL_URL }} +OPENAI_API_BASE={{ FLOWISE_LITELLM_INTERNAL_URL }}/v1 +OPENAI_API_KEY=dummy-key diff --git a/roles/web-app-ai/templates/litellm.config.yaml.j2 b/roles/web-app-flowise/templates/litellm.config.yaml.j2 similarity index 75% rename from roles/web-app-ai/templates/litellm.config.yaml.j2 rename to roles/web-app-flowise/templates/litellm.config.yaml.j2 index d2cd10cf..8b7c6141 100644 --- a/roles/web-app-ai/templates/litellm.config.yaml.j2 +++ b/roles/web-app-flowise/templates/litellm.config.yaml.j2 @@ -2,12 +2,12 @@ model_list: - model_name: ollama/llama3 litellm_params: model: ollama/llama3 - api_base: {{ AI_OLLAMA_BASE_URL }} + api_base: {{ OLLAMA_BASE_LOCAL_URL }} rpm: 1000 - model_name: ollama/nomic-embed-text litellm_params: model: ollama/nomic-embed-text - api_base: {{ AI_OLLAMA_BASE_URL }} + api_base: {{ OLLAMA_BASE_LOCAL_URL }} rpm: 1000 litellm_settings: diff --git a/roles/web-app-flowise/vars/main.yml b/roles/web-app-flowise/vars/main.yml new file mode 100644 index 00000000..69831d5f --- /dev/null +++ b/roles/web-app-flowise/vars/main.yml @@ -0,0 +1,33 @@ +# General +application_id: "web-app-flowise" + +# Flowise +# https://flowiseai.com/ +FLOWISE_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.flowise.version') }}" +FLOWISE_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.flowise.image') }}" +FLOWISE_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.flowise.name') }}" +FLOWISE_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.flowise') }}" +FLOWISE_PORT_PUBLIC: "{{ ports.localhost.http[application_id] }}" +FLOWISE_PORT_INTERNAL: 3000 + +# Dependencies + +## LiteLLM +# https://www.litellm.ai/ +FLOWISE_LITELLM_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.litellm.version') }}" +FLOWISE_LITELLM_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.litellm.image') }}" +FLOWISE_LITELLM_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.litellm.name') }}" +FLOWISE_LITELLM_PORT: 4000 +FLOWISE_LITELLM_INTERNAL_URL: "http://litellm:{{ FLOWISE_LITELLM_PORT }}" +FLOWISE_LITELLM_CONFIG_PATH_HOST: "{{ [ docker_compose.directories.config, 'litellm.config.yaml' ] | path_join }}" +FLOWISE_LITELLM_CONFIG_PATH_DOCKER: "/etc/litellm/config.yaml" + +## Qdrant +# https://qdrant.tech/ +FLOWISE_QDRANT_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.qdrant.version') }}" +FLOWISE_QDRANT_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.qdrant.image') }}" +FLOWISE_QDRANT_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.qdrant.name') }}" +FLOWISE_QDRANT_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.qdrant') }}" +FLOWISE_QDRANT_HTTP_PORT: 6333 
+FLOWISE_QDRANT_GRPC_PORT: 6334 +FLOWISE_QDRANT_INTERNAL_URL: "http://qdrant:{{ FLOWISE_QDRANT_HTTP_PORT }}" diff --git a/roles/web-app-keycloak/tasks/01_meta.yml b/roles/web-app-keycloak/tasks/01_meta.yml index 76a225d9..b997ccea 100644 --- a/roles/web-app-keycloak/tasks/01_meta.yml +++ b/roles/web-app-keycloak/tasks/01_meta.yml @@ -1,4 +1,4 @@ -- include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml" +- include_tasks: "{{ [ playbook_dir, 'tasks/utils/load_handlers.yml' ] | path_join }}" vars: handler_role_name: "docker-compose" - ansible.builtin.include_vars: diff --git a/roles/web-app-minio/README.md b/roles/web-app-minio/README.md new file mode 100644 index 00000000..6330abbb --- /dev/null +++ b/roles/web-app-minio/README.md @@ -0,0 +1,23 @@ + +# MinIO + +## Description + +**MinIO** is an S3-compatible object storage service for files, media, backups, and AI artifacts—self-hosted for performance and control. + +## Overview + +Applications that speak “S3” (Pixelfed, Mastodon, Nextcloud, Flowise, etc.) store and retrieve objects from MinIO buckets using familiar SDKs and CLIs. Admins manage buckets, users, and access policies through a browser console while keeping everything on-prem. + +## Features + +* S3-compatible API for broad app compatibility +* Buckets, users, access keys, and fine-grained policies +* Optional versioning, lifecycle rules, and object lock +* Presigned URLs for secure, time-limited uploads/downloads +* Ideal for AI stacks: datasets, embeddings, and artifacts + +## Further Resources + +* MinIO — [https://www.min.io](https://www.min.io) +* AWS S3 (API background) — [https://aws.amazon.com/s3](https://aws.amazon.com/s3) diff --git a/roles/web-app-minio/config/main.yml b/roles/web-app-minio/config/main.yml index 9b266dd9..4b09668e 100644 --- a/roles/web-app-minio/config/main.yml +++ b/roles/web-app-minio/config/main.yml @@ -2,7 +2,7 @@ features: matomo: true css: true desktop: true - central_database: true + central_database: false logout: true javascript: false server: @@ -12,7 +12,7 @@ server: api: "api.s3.{{ PRIMARY_DOMAIN }}" aliases: [] csp: - flags: [] + flags: {} #script-src-elem: # unsafe-inline: true #script-src: diff --git a/roles/web-app-minio/meta/main.yml b/roles/web-app-minio/meta/main.yml new file mode 100644 index 00000000..83e9916e --- /dev/null +++ b/roles/web-app-minio/meta/main.yml @@ -0,0 +1,29 @@ +--- +galaxy_info: + author: "Kevin Veen-Birkenbach" + description: "Installs MinIO — an S3-compatible object storage service for media, backups, and AI artifacts."
+ license: "Infinito.Nexus NonCommercial License" + license_url: "https://s.infinito.nexus/license" + company: | + Kevin Veen-Birkenbach + Consulting & Coaching Solutions + https://www.veen.world + galaxy_tags: + - s3 + - object-storage + - storage + - buckets + - minio + - self-hosted + - privacy + - backup + - devops + repository: "https://s.infinito.nexus/code" + issue_tracker_url: "https://s.infinito.nexus/issues" + documentation: "https://s.infinito.nexus/code/" + logo: + class: "fa-solid fa-database" + run_after: + - web-app-keycloak + - web-app-matomo +dependencies: [] diff --git a/roles/web-app-minio/tasks/main.yml b/roles/web-app-minio/tasks/main.yml index f95b6361..bbde7518 100644 --- a/roles/web-app-minio/tasks/main.yml +++ b/roles/web-app-minio/tasks/main.yml @@ -1,4 +1,13 @@ --- +- name: "Install Ollama Dependency" + include_role: + name: svc-ai-ollama + vars: + flush_handlers: true + when: + - run_once_svc_ai_ollama is not defined + - OLLAMA_LOCAL_ENABLED | bool + - name: "load docker and db for {{ application_id }}" include_role: name: sys-stk-back-stateless diff --git a/roles/web-app-minio/templates/docker-compose.yml.j2 b/roles/web-app-minio/templates/docker-compose.yml.j2 index 4e2edcd8..0cee11e9 100644 --- a/roles/web-app-minio/templates/docker-compose.yml.j2 +++ b/roles/web-app-minio/templates/docker-compose.yml.j2 @@ -12,6 +12,8 @@ - data:/data {% include 'roles/docker-container/templates/networks.yml.j2' %} +{% include 'roles/docker-compose/templates/networks.yml.j2' %} + {% include 'roles/docker-compose/templates/volumes.yml.j2' %} data: name: {{ MINIO_VOLUME }} \ No newline at end of file diff --git a/roles/web-app-minio/vars/main.yml b/roles/web-app-minio/vars/main.yml index 90466624..9d2849e6 100644 --- a/roles/web-app-minio/vars/main.yml +++ b/roles/web-app-minio/vars/main.yml @@ -10,7 +10,7 @@ docker_compose_file_creation_enabled: true MINIO_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.minio.version') }}" MINIO_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.minio.image') }}" MINIO_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.minio.name') }}" -MINIO_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.minio') }}" +MINIO_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}" ## Api MINIO_API_DOMAIN: "{{ applications | get_app_conf(application_id, 'server.domains.canonical.api') }}" diff --git a/roles/web-app-openwebui/README.md b/roles/web-app-openwebui/README.md new file mode 100644 index 00000000..9719a090 --- /dev/null +++ b/roles/web-app-openwebui/README.md @@ -0,0 +1,24 @@ +Here are user-focused **README.md** drafts for the four roles, following your template structure and describing the **role** (what the installed software does for users), not the folder. + +# Open WebUI + +## Description + +**Open WebUI** provides a clean, fast chat interface for working with local AI models (e.g., via Ollama). It delivers a ChatGPT-like experience on your own infrastructure to keep prompts and data private. + +## Overview + +End users access a web page, pick a model, and start chatting. Conversations remain on your servers. Admins can enable strict offline behavior so no external network calls occur. The UI can also point at OpenAI-compatible endpoints if needed. 
+ +## Features + +* Familiar multi-chat interface with quick model switching +* Supports local backends (Ollama) and OpenAI-compatible APIs +* Optional **offline mode** for air-gapped environments +* File/paste input for summaries and extraction (model dependent) +* Suitable for teams: predictable, private, reproducible + +## Further Resources + +* Open WebUI — [https://openwebui.com](https://openwebui.com) +* Ollama — [https://ollama.com](https://ollama.com) diff --git a/roles/web-app-openwebui/config/main.yml b/roles/web-app-openwebui/config/main.yml new file mode 100644 index 00000000..db5cce0c --- /dev/null +++ b/roles/web-app-openwebui/config/main.yml @@ -0,0 +1,43 @@ +features: + matomo: true + css: true + desktop: true + central_database: false + logout: true + javascript: false +server: + domains: + canonical: + openwebui: "chat.ai.{{ PRIMARY_DOMAIN }}" + aliases: [] + csp: + flags: {} + #script-src-elem: + # unsafe-inline: true + #script-src: + # unsafe-inline: true + # unsafe-eval: true + #style-src: + # unsafe-inline: true + whitelist: + font-src: [] + connect-src: [] +docker: + services: + openwebui: + backup: + no_stop_required: true + image: ghcr.io/open-webui/open-webui + version: main + name: open-webui + offline_mode: false + hf_hub_offline: false + redis: + enabled: false + database: + enabled: false + ollama: + enabled: true + volumes: + openwebui: ai_openwebui_data +credentials: {} diff --git a/roles/web-app-openwebui/meta/main.yml b/roles/web-app-openwebui/meta/main.yml new file mode 100644 index 00000000..36af4245 --- /dev/null +++ b/roles/web-app-openwebui/meta/main.yml @@ -0,0 +1,28 @@ +--- +galaxy_info: + author: "Kevin Veen-Birkenbach" + description: "Installs Open WebUI — a clean, fast chat interface for local/private AI models (e.g., via Ollama)."
+ license: "Infinito.Nexus NonCommercial License" + license_url: "https://s.infinito.nexus/license" + company: | + Kevin Veen-Birkenbach + Consulting & Coaching Solutions + https://www.veen.world + galaxy_tags: + - ai + - llm + - chat + - privacy + - self-hosted + - offline + - openwebui + - ollama + repository: "https://s.infinito.nexus/code" + issue_tracker_url: "https://s.infinito.nexus/issues" + documentation: "https://s.infinito.nexus/code/" + logo: + class: "fa-solid fa-comments" + run_after: + - web-app-keycloak + - web-app-matomo +dependencies: [] diff --git a/roles/web-app-openwebui/tasks/main.yml b/roles/web-app-openwebui/tasks/main.yml new file mode 100644 index 00000000..e24fb25d --- /dev/null +++ b/roles/web-app-openwebui/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- name: "Install Ollama Dependency" + include_role: + name: svc-ai-ollama + vars: + flush_handlers: true + when: + - run_once_svc_ai_ollama is not defined + - OLLAMA_LOCAL_ENABLED | bool + +- name: "load docker, proxy for '{{ application_id }}'" + include_role: + name: sys-stk-full-stateless diff --git a/roles/web-app-openwebui/templates/docker-compose.yml.j2 b/roles/web-app-openwebui/templates/docker-compose.yml.j2 new file mode 100644 index 00000000..9741caed --- /dev/null +++ b/roles/web-app-openwebui/templates/docker-compose.yml.j2 @@ -0,0 +1,30 @@ +{% include 'roles/docker-compose/templates/base.yml.j2' %} + ollama: +{% include 'roles/docker-container/templates/base.yml.j2' %} + image: {{ OLLAMA_IMAGE }}:{{ OLLAMA_VERSION }} + container_name: {{ OLLAMA_CONTAINER }} + expose: + - "{{ OLLAMA_PORT }}" + volumes: + - ollama_models:/root/.ollama +{% include 'roles/docker-container/templates/networks.yml.j2' %} + + openwebui: +{% include 'roles/docker-container/templates/base.yml.j2' %} + image: {{ OPENWEBUI_IMAGE }}:{{ OPENWEBUI_VERSION }} + container_name: {{ OPENWEBUI_CONTAINER }} + depends_on: + - ollama + ports: + - "127.0.0.1:{{ OPENWEBUI_PORT_PUBLIC }}:8080" + volumes: + - openwebui_data:/app/backend/data +{% include 'roles/docker-container/templates/networks.yml.j2' %} + +{% include 'roles/docker-compose/templates/networks.yml.j2' %} + +{% include 'roles/docker-compose/templates/volumes.yml.j2' %} + ollama_models: + name: {{ OLLAMA_VOLUME }} + openwebui_data: + name: {{ OPENWEBUI_VOLUME }} diff --git a/roles/web-app-openwebui/templates/env.j2 b/roles/web-app-openwebui/templates/env.j2 new file mode 100644 index 00000000..24d9a6bb --- /dev/null +++ b/roles/web-app-openwebui/templates/env.j2 @@ -0,0 +1,5 @@ +# Open WebUI +OLLAMA_BASE_URL={{ OLLAMA_BASE_LOCAL_URL }} +OFFLINE_MODE={{ OPENWEBUI_OFFLINE_MODE | ternary(1, 0) }} +HF_HUB_OFFLINE={{ OPENWEBUI_HF_HUB_OFFLINE | ternary(1, 0) }} +ENABLE_PERSISTENT_CONFIG=False diff --git a/roles/web-app-openwebui/vars/main.yml b/roles/web-app-openwebui/vars/main.yml new file mode 100644 index 00000000..975fa8a0 --- /dev/null +++ b/roles/web-app-openwebui/vars/main.yml @@ -0,0 +1,17 @@ + +# General +application_id: "web-app-openwebui" + +# Docker +docker_pull_git_repository: false +docker_compose_file_creation_enabled: true + +# Open WebUI +# https://openwebui.com/ +OPENWEBUI_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.version') }}" +OPENWEBUI_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.image') }}" +OPENWEBUI_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.name') }}" +OPENWEBUI_OFFLINE_MODE: "{{ applications | get_app_conf(application_id, 
'docker.services.openwebui.offline_mode') }}" +OPENWEBUI_HF_HUB_OFFLINE: "{{ applications | get_app_conf(application_id, 'docker.services.openwebui.hf_hub_offline') }}" +OPENWEBUI_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.openwebui') }}" +OPENWEBUI_PORT_PUBLIC: "{{ ports.localhost.http[application_id] }}" diff --git a/roles/web-svc-cdn/tasks/main.yml b/roles/web-svc-cdn/tasks/main.yml index 935414d4..6d4fd4f0 100644 --- a/roles/web-svc-cdn/tasks/main.yml +++ b/roles/web-svc-cdn/tasks/main.yml @@ -2,6 +2,6 @@ - include_tasks: 01_core.yml when: run_once_web_svc_cdn is not defined -- include_tasks: "{{ playbook_dir }}/tasks/utils/load_handlers.yml" +- include_tasks: "{{ [ playbook_dir, 'tasks/utils/load_handlers.yml' ] | path_join }}" vars: handler_role_name: "docker-compose" diff --git a/tasks/stages/01_constructor.yml b/tasks/stages/01_constructor.yml index c1726668..de4c3dd7 100644 --- a/tasks/stages/01_constructor.yml +++ b/tasks/stages/01_constructor.yml @@ -128,5 +128,6 @@ - svc-net # 3. Load network roles - svc-db # 4. Load database roles - svc-prx # 5. Load proxy roles + - svc-ai # 6. Load ai roles loop_control: label: "{{ item }}-roles.yml" \ No newline at end of file