Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git (synced 2025-09-24 11:06:24 +02:00)
- svc-ai-ollama:
  - Add preload_models (llama3, mistral, nomic-embed-text)
  - Pre-pull task: loop_var=model, async-safe changed_when/failed_when
- sys-svc-proxy (OpenResty):
  - Forward Authorization header
  - Ensure proxy_pass_request_headers on
- web-app-openwebui:
  - ADMIN_EMAIL from users.administrator.email
  - Request RBAC group scope in OAUTH_SCOPES

Ref: ChatGPT support (2025-09-23), https://chatgpt.com/share/68d20588-2584-800f-aed4-26ce710c69c4
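This file is the svc-ai-ollama task list from that change. It references several role variables; a minimal sketch of how their defaults might look (the variable names are taken from the file below, the values are assumptions, not part of this commit):

# defaults/main.yml (hypothetical sketch)
# OLLAMA_NETWORK: "ollama"
# OLLAMA_CONTAINER: "ollama"
# OLLAMA_PRELOAD_MODELS:      # models named in the commit message
#   - llama3
#   - mistral
#   - nomic-embed-text
# ASYNC_ENABLED: true
# ASYNC_TIME: 3600            # max seconds a background pull may run
# ASYNC_POLL: 0               # 0 = fire and forget, don't wait in-task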
- name: create docker network for Ollama, so that other applications can access it
  community.docker.docker_network:
    name: "{{ OLLAMA_NETWORK }}"
    state: present
    ipam_config:
      - subnet: "{{ networks.local[application_id].subnet }}"

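# Other services can reach Ollama by attaching to this network from their
# own compose project, e.g. (sketch, key names assumed):
#
# networks:
#   default:
#     name: "{{ OLLAMA_NETWORK }}"
#     external: true
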
- name: Include dependency 'sys-svc-docker'
  include_role:
    name: sys-svc-docker
  when: run_once_sys_svc_docker is not defined

- name: "include docker-compose role"
|
|
include_role:
|
|
name: docker-compose
|
|
vars:
|
|
docker_compose_flush_handlers: true
|
|
|
|
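# docker_compose_flush_handlers presumably makes the docker-compose role
# flush its handlers immediately, so the compose project (and with it the
# OLLAMA_CONTAINER) is already up before the pre-pull task below execs
# into it.
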
- name: Pre-pull Ollama models
  vars:
    _cmd: "docker exec -i {{ OLLAMA_CONTAINER }} ollama pull {{ model }}"
  shell: "{{ _cmd }}"
  register: pull_result
  loop: "{{ OLLAMA_PRELOAD_MODELS }}"
  loop_control:
    loop_var: model
  async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
  poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"
  changed_when: >
    (not (ASYNC_ENABLED | bool)) and (
      'downloaded' in (pull_result.stdout | default('')) or
      'pulling manifest' in (pull_result.stdout | default(''))
    )
  failed_when: >
    (pull_result.rc | default(0)) != 0 and
    ('up to date' not in (pull_result.stdout | default('')))

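# "Async-safe" here means: with ASYNC_ENABLED set, the shell task returns
# before 'ollama pull' finishes, so pull_result carries no stdout/rc yet;
# changed_when is then forced to false and failed_when's rc check defaults
# to 0. A follow-up task could await the background jobs, e.g. (sketch,
# not part of this file):
#
# - name: Wait for model pulls to complete
#   async_status:
#     jid: "{{ item.ansible_job_id }}"
#   loop: "{{ pull_result.results }}"
#   register: pull_job
#   until: pull_job.finished
#   retries: 60
#   delay: 10
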
- include_tasks: utils/run_once.yml
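
# utils/run_once.yml presumably sets a per-role fact (mirroring the
# run_once_sys_svc_docker guard above) so this task list only runs once
# per play, e.g. (sketch, fact name assumed):
#
# - set_fact:
#     run_once_svc_ai_ollama: true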